## This is just a sample configuration. Passing it directly to gdeploy will throw an error. ##
[hosts]
10.70.46.13
10.70.46.15
10.70.46.17
10.70.46.19
# NOTE: Patterns are supported for all multi-value options and sections.
# For example, hosts can be given as 10.70.46.1{3,5,7,9}, specifying the 4 hosts
# above. They can also be specified as ranges: 10.70.46.1{1..4} means the
# four IPs from 10.70.46.11 to 10.70.46.14.
#------------------------------
##-- Back-end setup --##
# Gluster-specific backend setup [We follow some recommended,
# performance-efficient calculations here]
#
# Back-end setup in one or more remote machines can be done using the
# section 'backend-setup'.
#
# Backend setup data can be given specific to each host or common to all
# of them.
#
# Host specific configuration:
#
# [backend-setup:10.70.46.13]
# devices=/dev/sdb,/dev/vdb,/dev/vda
# vgs=CUSTOM_vg{1..3}
# pools=CUSTOM_pool1,CUSTOM_pool2,CUSTOM_pool3
# lvs=CUSTOM_lv1,CUSTOM_lv2,CUSTOM_lv3
# mountpoints=/gluster/brick1,/gluster/brick2,/gluster/brick3
# brick_dirs=glusterbrick{1,2,3}
#
#
# Common configuration for all the hosts
#
# [backend-setup]
# devices=/dev/sdb
# vgs=CUSTOM_vg1
# pools=CUSTOM_pool1
# lvs=CUSTOM_lv1
# mountpoints=/gluster/brick1
# brick_dirs=glusterbrick1
#
# With the above configuration, the setup will be done on all the
# machines specified under the 'hosts' section with the above data.
#
# Both these can be mixed up and used.
# Example:
#
# [backend-setup:10.70.46.13]
# devices=/dev/sdb,/dev/vdb,/dev/vda
# vgs=CUSTOM_vg1,CUSTOM_vg2,CUSTOM_vg3
# pools=CUSTOM_pool1,CUSTOM_pool2,CUSTOM_pool3
# lvs=CUSTOM_lv1,CUSTOM_lv2,CUSTOM_lv3
# mountpoints=/gluster/brick1,/gluster/brick2,/gluster/brick3
# brick_dirs=glusterbrick{1,2,3}
#
# [backend-setup]
# devices=/dev/sdb
# vgs=CUSTOM_vg1
# pools=CUSTOM_pool1
# lvs=CUSTOM_lv1
# mountpoints=/gluster/brick1
# brick_dirs=glusterbrick1
#
# With this configuration, for the host 10.70.46.13, the first
# configuration will be used and for the rest of the machines specified
# under 'hosts' section the second configuration will be used.
#
#
# Section 'snapshot-reserve'
#
# While creating the thin pools, if the user intends not to use the
# entire volume group and wants to leave some space for snapshots, she can
# use this section. Under this section the user specifies how much
# space (in percentage) should be left unused in the Volume Group.
# Example
#
# [snapshot-reserve]
# 20%
#
# By default, the entire Volume Group will be used up for thin pooling.
#
# Section 'default'
#
#
# If the user wants to stop the setup at a particular step, say, at VG
# creation, it can be done as follows:
#
# [default]
# no
#
# [backend-setup]
# devices=/dev/sdb
# vgs=CUSTOM_vg1
#
# Here, it will create the PVs and the VGs and exit. Setting the 'default'
# section to no is important. Its default value is 'yes', in which case
# gdeploy takes default values for lvs, pools, mountpoints, etc. and
# continues with the rest of the setup.
#
#
#
# Non-GlusterFS back-end setup
#
# For this, to avoid the automatic computations, an additional
# section 'gluster' set to no must be added.
#
# [gluster]
# no
#
#
# To set up the back-end, a configuration similar to the following
# examples can be used:
#
# To setup back-end for hyperconvergence:
#
# [backend-setup]
# devices=sdb
# vgs=RHS_vg1
# lvs=lv_ctdb:1G,lv_engine:50G,lv_data:100%FREE
# mountpoints=/rhgs/ctdb,/rhgs/engine,/rhgs/data
#
# To add SSD for caching:
#
# [backend-setup]
# ssd=sdc
# vgs=RHS_vg1
# datalv=lv_data
# cachedatalv=lv_cachedata:1G
# cachemetalv=lv_cachemeta:230G
#
# NOTE: Specifying the name of the data LV is necessary while adding an
# SSD. Make sure the datalv is created already; otherwise, give it in
# the lvs option as well (see the sketch below).
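#
# For example, if lv_data has not been created yet, a minimal sketch
# (an assumption based on the note above; verify the combined form
# against your gdeploy version) would be:
#
# [backend-setup]
# devices=sdb
# ssd=sdc
# vgs=RHS_vg1
# lvs=lv_data:100%FREE
# mountpoints=/rhgs/data
# datalv=lv_data
# cachedatalv=lv_cachedata:1G
# cachemetalv=lv_cachemeta:230G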
#
#
#
#
# SELinux section
#
# If SELinux is enabled, provide the value 'yes' in this
# section, so gdeploy can set SELinux labels for the
# brick directories. By default it is 'no'.
#
#
# [selinux]
# yes
#
#
#
# Old/soon-to-be-deprecated configuration options
#
#
#
#
# The generic section [devices] is applicable to all the hosts listed in the
# [hosts] section. However, if host-specific sections [hostname] or
# [ip-address] are present, then the data in generic sections like [devices]
# is ignored. Host-specific data takes precedence.
# [devices]
# /dev/sda
# /dev/vdb
# /dev/vda
#------------------------------
# vg names for the above devices
# The number of vgs in the [vgs] should match the devices
# [vgs]
# CUSTOM_vg
#------------------------------
# pool names for the above volume groups
# The number of pools listed in the [pools] section should match the number of
# vgs.
# [pools]
# CUSTOM_pool
#------------------------------
# lv names for the above volume groups
# The number of logical volumes listed in the [lvs] section should match the
# number of vgs.
# [lvs]
# CUSTOM_lv
#-------------------------------
# Brick mountpoints for the logical volumes
# The number of mountpoints should match the number of logical volumes listed
# above.
# [mountpoints]
# /gluster/brick
#-------------------------------
# brick_dirs is the directory which is to be used as a brick while creating the
# volume. A mountpoint cannot be used as a brick directory, so brick_dirs
# specifies the directory to be created inside the LV mount point that will be
# used as a brick.
# This field can be left empty, in which case a directory will be created
# inside the mountpoint with a default name. If backend setup is not being
# done, this field will be ignored.
# IMP: If only gluster deployment is to be done and not back-end setup, just
# provide a value for the 'brick_dirs' option, skip all the above data other
# than 'hosts', and skip ahead to the 'volume' section.
# [brick_dirs]
# gluster_brick
# Host specific data are to be given as follows
[10.70.46.13]
devices=/dev/sdb,/dev/vdb,/dev/vda
vgs=CUSTOM_vg1,CUSTOM_vg2,CUSTOM_vg3
pools=CUSTOM_pool1,CUSTOM_pool2,CUSTOM_pool3
lvs=CUSTOM_lv1,CUSTOM_lv2,CUSTOM_lv3
mountpoints=/gluster/brick1,/gluster/brick2,/gluster/brick3
brick_dirs=glusterbrick{1,2,3}
# NOTE: The host-specific data of 10.70.46.15 and 10.70.46.17 are
# the same. The data can be given in either way; the two separate
# definitions are just to demonstrate the pattern usage format.
[10.70.46.15]
devices=/dev/vd{b,c}
vgs=CUSTOM_vg{1,2}
pools=CUSTOM_pool{1,2}
lvs=CUSTOM_lv{1,2}
mountpoints=/gluster/brick{1,2}
[10.70.46.17]
devices=/dev/vdc,/dev/vdb
vgs=CUSTOM_vg1,CUSTOM_vg2
pools=CUSTOM_pool1,CUSTOM_pool2
lvs=CUSTOM_lv1,CUSTOM_lv2
mountpoints=/gluster/brick1,/gluster/brick2
[10.70.46.19]
devices=/dev/vdb
vgs=CUSTOM_vg1
pools=CUSTOM_pool1
lvs=CUSTOM_lv1
mountpoints=/gluster/brick1
#------------------------------
##-- pv, vg, and lv sections --##
# The 'backend-setup' section works well for the recommended GlusterFS
# provisioning, but if a user needs more control over the creation of
# pv, vg, and lv (say, specifying the pool size), this section is not
# flexible enough. In such cases, PV, VG, and LV can be controlled
# individually, as in the sketch below.
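#
# A minimal sketch of the individual PV, VG, and LV sections (the option
# names below follow common gdeploy usage but are assumptions here;
# verify them against the gdeploy documentation for your version):
#
# [pv]
# action=create
# devices=vdb
#
# [vg1]
# action=create
# vgname=CUSTOM_vg1
# pvname=vdb
#
# [lv1]
# action=create
# vgname=CUSTOM_vg1
# poolname=CUSTOM_pool1
# lvtype=thinpool
# poolmetadatasize=16MB
# size=20GB
#
# [lv2]
# action=create
# vgname=CUSTOM_vg1
# lvname=CUSTOM_lv1
# poolname=CUSTOM_pool1
# lvtype=thinlv
# mount=/gluster/brick1
# virtualsize=20GB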
#------------------------------
##-- Disktype, Disk count and stripe size --##
# Section [disktype] specifies which disk configuration is used while
# setting up the back-end. Supports RAID 10, RAID 6 and JBOD configurations.
# If this field is left empty, it defaults to JBOD.
# This is common for all the hosts.
# [disktype]
# raid6
# Section [diskcount] specifies the number of data disks in the setup. This is a
# mandatory field if the disk configuration specified is either RAID 10 or
# RAID 6 and will be ignored if architecture is JBOD. This is host specific
# data.
# [diskcount]
# 10
# Section [stripesize] specifies the stripe_unit size in KB. This is a mandatory
# field if the disk configuration is RAID 6. If this is not specified for a
# RAID 10 configuration, it takes the default value 256K. This field is not
# necessary for a JBOD configuration. Do not add any suffixes like
# K, KB, M, etc. This is host specific data.
# [stripesize]
# 128
#----------------------------------
##-- Tune profile --##
# The section 'tune-profile' takes in the name of the performance tuning
# profile to be used. If not specified, gdeploy will not set any tuning
# profiles.
#
# Please look up the available tuned-adm profiles in your version of
# RHEL using the command 'tuned-adm list'
[tune-profile]
rhgs-sequential-io
#----------------------------------
##-- backend-reset --##
# This section allows backend reset in remote machines. Backend reset includes
# unmounting of LVs and deletion of LVs, VGs, and PVs.
#
# NOTE: Make sure you have your data backed up before using this
#
# To delete PVs, VGs, and LVs and to unmount them:
#
# [backend-reset]
# pvs=/dev/sdb,/dev/vdb
# unmount=yes
#
# This will automatically figure out the VGs and LVs associated and delete them
# in all the hosts specified under 'hosts' section.
# Giving the option unmount=yes is necessary. This is to make sure the user knows
# what she/he is up to. If it is not specified or given as 'no', gdeploy will try
# removing the LV directly without unmounting, which will fail if it is mounted.
#
# To unmount bricks without going for other resets, one can use:
#
# [backend-reset]
# mountpoints=/dev/GLUSTER_vg1/GLUSTER_lv1,/dev/GLUSTER_vg2/GLUSTER_lv2
# unmount=yes
#
# To remove LVs only:
#
# With unmount:
#
# [backend-reset]
# lvs=GLUSTER_lv{1,2}
# unmount=yes
#
#
# Without unmount:
#
# [backend-reset]
# lvs=GLUSTER_lv1,GLUSTER_lv2
#
#
# To remove VGs and associated LVs only:
#
# With unmount:
#
# [backend-reset]
# vgs=GLUSTER_vg1,GLUSTER_vg2
# unmount=yes
#
#
# Without unmount:
#
# [backend-reset]
# vgs=GLUSTER_vg{1,2}
#
#
# To remove backend in n different machines with different configurations:
#
# [backend-reset:10.70.46.15]
# pvs=/dev/sdb,/dev/vdb
# unmount=yes
#
#
# [backend-reset:10.70.46.13]
# pvs=/dev/sdb
# unmount=yes
#
# Here the 'hosts' section is not necessary.
#----------------------------------
##-- peer --##
# The section peer specifies the configuration for Trusted Storage
# Pool (TSP) management.
# This section makes all the hosts specified in the 'hosts' section
# either probe each other, forming the TSP, or detach all of them from the TSP.
# The only option in this section is the option named 'action', whose value
# can be one of [probe, detach, ignore].
# If 'ignore' is provided, peer probing is skipped entirely, even for volume
# creation and add-brick, where it usually runs by default.
# To do a peer probe
[peer]
action=probe
# To do a peer detach
# [peer]
# action=detach
#
#
# NOTE: In previous releases, we used the option 'manage' instead of
# 'action'. That option is deprecated and will be removed in a future
# release.
#-----------------------------------
##-- Volume --##
# The section volume specifies the configuration options for the volume.
# The 'action' option specifies what action is to be performed on the volume.
# The choices are: [create, delete, start, stop, add-brick, remove-brick,
# rebalance, set].
# If delete is provided, all options other than 'volname' will be ignored.
# If 'add-brick' or 'remove-brick' is chosen, the extra option 'bricks', with a
# comma separated list of brick names (in the format <hostname>:<brick path>),
# is to be provided.
# In case of remove-brick and rebalance, the 'state' option should also
# be provided. Choices for 'state' are:
# For remove-brick: [start, stop, commit, force]
# For rebalance: [start, stop, fix-layout]
# The 'volname' option specifies the volume name. Default is glustervol.
# If the user wishes to do just a volume operation, she can omit the
# 'hosts' section if the volname is provided in the format
# <hostname>:<volname>, where hostname is the hostname or IP of one of
# the nodes in the cluster
# IMP: Only single volume creation/deletion/configuration is supported
# as of now.
# 'transport' option specifies the transport type. Default is tcp. Options are
# tcp or rdma or tcp,rdma
# 'replica' option will specify if the volume should be of type replica or not.
# options are yes and no. Default is no.
# If 'replica' is given as yes, 'replica_count' should be given.
# Option 'arbiter_count' is optional.
# 'disperse' option will specify if the volume should be of type disperse.
# options are yes and no. Default is no.
# 'disperse_count' is optional even if 'disperse' is yes. If not specified,
# the number of bricks specified in the command line is taken as the
# disperse_count value.
# If 'redundancy_count' is not specified and 'disperse' is yes, its
# default value is computed so that it generates an optimal configuration.
# The option 'force' can be used in case the brick_dirs specified are
# themselves mountpoints and must be used anyway.
# For creating a volume of type disperse:
[volume]
action=create
volname=glustervol
transport=tcp,rdma
disperse=yes
disperse_count=0
redundancy_count=2
force=yes
# For creating a volume of type replicate:
# [volume]
# action=create
# volname=glustervol
# transport=tcp,rdma
# replica=yes
# replica_count=2
# arbiter_count=1
# force=yes
# To start a volume
# [volume]
# action=start
# volname=10.70.46.13:glustervol
# To stop a volume
# [volume]
# action=stop
# volname=10.70.46.13:glustervol
# To delete a volume
# [volume]
# action=delete
# volname=10.70.46.13:glustervol
# Add brick operation can be done by using:
# [volume]
# action=add-brick
# volname=glustervol
# bricks=10.70.46.13:/mnt/new_brick{1,8}
# Remove brick operation can be done by using:
# [volume]
# action=remove-brick
# volname=10.70.46.13:glustervol
# bricks=10.70.46.17:/mnt/brick{1,8}
# state=commit
# Rebalance operation can be done by using:
# [volume]
# action=rebalance
# volname=10.70.46.13:glustervol
# state=start
# Setting option to the volume can be done by using:
# [volume]
# action=set
# volname=10.70.46.13:glustervol
# key=cluster.nufa
# value=on
#
# NOTE: If multiple volumes are to be created, or multiple actions are
# to be done on a volume using a single configuration file, suffix the
# section name with a number.
#
# For example:
#
# [volume-1]
# action=create
# volname=kyloren
# brick_dirs=/mnt/snook,/mnt/vader
#
# [volume-2]
# action=create
# volname=rey
# brick_dirs=/mnt/luke,/mnt/kenobi
#
# If there are multiple volumes to be created, provide brick_dirs under
# each section. Otherwise, it will error out.
#
#
#
#
#
# ## SMB volume share
# To enable SMB volume share, along with volume creation
# 'smb' option should be set to yes in 'volume' section.
# To setup SMB share separately, use the action 'smb-setup'.
# The other options necessary are:
#
# smb_username: The Samba username. Default value is 'smbuser'
#
# smb_password: Password for the Samba user. Default value is 'password'
#
# smb_mountpoint: This mountpoint is used to mount the volume to be shared
# on one of the Samba servers itself. For more info refer to the Samba
# documentation. Default value is /mnt/smbserver
#
# path: It represents the path that is relative to the root of the
# gluster volume that is being shared. Default value is '/'
#
# glusterfs:logfile : Path to the log file that will be used by the
# gluster modules that are loaded by the vfs plugin. Default value is
# /var/log/samba/<volname>.log
#
# glusterfs:loglevel : This option is equivalent to the
# client-log-level option of gluster. 7 is the default value and
# corresponds to the INFO level.
#
# glusterfs:volfile_server : The gluster server to be contacted to fetch
# the volfile for the volume. Default is 'localhost'.
#
# NOTE: Samba needs a CTDB setup as well to work. Please refer to the CTDB
# section below to set this up.
#
# Example:
# [volume]
# action=smb-setup
# volname=smbvolume
# smb_username=bilbo
# smb_password=shireeleventyone
# smb_mountpoint=/mnt/precious
#
# To disable automatic SMB sharing per-volume
#
# [volume]
# action=smb-disable
# volname=smbvolume
#-----------------------------------
##-- Clients --##
# IMP: If only back-end setup is to be done but not GlusterFS
# deployment, omit the following section.
# Specifies the client hosts and client_mount_points to mount the gluster
# volume created.
# 'action' option is to be specified for the framework to understand
# what action is to be done.
# The choices are: ['mount', 'unmount']
# 'hosts' field is mandatory.
# The option 'fstype' specifies how gluster volume is to be mounted.
# Choices are: [glusterfs, nfs] (Default is glusterfs)
# If the fstype is given as nfs, the version by default will be taken as
# 3. This can be changed by providing the option 'nfs-version'.
# eg: nfs-version=4
# Each client can have different types of volume mount. Just specify it comma
# separated.
# The option 'client_mount_points' specifies where the volume is to be mounted
# on each host. Each host can have a separate mountpoint, in which case they
# are given comma separated, or else every host can use a mountpoint of the
# same name. If 'client_mount_points' is not specified, the default
# /mnt/gluster is used for all the hosts.
# Mounting a volume using native FUSE on two client hosts
[clients]
action=mount
volname=glustervol
hosts=10.70.46.1{3,5}
fstype=glusterfs
client_mount_points=/mnt/gluster{1,3}
# Mounting a volume using nfs on a single client host
# [clients]
# action=mount
# volname=glustervol
# hosts=10.70.46.13
# fstype=nfs
# nfs-version=4
# client_mount_points=/mnt/gluster
#
# NOTE: If multiple volumes are to be mounted or unmounted
# using a single configuration file, use separate 'clients' sections with the
# section name suffixed with appropriate numbers.
#
# For example:
#
# [clients-1]
# action=mount
# volname=kyloren
# hosts=host1,host2
# client_mount_points=/mnt/snook,/mnt/vader
#
# [clients-2]
# action=mount
# volname=rey
# hosts=host1,host1
# client_mount_points=/mnt/luke,/mnt/kenobi
#
#-----------------------------------
##-- Snapshot --##
# 'snapshot' section can be used if the user wants to create or delete
# a snapshot.
# The option 'action' is to be used to specify which snapshot action is to be
# executed.
# The choices are [create, delete, activate, deactivate, clone, config, restore]
# For snapshot creation:
# The name of the snapshot can be specified as the value of the snapname option.
# If the action is create, the name of the volume is to be specified as the value
# of the option 'volname'.
# [snapshot]
# action=create
# volname=10.70.46.13:glustervol
# snapname=glustersnap
# For snapshot activation:
# [snapshot]
# action=activate
# snapname=glustersnap
# For snapshot deactivation:
# [snapshot]
# action=deactivate
# snapname=glustersnap
# For snapshot deletion:
# [snapshot]
# action=delete
# snapname=glustersnap
## volname=glustervol [alternative]
# For snapshot cloning:
# [snapshot]
# action=clone
# snapname=glustersnap
# clonename=an_old_snap
# For snapshot configure:
# [snapshot]
# action=config
# snap_max_soft_limit=92
# snap_max_hard_limit=95
# auto_delete=disable
# activate_on_create=enable
# For snapshot restore:
# [snapshot]
# action=restore
# snapname=glustersnap
#-----------------------------------
##-- Quota --##
# 'quota' section can be used to set quota limits on mounted
# volume directories and sub directories. The actions supported are
# [enable, disable, remove, remove-objects, default-soft-limit,
# limit-usage, limit-objects, alert-time, soft-timeout, hard-timeout].
#
# For enabling quota:
#
# [quota]
# action=enable
# volname=10.70.46.15:glustervol
#
#
# For disabling quota:
#
# [quota]
# action=disable
# volname=10.70.46.15:glustervol
#
#
# For removing quota limits on a path
#
# [quota]
# action=remove
# volname=glustervol
# path=/,/dir1
#
#
# For removing quota objects on a path
#
# [quota]
# action=remove-objects
# volname=glustervol
# path=/,/dir1
#
# For setting default soft limits
#
# [quota]
# action=default-soft-limit
# volname=glustervol
# percent=85
#
#
# For limiting usage for volume
#
# [quota]
# action=limit-usage
# volname=glustervol
# path=/,/dir1
# size=5MB,6MB
#
#
# For limiting object count for volume
#
# [quota]
# action=limit-objects
# volname=glustervol
# path=/,/dir1
# number=10,20
#
#
# For setting alert-time
#
# [quota]
# action=alert-time
# volname=glustervol
# time=1W
#
#
# For setting soft-timeout
#
# [quota]
# action=soft-timeout
# volname=glustervol
# client_hosts=10.70.46.23,10.70.46.24
# time=100
#
#
#
# For setting hard-timeout
#
# [quota]
# action=hard-timeout
# volname=glustervol
# client_hosts=10.70.46.23,10.70.46.24
# time=100
#-----------------------------------
##-- Geo-replication --##
# The section 'geo-replication' can be used to set up geo-replication.
# The option 'action' specifies which geo-replication operation is
# to be performed. The choices available are: [create, start, stop, delete,
# pause, resume, config, failover]
#
# NOTE: As of now, a single slave volume is supported
#
#
# To create a geo-rep session
#
# [geo-replication]
# action=create
# mastervol=10.70.43.219:master
# slavevol=10.70.43.25:slave
# slavenodes=10.70.43.25,10.70.43.86
# force=yes
#
# This will automatically enable passwordless SSH between master and slave and
# create the geo-rep session.
#
#
# To create a secure geo-rep session
#
# [geo-replication]
# action=create
# georepuser=testgeorep
# mastervol=10.70.43.219:master
# slavevol=10.70.43.25:slave
# slavenodes=10.70.43.25,10.70.43.86
# force=no
#
#
#
# To start a geo-rep session
#
# [geo-replication]
# action=start
# mastervol=10.70.46.13:mastervolname
# slavevol=10.70.46.15:slavevolname
# force=yes
#
#
#
# To pause a geo-rep session
#
# [geo-replication]
# action=pause
# mastervol=10.70.46.13:mastervolname
# slavevol=10.70.46.15:slavevolname
# force=yes
#
#
#
# To resume a geo-rep session
#
# [geo-replication]
# action=resume
# mastervol=10.70.46.13:mastervolname
# slavevol=10.70.46.15:slavevolname
# force=yes
#
#
#
# To stop a geo-rep session
#
# [geo-replication]
# action=stop
# mastervol=10.70.46.13:mastervolname
# slavevol=10.70.46.15:slavevolname
# force=yes
#
#
#
# To delete a geo-rep session
#
# [geo-replication]
# action=delete
# mastervol=10.70.46.13:mastervolname
# slavevol=10.70.46.15:slavevolname
# force=yes
#
#
#
# To configure a geo-rep session
#
# Available configuration options are:
#
# gluster-log-file - The path to the geo-replication glusterfs log file.
# gluster-log-level - The log level for glusterfs processes.
# log-file - The path to the geo-replication log file.
# log-level - The log level for geo-replication.
# ssh-command - The SSH command to connect to the remote machine
# (the default is SSH).
# rsync-command - The rsync command to use for synchronizing the files
# (the default is rsync).
# use-tarssh - The use-tarssh option allows tar over Secure Shell
# protocol. Use this option to handle workloads of
# files that have not undergone edits. Value of
# this option can be [true, false]
# volume-id - The option to delete the existing master UID for the
# intermediate/slave node. Value to this option should
# be a UID
# timeout - The timeout period in seconds.
# sync-jobs - The number of simultaneous files/directories that
# can be synchronized.
# ignore-deletes - If this option is set to 1, a file deleted on the
# master will not trigger a delete operation on the slave.
# checkpoint - Sets a checkpoint with the given value. If the
# option is set as now, then the current time will
# be used as the label.
#
# Use only one configuration option at a time.
#
# If the value of any of the above options (other than volume-id) is set to
# 'reset', the setting of that config option will be deleted.
#
#
# Examples:
#
#
# To reset log-level to the default value:
#
# [geo-replication]
# action=config
# mastervol=10.70.46.13:mastervolname
# slavevol=10.70.46.15:slavevolname
# log-level=reset
#
#
#
#
# To set checkpoint as the current time
#
# [geo-replication]
# action=config
# mastervol=10.70.46.13:mastervolname
# slavevol=10.70.46.15:slavevolname
# checkpoint=now
#
#
#
#
# Disaster Recovery
#
#
# 1. Failover
# [geo-replication]
# action=failover
# mastervol=10.70.46.13:mastervolname
# slavevol=10.70.46.15:slavevolname
#
# Here mastervol and slavevol are the original master and slave volumes
# respectively.
#
# This configuration will first promote the slave to be the master and
# then set the necessary configurations.
#