Skip to content

Commit c8e8bd1

Browse files
Merge pull request #9027 from romayalon/romy-5.18-backports
NC | 5.18.4 backports
2 parents 5448562 + a793873 commit c8e8bd1

37 files changed

+5787
-1550
lines changed

config.js

Lines changed: 28 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -951,6 +951,33 @@ config.NSFS_GLACIER_FORCE_EXPIRE_ON_GET = false;
951951
// interval
952952
config.NSFS_GLACIER_MIGRATE_LOG_THRESHOLD = 50 * 1024;
953953

954+
/**
955+
* NSFS_GLACIER_RESERVED_BUCKET_TAGS defines an object of bucket tags which will be reserved
956+
* by the system and PUT operations for them via S3 API would be limited - as in they would be
957+
* mutable only if specified and only under certain conditions.
958+
*
959+
* @type {Record<string, {
960+
* schema: Record<any, any> & { $id: string },
961+
* immutable: true | false | 'if-data',
962+
* default: any,
963+
* event: boolean
964+
* }>}
965+
*
966+
* @example
967+
* {
968+
'deep-archive-copies': {
969+
schema: {
970+
$id: 'deep-archive-copies-schema-v0',
971+
enum: ['1', '2']
972+
}, // JSON Schema
973+
immutable: 'if-data',
974+
default: '1',
975+
event: true
976+
}
977+
* }
978+
*/
979+
config.NSFS_GLACIER_RESERVED_BUCKET_TAGS = {};
980+
954981
// anonymous account name
955982
config.ANONYMOUS_ACCOUNT_NAME = 'anonymous';
956983

@@ -1030,7 +1057,7 @@ config.NC_LIFECYCLE_TZ = 'LOCAL';
10301057
config.NC_LIFECYCLE_LIST_BATCH_SIZE = 1000;
10311058
config.NC_LIFECYCLE_BUCKET_BATCH_SIZE = 10000;
10321059

1033-
config.NC_LIFECYCLE_GPFS_ILM_ENABLED = false;
1060+
config.NC_LIFECYCLE_GPFS_ILM_ENABLED = true;
10341061
////////// GPFS //////////
10351062
config.GPFS_DOWN_DELAY = 1000;
10361063

docs/NooBaaNonContainerized/CI&Tests.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -114,6 +114,8 @@ The following is a list of `NC jest tests` files -
114114
17. `test_nc_upgrade_manager.test.js` - Tests of the NC upgrade manager.
115115
18. `test_cli_upgrade.test.js` - Tests of the upgrade CLI commands.
116116
19. `test_nc_online_upgrade_cli_integrations.test.js` - Tests CLI commands during mocked config directory upgrade.
117+
20. `test_nc_lifecycle_posix_integration.test.js` - Tests NC lifecycle POSIX related configuration.
118+
(Note: in this layer we do not test the validation related to lifecycle configuration; that is covered in `test_lifecycle.js` - which currently runs only in containerized deployment, but it exercises shared code)
117119

118120
#### nc_index.js File
119121
* The `nc_index.js` is a file that runs several NC and NSFS mocha related tests.

docs/NooBaaNonContainerized/Events.md

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,10 @@ The following list includes events that indicate on a normal / successful operat
3232
- Description: NooBaa account was deleted successfully using NooBaa CLI.
3333

3434
#### 4. `noobaa_bucket_created`
35-
- Arguments: `bucket_name`
35+
- Arguments:
36+
- `bucket_name`
37+
- `account_name`
38+
- `<tag_value>` (if `event` is `true` for the reserved tag)
3639
- Description: NooBaa bucket was created successfully using NooBaa CLI or S3.
3740

3841
#### 5. `noobaa_bucket_deleted`
@@ -43,6 +46,11 @@ The following list includes events that indicate on a normal / successful operat
4346
- Arguments: `whitelist_ips`
4447
- Description: Whitelist Server IPs updated successfully using NooBaa CLI.
4548

49+
#### 7. `noobaa_bucket_reserved_tag_modified`
50+
- Arguments:
51+
- `bucket_name`
52+
- `<tag_value>` (if `event` is `true` for the reserved tag)
53+
- Description: NooBaa bucket reserved tag was modified successfully using NooBaa CLI or S3.
4654

4755
### Error Indicating Events
4856

@@ -219,4 +227,4 @@ The following list includes events that indicate on some sort of malfunction or
219227
- Reasons:
220228
- Free space in notification log dir FS is below threshold.
221229
- Resolutions:
222-
- Free up space is FS.
230+
- Free up space in FS.

docs/NooBaaNonContainerized/NooBaaCLI.md

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -376,6 +376,13 @@ noobaa-cli bucket update --name <bucket_name> [--new_name] [--owner]
376376
- Type: Boolean
377377
- Description: Set the bucket to force md5 ETag calculation.
378378

379+
- `tag`
380+
- Type: String
381+
- Description: Set the bucket tags, type is a string of valid JSON. Behaviour is similar to `put-bucket-tagging` S3 API.
382+
383+
- `merge_tag`
384+
- Type: String
385+
- Description: Merge the bucket tags with previous bucket tags, type is a string of valid JSON.
379386

380387
### Bucket Status
381388

docs/dev_guide/ceph_s3_tests/ceph_s3_tests_pending_list_status.md

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -54,4 +54,5 @@ change in our repo) - stopped passing between the update of commit hash 6861c3d8
5454
| test_get_bucket_encryption_s3 | Faulty Test | [613](https://github.com/ceph/s3-tests/issues/613) |
5555
| test_get_bucket_encryption_kms | Faulty Test | [613](https://github.com/ceph/s3-tests/issues/613) |
5656
| test_delete_bucket_encryption_s3 | Faulty Test | [613](https://github.com/ceph/s3-tests/issues/613) |
57-
| test_delete_bucket_encryption_kms | Faulty Test | [613](https://github.com/ceph/s3-tests/issues/613) |
57+
| test_delete_bucket_encryption_kms | Faulty Test | [613](https://github.com/ceph/s3-tests/issues/613) |
58+
| test_lifecycle_expiration_tags1 | Faulty Test | [638](https://github.com/ceph/s3-tests/issues/638) | There can be more such tests having the same issue (`Filter` is not aligned with aws structure in bucket lifecycle configuration) |

src/cmd/manage_nsfs.js

Lines changed: 87 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ const { account_id_cache } = require('../sdk/accountspace_fs');
2828
const ManageCLIError = require('../manage_nsfs/manage_nsfs_cli_errors').ManageCLIError;
2929
const ManageCLIResponse = require('../manage_nsfs/manage_nsfs_cli_responses').ManageCLIResponse;
3030
const manage_nsfs_glacier = require('../manage_nsfs/manage_nsfs_glacier');
31-
const noobaa_cli_lifecycle = require('../manage_nsfs/nc_lifecycle');
31+
const { NCLifecycle } = require('../manage_nsfs/nc_lifecycle');
3232
const manage_nsfs_logging = require('../manage_nsfs/manage_nsfs_logging');
3333
const noobaa_cli_diagnose = require('../manage_nsfs/diagnose');
3434
const noobaa_cli_upgrade = require('../manage_nsfs/upgrade');
@@ -40,6 +40,8 @@ const { throw_cli_error, get_bucket_owner_account_by_name,
4040
const manage_nsfs_validations = require('../manage_nsfs/manage_nsfs_validations');
4141
const nc_mkm = require('../manage_nsfs/nc_master_key_manager').get_instance();
4242
const notifications_util = require('../util/notifications_util');
43+
const BucketSpaceFS = require('../sdk/bucketspace_fs');
44+
const NoobaaEvent = require('../manage_nsfs/manage_nsfs_events_utils').NoobaaEvent;
4345

4446
let config_fs;
4547

@@ -123,7 +125,6 @@ async function fetch_bucket_data(action, user_input) {
123125
force_md5_etag: user_input.force_md5_etag === undefined || user_input.force_md5_etag === '' ? user_input.force_md5_etag : get_boolean_or_string_value(user_input.force_md5_etag),
124126
notifications: user_input.notifications
125127
};
126-
127128
if (user_input.bucket_policy !== undefined) {
128129
if (typeof user_input.bucket_policy === 'string') {
129130
// bucket_policy deletion specified with empty string ''
@@ -142,6 +143,27 @@ async function fetch_bucket_data(action, user_input) {
142143
data = await merge_new_and_existing_config_data(data);
143144
}
144145

146+
if ((action === ACTIONS.UPDATE && user_input.tag) || (action === ACTIONS.ADD)) {
147+
const tags = JSON.parse(user_input.tag || '[]');
148+
data.tag = BucketSpaceFS._merge_reserved_tags(
149+
data.tag || BucketSpaceFS._default_bucket_tags(),
150+
tags,
151+
action === ACTIONS.ADD ? true : await _is_bucket_empty(data),
152+
);
153+
}
154+
155+
if ((action === ACTIONS.UPDATE && user_input.merge_tag) || (action === ACTIONS.ADD)) {
156+
const merge_tags = JSON.parse(user_input.merge_tag || '[]');
157+
data.tag = _.merge(
158+
data.tag,
159+
BucketSpaceFS._merge_reserved_tags(
160+
data.tag || BucketSpaceFS._default_bucket_tags(),
161+
merge_tags,
162+
action === ACTIONS.ADD ? true : await _is_bucket_empty(data),
163+
)
164+
);
165+
}
166+
145167
//if we're updating the owner, needs to override owner in file with the owner from user input.
146168
//if we're adding a bucket, need to set its owner id field
147169
if ((action === ACTIONS.UPDATE && user_input.owner) || (action === ACTIONS.ADD)) {
@@ -189,7 +211,14 @@ async function add_bucket(data) {
189211
data._id = mongo_utils.mongoObjectId();
190212
const parsed_bucket_data = await config_fs.create_bucket_config_file(data);
191213
await set_bucker_owner(parsed_bucket_data);
192-
return { code: ManageCLIResponse.BucketCreated, detail: parsed_bucket_data, event_arg: { bucket: data.name }};
214+
215+
const [reserved_tag_event_args] = BucketSpaceFS._generate_reserved_tag_event_args({}, data.tag);
216+
217+
return {
218+
code: ManageCLIResponse.BucketCreated,
219+
detail: parsed_bucket_data,
220+
event_arg: { ...(reserved_tag_event_args || {}), bucket: data.name, account: parsed_bucket_data.bucket_owner },
221+
};
193222
}
194223

195224
/**
@@ -245,25 +274,14 @@ async function update_bucket(data) {
245274
*/
246275
async function delete_bucket(data, force) {
247276
try {
248-
const temp_dir_name = native_fs_utils.get_bucket_tmpdir_name(data._id);
277+
const bucket_empty = await _is_bucket_empty(data);
278+
if (!bucket_empty && !force) {
279+
throw_cli_error(ManageCLIError.BucketDeleteForbiddenHasObjects, data.name);
280+
}
281+
249282
const bucket_temp_dir_path = native_fs_utils.get_bucket_tmpdir_full_path(data.path, data._id);
250-
// fs_contexts for bucket temp dir (storage path)
251283
const fs_context_fs_backend = native_fs_utils.get_process_fs_context(data.fs_backend);
252-
let entries;
253-
try {
254-
entries = await nb_native().fs.readdir(fs_context_fs_backend, data.path);
255-
} catch (err) {
256-
dbg.warn(`delete_bucket: bucket name ${data.name},` +
257-
`got an error on readdir with path: ${data.path}`, err);
258-
// if the bucket's path was deleted first (encounter ENOENT error) - continue deletion
259-
if (err.code !== 'ENOENT') throw err;
260-
}
261-
if (entries) {
262-
const object_entries = entries.filter(element => !element.name.endsWith(temp_dir_name));
263-
if (object_entries.length > 0 && !force) {
264-
throw_cli_error(ManageCLIError.BucketDeleteForbiddenHasObjects, data.name);
265-
}
266-
}
284+
267285
await native_fs_utils.folder_delete(bucket_temp_dir_path, fs_context_fs_backend, true);
268286
await config_fs.delete_bucket_config_file(data.name);
269287
return { code: ManageCLIResponse.BucketDeleted, detail: { name: data.name }, event_arg: { bucket: data.name } };
@@ -273,6 +291,33 @@ async function delete_bucket(data, force) {
273291
}
274292
}
275293

294+
/**
295+
* _is_bucket_empty returns true if the given bucket is empty
296+
*
297+
* @param {*} data
298+
* @returns {Promise<boolean>}
299+
*/
300+
async function _is_bucket_empty(data) {
301+
const temp_dir_name = native_fs_utils.get_bucket_tmpdir_name(data._id);
302+
// fs_contexts for bucket temp dir (storage path)
303+
const fs_context_fs_backend = native_fs_utils.get_process_fs_context(data.fs_backend);
304+
let entries;
305+
try {
306+
entries = await nb_native().fs.readdir(fs_context_fs_backend, data.path);
307+
} catch (err) {
308+
dbg.warn(`_is_bucket_empty: bucket name ${data.name},` +
309+
`got an error on readdir with path: ${data.path}`, err);
310+
// if the bucket's path was deleted first (encounter ENOENT error) - continue deletion
311+
if (err.code !== 'ENOENT') throw err;
312+
}
313+
if (entries) {
314+
const object_entries = entries.filter(element => !element.name.endsWith(temp_dir_name));
315+
return object_entries.length === 0;
316+
}
317+
318+
return true;
319+
}
320+
276321
/**
277322
* bucket_management does the following -
278323
* 1. fetches the bucket data if this is not a list operation
@@ -294,7 +339,24 @@ async function bucket_management(action, user_input) {
294339
} else if (action === ACTIONS.STATUS) {
295340
response = await get_bucket_status(data);
296341
} else if (action === ACTIONS.UPDATE) {
297-
response = await update_bucket(data);
342+
const bucket_path = config_fs.get_bucket_path_by_name(user_input.name);
343+
const bucket_lock_file = `${bucket_path}.lock`;
344+
await native_fs_utils.lock_and_run(config_fs.fs_context, bucket_lock_file, async () => {
345+
const prev_bucket_info = await fetch_bucket_data(action, _.omit(user_input, ['tag', 'merge_tag']));
346+
const bucket_info = await fetch_bucket_data(action, user_input);
347+
348+
const tagging_object = BucketSpaceFS._objectify_tagging_arr(prev_bucket_info.tag);
349+
const [
350+
reserved_tag_event_args,
351+
reserved_tag_modified,
352+
] = BucketSpaceFS._generate_reserved_tag_event_args(tagging_object, bucket_info.tag);
353+
354+
response = await update_bucket(bucket_info);
355+
if (reserved_tag_modified) {
356+
new NoobaaEvent(NoobaaEvent.BUCKET_RESERVED_TAG_MODIFIED)
357+
.create_event(undefined, { ...reserved_tag_event_args, bucket_name: user_input.name });
358+
}
359+
});
298360
} else if (action === ACTIONS.DELETE) {
299361
const force = get_boolean_or_string_value(user_input.force);
300362
response = await delete_bucket(data, force);
@@ -814,9 +876,11 @@ async function lifecycle_management(args) {
814876
const disable_service_validation = get_boolean_or_string_value(args.disable_service_validation);
815877
const disable_runtime_validation = get_boolean_or_string_value(args.disable_runtime_validation);
816878
const short_status = get_boolean_or_string_value(args.short_status);
879+
const should_continue_last_run = get_boolean_or_string_value(args.continue);
817880
try {
818-
const options = { disable_service_validation, disable_runtime_validation, short_status };
819-
const { should_run, lifecycle_run_status } = await noobaa_cli_lifecycle.run_lifecycle_under_lock(config_fs, options);
881+
const options = { disable_service_validation, disable_runtime_validation, short_status, should_continue_last_run };
882+
const nc_lifecycle = new NCLifecycle(config_fs, options);
883+
const { should_run, lifecycle_run_status } = await nc_lifecycle.run_lifecycle_under_lock();
820884
if (should_run) {
821885
write_stdout_response(ManageCLIResponse.LifecycleSuccessful, lifecycle_run_status);
822886
} else {

src/endpoint/s3/ops/s3_put_bucket_lifecycle.js

Lines changed: 61 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,63 @@ const S3Error = require('../s3_errors').S3Error;
99

1010
const true_regex = /true/i;
1111

12+
/**
13+
* validate_lifecycle_rule validates lifecycle rule structure and logical constraints
14+
*
15+
* validations:
16+
* - ID must be ≤ MAX_RULE_ID_LENGTH
17+
* - Status must be "Enabled" or "Disabled"
18+
* - multiple Filters must be under "And"
19+
* - only one Expiration field is allowed
20+
* - Expiration.Date must be midnight UTC format
21+
* - AbortIncompleteMultipartUpload cannot be combined with Tags or ObjectSize filters
22+
*
23+
* @param {Object} rule - lifecycle rule to validate
24+
* @throws {S3Error} - on validation failure
25+
*/
26+
function validate_lifecycle_rule(rule) {
27+
28+
if (rule.ID?.length === 1 && rule.ID[0].length > s3_const.MAX_RULE_ID_LENGTH) {
29+
dbg.error('Rule should not have ID length exceed allowed limit of ', s3_const.MAX_RULE_ID_LENGTH, ' characters', rule);
30+
throw new S3Error({ ...S3Error.InvalidArgument, message: `ID length should not exceed allowed limit of ${s3_const.MAX_RULE_ID_LENGTH}` });
31+
}
32+
33+
if (!rule.Status || rule.Status.length !== 1 ||
34+
(rule.Status[0] !== s3_const.LIFECYCLE_STATUS.STAT_ENABLED && rule.Status[0] !== s3_const.LIFECYCLE_STATUS.STAT_DISABLED)) {
35+
dbg.error(`Rule should have a status value of "${s3_const.LIFECYCLE_STATUS.STAT_ENABLED}" or "${s3_const.LIFECYCLE_STATUS.STAT_DISABLED}".`, rule);
36+
throw new S3Error(S3Error.MalformedXML);
37+
}
38+
39+
if (rule.Filter?.[0] && Object.keys(rule.Filter[0]).length > 1 && !rule.Filter[0]?.And) {
40+
dbg.error('Rule should combine multiple filters using "And"', rule);
41+
throw new S3Error(S3Error.MalformedXML);
42+
}
43+
44+
if (rule.Expiration?.[0] && Object.keys(rule.Expiration[0]).length > 1) {
45+
dbg.error('Rule should specify only one expiration field: Days, Date, or ExpiredObjectDeleteMarker', rule);
46+
throw new S3Error(S3Error.MalformedXML);
47+
}
48+
49+
if (rule.Expiration?.length === 1 && rule.Expiration[0]?.Date) {
50+
const date = new Date(rule.Expiration[0].Date[0]);
51+
if (isNaN(date.getTime()) || date.getTime() !== Date.UTC(date.getUTCFullYear(), date.getUTCMonth(), date.getUTCDate())) {
52+
dbg.error('Date value must conform to the ISO 8601 format and at midnight UTC (00:00:00). Provided:', rule.Expiration[0].Date[0]);
53+
throw new S3Error({ ...S3Error.InvalidArgument, message: "'Date' must be at midnight GMT" });
54+
}
55+
}
56+
57+
if (rule.AbortIncompleteMultipartUpload?.length === 1 && rule.Filter?.length === 1) {
58+
if (rule.Filter[0]?.Tag) {
59+
dbg.error('Rule should not include AbortIncompleteMultipartUpload with Tags', rule);
60+
throw new S3Error({ ...S3Error.InvalidArgument, message: 'AbortIncompleteMultipartUpload cannot be specified with Tags' });
61+
}
62+
if (rule.Filter[0]?.ObjectSizeGreaterThan || rule.Filter[0]?.ObjectSizeLessThan) {
63+
dbg.error('Rule should not include AbortIncompleteMultipartUpload with Object Size', rule);
64+
throw new S3Error({ ...S3Error.InvalidArgument, message: 'AbortIncompleteMultipartUpload cannot be specified with Object Size' });
65+
}
66+
}
67+
}
68+
1269
// parse lifecycle rule filter
1370
function parse_filter(filter) {
1471
const current_rule_filter = {};
@@ -89,13 +146,11 @@ async function put_bucket_lifecycle(req) {
89146
filter: {},
90147
};
91148

149+
// validate rule
150+
validate_lifecycle_rule(rule);
151+
92152
if (rule.ID?.length === 1) {
93-
if (rule.ID[0].length > s3_const.MAX_RULE_ID_LENGTH) {
94-
dbg.error('Rule should not have ID length exceed allowed limit of ', s3_const.MAX_RULE_ID_LENGTH, ' characters', rule);
95-
throw new S3Error({ ...S3Error.InvalidArgument, message: `ID length should not exceed allowed limit of ${s3_const.MAX_RULE_ID_LENGTH}` });
96-
} else {
97-
current_rule.id = rule.ID[0];
98-
}
153+
current_rule.id = rule.ID[0];
99154
} else {
100155
// Generate a random ID if missing
101156
current_rule.id = crypto.randomUUID();
@@ -108,11 +163,6 @@ async function put_bucket_lifecycle(req) {
108163
}
109164
id_set.add(current_rule.id);
110165

111-
if (!rule.Status || rule.Status.length !== 1 ||
112-
(rule.Status[0] !== s3_const.LIFECYCLE_STATUS.STAT_ENABLED && rule.Status[0] !== s3_const.LIFECYCLE_STATUS.STAT_DISABLED)) {
113-
dbg.error(`Rule should have a status value of "${s3_const.LIFECYCLE_STATUS.STAT_ENABLED}" or "${s3_const.LIFECYCLE_STATUS.STAT_DISABLED}".`, rule);
114-
throw new S3Error(S3Error.MalformedXML);
115-
}
116166
current_rule.status = rule.Status[0];
117167

118168
if (rule.Prefix) {

0 commit comments

Comments
 (0)