diff --git a/man/rules/meson.build b/man/rules/meson.build index 95687afce8..26eddb7791 100644 --- a/man/rules/meson.build +++ b/man/rules/meson.build @@ -828,6 +828,29 @@ manpages = [ 'sd_journal_seek_tail'], ''], ['sd_journal_stream_fd', '3', ['sd_journal_stream_fd_with_namespace'], ''], + ['sd_json_dispatch_string', + '3', + ['sd_json_dispatch_const_string', + 'sd_json_dispatch_double', + 'sd_json_dispatch_id128', + 'sd_json_dispatch_int16', + 'sd_json_dispatch_int32', + 'sd_json_dispatch_int64', + 'sd_json_dispatch_int8', + 'sd_json_dispatch_intbool', + 'sd_json_dispatch_signal', + 'sd_json_dispatch_stdbool', + 'sd_json_dispatch_strv', + 'sd_json_dispatch_tristate', + 'sd_json_dispatch_uid_gid', + 'sd_json_dispatch_uint16', + 'sd_json_dispatch_uint32', + 'sd_json_dispatch_uint64', + 'sd_json_dispatch_uint8', + 'sd_json_dispatch_unsupported', + 'sd_json_dispatch_variant', + 'sd_json_dispatch_variant_noref'], + ''], ['sd_listen_fds', '3', ['SD_LISTEN_FDS_START', 'sd_listen_fds_with_names'], diff --git a/man/sd_json_dispatch_string.xml b/man/sd_json_dispatch_string.xml new file mode 100644 index 0000000000..f42ff276aa --- /dev/null +++ b/man/sd_json_dispatch_string.xml @@ -0,0 +1,364 @@ + + + + + + + + sd_json_dispatch_string + systemd + + + + sd_json_dispatch_string + 3 + + + + sd_json_dispatch_string + sd_json_dispatch_const_string + sd_json_dispatch_strv + sd_json_dispatch_stdbool + sd_json_dispatch_intbool + sd_json_dispatch_tristate + sd_json_dispatch_variant + sd_json_dispatch_variant_noref + sd_json_dispatch_int64 + sd_json_dispatch_int32 + sd_json_dispatch_int16 + sd_json_dispatch_int8 + sd_json_dispatch_uint64 + sd_json_dispatch_uint32 + sd_json_dispatch_uint16 + sd_json_dispatch_uint8 + sd_json_dispatch_double + sd_json_dispatch_uid_gid + sd_json_dispatch_id128 + sd_json_dispatch_signal + sd_json_dispatch_unsupported + + Decode JSON variant values and write them to the specified memory + + + + + #include <systemd/sd-varlink.h> + + + int 
sd_json_dispatch_string + const char *name + sd_json_variant *variant + sd_dispatch_flags flags + void *userdata + + + + int sd_json_dispatch_const_string + const char *name + sd_json_variant *variant + sd_dispatch_flags flags + void *userdata + + + + int sd_json_dispatch_strv + const char *name + sd_json_variant *variant + sd_dispatch_flags flags + void *userdata + + + + int sd_json_dispatch_stdbool + const char *name + sd_json_variant *variant + sd_dispatch_flags flags + void *userdata + + + + int sd_json_dispatch_intbool + const char *name + sd_json_variant *variant + sd_dispatch_flags flags + void *userdata + + + + int sd_json_dispatch_tristate + const char *name + sd_json_variant *variant + sd_dispatch_flags flags + void *userdata + + + + int sd_json_dispatch_variant + const char *name + sd_json_variant *variant + sd_dispatch_flags flags + void *userdata + + + + int sd_json_dispatch_variant_noref + const char *name + sd_json_variant *variant + sd_dispatch_flags flags + void *userdata + + + + int sd_json_dispatch_int64 + const char *name + sd_json_variant *variant + sd_dispatch_flags flags + void *userdata + + + + int sd_json_dispatch_int32 + const char *name + sd_json_variant *variant + sd_dispatch_flags flags + void *userdata + + + + int sd_json_dispatch_int16 + const char *name + sd_json_variant *variant + sd_dispatch_flags flags + void *userdata + + + + int sd_json_dispatch_int8 + const char *name + sd_json_variant *variant + sd_dispatch_flags flags + void *userdata + + + + int sd_json_dispatch_uint64 + const char *name + sd_json_variant *variant + sd_dispatch_flags flags + void *userdata + + + + int sd_json_dispatch_uint32 + const char *name + sd_json_variant *variant + sd_dispatch_flags flags + void *userdata + + + + int sd_json_dispatch_uint16 + const char *name + sd_json_variant *variant + sd_dispatch_flags flags + void *userdata + + + + int sd_json_dispatch_uint8 + const char *name + sd_json_variant *variant + sd_dispatch_flags flags + void *userdata + 
+ + + int sd_json_dispatch_double + const char *name + sd_json_variant *variant + sd_dispatch_flags flags + void *userdata + + + + int sd_json_dispatch_uid_gid + const char *name + sd_json_variant *variant + sd_dispatch_flags flags + void *userdata + + + + int sd_json_dispatch_id128 + const char *name + sd_json_variant *variant + sd_dispatch_flags flags + void *userdata + + + + int sd_json_dispatch_signal + const char *name + sd_json_variant *variant + sd_dispatch_flags flags + void *userdata + + + + int sd_json_dispatch_unsupported + const char *name + sd_json_variant *variant + sd_dispatch_flags flags + void *userdata + + + + + + Description + + The various functions described here are intended for use in the + sd_json_dispatch_field structure arrays the + sd_json_dispatch3 and + sd_varlink_dispatch3 + functions accept; they decode the provided JSON variant object's value, and write it to the memory + indicated by the userdata pointer. The name parameter + contains the field name (in the JSON object it is contained in) of the value being decoded. For details + on the flags parameter see the sd_json_dispatch() + documentation. + + Note that all these functions not only accept the native JSON type they are intended for, but also + accept null JSON values, in which case they assign an appropriate invalid/unset/null value, as + appropriate for the type (for details see below). + + sd_json_dispatch_string() decodes a JSON string value, and allocates a + NUL terminated copy in dynamic memory. The userdata pointer + must point to a pointer to a string, which is freed if non-NULL, and then replaced + by the newly allocated one. If a JSON null value is passed, the existing string is freed and + NULL is assigned. + + sd_json_dispatch_const_string() is very similar to + sd_json_dispatch_string(), but does not allocate a string in dynamic + memory. 
Instead, it just writes a pointer into the JSON object into the indicated memory (or + NULL in case a JSON null object is passed). The memory remains valid only as long as + the indicated variant object is kept allocated (which can happen via direct reference, or via an indirect + one via an object that references the specified variant). The memory userdata + points to on input is not freed before the new value is assigned. + + sd_json_dispatch_stdbool() and sd_json_dispatch_intbool() + decode JSON boolean values and write them to the indicated memory. The former expects a variable of the + C99 bool type in the indicated memory, the latter an int (which will only + receive the values 0 and 1). The JSON null value is treated equivalent to a JSON false. + + sd_json_dispatch_tristate() is very similar + to sd_json_dispatch_intbool(), but will assign -1 if a JSON null value is passed. Or + in other words, the integer will have a value > 0, == 0 or < 0, for the cases true, false or + invalid/unset/null. + + sd_json_dispatch_variant() takes an additional reference to the passed JSON + object (via sd_json_variant_ref()) and writes the pointer to the indicated + memory. No decoding is done. If the indicated pointer is non-NULL on input it is + freed (via sd_json_variant_unref()) before the new pointer is written. + + sd_json_dispatch_variant_noref() is similar, but does not + take a new reference to the JSON variant object. The pointer hence only remains valid as long as the + original object stays referenced. If the indicated pointer is non-NULL on input it + is not freed before the new pointer is written. + + The sd_json_dispatch_int64(), sd_json_dispatch_int32(), + sd_json_dispatch_int16(), sd_json_dispatch_int8(), + sd_json_dispatch_uint64(), sd_json_dispatch_uint32(), + sd_json_dispatch_uint16() and sd_json_dispatch_uint8() + functions decode a JSON integer value, and write the value to the indicated memory. 
The function names + indicate the word width and signedness of the integers being parsed. If the JSON null value is passed the + functions for the unsigned integer types will assign the maximum value the type takes + (i.e. UINT64_MAX, UINT32_MAX …), and the signed versions assign + -1. Instead of a JSON integer value these functions also accept JSON strings that contain formatted + decimal numbers, in order to improve compatibility for encoding integer values that cannot be represented + in 64bit double precision floating point numbers in other programming languages that encode JSON numerals + this way. + + The sd_json_dispatch_double() function decodes a 64bit double precision + floating point number. If a JSON null value is passed, assigns NaN. + + The sd_json_dispatch_uid_gid() function is similar to + sd_json_dispatch_uint32(), and is intended to decode 32bit UNIX UID/GID numbers, as + used on Linux. It will decode a JSON null value as 4294967295 (i.e. (uid_t) -1), and + will refuse the values 65535 and 4294967295 when passed as JSON numerals (i.e. both the 16bit and 32bit + "invalid" UID/GID, as these values have special meaning for various UNIX syscalls, on different OSes and + file systems). + + sd_json_dispatch_id128() decodes a 128bit ID formatted as a JSON string. It + supports both RFC9562 UUID formatting, as well as 32 hexadecimal characters without separators, the same + way as + sd_id128_from_string3. If + the JSON null value is passed, the all-zero ID is assigned. + + sd_json_dispatch_signal() decodes a UNIX process signal specification. It + expects either a JSON string containing a signal name such as SIGINT or + SIGTERM, or an unsigned JSON integer value with the signal number (in the Linux + definition). The indicated memory must point to an int variable to write the signal number + to. If the JSON null value is passed a negative value will be written to the memory. + + sd_json_dispatch_unsupported() will always fail with the + -EINVAL error. 
+ + + + Return Value + + On success, these functions return a non-negative integer. On failure, they return a negative + errno-style error code. + + + Errors + + Returned errors may indicate the following problems: + + + + -EINVAL + + An argument is invalid. + + + + -ENOMEM + + Memory allocation failed. + + + + + + + + + History + + sd_json_dispatch_string(), sd_json_dispatch_const_string(), + sd_json_dispatch_strv(), sd_json_dispatch_stdbool(), + sd_json_dispatch_intbool(), sd_json_dispatch_tristate(), + sd_json_dispatch_variant(), sd_json_dispatch_variant_noref(), + sd_json_dispatch_int64(), sd_json_dispatch_int32(), + sd_json_dispatch_int16(), sd_json_dispatch_int8(), + sd_json_dispatch_uint64(), sd_json_dispatch_uint32(), + sd_json_dispatch_uint16(), sd_json_dispatch_uint8(), + sd_json_dispatch_double(), sd_json_dispatch_uid_gid(), + sd_json_dispatch_id128(), sd_json_dispatch_signal(), + sd_json_dispatch_unsupported() were added in version 257. + + + + See Also + + + systemd1 + sd-json3 + sd-varlink3 + sd_json_dispatch3 + sd_varlink_dispatch3 + + + diff --git a/man/systemd-repart.xml b/man/systemd-repart.xml index 317ae05826..a38f518669 100644 --- a/man/systemd-repart.xml +++ b/man/systemd-repart.xml @@ -26,7 +26,7 @@ systemd-repart OPTIONS - BLOCKDEVICE + BLOCKDEVICE systemd-repart.service @@ -35,10 +35,10 @@ Description - systemd-repart creates partition tables, and adds or grows partitions, - based on the configuration files described in - repart.d5. - + systemd-repart creates partition tables, and adds or grows partitions, based on + the configuration files described in + repart.d5. It operates + on the block device or file image specified on the command line. 
systemd-repart is used when building OS images, and also when deploying images to automatically adjust them, during boot, to the system they @@ -53,6 +53,11 @@ systemd-repart.service service is generally run at boot in the initrd, in order to augment the partition table of the OS before its partitions are mounted. + If the block device is specified as - (or as an empty string), + systemd-repart will not operate on any block device or image file, and instead + determine and output the minimum disk/image size for the specified partition configuration, taking all + configured size constraints into account. + systemd-repart operations are mostly incremental: it grows existing partitions or adds new ones, but does not shrink, delete, or move existing partitions. The service is intended to be run on every boot, but when it detects that the partition table already matches the installed @@ -495,6 +500,30 @@ + + + + This is very similar to but automatically + selects all partitions for deferral that have set. It may be used in + conjunction with or + , in which case all matching partitions are + deferred. + + + + + + + + This is very similar to but automatically + selects all partitions for deferral that have set. It may be used + in conjunction with or + , in which case all matching partitions are + deferred. + + + + diff --git a/rules.d/60-block.rules b/rules.d/60-block.rules index cf307389b8..9d54a65e02 100644 --- a/rules.d/60-block.rules +++ b/rules.d/60-block.rules @@ -14,3 +14,11 @@ ACTION!="remove", SUBSYSTEM=="block", \ # Reset access rights to each loopback device once it gets detached. 
ACTION=="change", SUBSYSTEM=="block", KERNEL=="loop*", ENV{DISK_MEDIA_CHANGE}=="1", TEST!="loop/backing_file", GROUP="disk", MODE="660" + +# Provide a somewhat cleaned up field indicating the subsystem various +# 'virtual' block devices belong to, in order to avoid replicating name based +# pattern matching in every consumer +ACTION!="remove", SUBSYSTEM=="block", KERNEL=="dm-*", ENV{ID_BLOCK_SUBSYSTEM}="dm" +ACTION!="remove", SUBSYSTEM=="block", KERNEL=="loop*", ENV{ID_BLOCK_SUBSYSTEM}="loop" +ACTION!="remove", SUBSYSTEM=="block", KERNEL=="md*", ENV{ID_BLOCK_SUBSYSTEM}="md" +ACTION!="remove", SUBSYSTEM=="block", KERNEL=="zram*", ENV{ID_BLOCK_SUBSYSTEM}="zram" diff --git a/src/libsystemd/sd-json/json-util.c b/src/libsystemd/sd-json/json-util.c index 556d4c786b..6f42239b91 100644 --- a/src/libsystemd/sd-json/json-util.c +++ b/src/libsystemd/sd-json/json-util.c @@ -31,6 +31,11 @@ int json_dispatch_unhex_iovec(const char *name, sd_json_variant *variant, sd_jso size_t sz; int r; + if (sd_json_variant_is_null(variant)) { + iovec_done(iov); + return 0; + } + if (!sd_json_variant_is_string(variant)) return json_log(variant, flags, SYNTHETIC_ERRNO(EINVAL), "JSON field '%s' is not a string.", strna(name)); @@ -49,6 +54,11 @@ int json_dispatch_unbase64_iovec(const char *name, sd_json_variant *variant, sd_ size_t sz; int r; + if (sd_json_variant_is_null(variant)) { + iovec_done(iov); + return 0; + } + if (!sd_json_variant_is_string(variant)) return json_log(variant, flags, SYNTHETIC_ERRNO(EINVAL), "JSON field '%s' is not a string.", strna(name)); @@ -68,6 +78,11 @@ int json_dispatch_byte_array_iovec(const char *name, sd_json_variant *variant, s assert(variant); + if (sd_json_variant_is_null(variant)) { + iovec_done(iov); + return 0; + } + if (!sd_json_variant_is_array(variant)) return json_log(variant, flags, SYNTHETIC_ERRNO(EINVAL), "JSON field '%s' is not an array.", strna(name)); @@ -169,6 +184,11 @@ int json_dispatch_in_addr(const char *name, sd_json_variant *variant, 
sd_json_di _cleanup_(iovec_done) struct iovec iov = {}; int r; + if (sd_json_variant_is_null(variant)) { + *address = (struct in_addr) {}; + return 0; + } + r = json_dispatch_byte_array_iovec(name, variant, flags, &iov); if (r < 0) return r; diff --git a/src/libsystemd/sd-json/sd-json.c b/src/libsystemd/sd-json/sd-json.c index 79cfa4cc60..776df25b8e 100644 --- a/src/libsystemd/sd-json/sd-json.c +++ b/src/libsystemd/sd-json/sd-json.c @@ -5292,6 +5292,11 @@ _public_ int sd_json_dispatch_stdbool(const char *name, sd_json_variant *variant assert_return(variant, -EINVAL); assert_return(userdata, -EINVAL); + if (sd_json_variant_is_null(variant)) { + *b = false; + return 0; + } + if (!sd_json_variant_is_boolean(variant)) return json_log(variant, flags, SYNTHETIC_ERRNO(EINVAL), "JSON field '%s' is not a boolean.", strna(name)); @@ -5305,6 +5310,11 @@ _public_ int sd_json_dispatch_intbool(const char *name, sd_json_variant *variant assert_return(variant, -EINVAL); assert_return(userdata, -EINVAL); + if (sd_json_variant_is_null(variant)) { + *b = false; + return 0; + } + if (!sd_json_variant_is_boolean(variant)) return json_log(variant, flags, SYNTHETIC_ERRNO(EINVAL), "JSON field '%s' is not a boolean.", strna(name)); @@ -5336,6 +5346,11 @@ _public_ int sd_json_dispatch_int64(const char *name, sd_json_variant *variant, assert_return(variant, -EINVAL); assert_return(userdata, -EINVAL); + if (sd_json_variant_is_null(variant)) { + *i = -1; + return 0; + } + /* Also accept numbers formatted as string, to increase compatibility with less capable JSON * implementations that cannot do 64bit integers. 
*/ if (sd_json_variant_is_string(variant) && safe_atoi64(sd_json_variant_string(variant), i) >= 0) @@ -5354,6 +5369,11 @@ _public_ int sd_json_dispatch_uint64(const char *name, sd_json_variant *variant, assert_return(variant, -EINVAL); assert_return(userdata, -EINVAL); + if (sd_json_variant_is_null(variant)) { + *u = UINT64_MAX; + return 0; + } + /* Since 64bit values (in particular unsigned ones) in JSON are problematic, let's also accept them * formatted as strings. If this is not desired make sure to set the .type field in * sd_json_dispatch_field to SD_JSON_UNSIGNED rather than _SD_JSON_VARIANT_TYPE_INVALID, so that @@ -5377,6 +5397,11 @@ _public_ int sd_json_dispatch_uint32(const char *name, sd_json_variant *variant, assert_return(variant, -EINVAL); assert_return(userdata, -EINVAL); + if (sd_json_variant_is_null(variant)) { + *u = UINT32_MAX; + return 0; + } + r = sd_json_dispatch_uint64(name, variant, flags, &u64); if (r < 0) return r; @@ -5399,6 +5424,11 @@ _public_ int sd_json_dispatch_int32(const char *name, sd_json_variant *variant, assert_return(variant, -EINVAL); assert_return(userdata, -EINVAL); + if (sd_json_variant_is_null(variant)) { + *i = -1; + return 0; + } + r = sd_json_dispatch_int64(name, variant, flags, &i64); if (r < 0) return r; @@ -5421,6 +5451,11 @@ _public_ int sd_json_dispatch_int16(const char *name, sd_json_variant *variant, assert_return(variant, -EINVAL); assert_return(userdata, -EINVAL); + if (sd_json_variant_is_null(variant)) { + *i = -1; + return 0; + } + r = sd_json_dispatch_int64(name, variant, flags, &i64); if (r < 0) return r; @@ -5440,6 +5475,11 @@ _public_ int sd_json_dispatch_uint16(const char *name, sd_json_variant *variant, assert_return(variant, -EINVAL); assert_return(userdata, -EINVAL); + if (sd_json_variant_is_null(variant)) { + *u = UINT16_MAX; + return 0; + } + r = sd_json_dispatch_uint64(name, variant, flags, &u64); if (r < 0) return r; @@ -5459,6 +5499,11 @@ _public_ int sd_json_dispatch_int8(const char *name, 
sd_json_variant *variant, s assert_return(variant, -EINVAL); assert_return(userdata, -EINVAL); + if (sd_json_variant_is_null(variant)) { + *i = -1; + return 0; + } + r = sd_json_dispatch_int64(name, variant, flags, &i64); if (r < 0) return r; @@ -5478,6 +5523,11 @@ _public_ int sd_json_dispatch_uint8(const char *name, sd_json_variant *variant, assert_return(variant, -EINVAL); assert_return(userdata, -EINVAL); + if (sd_json_variant_is_null(variant)) { + *u = UINT8_MAX; + return 0; + } + r = sd_json_dispatch_uint64(name, variant, flags, &u64); if (r < 0) return r; @@ -5495,6 +5545,11 @@ _public_ int sd_json_dispatch_double(const char *name, sd_json_variant *variant, assert_return(variant, -EINVAL); assert_return(userdata, -EINVAL); + if (sd_json_variant_is_null(variant)) { + *d = NAN; + return 0; + } + /* Note, this will take care of parsing NaN, -Infinity, Infinity for us */ if (sd_json_variant_is_string(variant) && safe_atod(sd_json_variant_string(variant), d) >= 0) return 0; @@ -5514,6 +5569,11 @@ _public_ int sd_json_dispatch_string(const char *name, sd_json_variant *variant, assert_return(variant, -EINVAL); assert_return(userdata, -EINVAL); + if (sd_json_variant_is_null(variant)) { + *s = mfree(*s); + return 0; + } + r = sd_json_dispatch_const_string(name, variant, flags, &n); if (r < 0) return r; @@ -5616,7 +5676,8 @@ _public_ int sd_json_dispatch_variant_noref(const char *name, sd_json_variant *v _public_ int sd_json_dispatch_uid_gid(const char *name, sd_json_variant *variant, sd_json_dispatch_flags_t flags, void *userdata) { uid_t *uid = userdata; - uint64_t k; + uint32_t k; + int r; assert_return(variant, -EINVAL); assert_return(userdata, -EINVAL); @@ -5633,11 +5694,10 @@ _public_ int sd_json_dispatch_uid_gid(const char *name, sd_json_variant *variant return 0; } - if (!sd_json_variant_is_unsigned(variant)) - return json_log(variant, flags, SYNTHETIC_ERRNO(EINVAL), "JSON field '%s' is not an integer.", strna(name)); - - k = sd_json_variant_unsigned(variant); 
- if (k > UINT32_MAX || !uid_is_valid(k)) + r = sd_json_dispatch_uint32(name, variant, flags, &k); + if (r < 0) + return r; + if (!uid_is_valid(k)) return json_log(variant, flags, SYNTHETIC_ERRNO(EINVAL), "JSON field '%s' is not a valid UID/GID.", strna(name)); *uid = k; @@ -5678,12 +5738,18 @@ _public_ int sd_json_dispatch_signal(const char *name, sd_json_variant *variant, } int k; - r = sd_json_dispatch_int(name, variant, flags, &k); - if (r < 0) - return r; + if (sd_json_variant_is_string(variant)) { + k = signal_from_string(sd_json_variant_string(variant)); + if (k < 0) + return json_log(variant, flags, SYNTHETIC_ERRNO(EINVAL), "JSON field '%s' is not a valid signal.", strna(name)); + } else { + r = sd_json_dispatch_int(name, variant, flags, &k); + if (r < 0) + return r; - if (!SIGNAL_VALID(k)) - return json_log(variant, flags, SYNTHETIC_ERRNO(EINVAL), "JSON field '%s' is not a valid signal.", strna(name)); + if (!SIGNAL_VALID(k)) + return json_log(variant, flags, SYNTHETIC_ERRNO(EINVAL), "JSON field '%s' is not a valid signal.", strna(name)); + } *signo = k; return 0; diff --git a/src/libsystemd/sd-varlink/sd-varlink.c b/src/libsystemd/sd-varlink/sd-varlink.c index f55112d1c4..18b26f657d 100644 --- a/src/libsystemd/sd-varlink/sd-varlink.c +++ b/src/libsystemd/sd-varlink/sd-varlink.c @@ -2421,6 +2421,15 @@ _public_ int sd_varlink_collect_full( if (sd_json_variant_elements(collected) >= VARLINK_COLLECT_MAX) return varlink_log_errno(v, SYNTHETIC_ERRNO(E2BIG), "Number of reply messages grew too large (%zu) while collecting.", sd_json_variant_elements(collected)); + _cleanup_(sd_json_variant_unrefp) sd_json_variant *empty = NULL; + if (!p) { + r = sd_json_variant_new_array(&empty, /* array= */ NULL, /* n= */ 0); + if (r < 0) + return r; + + p = empty; + } + r = sd_json_variant_append_array(&collected, p); if (r < 0) return varlink_log_errno(v, r, "Failed to append JSON object to array: %m"); diff --git a/src/repart/repart.c b/src/repart/repart.c index 
1ca825f39d..6aab17ff0a 100644 --- a/src/repart/repart.c +++ b/src/repart/repart.c @@ -7,6 +7,7 @@ #include #include +#include "sd-daemon.h" #include "sd-id128.h" #include "sd-json.h" #include "sd-varlink.h" @@ -159,6 +160,7 @@ typedef enum AppendMode { static EmptyMode arg_empty = EMPTY_UNSET; static bool arg_dry_run = true; static char *arg_node = NULL; +static bool arg_node_none = false; static char *arg_root = NULL; static char *arg_image = NULL; static char **arg_definitions = NULL; @@ -194,6 +196,8 @@ static size_t arg_n_filter_partitions = 0; static FilterPartitionsType arg_filter_partitions_type = FILTER_PARTITIONS_NONE; static GptPartitionType *arg_defer_partitions = NULL; static size_t arg_n_defer_partitions = 0; +static bool arg_defer_partitions_empty = false; +static bool arg_defer_partitions_factory_reset = false; static uint64_t arg_sector_size = 0; static ImagePolicy *arg_image_policy = NULL; static Architecture arg_architecture = _ARCHITECTURE_INVALID; @@ -232,6 +236,24 @@ STATIC_DESTRUCTOR_REGISTER(arg_generate_fstab, freep); STATIC_DESTRUCTOR_REGISTER(arg_generate_crypttab, freep); STATIC_DESTRUCTOR_REGISTER(arg_verity_settings, set_freep); +typedef enum ProgressPhase { + PROGRESS_LOADING_DEFINITIONS, + PROGRESS_LOADING_TABLE, + PROGRESS_OPENING_COPY_BLOCK_SOURCES, + PROGRESS_ACQUIRING_PARTITION_LABELS, + PROGRESS_MINIMIZING, + PROGRESS_PLACING, + PROGRESS_WIPING_DISK, + PROGRESS_WIPING_PARTITION, + PROGRESS_COPYING_PARTITION, + PROGRESS_FORMATTING_PARTITION, + PROGRESS_ADJUSTING_PARTITION, + PROGRESS_WRITING_TABLE, + PROGRESS_REREADING_TABLE, + _PROGRESS_PHASE_MAX, + _PROGRESS_PHASE_INVALID = -EINVAL, +} ProgressPhase; + typedef struct FreeArea FreeArea; typedef enum EncryptMode { @@ -369,7 +391,11 @@ static Subvolume* subvolume_free(Subvolume *s) { DEFINE_PRIVATE_HASH_OPS_WITH_VALUE_DESTRUCTOR(subvolume_hash_ops, char, path_hash_func, path_compare, Subvolume, subvolume_free); +typedef struct Context Context; + typedef struct Partition { + Context 
*context; + char *definition_path; char **drop_in_files; @@ -470,7 +496,9 @@ struct FreeArea { uint64_t allocated; }; -typedef struct Context { +struct Context { + char **definitions; + LIST_HEAD(Partition, partitions); size_t n_partitions; @@ -488,11 +516,19 @@ typedef struct Context { bool node_is_our_file; int backing_fd; + EmptyMode empty; + bool dry_run; + bool from_scratch; X509 *certificate; EVP_PKEY *private_key; -} Context; + + bool defer_partitions_empty; + bool defer_partitions_factory_reset; + + sd_varlink *link; /* If 'more' is used on the Varlink call, we'll send progress info over this link */ +}; static const char *empty_mode_table[_EMPTY_MODE_MAX] = { [EMPTY_UNSET] = "unset", @@ -529,11 +565,28 @@ static const char *minimize_mode_table[_MINIMIZE_MODE_MAX] = { [MINIMIZE_GUESS] = "guess", }; +static const char *progress_phase_table[_PROGRESS_PHASE_MAX] = { + [PROGRESS_LOADING_DEFINITIONS] = "loading-definitions", + [PROGRESS_LOADING_TABLE] = "loading-table", + [PROGRESS_OPENING_COPY_BLOCK_SOURCES] = "opening-copy-block-sources", + [PROGRESS_ACQUIRING_PARTITION_LABELS] = "acquiring-partition-labels", + [PROGRESS_MINIMIZING] = "minimizing", + [PROGRESS_PLACING] = "placing", + [PROGRESS_WIPING_DISK] = "wiping-disk", + [PROGRESS_WIPING_PARTITION] = "wiping-partition", + [PROGRESS_COPYING_PARTITION] = "copying-partition", + [PROGRESS_FORMATTING_PARTITION] = "formatting-partition", + [PROGRESS_ADJUSTING_PARTITION] = "adjusting-partition", + [PROGRESS_WRITING_TABLE] = "writing-table", + [PROGRESS_REREADING_TABLE] = "rereading-table", +}; + DEFINE_PRIVATE_STRING_TABLE_LOOKUP(empty_mode, EmptyMode); DEFINE_PRIVATE_STRING_TABLE_LOOKUP(append_mode, AppendMode); DEFINE_PRIVATE_STRING_TABLE_LOOKUP_FROM_STRING_WITH_BOOLEAN(encrypt_mode, EncryptMode, ENCRYPT_KEY_FILE); DEFINE_PRIVATE_STRING_TABLE_LOOKUP(verity_mode, VerityMode); DEFINE_PRIVATE_STRING_TABLE_LOOKUP_FROM_STRING_WITH_BOOLEAN(minimize_mode, MinimizeMode, MINIMIZE_BEST); 
+DEFINE_PRIVATE_STRING_TABLE_LOOKUP_TO_STRING(progress_phase, ProgressPhase); static uint64_t round_down_size(uint64_t v, uint64_t p) { return (v / p) * p; @@ -597,7 +650,7 @@ static int calculate_verity_hash_size( return 0; } -static Partition *partition_new(void) { +static Partition *partition_new(Context *c) { Partition *p; p = new(Partition, 1); @@ -605,6 +658,7 @@ static Partition *partition_new(void) { return NULL; *p = (Partition) { + .context = c, .weight = 1000, .padding_weight = 0, .current_size = UINT64_MAX, @@ -795,25 +849,37 @@ static Partition* partition_unlink_and_free(Context *context, Partition *p) { DEFINE_TRIVIAL_CLEANUP_FUNC(Partition*, partition_free); static Context* context_new( + char **definitions, + EmptyMode empty, + bool dry_run, sd_id128_t seed, X509 *certificate, EVP_PKEY *private_key) { - Context *context; - /* Note: This function takes ownership of the certificate and private_key arguments. */ - context = new(Context, 1); + _cleanup_strv_free_ char **d = NULL; + if (!strv_isempty(definitions)) { + d = strv_copy(definitions); + if (!d) + return NULL; + } + + Context *context = new(Context, 1); if (!context) return NULL; *context = (Context) { + .definitions = TAKE_PTR(d), .start = UINT64_MAX, .end = UINT64_MAX, .total = UINT64_MAX, .seed = seed, .certificate = certificate, .private_key = private_key, + .empty = empty, + .dry_run = dry_run, + .backing_fd = -EBADF, }; return context; @@ -833,6 +899,8 @@ static Context* context_free(Context *context) { if (!context) return NULL; + strv_free(context->definitions); + while (context->partitions) partition_unlink_and_free(context, context->partitions); assert(context->n_partitions == 0); @@ -851,6 +919,8 @@ static Context* context_free(Context *context) { X509_free(context->certificate); EVP_PKEY_free(context->private_key); + context->link = sd_varlink_unref(context->link); + return mfree(context); } @@ -2635,7 +2705,53 @@ static MakeFileSystemFlags partition_mkfs_flags(const Partition *p) { 
return flags; } -static int partition_read_definition(Partition *p, const char *path, const char *const *conf_file_dirs) { +static int context_notify( + Context *c, + ProgressPhase phase, + const char *object, + unsigned percent) { + + int r; + + assert(c); + assert(phase >= 0); + assert(phase < _PROGRESS_PHASE_MAX); + + /* Send progress information, via sd_notify() and via varlink (if client asked for it by setting "more" flag) */ + + _cleanup_free_ char *n = NULL; + if (asprintf(&n, + "STATUS=Phase %1$s\n" + "X_SYSTEMD_PHASE=%1$s", + progress_phase_to_string(phase)) < 0) + return log_oom_debug(); + + if (percent != UINT_MAX) + if (strextendf(&n, "\nX_SYSTEMD_PHASE_PROGRESS=%u", percent) < 0) + return log_oom_debug(); + + r = sd_notify(/* unset_environment= */ false, n); + if (r < 0) + log_debug_errno(r, "Failed to send sd_notify() progress notification, ignoring: %m"); + + if (c->link) { + r = sd_varlink_notifybo( + c->link, + SD_JSON_BUILD_PAIR("phase", JSON_BUILD_STRING_UNDERSCORIFY(progress_phase_to_string(phase))), + JSON_BUILD_PAIR_STRING_NON_EMPTY("object", object), + JSON_BUILD_PAIR_UNSIGNED_NOT_EQUAL("progress", percent, UINT_MAX)); + if (r < 0) + log_debug_errno(r, "Failed to send varlink notify progress notification, ignoring: %m"); + } + + return 0; +} + +static int partition_read_definition( + Context *c, + Partition *p, + const char *path, + const char *const *conf_file_dirs) { ConfigTableItem table[] = { { "Partition", "Type", config_parse_type, 0, &p->type }, @@ -2684,6 +2800,10 @@ static int partition_read_definition(Partition *p, const char *path, const char const char* dropin_dirname; int r; + assert(c); + assert(p); + assert(path); + r = path_extract_filename(path, &filename); if (r < 0) return log_error_errno(r, "Failed to extract filename from path '%s': %m", path); @@ -2694,7 +2814,7 @@ static int partition_read_definition(Partition *p, const char *path, const char STRV_MAKE_CONST(path), conf_file_dirs, dropin_dirname, - arg_definitions ? 
NULL : arg_root, + c->definitions ? NULL : arg_root, "Partition\0", config_item_table_lookup, table, CONFIG_PARSE_WARN, @@ -3082,7 +3202,7 @@ static int context_copy_from_one(Context *context, const char *src) { if (partition_type_exclude(&type)) continue; - np = partition_new(); + np = partition_new(context); if (!np) return log_oom(); @@ -3203,16 +3323,23 @@ static int context_read_definitions(Context *context) { assert(context); - dirs = (const char* const*) (arg_definitions ?: CONF_PATHS_STRV("repart.d")); + (void) context_notify(context, PROGRESS_LOADING_DEFINITIONS, /* object= */ NULL, UINT_MAX); - r = conf_files_list_strv(&files, ".conf", arg_definitions ? NULL : arg_root, CONF_FILES_REGULAR|CONF_FILES_FILTER_MASKED, dirs); + dirs = (const char* const*) (context->definitions ?: CONF_PATHS_STRV("repart.d")); + + r = conf_files_list_strv( + &files, + ".conf", + context->definitions ? NULL : arg_root, + CONF_FILES_REGULAR|CONF_FILES_FILTER_MASKED, + dirs); if (r < 0) return log_error_errno(r, "Failed to enumerate *.conf files: %m"); STRV_FOREACH(f, files) { _cleanup_(partition_freep) Partition *p = NULL; - p = partition_new(); + p = partition_new(context); if (!p) return log_oom(); @@ -3220,7 +3347,7 @@ static int context_read_definitions(Context *context) { if (!p->definition_path) return log_oom(); - r = partition_read_definition(p, *f, dirs); + r = partition_read_definition(context, p, *f, dirs); if (r < 0) return r; if (r == 0) @@ -3379,6 +3506,15 @@ static void derive_salt(sd_id128_t base, const char *token, uint8_t ret[static S hmac_sha256(base.bytes, sizeof(base.bytes), token, strlen(token), ret); } +static int context_load_fallback_metrics(Context *context) { + assert(context); + + context->sector_size = arg_sector_size > 0 ? arg_sector_size : 512; + context->grain_size = MAX(context->sector_size, 4096U); + context->default_fs_sector_size = arg_sector_size > 0 ? 
arg_sector_size : DEFAULT_FILESYSTEM_SECTOR_SIZE; + return 1; /* Starting from scratch */ +} + static int context_load_partition_table(Context *context) { _cleanup_(fdisk_unref_contextp) struct fdisk_context *c = NULL; _cleanup_(fdisk_unref_tablep) struct fdisk_table *t = NULL; @@ -3391,12 +3527,15 @@ static int context_load_partition_table(Context *context) { int r; assert(context); + assert(context->node); assert(!context->fdisk_context); assert(!context->free_areas); assert(context->start == UINT64_MAX); assert(context->end == UINT64_MAX); assert(context->total == UINT64_MAX); + context_notify(context, PROGRESS_LOADING_TABLE, /* object= */ NULL, UINT_MAX); + c = fdisk_new_context(); if (!c) return log_oom(); @@ -3410,7 +3549,7 @@ static int context_load_partition_table(Context *context) { r = context_open_and_lock_backing_fd( context->node, - arg_dry_run ? LOCK_SH : LOCK_EX, + context->dry_run ? LOCK_SH : LOCK_EX, &context->backing_fd); if (r < 0) return r; @@ -3418,7 +3557,7 @@ static int context_load_partition_table(Context *context) { if (fstat(context->backing_fd, &st) < 0) return log_error_errno(errno, "Failed to stat %s: %m", context->node); - if (IN_SET(arg_empty, EMPTY_REQUIRE, EMPTY_FORCE, EMPTY_CREATE) && S_ISREG(st.st_mode)) + if (IN_SET(context->empty, EMPTY_REQUIRE, EMPTY_FORCE, EMPTY_CREATE) && S_ISREG(st.st_mode)) /* Don't probe sector size from partition table if we are supposed to start from an empty disk */ ssz = 512; else { @@ -3446,7 +3585,7 @@ static int context_load_partition_table(Context *context) { r = fdisk_assign_device( c, context->backing_fd >= 0 ? 
FORMAT_PROC_FD_PATH(context->backing_fd) : context->node, - arg_dry_run); + context->dry_run); if (r == -EINVAL && arg_size_auto) { struct stat st; @@ -3476,7 +3615,7 @@ static int context_load_partition_table(Context *context) { if (context->backing_fd < 0) { /* If we have no fd referencing the device yet, make a copy of the fd now, so that we have one */ r = context_open_and_lock_backing_fd(FORMAT_PROC_FD_PATH(fdisk_get_devfd(c)), - arg_dry_run ? LOCK_SH : LOCK_EX, + context->dry_run ? LOCK_SH : LOCK_EX, &context->backing_fd); if (r < 0) return r; @@ -3499,7 +3638,7 @@ static int context_load_partition_table(Context *context) { log_debug("Sector size of device is %lu bytes. Using default filesystem sector size of %" PRIu64 " and grain size of %" PRIu64 ".", secsz, fs_secsz, grainsz); - switch (arg_empty) { + switch (context->empty) { case EMPTY_REFUSE: /* Refuse empty disks, insist on an existing GPT partition table */ @@ -3666,7 +3805,7 @@ static int context_load_partition_table(Context *context) { if (!found) { _cleanup_(partition_freep) Partition *np = NULL; - np = partition_new(); + np = partition_new(context); if (!np) return log_oom(); @@ -4507,12 +4646,28 @@ static int context_discard_gap_after(Context *context, Partition *p) { return 0; } +static bool partition_defer(Context *c, const Partition *p) { + assert(c); + assert(p); + + if (partition_type_defer(&p->type)) + return true; + + if (c->defer_partitions_empty && streq_ptr(p->new_label, "_empty")) + return true; + + if (c->defer_partitions_factory_reset && p->factory_reset) + return true; + + return false; +} + static int context_wipe_and_discard(Context *context) { int r; assert(context); - if (arg_empty == EMPTY_CREATE) /* If we just created the image, no need to wipe */ + if (context->empty == EMPTY_CREATE) /* If we just created the image, no need to wipe */ return 0; /* Wipe and discard the contents of all partitions we are about to create. 
We skip the discarding if @@ -4524,9 +4679,11 @@ static int context_wipe_and_discard(Context *context) { if (!p->allocated_to_area) continue; - if (partition_type_defer(&p->type)) + if (partition_defer(context, p)) continue; + (void) context_notify(context, PROGRESS_WIPING_PARTITION, p->definition_path, UINT_MAX); + r = context_wipe_partition(context, p); if (r < 0) return r; @@ -4654,14 +4811,16 @@ static int prepare_temporary_file(Context *context, PartitionTarget *t, uint64_t if (fd < 0) return log_error_errno(fd, "Failed to create temporary file: %m"); - r = read_attr_fd(fdisk_get_devfd(context->fdisk_context), &attrs); - if (r < 0 && !ERRNO_IS_NEG_NOT_SUPPORTED(r)) - log_warning_errno(r, "Failed to read file attributes of %s, ignoring: %m", arg_node); + if (context->fdisk_context) { + r = read_attr_fd(fdisk_get_devfd(context->fdisk_context), &attrs); + if (r < 0 && !ERRNO_IS_NEG_NOT_SUPPORTED(r)) + log_warning_errno(r, "Failed to read file attributes of %s, ignoring: %m", context->node); - if (FLAGS_SET(attrs, FS_NOCOW_FL)) { - r = chattr_fd(fd, FS_NOCOW_FL, FS_NOCOW_FL); - if (r < 0 && !ERRNO_IS_IOCTL_NOT_SUPPORTED(r)) - return log_error_errno(r, "Failed to disable copy-on-write on %s: %m", temp); + if (FLAGS_SET(attrs, FS_NOCOW_FL)) { + r = chattr_fd(fd, FS_NOCOW_FL, FS_NOCOW_FL); + if (r < 0 && !ERRNO_IS_IOCTL_NOT_SUPPORTED(r)) + return log_error_errno(r, "Failed to disable copy-on-write on %s: %m", temp); + } } if (ftruncate(fd, size) < 0) @@ -5532,6 +5691,8 @@ static int progress_bytes(uint64_t n_bytes, uint64_t bps, void *userdata) { p->last_percent = percent; + (void) context_notify(p->context, PROGRESS_COPYING_PARTITION, p->definition_path, percent); + return 0; } @@ -5551,7 +5712,7 @@ static int context_copy_blocks(Context *context) { if (PARTITION_EXISTS(p)) /* Never copy over existing partitions */ continue; - if (partition_type_defer(&p->type)) + if (partition_defer(context, p)) continue; /* For offline signing case */ @@ -5561,6 +5722,8 @@ static 
int context_copy_blocks(Context *context) { if (p->copy_blocks_fd < 0) continue; + (void) context_notify(context, PROGRESS_COPYING_PARTITION, p->definition_path, UINT_MAX); + assert(p->new_size != UINT64_MAX); size_t extra = p->encrypt != ENCRYPT_OFF ? LUKS2_METADATA_KEEP_FREE : 0; @@ -5620,14 +5783,14 @@ static int context_copy_blocks(Context *context) { log_info("Block level copying and synchronization of partition %" PRIu64 " complete in %s.", p->partno, FORMAT_TIMESPAN(time_spent, 0)); - if (p->siblings[VERITY_HASH] && !partition_type_defer(&p->siblings[VERITY_HASH]->type)) { + if (p->siblings[VERITY_HASH] && !partition_defer(context, p->siblings[VERITY_HASH])) { r = partition_format_verity_hash(context, p->siblings[VERITY_HASH], /* node = */ NULL, partition_target_path(t)); if (r < 0) return r; } - if (p->siblings[VERITY_SIG] && !partition_type_defer(&p->siblings[VERITY_SIG]->type)) { + if (p->siblings[VERITY_SIG] && !partition_defer(context, p->siblings[VERITY_SIG])) { r = partition_format_verity_sig(context, p->siblings[VERITY_SIG]); if (r < 0) return r; @@ -6524,7 +6687,7 @@ static int context_mkfs(Context *context) { if (!p->format) continue; - if (partition_type_defer(&p->type)) + if (partition_defer(context, p)) continue; /* For offline signing case */ @@ -6535,6 +6698,8 @@ static int context_mkfs(Context *context) { if (p->copy_blocks_fd >= 0) continue; + (void) context_notify(context, PROGRESS_FORMATTING_PARTITION, p->definition_path, UINT_MAX); + assert(p->offset != UINT64_MAX); assert(p->new_size != UINT64_MAX); assert(p->new_size >= (p->encrypt != ENCRYPT_OFF ? 
LUKS2_METADATA_KEEP_FREE : 0)); @@ -6633,14 +6798,14 @@ static int context_mkfs(Context *context) { if (r < 0) return r; - if (p->siblings[VERITY_HASH] && !partition_type_defer(&p->siblings[VERITY_HASH]->type)) { + if (p->siblings[VERITY_HASH] && !partition_defer(context, p->siblings[VERITY_HASH])) { r = partition_format_verity_hash(context, p->siblings[VERITY_HASH], /* node = */ NULL, partition_target_path(t)); if (r < 0) return r; } - if (p->siblings[VERITY_SIG] && !partition_type_defer(&p->siblings[VERITY_SIG]->type)) { + if (p->siblings[VERITY_SIG] && !partition_defer(context, p->siblings[VERITY_SIG])) { r = partition_format_verity_sig(context, p->siblings[VERITY_SIG]); if (r < 0) return r; @@ -6795,6 +6960,8 @@ static int context_acquire_partition_uuids_and_labels(Context *context) { continue; } + (void) context_notify(context, PROGRESS_ACQUIRING_PARTITION_LABELS, p->definition_path, UINT_MAX); + if (!sd_id128_is_null(p->current_uuid)) p->new_uuid = uuid = p->current_uuid; /* Never change initialized UUIDs */ else if (p->new_uuid_is_set) @@ -6919,9 +7086,11 @@ static int context_mangle_partitions(Context *context) { if (p->dropped) continue; - if (partition_type_defer(&p->type)) + if (partition_defer(context, p)) continue; + (void) context_notify(context, PROGRESS_ADJUSTING_PARTITION, p->definition_path, UINT_MAX); + assert(p->new_size != UINT64_MAX); assert(p->offset != UINT64_MAX); assert(p->partno != UINT64_MAX); @@ -7169,7 +7338,7 @@ static int context_split(Context *context) { if (!p->split_path) continue; - if (partition_type_defer(&p->type)) + if (partition_defer(context, p)) continue; if (fd < 0) { @@ -7177,7 +7346,7 @@ static int context_split(Context *context) { r = read_attr_fd(fd, &attrs); if (r < 0 && !ERRNO_IS_NEG_NOT_SUPPORTED(r)) - log_warning_errno(r, "Failed to read file attributes of %s, ignoring: %m", arg_node); + log_warning_errno(r, "Failed to read file attributes of %s, ignoring: %m", context->node); } fdt = xopenat_full( @@ -7211,14 
+7380,17 @@ static int context_write_partition_table(Context *context) { return 0; } - if (arg_dry_run) { + if (context->dry_run) { log_notice("Refusing to repartition, please re-run with --dry-run=no."); return 0; } log_info("Applying changes to %s.", context->node); - if (context->from_scratch && arg_empty != EMPTY_CREATE) { + if (context->from_scratch && context->empty != EMPTY_CREATE) { + + (void) context_notify(context, PROGRESS_WIPING_DISK, /* object= */ NULL, UINT_MAX); + /* Erase everything if we operate from scratch, except if the image was just created anyway, and thus is definitely empty. */ r = context_wipe_range(context, 0, context->total); if (r < 0) @@ -7261,6 +7433,8 @@ static int context_write_partition_table(Context *context) { log_info("Writing new partition table."); + (void) context_notify(context, PROGRESS_WRITING_TABLE, /* object= */ NULL, UINT_MAX); + r = fdisk_write_disklabel(context->fdisk_context); if (r < 0) return log_error_errno(r, "Failed to write partition table: %m"); @@ -7272,6 +7446,8 @@ static int context_write_partition_table(Context *context) { return log_error_errno(capable, "Failed to check if block device supports partition scanning: %m"); else if (capable > 0) { log_info("Informing kernel about changed partitions..."); + (void) context_notify(context, PROGRESS_REREADING_TABLE, /* object= */ NULL, UINT_MAX); + r = reread_partition_table_fd(fdisk_get_devfd(context->fdisk_context), /* flags= */ 0); if (r < 0) return log_error_errno(r, "Failed to reread partition table: %m"); @@ -7321,7 +7497,7 @@ static int context_factory_reset(Context *context) { if (context->from_scratch) /* Nothing to reset if we start from scratch */ return 0; - if (arg_dry_run) { + if (context->dry_run) { log_notice("Refusing to factory reset, please re-run with --dry-run=no."); return 0; } @@ -7764,6 +7940,9 @@ static int context_open_copy_block_paths( assert(context); + if (!context->partitions) + return 0; + LIST_FOREACH(partitions, p, 
context->partitions) { _cleanup_close_ int source_fd = -EBADF; _cleanup_free_ char *opened = NULL; @@ -7810,6 +7989,8 @@ static int context_open_copy_block_paths( } else continue; + (void) context_notify(context, PROGRESS_OPENING_COPY_BLOCK_SOURCES, p->definition_path, UINT_MAX); + if (S_ISDIR(st.st_mode)) { _cleanup_free_ char *bdev = NULL; dev_t devt; @@ -8211,9 +8392,11 @@ static int context_minimize(Context *context) { assert(context); - r = read_attr_fd(context->backing_fd, &attrs); - if (r < 0 && !ERRNO_IS_NEG_NOT_SUPPORTED(r)) - log_warning_errno(r, "Failed to read file attributes of %s, ignoring: %m", arg_node); + if (context->backing_fd >= 0) { + r = read_attr_fd(context->backing_fd, &attrs); + if (r < 0 && !ERRNO_IS_NEG_NOT_SUPPORTED(r)) + log_warning_errno(r, "Failed to read file attributes of %s, ignoring: %m", context->node); + } LIST_FOREACH(partitions, p, context->partitions) { _cleanup_(rm_rf_physical_and_freep) char *root = NULL; @@ -8244,6 +8427,8 @@ static int context_minimize(Context *context) { if (!partition_needs_populate(p)) continue; + (void) context_notify(context, PROGRESS_MINIMIZING, p->definition_path, UINT_MAX); + assert(!p->copy_blocks_path); (void) partition_hint(p, context->node, &hint); @@ -8674,6 +8859,10 @@ static int help(void) { " --defer-partitions=PARTITION1,PARTITION2,PARTITION3,…\n" " Take partitions of the specified types into account\n" " but don't populate them yet\n" + " --defer-partitions-empty=yes\n" + " Defer all partitions marked for formatting as empty\n" + " --defer-partitions-factory-reset=yes\n" + " Defer all partitions marked for factory reset\n" "\n%3$sCopying:%4$s\n" " -s --copy-source=PATH Specify the primary source tree to copy files from\n" " --copy-from=IMAGE Copy partitions from the given image(s)\n" @@ -8739,6 +8928,8 @@ static int parse_argv( ARG_INCLUDE_PARTITIONS, ARG_EXCLUDE_PARTITIONS, ARG_DEFER_PARTITIONS, + ARG_DEFER_PARTITIONS_EMPTY, + ARG_DEFER_PARTITIONS_FACTORY_RESET, ARG_SECTOR_SIZE, 
ARG_SKIP_PARTITIONS, ARG_ARCHITECTURE, @@ -8753,50 +8944,52 @@ static int parse_argv( }; static const struct option options[] = { - { "help", no_argument, NULL, 'h' }, - { "version", no_argument, NULL, ARG_VERSION }, - { "no-pager", no_argument, NULL, ARG_NO_PAGER }, - { "no-legend", no_argument, NULL, ARG_NO_LEGEND }, - { "dry-run", required_argument, NULL, ARG_DRY_RUN }, - { "empty", required_argument, NULL, ARG_EMPTY }, - { "discard", required_argument, NULL, ARG_DISCARD }, - { "factory-reset", required_argument, NULL, ARG_FACTORY_RESET }, - { "can-factory-reset", no_argument, NULL, ARG_CAN_FACTORY_RESET }, - { "root", required_argument, NULL, ARG_ROOT }, - { "image", required_argument, NULL, ARG_IMAGE }, - { "image-policy", required_argument, NULL, ARG_IMAGE_POLICY }, - { "seed", required_argument, NULL, ARG_SEED }, - { "pretty", required_argument, NULL, ARG_PRETTY }, - { "definitions", required_argument, NULL, ARG_DEFINITIONS }, - { "size", required_argument, NULL, ARG_SIZE }, - { "json", required_argument, NULL, ARG_JSON }, - { "key-file", required_argument, NULL, ARG_KEY_FILE }, - { "private-key", required_argument, NULL, ARG_PRIVATE_KEY }, - { "private-key-source", required_argument, NULL, ARG_PRIVATE_KEY_SOURCE }, - { "certificate", required_argument, NULL, ARG_CERTIFICATE }, - { "certificate-source", required_argument, NULL, ARG_CERTIFICATE_SOURCE }, - { "tpm2-device", required_argument, NULL, ARG_TPM2_DEVICE }, - { "tpm2-device-key", required_argument, NULL, ARG_TPM2_DEVICE_KEY }, - { "tpm2-seal-key-handle", required_argument, NULL, ARG_TPM2_SEAL_KEY_HANDLE }, - { "tpm2-pcrs", required_argument, NULL, ARG_TPM2_PCRS }, - { "tpm2-public-key", required_argument, NULL, ARG_TPM2_PUBLIC_KEY }, - { "tpm2-public-key-pcrs", required_argument, NULL, ARG_TPM2_PUBLIC_KEY_PCRS }, - { "tpm2-pcrlock", required_argument, NULL, ARG_TPM2_PCRLOCK }, - { "split", required_argument, NULL, ARG_SPLIT }, - { "include-partitions", required_argument, NULL, ARG_INCLUDE_PARTITIONS 
}, - { "exclude-partitions", required_argument, NULL, ARG_EXCLUDE_PARTITIONS }, - { "defer-partitions", required_argument, NULL, ARG_DEFER_PARTITIONS }, - { "sector-size", required_argument, NULL, ARG_SECTOR_SIZE }, - { "architecture", required_argument, NULL, ARG_ARCHITECTURE }, - { "offline", required_argument, NULL, ARG_OFFLINE }, - { "copy-from", required_argument, NULL, ARG_COPY_FROM }, - { "copy-source", required_argument, NULL, 's' }, - { "make-ddi", required_argument, NULL, ARG_MAKE_DDI }, - { "append-fstab", required_argument, NULL, ARG_APPEND_FSTAB }, - { "generate-fstab", required_argument, NULL, ARG_GENERATE_FSTAB }, - { "generate-crypttab", required_argument, NULL, ARG_GENERATE_CRYPTTAB }, - { "list-devices", no_argument, NULL, ARG_LIST_DEVICES }, - { "join-signature", required_argument, NULL, ARG_JOIN_SIGNATURE }, + { "help", no_argument, NULL, 'h' }, + { "version", no_argument, NULL, ARG_VERSION }, + { "no-pager", no_argument, NULL, ARG_NO_PAGER }, + { "no-legend", no_argument, NULL, ARG_NO_LEGEND }, + { "dry-run", required_argument, NULL, ARG_DRY_RUN }, + { "empty", required_argument, NULL, ARG_EMPTY }, + { "discard", required_argument, NULL, ARG_DISCARD }, + { "factory-reset", required_argument, NULL, ARG_FACTORY_RESET }, + { "can-factory-reset", no_argument, NULL, ARG_CAN_FACTORY_RESET }, + { "root", required_argument, NULL, ARG_ROOT }, + { "image", required_argument, NULL, ARG_IMAGE }, + { "image-policy", required_argument, NULL, ARG_IMAGE_POLICY }, + { "seed", required_argument, NULL, ARG_SEED }, + { "pretty", required_argument, NULL, ARG_PRETTY }, + { "definitions", required_argument, NULL, ARG_DEFINITIONS }, + { "size", required_argument, NULL, ARG_SIZE }, + { "json", required_argument, NULL, ARG_JSON }, + { "key-file", required_argument, NULL, ARG_KEY_FILE }, + { "private-key", required_argument, NULL, ARG_PRIVATE_KEY }, + { "private-key-source", required_argument, NULL, ARG_PRIVATE_KEY_SOURCE }, + { "certificate", required_argument, NULL, 
ARG_CERTIFICATE }, + { "certificate-source", required_argument, NULL, ARG_CERTIFICATE_SOURCE }, + { "tpm2-device", required_argument, NULL, ARG_TPM2_DEVICE }, + { "tpm2-device-key", required_argument, NULL, ARG_TPM2_DEVICE_KEY }, + { "tpm2-seal-key-handle", required_argument, NULL, ARG_TPM2_SEAL_KEY_HANDLE }, + { "tpm2-pcrs", required_argument, NULL, ARG_TPM2_PCRS }, + { "tpm2-public-key", required_argument, NULL, ARG_TPM2_PUBLIC_KEY }, + { "tpm2-public-key-pcrs", required_argument, NULL, ARG_TPM2_PUBLIC_KEY_PCRS }, + { "tpm2-pcrlock", required_argument, NULL, ARG_TPM2_PCRLOCK }, + { "split", required_argument, NULL, ARG_SPLIT }, + { "include-partitions", required_argument, NULL, ARG_INCLUDE_PARTITIONS }, + { "exclude-partitions", required_argument, NULL, ARG_EXCLUDE_PARTITIONS }, + { "defer-partitions", required_argument, NULL, ARG_DEFER_PARTITIONS }, + { "defer-partitions-empty", required_argument, NULL, ARG_DEFER_PARTITIONS_EMPTY }, + { "defer-partitions-factory-reset", required_argument, NULL, ARG_DEFER_PARTITIONS_FACTORY_RESET }, + { "sector-size", required_argument, NULL, ARG_SECTOR_SIZE }, + { "architecture", required_argument, NULL, ARG_ARCHITECTURE }, + { "offline", required_argument, NULL, ARG_OFFLINE }, + { "copy-from", required_argument, NULL, ARG_COPY_FROM }, + { "copy-source", required_argument, NULL, 's' }, + { "make-ddi", required_argument, NULL, ARG_MAKE_DDI }, + { "append-fstab", required_argument, NULL, ARG_APPEND_FSTAB }, + { "generate-fstab", required_argument, NULL, ARG_GENERATE_FSTAB }, + { "generate-crypttab", required_argument, NULL, ARG_GENERATE_CRYPTTAB }, + { "list-devices", no_argument, NULL, ARG_LIST_DEVICES }, + { "join-signature", required_argument, NULL, ARG_JOIN_SIGNATURE }, {} }; @@ -9094,6 +9287,20 @@ static int parse_argv( break; + case ARG_DEFER_PARTITIONS_EMPTY: + r = parse_boolean_argument("--defer-partitions-empty=", optarg, &arg_defer_partitions_empty); + if (r < 0) + return r; + + break; + + case 
ARG_DEFER_PARTITIONS_FACTORY_RESET: + r = parse_boolean_argument("--defer-partitions-factory-reset=", optarg, &arg_defer_partitions_factory_reset); + if (r < 0) + return r; + + break; + case ARG_SECTOR_SIZE: r = parse_sector_size(optarg, &arg_sector_size); if (r < 0) @@ -9292,9 +9499,14 @@ static int parse_argv( } if (argc > optind) { - arg_node = strdup(argv[optind]); - if (!arg_node) - return log_oom(); + if (empty_or_dash(argv[optind])) + arg_node_none = true; + else { + arg_node = strdup(argv[optind]); + if (!arg_node) + return log_oom(); + arg_node_none = false; + } } if (IN_SET(arg_empty, EMPTY_FORCE, EMPTY_REQUIRE, EMPTY_CREATE) && !arg_node && !arg_image) @@ -9548,8 +9760,11 @@ static int find_root(Context *context) { assert(context); + if (arg_node_none) + return 0; + if (arg_node) { - if (arg_empty == EMPTY_CREATE) { + if (context->empty == EMPTY_CREATE) { _cleanup_close_ int fd = -EBADF; _cleanup_free_ char *s = NULL; @@ -9578,7 +9793,7 @@ static int find_root(Context *context) { return 0; } - assert(IN_SET(arg_empty, EMPTY_REFUSE, EMPTY_ALLOW)); + assert(IN_SET(context->empty, EMPTY_REFUSE, EMPTY_ALLOW)); /* If the root mount has been replaced by some form of volatile file system (overlayfs), the * original root block device node is symlinked in /run/systemd/volatile-root. Let's read that @@ -9775,7 +9990,12 @@ done: return 1; } -static int determine_auto_size(Context *c) { +static int determine_auto_size( + Context *c, + int level, + bool ignore_allocated, /* If true, determines unallocated space needed */ + uint64_t *ret) { + uint64_t sum; assert(c); @@ -9792,19 +10012,80 @@ static int determine_auto_size(Context *c) { if (m > UINT64_MAX - sum) return log_error_errno(SYNTHETIC_ERRNO(EOVERFLOW), "Image would grow too large, refusing."); + if (ignore_allocated && PARTITION_EXISTS(p)) + m = LESS_BY(m, p->current_size + p->current_padding); + sum += m; } if (c->total != UINT64_MAX) /* Image already allocated? Then show its size. 
*/ - log_info("Automatically determined minimal disk image size as %s, current image size is %s.", + log_full(level, + "Automatically determined minimal disk image size as %s, current block device/image size is %s.", FORMAT_BYTES(sum), FORMAT_BYTES(c->total)); else /* If the image is being created right now, then it has no previous size, suppress any comment about it hence. */ - log_info("Automatically determined minimal disk image size as %s.", + log_full(level, + "Automatically determined minimal disk image size as %s.", FORMAT_BYTES(sum)); - arg_size = sum; + if (ret) + *ret = sum; + return 0; +} + +static int context_ponder(Context *context) { + int r; + + assert(context); + + (void) context_notify(context, PROGRESS_PLACING, /* object= */ NULL, UINT_MAX); + + /* First try to fit new partitions in, dropping by priority until it fits */ + for (;;) { + uint64_t largest_free_area; + + if (context_allocate_partitions(context, &largest_free_area)) + break; /* Success! */ + + if (context_unmerge_and_allocate_partitions(context)) + break; /* We had to un-suppress a supplement or few, but still success! */ + + if (context_drop_or_foreignize_one_priority(context)) + continue; /* Still no luck. Let's drop a priority and try again. */ + + /* No more priorities left to drop. This configuration just doesn't fit on this disk... 
*/ + return log_error_errno(SYNTHETIC_ERRNO(ENOSPC), + "Can't fit requested partitions into available free space (%s), refusing.", + FORMAT_BYTES(largest_free_area)); + } + + LIST_FOREACH(partitions, p, context->partitions) { + if (!p->supplement_for) + continue; + + if (PARTITION_SUPPRESSED(p)) { + assert(!p->allocated_to_area); + p->dropped = true; + + log_debug("Partition %s can be merged into %s, suppressing supplement.", + p->definition_path, p->supplement_for->definition_path); + } else if (PARTITION_EXISTS(p)) + log_info("Partition %s already exists on disk, using supplement verbatim.", + p->definition_path); + else + log_info("Couldn't allocate partitions with %s merged into %s, using supplement verbatim.", + p->definition_path, p->supplement_for->definition_path); + } + + /* Now assign free space according to the weight logic */ + r = context_grow_partitions(context); + if (r < 0) + return r; + + /* Now calculate where each new partition gets placed */ + context_place_partitions(context); + return 0; } @@ -9844,6 +10125,7 @@ static int vl_method_list_candidate_devices( BLOCKDEV_LIST_SHOW_SYMLINKS| BLOCKDEV_LIST_REQUIRE_PARTITION_SCANNING| BLOCKDEV_LIST_IGNORE_ZRAM| + BLOCKDEV_LIST_METADATA| (p.ignore_empty ? BLOCKDEV_LIST_IGNORE_EMPTY : 0)| (p.ignore_root ? 
BLOCKDEV_LIST_IGNORE_ROOT : 0), &l, @@ -9868,8 +10150,11 @@ static int vl_method_list_candidate_devices( &v, SD_JSON_BUILD_PAIR_STRING("node", d->node), JSON_BUILD_PAIR_STRV_NON_EMPTY("symlinks", d->symlinks), - SD_JSON_BUILD_PAIR_CONDITION(d->diskseq != UINT64_MAX, "diskseq", SD_JSON_BUILD_INTEGER(d->diskseq)), - SD_JSON_BUILD_PAIR_CONDITION(d->size != UINT64_MAX, "sizeBytes", SD_JSON_BUILD_INTEGER(d->size))); + JSON_BUILD_PAIR_UNSIGNED_NOT_EQUAL("diskseq", d->diskseq, UINT64_MAX), + JSON_BUILD_PAIR_UNSIGNED_NOT_EQUAL("sizeBytes", d->size, UINT64_MAX), + JSON_BUILD_PAIR_STRING_NON_EMPTY("model", d->model), + JSON_BUILD_PAIR_STRING_NON_EMPTY("vendor", d->vendor), + JSON_BUILD_PAIR_STRING_NON_EMPTY("subsystem", d->subsystem)); if (r < 0) return r; } @@ -9878,6 +10163,172 @@ static int vl_method_list_candidate_devices( return sd_varlink_reply(link, v); } +static JSON_DISPATCH_ENUM_DEFINE(json_dispatch_empty_mode, EmptyMode, empty_mode_from_string); + +typedef struct RunParameters { + char *node; + EmptyMode empty; + bool dry_run; + sd_id128_t seed; + char **definitions; + bool defer_partitions_empty; + bool defer_partitions_factory_reset; +} RunParameters; + +static void run_parameters_done(RunParameters *p) { + assert(p); + + p->node = mfree(p->node); + p->definitions = strv_free(p->definitions); +} + +static int vl_method_run( + sd_varlink *link, + sd_json_variant *parameters, + sd_varlink_method_flags_t flags, + void *userdata) { + + static const sd_json_dispatch_field dispatch_table[] = { + { "node", SD_JSON_VARIANT_STRING, sd_json_dispatch_string, offsetof(RunParameters, node), SD_JSON_NULLABLE }, + { "empty", SD_JSON_VARIANT_STRING, json_dispatch_empty_mode, offsetof(RunParameters, empty), SD_JSON_MANDATORY }, + { "seed", SD_JSON_VARIANT_STRING, sd_json_dispatch_id128, offsetof(RunParameters, seed), SD_JSON_NULLABLE }, + { "dryRun", SD_JSON_VARIANT_BOOLEAN, sd_json_dispatch_stdbool, offsetof(RunParameters, dry_run), SD_JSON_MANDATORY }, + { "definitions", 
SD_JSON_VARIANT_ARRAY, json_dispatch_strv_path, offsetof(RunParameters, definitions), SD_JSON_MANDATORY|SD_JSON_STRICT }, + { "deferPartitionsEmpty", SD_JSON_VARIANT_BOOLEAN, sd_json_dispatch_stdbool, offsetof(RunParameters, defer_partitions_empty), SD_JSON_NULLABLE }, + { "deferPartitionsFactoryReset", SD_JSON_VARIANT_BOOLEAN, sd_json_dispatch_stdbool, offsetof(RunParameters, defer_partitions_factory_reset), SD_JSON_NULLABLE }, + {} + }; + + int r; + + assert(link); + + _cleanup_(run_parameters_done) RunParameters p = { + .empty = _EMPTY_MODE_INVALID, + .dry_run = true, + }; + r = sd_varlink_dispatch(link, parameters, dispatch_table, &p); + if (r != 0) + return r; + + /* If no device node is specified, this is a dry run. Refuse if the caller claims otherwise. */ + if (!p.node && !p.dry_run) + return sd_varlink_error_invalid_parameter_name(link, "dryRun"); + + _cleanup_(context_freep) Context* context = NULL; + context = context_new( + p.definitions, + p.empty, + p.dry_run, + p.seed, + /* certificate= */ NULL, + /* private_key= */ NULL); + if (!context) + return log_oom(); + + context->defer_partitions_empty = p.defer_partitions_empty; + context->defer_partitions_factory_reset = p.defer_partitions_factory_reset; + + if (FLAGS_SET(flags, SD_VARLINK_METHOD_MORE)) + context->link = sd_varlink_ref(link); + + r = context_read_seed(context, arg_root); + if (r < 0) + return r; + + r = context_read_definitions(context); + if (r < 0) + return r; + + if (p.node) { + context->node = TAKE_PTR(p.node); + + r = context_load_partition_table(context); + if (r == -EHWPOISON) + return sd_varlink_error(link, "io.systemd.Repart.ConflictingDiskLabelPresent", NULL); + } else + r = context_load_fallback_metrics(context); + if (r < 0) + return r; + context->from_scratch = r > 0; /* Starting from scratch */ + + r = context_open_copy_block_paths(context, (dev_t) -1); + if (r < 0) + return r; + + r = context_acquire_partition_uuids_and_labels(context); + if (r < 0) + return r; + + r = 
context_update_verity_size(context); + if (r < 0) + return r; + + r = context_minimize(context); + if (r < 0) + return r; + + /* If we have no node, just sum up how much space we need */ + if (!context->node) { + /* Check if space issue is caused by the whole disk being too small */ + uint64_t size; + r = determine_auto_size(context, LOG_DEBUG, /* ignore_allocated= */ false, &size); + if (r < 0) + return r; + + return sd_varlink_replybo( + link, + SD_JSON_BUILD_PAIR_UNSIGNED("minimalSizeBytes", size)); + } + + r = context_ponder(context); + if (r == -ENOSPC) { + /* Check if space issue is caused by the whole disk being too small */ + uint64_t size = UINT64_MAX; + (void) determine_auto_size(context, LOG_DEBUG, /* ignore_allocated= */ false, &size); + if (size != UINT64_MAX && context->total != UINT64_MAX && size > context->total) + return sd_varlink_errorbo( + link, + "io.systemd.Repart.DiskTooSmall", + SD_JSON_BUILD_PAIR_UNSIGNED("minimalSizeBytes", size), + SD_JSON_BUILD_PAIR_UNSIGNED("currentSizeBytes", context->total)); + + /* Or if the disk would fit, but there's not enough unallocated space */ + uint64_t need_free = UINT64_MAX; + (void) determine_auto_size(context, LOG_DEBUG, /* ignore_allocated= */ true, &need_free); + return sd_varlink_errorbo( + link, + "io.systemd.Repart.InsufficientFreeSpace", + JSON_BUILD_PAIR_UNSIGNED_NOT_EQUAL("minimalSizeBytes", size, UINT64_MAX), + JSON_BUILD_PAIR_UNSIGNED_NOT_EQUAL("needFreeBytes", need_free, UINT64_MAX), + JSON_BUILD_PAIR_UNSIGNED_NOT_EQUAL("currentSizeBytes", context->total, UINT64_MAX)); + } + if (r < 0) + return r; + + if (p.dry_run) { + uint64_t size; + + /* If we are doing a dry-run, report the minimal size. 
*/ + r = determine_auto_size(context, LOG_DEBUG, /* ignore_allocated= */ false, &size); + if (r < 0) + return r; + + return sd_varlink_replybo( + link, + SD_JSON_BUILD_PAIR_UNSIGNED("minimalSizeBytes", size), + JSON_BUILD_PAIR_UNSIGNED_NOT_EQUAL("currentSizeBytes", context->total, UINT64_MAX)); + } + + r = context_write_partition_table(context); + if (r < 0) + return r; + + context_disarm_auto_removal(context); + + return sd_varlink_reply(link, NULL); +} + static int vl_server(void) { _cleanup_(sd_varlink_server_unrefp) sd_varlink_server *varlink_server = NULL; int r; @@ -9897,7 +10348,8 @@ static int vl_server(void) { r = sd_varlink_server_bind_method_many( varlink_server, - "io.systemd.Repart.ListCandidateDevices", vl_method_list_candidate_devices); + "io.systemd.Repart.ListCandidateDevices", vl_method_list_candidate_devices, + "io.systemd.Repart.Run", vl_method_run); if (r < 0) return log_error_errno(r, "Failed to bind Varlink methods: %m"); @@ -9964,7 +10416,7 @@ static int run(int argc, char *argv[]) { if (!arg_root) return log_oom(); - if (!arg_node) { + if (!arg_node && !arg_node_none) { arg_node = strdup(loop_device->node); if (!arg_node) return log_oom(); @@ -9982,13 +10434,22 @@ static int run(int argc, char *argv[]) { return log_oom(); } - context = context_new(arg_seed, certificate, private_key); + context = context_new( + arg_definitions, + arg_empty, + arg_dry_run, + arg_seed, + certificate, + private_key); if (!context) return log_oom(); TAKE_PTR(certificate); TAKE_PTR(private_key); + context->defer_partitions_empty = arg_defer_partitions_empty; + context->defer_partitions_factory_reset = arg_defer_partitions_factory_reset; + r = context_read_seed(context, arg_root); if (r < 0) return r; @@ -9999,7 +10460,7 @@ static int run(int argc, char *argv[]) { if (arg_make_ddi) { _cleanup_free_ char *d = NULL, *dp = NULL; - assert(!arg_definitions); + assert(!context->definitions); d = strjoin(arg_make_ddi, ".repart.d/"); if (!d) @@ -10009,10 +10470,10 @@ 
static int run(int argc, char *argv[]) { if (r < 0) return log_error_errno(r, "DDI type '%s' is not defined: %m", arg_make_ddi); - if (strv_consume(&arg_definitions, TAKE_PTR(dp)) < 0) + if (strv_consume(&context->definitions, TAKE_PTR(dp)) < 0) return log_oom(); } else - strv_uniq(arg_definitions); + strv_uniq(context->definitions); r = context_read_definitions(context); if (r < 0) @@ -10025,21 +10486,24 @@ static int run(int argc, char *argv[]) { if (r < 0) return r; - if (arg_size != UINT64_MAX) { - r = resize_backing_fd( - context->node, - &context->backing_fd, - node_is_our_loop ? arg_image : NULL, - node_is_our_loop ? loop_device : NULL, - context->sector_size); - if (r < 0) - return r; - } + if (context->node) { + if (arg_size != UINT64_MAX) { + r = resize_backing_fd( + context->node, + &context->backing_fd, + node_is_our_loop ? arg_image : NULL, + node_is_our_loop ? loop_device : NULL, + context->sector_size); + if (r < 0) + return r; + } - r = context_load_partition_table(context); - if (r == -EHWPOISON) - return 77; /* Special return value which means "Not GPT, so not doing anything". This isn't - * really an error when called at boot. */ + r = context_load_partition_table(context); + if (r == -EHWPOISON) + return 77; /* Special return value which means "Not GPT, so not doing anything". This isn't + * really an error when called at boot. 
*/ + } else + r = context_load_fallback_metrics(context); if (r < 0) return r; context->from_scratch = r > 0; /* Starting from scratch */ @@ -10102,8 +10566,13 @@ static int run(int argc, char *argv[]) { if (r < 0) return r; + if (arg_node_none) { + (void) determine_auto_size(context, LOG_INFO, /* ignore_allocated= */ false, /* ret= */ NULL); + return 0; + } + if (arg_size_auto) { - r = determine_auto_size(context); + r = determine_auto_size(context, LOG_INFO, /* ignore_allocated= */ false, &arg_size); if (r < 0) return r; @@ -10125,53 +10594,15 @@ static int run(int argc, char *argv[]) { return r; } - /* First try to fit new partitions in, dropping by priority until it fits */ - for (;;) { - uint64_t largest_free_area; - - if (context_allocate_partitions(context, &largest_free_area)) - break; /* Success! */ - - if (context_unmerge_and_allocate_partitions(context)) - break; /* We had to un-suppress a supplement or few, but still success! */ - - if (context_drop_or_foreignize_one_priority(context)) - continue; /* Still no luck. Let's drop a priority and try again. */ - - /* No more priorities left to drop. This configuration just doesn't fit on this disk... */ - r = log_error_errno(SYNTHETIC_ERRNO(ENOSPC), - "Can't fit requested partitions into available free space (%s), refusing.", - FORMAT_BYTES(largest_free_area)); - determine_auto_size(context); + r = context_ponder(context); + if (r == -ENOSPC) { + /* When we hit space issues, tell the user the minimal size. 
*/ + (void) determine_auto_size(context, LOG_INFO, /* ignore_allocated= */ false, /* ret= */ NULL); return r; } - - LIST_FOREACH(partitions, p, context->partitions) { - if (!p->supplement_for) - continue; - - if (PARTITION_SUPPRESSED(p)) { - assert(!p->allocated_to_area); - p->dropped = true; - - log_debug("Partition %s can be merged into %s, suppressing supplement.", - p->definition_path, p->supplement_for->definition_path); - } else if (PARTITION_EXISTS(p)) - log_info("Partition %s already exists on disk, using supplement verbatim.", - p->definition_path); - else - log_info("Couldn't allocate partitions with %s merged into %s, using supplement verbatim.", - p->definition_path, p->supplement_for->definition_path); - } - - /* Now assign free space according to the weight logic */ - r = context_grow_partitions(context); if (r < 0) return r; - /* Now calculate where each new partition gets placed */ - context_place_partitions(context); - (void) context_dump(context, /*late=*/ false); r = context_write_partition_table(context); diff --git a/src/shared/blockdev-list.c b/src/shared/blockdev-list.c index 57f80bb111..6897ea6bca 100644 --- a/src/shared/blockdev-list.c +++ b/src/shared/blockdev-list.c @@ -8,6 +8,8 @@ #include "blockdev-util.h" #include "device-private.h" #include "device-util.h" +#include "errno-util.h" +#include "string-util.h" #include "strv.h" #include "terminal-util.h" @@ -16,6 +18,9 @@ void block_device_done(BlockDevice *d) { d->node = mfree(d->node); d->symlinks = strv_free(d->symlinks); + d->model = mfree(d->model); + d->vendor = mfree(d->vendor); + d->subsystem = mfree(d->subsystem); } void block_device_array_free(BlockDevice *d, size_t n_devices) { @@ -26,6 +31,61 @@ void block_device_array_free(BlockDevice *d, size_t n_devices) { free(d); } +static int blockdev_get_prop(sd_device *d, const char *prop1, const char *prop2, char **ret_value) { + int r, ret = 0; + + assert(d); + assert(prop1); + assert(ret_value); + + FOREACH_STRING(prop, prop1, 
prop2) { + const char *m = NULL; + r = sd_device_get_property_value(d, prop, &m); + if (r < 0 && r != -ENOENT) + RET_GATHER(ret, log_device_debug_errno(d, r, "Failed to acquire '%s' from device, ignoring: %m", prop)); + else if (!isempty(m)) + return strdup_to(ret_value, m); + } + + return ret < 0 ? ret : -ENOENT; +} + +static int blockdev_get_subsystem(sd_device *d, char **ret_subsystem) { + int r; + + assert(d); + assert(ret_subsystem); + + /* We prefer the explicitly set block device subsystem property, because if it is set it's generally + * the most useful. If it's not set we'll look for the subsystem of the first parent device that + * isn't of subsystem 'block'. The former covers 'virtual' block devices such as loopback, device + * mapper, zram, while the latter covers physical block devices such as USB or NVME. */ + + r = blockdev_get_prop(d, "ID_BLOCK_SUBSYSTEM", /* prop2= */ NULL, ret_subsystem); + if (r >= 0) + return r; + + int ret = r != -ENOENT ? r : 0; + sd_device *q = d; + for (;;) { + r = sd_device_get_parent(q, &q); + if (r < 0) { + if (r != -ENOENT) + RET_GATHER(ret, log_device_debug_errno(q, r, "Failed to get parent device, ignoring: %m")); + break; + } + + const char *s = NULL; + r = sd_device_get_subsystem(q, &s); + if (r < 0) + RET_GATHER(ret, log_device_debug_errno(q, r, "Failed to get subsystem of device, ignoring: %m")); + else if (!isempty(s) && !streq(s, "block")) + return strdup_to(ret_subsystem, s); + } + + return ret < 0 ? 
ret : -ENOENT; +} + int blockdev_list(BlockDevListFlags flags, BlockDevice **ret_devices, size_t *ret_n_devices) { _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL; int r; @@ -65,7 +125,7 @@ int blockdev_list(BlockDevListFlags flags, BlockDevice **ret_devices, size_t *re r = sd_device_get_devname(dev, &node); if (r < 0) { - log_warning_errno(r, "Failed to get device node of discovered block device, ignoring: %m"); + log_device_warning_errno(dev, r, "Failed to get device node of discovered block device, ignoring: %m"); continue; } @@ -74,7 +134,7 @@ int blockdev_list(BlockDevListFlags flags, BlockDevice **ret_devices, size_t *re r = sd_device_get_devnum(dev, &devno); if (r < 0) { - log_warning_errno(r, "Failed to get major/minor of discovered block device, ignoring: %m"); + log_device_warning_errno(dev, r, "Failed to get major/minor of discovered block device, ignoring: %m"); continue; } @@ -85,7 +145,7 @@ int blockdev_list(BlockDevListFlags flags, BlockDevice **ret_devices, size_t *re if (FLAGS_SET(flags, BLOCKDEV_LIST_IGNORE_ZRAM)) { r = device_sysname_startswith(dev, "zram"); if (r < 0) { - log_warning_errno(r, "Failed to check device name of discovered block device '%s', ignoring: %m", node); + log_device_warning_errno(dev, r, "Failed to check device name of discovered block device '%s', ignoring: %m", node); continue; } if (r > 0) @@ -95,27 +155,26 @@ int blockdev_list(BlockDevListFlags flags, BlockDevice **ret_devices, size_t *re if (FLAGS_SET(flags, BLOCKDEV_LIST_REQUIRE_PARTITION_SCANNING)) { r = blockdev_partscan_enabled(dev); if (r < 0) { - log_warning_errno(r, "Unable to determine whether '%s' supports partition scanning, skipping device: %m", node); + log_device_warning_errno(dev, r, "Unable to determine whether '%s' supports partition scanning, skipping device: %m", node); continue; } if (r == 0) { - log_debug("Device '%s' does not support partition scanning, skipping.", node); + log_device_debug(dev, "Device '%s' does not support 
partition scanning, skipping.", node); continue; } } uint64_t size = UINT64_MAX; if (FLAGS_SET(flags, BLOCKDEV_LIST_IGNORE_EMPTY) || ret_devices) { - r = device_get_sysattr_u64(dev, "size", &size); if (r < 0) - log_debug_errno(r, "Failed to acquire size of device '%s', ignoring: %m", node); + log_device_debug_errno(dev, r, "Failed to acquire size of device '%s', ignoring: %m", node); else /* the 'size' sysattr is always in multiples of 512, even on 4K sector block devices! */ assert_se(MUL_ASSIGN_SAFE(&size, 512)); /* Overflow check for coverity */ if (size == 0 && FLAGS_SET(flags, BLOCKDEV_LIST_IGNORE_EMPTY)) { - log_debug("Device '%s' has a zero size, assuming drive without a medium, skipping.", node); + log_device_debug(dev, "Device '%s' has a zero size, assuming drive without a medium, skipping.", node); continue; } } @@ -129,11 +188,18 @@ int blockdev_list(BlockDevListFlags flags, BlockDevice **ret_devices, size_t *re strv_sort(list); } + _cleanup_free_ char *model = NULL, *vendor = NULL, *subsystem = NULL; + if (FLAGS_SET(flags, BLOCKDEV_LIST_METADATA)) { + (void) blockdev_get_prop(dev, "ID_MODEL_FROM_DATABASE", "ID_MODEL", &model); + (void) blockdev_get_prop(dev, "ID_VENDOR_FROM_DATABASE", "ID_VENDOR", &vendor); + (void) blockdev_get_subsystem(dev, &subsystem); + } + if (ret_devices) { uint64_t diskseq = UINT64_MAX; r = sd_device_get_diskseq(dev, &diskseq); if (r < 0) - log_debug_errno(r, "Failed to acquire diskseq of device '%s', ignoring: %m", node); + log_device_debug_errno(dev, r, "Failed to acquire diskseq of device '%s', ignoring: %m", node); if (!GREEDY_REALLOC(l, n+1)) return log_oom(); @@ -147,6 +213,9 @@ int blockdev_list(BlockDevListFlags flags, BlockDevice **ret_devices, size_t *re .symlinks = TAKE_PTR(list), .diskseq = diskseq, .size = size, + .model = TAKE_PTR(model), + .vendor = TAKE_PTR(vendor), + .subsystem = TAKE_PTR(subsystem), }; } else { diff --git a/src/shared/blockdev-list.h b/src/shared/blockdev-list.h index f1f1bc9949..845f336be5 
100644 --- a/src/shared/blockdev-list.h +++ b/src/shared/blockdev-list.h @@ -10,11 +10,15 @@ typedef enum BlockDevListFlags { BLOCKDEV_LIST_REQUIRE_LUKS = 1 << 3, /* Only consider block devices with LUKS superblocks */ BLOCKDEV_LIST_IGNORE_ROOT = 1 << 4, /* Ignore the block device we are currently booted from */ BLOCKDEV_LIST_IGNORE_EMPTY = 1 << 5, /* Ignore disks of zero size (usually drives without a medium) */ + BLOCKDEV_LIST_METADATA = 1 << 6, /* Fill in model, vendor, subsystem */ } BlockDevListFlags; typedef struct BlockDevice { char *node; char **symlinks; + char *model; + char *vendor; + char *subsystem; uint64_t diskseq; uint64_t size; /* in bytes */ } BlockDevice; diff --git a/src/shared/varlink-io.systemd.Repart.c b/src/shared/varlink-io.systemd.Repart.c index ba02b7da53..53f5563d82 100644 --- a/src/shared/varlink-io.systemd.Repart.c +++ b/src/shared/varlink-io.systemd.Repart.c @@ -4,27 +4,119 @@ #include "varlink-io.systemd.Repart.h" +static SD_VARLINK_DEFINE_ENUM_TYPE( + ProgressPhase, + SD_VARLINK_DEFINE_ENUM_VALUE(loading_definitions), + SD_VARLINK_DEFINE_ENUM_VALUE(loading_table), + SD_VARLINK_DEFINE_ENUM_VALUE(opening_copy_block_sources), + SD_VARLINK_DEFINE_ENUM_VALUE(acquiring_partition_labels), + SD_VARLINK_DEFINE_ENUM_VALUE(minimizing), + SD_VARLINK_DEFINE_ENUM_VALUE(placing), + SD_VARLINK_DEFINE_ENUM_VALUE(wiping_disk), + SD_VARLINK_DEFINE_ENUM_VALUE(wiping_partition), + SD_VARLINK_DEFINE_ENUM_VALUE(copying_partition), + SD_VARLINK_DEFINE_ENUM_VALUE(formatting_partition), + SD_VARLINK_DEFINE_ENUM_VALUE(adjusting_partition), + SD_VARLINK_DEFINE_ENUM_VALUE(writing_table), + SD_VARLINK_DEFINE_ENUM_VALUE(rereading_table)); + +static SD_VARLINK_DEFINE_ENUM_TYPE( + EmptyMode, + SD_VARLINK_FIELD_COMMENT("Refuse to operate on disks without an existing partition table"), + SD_VARLINK_DEFINE_ENUM_VALUE(refuse), + SD_VARLINK_FIELD_COMMENT("Create a new partition table if one doesn't already exist on disk"), + SD_VARLINK_DEFINE_ENUM_VALUE(allow), + 
SD_VARLINK_FIELD_COMMENT("Refuse to operate on disks with an existing partition table, and create a new table if none exists"), + SD_VARLINK_DEFINE_ENUM_VALUE(require), + SD_VARLINK_FIELD_COMMENT("Always create a new partition table, potentially overwriting an existing table"), + SD_VARLINK_DEFINE_ENUM_VALUE(force)); + +static SD_VARLINK_DEFINE_METHOD_FULL( + Run, + SD_VARLINK_SUPPORTS_MORE, + SD_VARLINK_FIELD_COMMENT("Full path to the block device node to operate on. If omitted, dryRun must be true, in which case the minimal disk size is determined."), + SD_VARLINK_DEFINE_INPUT(node, SD_VARLINK_STRING, SD_VARLINK_NULLABLE), + SD_VARLINK_FIELD_COMMENT("Decides whether to install the OS in addition to what is already on it, or if it shall be erased."), + SD_VARLINK_DEFINE_INPUT_BY_TYPE(empty, EmptyMode, 0), + SD_VARLINK_FIELD_COMMENT("If true this will ponder if the installation would fit, but does not actually write anything to disk. Must be set to false to actually make changes."), + SD_VARLINK_DEFINE_INPUT(dryRun, SD_VARLINK_BOOL, 0), + SD_VARLINK_FIELD_COMMENT("The seed value to derive partition and file system UUIDs from"), + SD_VARLINK_DEFINE_INPUT(seed, SD_VARLINK_STRING, SD_VARLINK_NULLABLE), + SD_VARLINK_FIELD_COMMENT("Path to directory containing definition files."), + SD_VARLINK_DEFINE_INPUT(definitions, SD_VARLINK_STRING, SD_VARLINK_ARRAY), + SD_VARLINK_FIELD_COMMENT("If true, automatically defer creation of all partitions whose label is \"empty\"."), + SD_VARLINK_DEFINE_INPUT(deferPartitionsEmpty, SD_VARLINK_BOOL, SD_VARLINK_NULLABLE), + SD_VARLINK_FIELD_COMMENT("If true, automatically defer creation of all partitions which are marked for factory reset."), + SD_VARLINK_DEFINE_INPUT(deferPartitionsFactoryReset, SD_VARLINK_BOOL, SD_VARLINK_NULLABLE), + SD_VARLINK_FIELD_COMMENT("In dry-run mode returns the minimal disk size required."), + SD_VARLINK_DEFINE_OUTPUT(minimalSizeBytes, SD_VARLINK_INT, SD_VARLINK_NULLABLE), + SD_VARLINK_FIELD_COMMENT("In dry-run 
mode returns the size of the selected block device."), + SD_VARLINK_DEFINE_OUTPUT(currentSizeBytes, SD_VARLINK_INT, SD_VARLINK_NULLABLE), + SD_VARLINK_FIELD_COMMENT("If used with the 'more' flag, a phase identifier is sent in progress updates."), + SD_VARLINK_DEFINE_OUTPUT_BY_TYPE(phase, ProgressPhase, SD_VARLINK_NULLABLE), + SD_VARLINK_FIELD_COMMENT("If used with the 'more' flag, an object identifier string is sent in progress updates."), + SD_VARLINK_DEFINE_OUTPUT(object, SD_VARLINK_STRING, SD_VARLINK_NULLABLE), + SD_VARLINK_FIELD_COMMENT("If used with the 'more' flag, a progress percentage (specific to the work done for the specified phase+object) is sent in progress updates."), + SD_VARLINK_DEFINE_OUTPUT(progress, SD_VARLINK_INT, SD_VARLINK_NULLABLE)); + static SD_VARLINK_DEFINE_METHOD( ListCandidateDevices, - SD_VARLINK_FIELD_COMMENT("The device node path of the block device."), - SD_VARLINK_DEFINE_OUTPUT(node, SD_VARLINK_STRING, 0), SD_VARLINK_FIELD_COMMENT("Control whether to include the root disk of the currently booted OS in the list. Defaults to false, i.e. the root disk is included."), SD_VARLINK_DEFINE_INPUT(ignoreRoot, SD_VARLINK_BOOL, SD_VARLINK_NULLABLE), SD_VARLINK_FIELD_COMMENT("Control whether to include block devices with zero size in the list, i.e. typically block devices without any inserted medium. Defaults to false, i.e. 
empty block devices are included."), SD_VARLINK_DEFINE_INPUT(ignoreEmpty, SD_VARLINK_BOOL, SD_VARLINK_NULLABLE), + SD_VARLINK_FIELD_COMMENT("The device node path of the block device."), + SD_VARLINK_DEFINE_OUTPUT(node, SD_VARLINK_STRING, 0), SD_VARLINK_FIELD_COMMENT("List of symlinks pointing to the device node, if any."), SD_VARLINK_DEFINE_OUTPUT(symlinks, SD_VARLINK_STRING, SD_VARLINK_ARRAY|SD_VARLINK_NULLABLE), SD_VARLINK_FIELD_COMMENT("The Linux kernel disk sequence number identifying the medium."), SD_VARLINK_DEFINE_OUTPUT(diskseq, SD_VARLINK_INT, SD_VARLINK_NULLABLE), SD_VARLINK_FIELD_COMMENT("The size of the block device in bytes."), - SD_VARLINK_DEFINE_OUTPUT(sizeBytes, SD_VARLINK_INT, SD_VARLINK_NULLABLE)); + SD_VARLINK_DEFINE_OUTPUT(sizeBytes, SD_VARLINK_INT, SD_VARLINK_NULLABLE), + SD_VARLINK_FIELD_COMMENT("The device vendor string if known"), + SD_VARLINK_DEFINE_OUTPUT(vendor, SD_VARLINK_STRING, SD_VARLINK_NULLABLE), + SD_VARLINK_FIELD_COMMENT("The device model string if known"), + SD_VARLINK_DEFINE_OUTPUT(model, SD_VARLINK_STRING, SD_VARLINK_NULLABLE), + SD_VARLINK_FIELD_COMMENT("The subsystem the block device belongs to if known"), + SD_VARLINK_DEFINE_OUTPUT(subsystem, SD_VARLINK_STRING, SD_VARLINK_NULLABLE)); + static SD_VARLINK_DEFINE_ERROR(NoCandidateDevices); +static SD_VARLINK_DEFINE_ERROR(ConflictingDiskLabelPresent); +static SD_VARLINK_DEFINE_ERROR( + InsufficientFreeSpace, + SD_VARLINK_FIELD_COMMENT("Minimal size of the disk required for the installation."), + SD_VARLINK_DEFINE_FIELD(minimalSizeBytes, SD_VARLINK_INT, SD_VARLINK_NULLABLE), + SD_VARLINK_FIELD_COMMENT("Additional free space needed on the selected disk."), + SD_VARLINK_DEFINE_FIELD(needFreeBytes, SD_VARLINK_INT, SD_VARLINK_NULLABLE), + SD_VARLINK_FIELD_COMMENT("Size of the selected block device."), + SD_VARLINK_DEFINE_FIELD(currentSizeBytes, SD_VARLINK_INT, SD_VARLINK_NULLABLE)); +static SD_VARLINK_DEFINE_ERROR( + DiskTooSmall, + SD_VARLINK_FIELD_COMMENT("Minimal size of the disk 
required for the installation."), + SD_VARLINK_DEFINE_FIELD(minimalSizeBytes, SD_VARLINK_INT, SD_VARLINK_NULLABLE), + SD_VARLINK_FIELD_COMMENT("Actual size of the selected block device."), + SD_VARLINK_DEFINE_FIELD(currentSizeBytes, SD_VARLINK_INT, SD_VARLINK_NULLABLE)); SD_VARLINK_DEFINE_INTERFACE( io_systemd_Repart, "io.systemd.Repart", SD_VARLINK_INTERFACE_COMMENT("API for declaratively re-partitioning disks using systemd-repart."), + + SD_VARLINK_SYMBOL_COMMENT("Behaviors for disks that are completely empty (i.e. don't have a partition table yet)"), + &vl_type_EmptyMode, + SD_VARLINK_SYMBOL_COMMENT("Progress phase identifiers. Note that we might add more phases here, and thus identifiers. Frontends can choose to display the phase to the user in some human readable form, or not do that, but if they do it and they receive a notification for a so far unknown phase, they should just ignore it."), + &vl_type_ProgressPhase, + + SD_VARLINK_SYMBOL_COMMENT("Invoke the actual repartitioning operation, either in dry-run mode or for real. If invoked with 'more' enabled will report progress, otherwise will just report completion."), + &vl_method_Run, + SD_VARLINK_SYMBOL_COMMENT("An incompatible disk label present, and not told to erase it."), + &vl_error_ConflictingDiskLabelPresent, + SD_VARLINK_SYMBOL_COMMENT("The target disk has insufficient free space to fit all requested partitions. (But the disk would fit, if emptied.)"), + &vl_error_InsufficientFreeSpace, + SD_VARLINK_SYMBOL_COMMENT("The target disk is too small to fit the installation. (Regardless if emptied or not.)"), + &vl_error_DiskTooSmall, + SD_VARLINK_SYMBOL_COMMENT("Return a list of candidate block devices, i.e. 
that support partition scanning and other requirements for successful operation."), &vl_method_ListCandidateDevices, SD_VARLINK_SYMBOL_COMMENT("Not a single candidate block device could be found."), diff --git a/test/units/TEST-58-REPART.sh b/test/units/TEST-58-REPART.sh index 014c00b3b6..b752af3f4d 100755 --- a/test/units/TEST-58-REPART.sh +++ b/test/units/TEST-58-REPART.sh @@ -141,6 +141,12 @@ SizeMaxBytes=64M PaddingMinBytes=92M EOF + systemd-repart --definitions="$defs" \ + --dry-run=yes \ + --seed="$seed" \ + --include-partitions=home,swap \ + "-" + systemd-repart --offline="$OFFLINE" \ --definitions="$defs" \ --dry-run=no \ @@ -1690,6 +1696,70 @@ testcase_varlink_list_devices() { varlinkctl call /run/systemd/io.systemd.Repart --graceful=io.systemd.Repart.NoCandidateDevices --collect io.systemd.Repart.ListCandidateDevices '{"ignoreEmpty":true,"ignoreRoot":true}' } +testcase_get_size() { + local defs + + defs="$(mktemp --directory "/tmp/test-repart.defs.XXXXXXXXXX")" + # shellcheck disable=SC2064 + trap "rm -rf '$defs'" RETURN + + tee "$defs/a.conf" <&1)" + assert_in "Automatically determined minimal disk image size as 39M." "$output" +} + +testcase_varlink_run() { + local defs + + defs="$(mktemp --directory "/tmp/test-repart.defs.XXXXXXXXXX")" + imgs="$(mktemp --directory "/var/tmp/test-repart.imgs.XXXXXXXXXX")" + # shellcheck disable=SC2064 + trap "rm -rf '$defs' '$imgs'" RETURN + + tee "$defs/a.conf" <