diff --git a/docs/libblockdev-docs.xml.in b/docs/libblockdev-docs.xml.in
index 5228feee..a9730fef 100644
--- a/docs/libblockdev-docs.xml.in
+++ b/docs/libblockdev-docs.xml.in
@@ -161,6 +161,31 @@ cat dist/libblockdev.spec.in | grep BuildRequires: | cut -f2 -d: | cut -f2 -d' '
is equivalent to `make test-all'.
+
+
+ To get a full coverage report for the C code, the LCOV tool can be used:
+
+ ./autogen.sh
+ ./configure CFLAGS="-O0 -g --coverage" LDFLAGS="--coverage" --prefix=/usr
+ make -j
+
+ to configure and build the project with coverage enabled, run
+
+ lcov --directory . --zerocounters
+ lcov --directory . --capture --initial --output-file coverage-base.info
+
+ to reset the coverage counters and capture the initial (baseline) data, run tests as usual with
+
+ sudo python3 tests/run_tests.py --include-tags all
+
+ and produce the HTML report using
+
+ lcov --directory . --capture --output-file coverage-test.info
+ lcov --remove coverage-test.info '/usr/*' --output-file coverage-filtered.info
+ genhtml coverage-filtered.info --output-directory coverage-report --title "libblockdev Coverage Report" --show-details --legend
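+
+ The resulting HTML report can then be opened from coverage-report/index.html.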
+
+
+
diff --git a/src/lib/plugin_apis/smart.api b/src/lib/plugin_apis/smart.api
index d6d72fd4..6e6d317e 100644
--- a/src/lib/plugin_apis/smart.api
+++ b/src/lib/plugin_apis/smart.api
@@ -42,7 +42,7 @@ typedef enum {
* Returns: whether the @tech-@mode combination is available -- supported by the
* plugin implementation and having all the runtime dependencies available
*/
-gboolean bd_smart_is_tech_avail (BDSmartTechMode tech, G_GNUC_UNUSED guint64 mode, GError **error);
+gboolean bd_smart_is_tech_avail (BDSmartTech tech, G_GNUC_UNUSED guint64 mode, GError **error);
/* BpG-skip */
/**
diff --git a/src/plugins/fs/generic.c b/src/plugins/fs/generic.c
index 9c11fc88..05bc5d2c 100644
--- a/src/plugins/fs/generic.c
+++ b/src/plugins/fs/generic.c
@@ -1305,7 +1305,7 @@ gboolean bd_fs_check (const gchar *device, const gchar *fstype, GError **error)
* Tech category: always available
*/
gboolean bd_fs_check_label (const gchar *fstype, const gchar *label, GError **error) {
- if (!fstype) {
+ if (!fstype || strlen (fstype) == 0) {
g_set_error (error, BD_FS_ERROR, BD_FS_ERROR_NOFS,
"Filesystem type must be specified to check label format");
return FALSE;
@@ -1352,7 +1352,7 @@ gboolean bd_fs_set_label (const gchar *device, const gchar *label, const gchar *
* Tech category: always available
*/
gboolean bd_fs_check_uuid (const gchar *fstype, const gchar *uuid, GError **error) {
- if (!fstype) {
+ if (!fstype || strlen (fstype) == 0) {
g_set_error (error, BD_FS_ERROR, BD_FS_ERROR_NOFS,
"Filesystem type must be specified to check UUID format");
return FALSE;
diff --git a/src/plugins/lvm/lvm-common.c b/src/plugins/lvm/lvm-common.c
index ffbb43a2..b5ec770b 100644
--- a/src/plugins/lvm/lvm-common.c
+++ b/src/plugins/lvm/lvm-common.c
@@ -850,3 +850,124 @@ gboolean bd_lvm_vgcfgbackup (const gchar *vg_name, const gchar *backup_file, con
gboolean bd_lvm_vgcfgrestore (const gchar *vg_name, const gchar *backup_file, const BDExtraArg **extra, GError **error) {
return _vgcfgbackup_restore ("vgcfgrestore", vg_name, backup_file, extra, error);
}
+
+/**
+ * bd_lvm_cache_stats:
+ * @vg_name: name of the VG containing the @cached_lv
+ * @cached_lv: cached LV to get stats for
+ * @error: (out) (optional): place to store error (if any)
+ *
+ * Returns: stats for the @cached_lv or %NULL in case of error
+ *
+ * Tech category: %BD_LVM_TECH_CACHE-%BD_LVM_TECH_MODE_QUERY
+ */
+BDLVMCacheStats* bd_lvm_cache_stats (const gchar *vg_name, const gchar *cached_lv, GError **error) {
+ struct dm_pool *pool = NULL;
+ struct dm_task *task = NULL;
+ struct dm_info info;
+ struct dm_status_cache *status = NULL;
+ gchar *map_name = NULL;
+ guint64 start = 0;
+ guint64 length = 0;
+ gchar *type = NULL;
+ gchar *params = NULL;
+ BDLVMCacheStats *ret = NULL;
+ BDLVMLVdata *lvdata = NULL;
+
+ lvdata = bd_lvm_lvinfo (vg_name, cached_lv, error);
+ if (!lvdata)
+ return NULL;
+
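+ /* memory pool used by libdevmapper to allocate the DM map name and status */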
+ pool = dm_pool_create ("bd-pool", 20);
+
+ if (g_strcmp0 (lvdata->segtype, "thin-pool") == 0)
+ map_name = dm_build_dm_name (pool, vg_name, lvdata->data_lv, NULL);
+ else
+ /* translate the VG+LV name into the DM map name */
+ map_name = dm_build_dm_name (pool, vg_name, cached_lv, NULL);
+
+ bd_lvm_lvdata_free (lvdata);
+
+ task = dm_task_create (DM_DEVICE_STATUS);
+ if (!task) {
+ g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_DM_ERROR,
+ "Failed to create DM task for the cache map '%s': ", map_name);
+ dm_pool_destroy (pool);
+ return NULL;
+ }
+
+ if (dm_task_set_name (task, map_name) == 0) {
+ g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_DM_ERROR,
+ "Failed to create DM task for the cache map '%s': ", map_name);
+ dm_task_destroy (task);
+ dm_pool_destroy (pool);
+ return NULL;
+ }
+
+ if (dm_task_run (task) == 0) {
+ g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_DM_ERROR,
+ "Failed to run the DM task for the cache map '%s': ", map_name);
+ dm_task_destroy (task);
+ dm_pool_destroy (pool);
+ return NULL;
+ }
+
+ if (dm_task_get_info (task, &info) == 0) {
+ g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_DM_ERROR,
+ "Failed to get task info for the cache map '%s': ", map_name);
+ dm_task_destroy (task);
+ dm_pool_destroy (pool);
+ return NULL;
+ }
+
+ if (!info.exists) {
+ g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_CACHE_NOCACHE,
+ "The cache map '%s' doesn't exist: ", map_name);
+ dm_task_destroy (task);
+ dm_pool_destroy (pool);
+ return NULL;
+ }
+
+ dm_get_next_target (task, NULL, &start, &length, &type, &params);
+
+ if (dm_get_status_cache (pool, params, &status) == 0) {
+ g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_CACHE_INVAL,
+ "Failed to get status of the cache map '%s': ", map_name);
+ dm_task_destroy (task);
+ dm_pool_destroy (pool);
+ return NULL;
+ }
+
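+ /* convert the block/sector counts from the DM status to sizes in bytes */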
+ ret = g_new0 (BDLVMCacheStats, 1);
+ ret->block_size = status->block_size * SECTOR_SIZE;
+ ret->cache_size = status->total_blocks * ret->block_size;
+ ret->cache_used = status->used_blocks * ret->block_size;
+
+ ret->md_block_size = status->metadata_block_size * SECTOR_SIZE;
+ ret->md_size = status->metadata_total_blocks * ret->md_block_size;
+ ret->md_used = status->metadata_used_blocks * ret->md_block_size;
+
+ ret->read_hits = status->read_hits;
+ ret->read_misses = status->read_misses;
+ ret->write_hits = status->write_hits;
+ ret->write_misses = status->write_misses;
+
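+ /* translate the DM cache feature flags into the libblockdev cache mode */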
+ if (status->feature_flags & DM_CACHE_FEATURE_WRITETHROUGH)
+ ret->mode = BD_LVM_CACHE_MODE_WRITETHROUGH;
+ else if (status->feature_flags & DM_CACHE_FEATURE_WRITEBACK)
+ ret->mode = BD_LVM_CACHE_MODE_WRITEBACK;
+ else {
+ g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_CACHE_INVAL,
+ "Failed to determine status of the cache from '%"G_GUINT64_FORMAT"': ",
+ status->feature_flags);
+ dm_task_destroy (task);
+ dm_pool_destroy (pool);
+ bd_lvm_cache_stats_free (ret);
+ return NULL;
+ }
+
+ dm_task_destroy (task);
+ dm_pool_destroy (pool);
+
+ return ret;
+}
diff --git a/src/plugins/lvm/lvm-dbus.c b/src/plugins/lvm/lvm-dbus.c
index 877a72a5..30bd5043 100644
--- a/src/plugins/lvm/lvm-dbus.c
+++ b/src/plugins/lvm/lvm-dbus.c
@@ -3697,133 +3697,6 @@ gchar* bd_lvm_cache_pool_name (const gchar *vg_name, const gchar *cached_lv, GEr
return pool_name;
}
-/**
- * bd_lvm_cache_stats:
- * @vg_name: name of the VG containing the @cached_lv
- * @cached_lv: cached LV to get stats for
- * @error: (out) (optional): place to store error (if any)
- *
- * Returns: stats for the @cached_lv or %NULL in case of error
- *
- * Tech category: %BD_LVM_TECH_CACHE-%BD_LVM_TECH_MODE_QUERY
- */
-BDLVMCacheStats* bd_lvm_cache_stats (const gchar *vg_name, const gchar *cached_lv, GError **error) {
- struct dm_pool *pool = NULL;
- struct dm_task *task = NULL;
- struct dm_info info;
- struct dm_status_cache *status = NULL;
- gchar *map_name = NULL;
- guint64 start = 0;
- guint64 length = 0;
- gchar *type = NULL;
- gchar *params = NULL;
- BDLVMCacheStats *ret = NULL;
- BDLVMLVdata *lvdata = NULL;
-
- if (geteuid () != 0) {
- g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_NOT_ROOT,
- "Not running as root, cannot query DM maps");
- return NULL;
- }
-
- lvdata = bd_lvm_lvinfo (vg_name, cached_lv, error);
- if (!lvdata)
- return NULL;
-
- pool = dm_pool_create ("bd-pool", 20);
-
- if (g_strcmp0 (lvdata->segtype, "thin-pool") == 0)
- map_name = dm_build_dm_name (pool, vg_name, lvdata->data_lv, NULL);
- else
- /* translate the VG+LV name into the DM map name */
- map_name = dm_build_dm_name (pool, vg_name, cached_lv, NULL);
-
- bd_lvm_lvdata_free (lvdata);
-
- task = dm_task_create (DM_DEVICE_STATUS);
- if (!task) {
- g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_DM_ERROR,
- "Failed to create DM task for the cache map '%s': ", map_name);
- dm_pool_destroy (pool);
- return NULL;
- }
-
- if (dm_task_set_name (task, map_name) == 0) {
- g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_DM_ERROR,
- "Failed to create DM task for the cache map '%s': ", map_name);
- dm_task_destroy (task);
- dm_pool_destroy (pool);
- return NULL;
- }
-
- if (dm_task_run (task) == 0) {
- g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_DM_ERROR,
- "Failed to run the DM task for the cache map '%s': ", map_name);
- dm_task_destroy (task);
- dm_pool_destroy (pool);
- return NULL;
- }
-
- if (dm_task_get_info (task, &info) == 0) {
- g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_DM_ERROR,
- "Failed to get task info for the cache map '%s': ", map_name);
- dm_task_destroy (task);
- dm_pool_destroy (pool);
- return NULL;
- }
-
- if (!info.exists) {
- g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_CACHE_NOCACHE,
- "The cache map '%s' doesn't exist: ", map_name);
- dm_task_destroy (task);
- dm_pool_destroy (pool);
- return NULL;
- }
-
- dm_get_next_target (task, NULL, &start, &length, &type, &params);
-
- if (dm_get_status_cache (pool, params, &status) == 0) {
- g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_CACHE_INVAL,
- "Failed to get status of the cache map '%s': ", map_name);
- dm_task_destroy (task);
- dm_pool_destroy (pool);
- return NULL;
- }
-
- ret = g_new0 (BDLVMCacheStats, 1);
- ret->block_size = status->block_size * SECTOR_SIZE;
- ret->cache_size = status->total_blocks * ret->block_size;
- ret->cache_used = status->used_blocks * ret->block_size;
-
- ret->md_block_size = status->metadata_block_size * SECTOR_SIZE;
- ret->md_size = status->metadata_total_blocks * ret->md_block_size;
- ret->md_used = status->metadata_used_blocks * ret->md_block_size;
-
- ret->read_hits = status->read_hits;
- ret->read_misses = status->read_misses;
- ret->write_hits = status->write_hits;
- ret->write_misses = status->write_misses;
-
- if (status->feature_flags & DM_CACHE_FEATURE_WRITETHROUGH)
- ret->mode = BD_LVM_CACHE_MODE_WRITETHROUGH;
- else if (status->feature_flags & DM_CACHE_FEATURE_WRITEBACK)
- ret->mode = BD_LVM_CACHE_MODE_WRITEBACK;
- else {
- g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_CACHE_INVAL,
- "Failed to determine status of the cache from '%"G_GUINT64_FORMAT"': ",
- status->feature_flags);
- dm_task_destroy (task);
- dm_pool_destroy (pool);
- bd_lvm_cache_stats_free (ret);
- return NULL;
- }
-
- dm_task_destroy (task);
- dm_pool_destroy (pool);
-
- return ret;
-}
-
/**
* bd_lvm_thpool_convert:
* @vg_name: name of the VG to create the new thin pool in
diff --git a/src/plugins/lvm/lvm.c b/src/plugins/lvm/lvm.c
index ce31f719..51d5354d 100644
--- a/src/plugins/lvm/lvm.c
+++ b/src/plugins/lvm/lvm.c
@@ -2735,133 +2735,6 @@ gchar* bd_lvm_cache_pool_name (const gchar *vg_name, const gchar *cached_lv, GEr
return pool_name;
}
-/**
- * bd_lvm_cache_stats:
- * @vg_name: name of the VG containing the @cached_lv
- * @cached_lv: cached LV to get stats for
- * @error: (out) (optional): place to store error (if any)
- *
- * Returns: stats for the @cached_lv or %NULL in case of error
- *
- * Tech category: %BD_LVM_TECH_CACHE-%BD_LVM_TECH_MODE_QUERY
- */
-BDLVMCacheStats* bd_lvm_cache_stats (const gchar *vg_name, const gchar *cached_lv, GError **error) {
- struct dm_pool *pool = NULL;
- struct dm_task *task = NULL;
- struct dm_info info;
- struct dm_status_cache *status = NULL;
- gchar *map_name = NULL;
- guint64 start = 0;
- guint64 length = 0;
- gchar *type = NULL;
- gchar *params = NULL;
- BDLVMCacheStats *ret = NULL;
- BDLVMLVdata *lvdata = NULL;
-
- if (geteuid () != 0) {
- g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_NOT_ROOT,
- "Not running as root, cannot query DM maps");
- return NULL;
- }
-
- lvdata = bd_lvm_lvinfo (vg_name, cached_lv, error);
- if (!lvdata)
- return NULL;
-
- pool = dm_pool_create ("bd-pool", 20);
-
- if (g_strcmp0 (lvdata->segtype, "thin-pool") == 0)
- map_name = dm_build_dm_name (pool, vg_name, lvdata->data_lv, NULL);
- else
- /* translate the VG+LV name into the DM map name */
- map_name = dm_build_dm_name (pool, vg_name, cached_lv, NULL);
-
- bd_lvm_lvdata_free (lvdata);
-
- task = dm_task_create (DM_DEVICE_STATUS);
- if (!task) {
- g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_DM_ERROR,
- "Failed to create DM task for the cache map '%s': ", map_name);
- dm_pool_destroy (pool);
- return NULL;
- }
-
- if (dm_task_set_name (task, map_name) == 0) {
- g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_DM_ERROR,
- "Failed to create DM task for the cache map '%s': ", map_name);
- dm_task_destroy (task);
- dm_pool_destroy (pool);
- return NULL;
- }
-
- if (dm_task_run (task) == 0) {
- g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_DM_ERROR,
- "Failed to run the DM task for the cache map '%s': ", map_name);
- dm_task_destroy (task);
- dm_pool_destroy (pool);
- return NULL;
- }
-
- if (dm_task_get_info (task, &info) == 0) {
- g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_DM_ERROR,
- "Failed to get task info for the cache map '%s': ", map_name);
- dm_task_destroy (task);
- dm_pool_destroy (pool);
- return NULL;
- }
-
- if (!info.exists) {
- g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_CACHE_NOCACHE,
- "The cache map '%s' doesn't exist: ", map_name);
- dm_task_destroy (task);
- dm_pool_destroy (pool);
- return NULL;
- }
-
- dm_get_next_target (task, NULL, &start, &length, &type, &params);
-
- if (dm_get_status_cache (pool, params, &status) == 0) {
- g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_CACHE_INVAL,
- "Failed to get status of the cache map '%s': ", map_name);
- dm_task_destroy (task);
- dm_pool_destroy (pool);
- return NULL;
- }
-
- ret = g_new0 (BDLVMCacheStats, 1);
- ret->block_size = status->block_size * SECTOR_SIZE;
- ret->cache_size = status->total_blocks * ret->block_size;
- ret->cache_used = status->used_blocks * ret->block_size;
-
- ret->md_block_size = status->metadata_block_size * SECTOR_SIZE;
- ret->md_size = status->metadata_total_blocks * ret->md_block_size;
- ret->md_used = status->metadata_used_blocks * ret->md_block_size;
-
- ret->read_hits = status->read_hits;
- ret->read_misses = status->read_misses;
- ret->write_hits = status->write_hits;
- ret->write_misses = status->write_misses;
-
- if (status->feature_flags & DM_CACHE_FEATURE_WRITETHROUGH)
- ret->mode = BD_LVM_CACHE_MODE_WRITETHROUGH;
- else if (status->feature_flags & DM_CACHE_FEATURE_WRITEBACK)
- ret->mode = BD_LVM_CACHE_MODE_WRITEBACK;
- else {
- g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_CACHE_INVAL,
- "Failed to determine status of the cache from '%"G_GUINT64_FORMAT"': ",
- status->feature_flags);
- dm_task_destroy (task);
- dm_pool_destroy (pool);
- bd_lvm_cache_stats_free (ret);
- return NULL;
- }
-
- dm_task_destroy (task);
- dm_pool_destroy (pool);
-
- return ret;
-}
-
/**
* bd_lvm_thpool_convert:
* @vg_name: name of the VG to create the new thin pool in
diff --git a/src/plugins/mpath.c b/src/plugins/mpath.c
index 013bd77d..dd231bf6 100644
--- a/src/plugins/mpath.c
+++ b/src/plugins/mpath.c
@@ -196,12 +196,6 @@ static gboolean map_is_multipath (const gchar *map_name, GError **error) {
gchar *params = NULL;
gboolean ret = FALSE;
- if (geteuid () != 0) {
- g_set_error (error, BD_MPATH_ERROR, BD_MPATH_ERROR_NOT_ROOT,
- "Not running as root, cannot query DM maps");
- return FALSE;
- }
-
task = dm_task_create (DM_DEVICE_STATUS);
if (!task) {
bd_utils_log_format (BD_UTILS_LOG_WARNING, "Failed to create DM task");
@@ -251,12 +245,6 @@ static gchar** get_map_deps (const gchar *map_name, guint64 *n_deps, GError **er
gchar *major_minor = NULL;
GError *l_error = NULL;
- if (geteuid () != 0) {
- g_set_error (error, BD_MPATH_ERROR, BD_MPATH_ERROR_NOT_ROOT,
- "Not running as root, cannot query DM maps");
- return NULL;
- }
-
task = dm_task_create (DM_DEVICE_DEPS);
if (!task) {
bd_utils_log_format (BD_UTILS_LOG_WARNING, "Failed to create DM task");
@@ -332,12 +320,6 @@ gboolean bd_mpath_is_mpath_member (const gchar *device, GError **error) {
gboolean ret = FALSE;
GError *l_error = NULL;
- if (geteuid () != 0) {
- g_set_error (error, BD_MPATH_ERROR, BD_MPATH_ERROR_NOT_ROOT,
- "Not running as root, cannot query DM maps");
- return FALSE;
- }
-
/* we check if the 'device' is a dependency of any multipath map */
/* get maps */
task_names = dm_task_create (DM_DEVICE_LIST);
@@ -433,14 +415,6 @@ gchar** bd_mpath_get_mpath_members (GError **error) {
progress_id = bd_utils_report_started ("Started getting mpath members");
- if (geteuid () != 0) {
- g_set_error (&l_error, BD_MPATH_ERROR, BD_MPATH_ERROR_NOT_ROOT,
- "Not running as root, cannot query DM maps");
- bd_utils_report_finished (progress_id, l_error->message);
- g_propagate_error (error, l_error);
- return NULL;
- }
-
/* we check if the 'device' is a dependency of any multipath map */
/* get maps */
task_names = dm_task_create (DM_DEVICE_LIST);
diff --git a/src/plugins/smart/libatasmart.c b/src/plugins/smart/libatasmart.c
index ea81d10b..c3cc8b74 100644
--- a/src/plugins/smart/libatasmart.c
+++ b/src/plugins/smart/libatasmart.c
@@ -31,6 +31,19 @@
#include "smart.h"
#include "smart-private.h"
+/**
+ * bd_smart_check_deps:
+ *
+ * Returns: whether the plugin's runtime dependencies are satisfied or not
+ *
+ * Function checking plugin's runtime dependencies.
+ *
+ * Deprecated: 3.5: use %bd_smart_is_tech_avail instead
+ */
+gboolean bd_smart_check_deps (void) {
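+ /* libatasmart is a library dependency, there are no runtime utilities to check */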
+ return TRUE;
+}
+
/**
* bd_smart_is_tech_avail:
* @tech: the queried tech
@@ -40,7 +53,7 @@
* Returns: whether the @tech-@mode combination is available -- supported by the
* plugin implementation and having all the runtime dependencies available
*/
-gboolean bd_smart_is_tech_avail (G_GNUC_UNUSED BDSmartTech tech, G_GNUC_UNUSED guint64 mode, GError **error) {
+gboolean bd_smart_is_tech_avail (BDSmartTech tech, G_GNUC_UNUSED guint64 mode, GError **error) {
switch (tech) {
case BD_SMART_TECH_ATA:
return TRUE;
diff --git a/src/plugins/smart/smartmontools.c b/src/plugins/smart/smartmontools.c
index 4366f6c4..a1bd75b2 100644
--- a/src/plugins/smart/smartmontools.c
+++ b/src/plugins/smart/smartmontools.c
@@ -51,28 +51,11 @@ static const UtilDep deps[DEPS_LAST] = {
* Returns: whether the plugin's runtime dependencies are satisfied or not
*
* Function checking plugin's runtime dependencies.
+ *
+ * Deprecated: 3.5: use %bd_smart_is_tech_avail instead
*/
gboolean bd_smart_check_deps (void) {
- GError *error = NULL;
- guint i = 0;
- gboolean status = FALSE;
- gboolean ret = TRUE;
-
- for (i = 0; i < DEPS_LAST; i++) {
- status = bd_utils_check_util_version (deps[i].name, deps[i].version,
- deps[i].ver_arg, deps[i].ver_regexp, &error);
- if (!status)
- bd_utils_log_format (BD_UTILS_LOG_WARNING, "%s", error->message);
- else
- g_atomic_int_or (&avail_deps, 1 << i);
- g_clear_error (&error);
- ret = ret && status;
- }
-
- if (!ret)
- bd_utils_log_format (BD_UTILS_LOG_WARNING, "Cannot load the SMART plugin");
-
- return ret;
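+ /* deprecated, simply delegate to the tech availability check */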
+ return bd_smart_is_tech_avail (0, 0, NULL);
}
/**
diff --git a/src/utils/logging.c b/src/utils/logging.c
index f44208aa..08f34601 100644
--- a/src/utils/logging.c
+++ b/src/utils/logging.c
@@ -110,6 +110,10 @@ void bd_utils_log_format (gint level, const gchar *format, ...) {
*
* Convenient function for logging to stdout. Can be used as #BDUtilsLogFunc.
*
+ * Note: This function uses GLib logging functions like %g_info or %g_warning
+ * for printing to stdout/stderr, so these messages can be redirected
+ * when a custom log handler is set using e.g. %g_log_set_handler.
+ *
*/
void bd_utils_log_stdout (gint level, const gchar *msg) {
if (level > log_level)
diff --git a/tests/_lvm_cases.py b/tests/_lvm_cases.py
index 6acecd6a..24db3e9b 100644
--- a/tests/_lvm_cases.py
+++ b/tests/_lvm_cases.py
@@ -757,6 +757,12 @@ def test_pvtags(self):
succ = BlockDev.lvm_pvcreate(self.loop_dev, 0, 0, None)
self.assertTrue(succ)
+ # tags can be added only to pvs in a vg
+ with self.assertRaises(GLib.GError):
+ BlockDev.lvm_add_pv_tags(self.loop_dev, ["a"])
+ with self.assertRaises(GLib.GError):
+ BlockDev.lvm_delete_pv_tags(self.loop_dev, ["a"])
+
# only pvs in a vg can be tagged so we need a vg here
succ = BlockDev.lvm_vgcreate("testVG", [self.loop_dev], 0, None)
self.assertTrue(succ)
@@ -1547,6 +1553,43 @@ def assert_raid1_structure(pv1, pv2):
assert_raid1_structure(self.loop_dev, self.loop_dev3)
+class LvmTestPVmove(LvmPVVGLVTestCase):
+ _sparse_size = 20*1024**2
+
+ @tag_test(TestTags.CORE)
+ def test_pvmove(self):
+ """Verify that it's possible to move extents from one PV to another"""
+
+ succ = BlockDev.lvm_pvcreate(self.loop_dev, 0, 0, None)
+ self.assertTrue(succ)
+
+ succ = BlockDev.lvm_pvcreate(self.loop_dev2, 0, 0, None)
+ self.assertTrue(succ)
+
+ succ = BlockDev.lvm_vgcreate("testVG", [self.loop_dev, self.loop_dev2], 0, None)
+ self.assertTrue(succ)
+
+ # create testLV on the first PV only
+ succ = BlockDev.lvm_lvcreate("testVG", "testLV", 10 * 1024**2, None, [self.loop_dev], None)
+ self.assertTrue(succ)
+
+ # check that the LV is allocated on the first PV only
+ info = BlockDev.lvm_lvinfo_tree("testVG", "testLV")
+ self.assertIsNotNone(info)
+ self.assertEqual(len(info.segs), 1)
+ self.assertEqual(info.segs[0].pvdev, self.loop_dev)
+
+ # pvmove from first PV to the second
+ succ = BlockDev.lvm_pvmove(self.loop_dev, self.loop_dev2, None)
+ self.assertTrue(succ)
+
+ # check that the LV was moved to the second PV
+ info = BlockDev.lvm_lvinfo_tree("testVG", "testLV")
+ self.assertIsNotNone(info)
+ self.assertEqual(len(info.segs), 1)
+ self.assertEqual(info.segs[0].pvdev, self.loop_dev2)
+
+
class LvmPVVGthpoolTestCase(LvmPVVGTestCase):
def _clean_up(self):
try:
@@ -1980,6 +2023,9 @@ def test_vdo_pool_create(self):
state_str = BlockDev.lvm_get_vdo_compression_state_str(vdo_info.compression_state)
self.assertEqual(state_str, "online")
+ index_str = BlockDev.lvm_get_vdo_index_state_str(vdo_info.index_state)
+ self.assertIn(index_str, ("online", "opening"))
+
@tag_test(TestTags.SLOW)
def test_vdo_pool_create_options(self):
# set index size to 300 MiB, disable compression and write policy to sync
diff --git a/tests/btrfs_test.py b/tests/btrfs_test.py
index 154b73c5..9fc3e6d1 100644
--- a/tests/btrfs_test.py
+++ b/tests/btrfs_test.py
@@ -379,7 +379,7 @@ def test_list_subvolumes_different_mount(self):
succ = BlockDev.btrfs_create_subvolume(TEST_MNT, "one/two", None)
self.assertTrue(succ)
- os.system("mkdir -p mkdir -p %s/one/two/one/two" % (TEST_MNT))
+ os.system("mkdir -p %s/one/two/one/two" % (TEST_MNT))
succ = BlockDev.btrfs_create_subvolume(TEST_MNT, "one/two/one/two/three", None)
self.assertTrue(succ)
diff --git a/tests/crypto_test.py b/tests/crypto_test.py
index 65e69c5b..0c9eba76 100644
--- a/tests/crypto_test.py
+++ b/tests/crypto_test.py
@@ -135,6 +135,28 @@ def setUp(self):
def test_plugin_version(self):
self.assertEqual(BlockDev.get_plugin_soname(BlockDev.Plugin.CRYPTO), "libbd_crypto.so.3")
+ @tag_test(TestTags.NOSTORAGE)
+ def test_tech_available(self):
+ """Verify that checking crypto functionality availability by technology works as expected"""
+ succ = BlockDev.crypto_is_tech_avail(BlockDev.CryptoTech.LUKS, BlockDev.CryptoTechMode.CREATE)
+ self.assertTrue(succ)
+
+ if HAVE_BITLK:
+ succ = BlockDev.crypto_is_tech_avail(BlockDev.CryptoTech.BITLK, BlockDev.CryptoTechMode.OPEN_CLOSE)
+ self.assertTrue(succ)
+
+ # create is not supported with bitlk
+ with self.assertRaises(GLib.GError):
+ BlockDev.crypto_is_tech_avail(BlockDev.CryptoTech.BITLK, BlockDev.CryptoTechMode.CREATE)
+
+ if HAVE_FVAULT2:
+ succ = BlockDev.crypto_is_tech_avail(BlockDev.CryptoTech.FVAULT2, BlockDev.CryptoTechMode.OPEN_CLOSE)
+ self.assertTrue(succ)
+
+ # create is not supported with fvault
+ with self.assertRaises(GLib.GError):
+ BlockDev.crypto_is_tech_avail(BlockDev.CryptoTech.FVAULT2, BlockDev.CryptoTechMode.CREATE)
+
@tag_test(TestTags.NOSTORAGE)
def test_generate_backup_passhprase(self):
"""Verify that backup passphrase generation works as expected"""
@@ -410,6 +432,9 @@ def test_luks_resize(self):
def test_luks2_resize(self):
"""Verify that resizing LUKS 2 device works"""
+ with self.assertRaisesRegex(GLib.GError, "Failed to initialize device"):
+ BlockDev.crypto_luks_resize("non-existing-device", 0)
+
# the simple case with password
self._luks2_format(self.loop_devs[0], PASSWD, self.keyfile, fast_pbkdf=True)
@@ -448,6 +473,9 @@ def _luks_open_close(self, create_fn):
ctx = BlockDev.CryptoKeyslotContext(passphrase=PASSWD)
BlockDev.crypto_luks_open(self.loop_devs[0], "libblockdev/TestLUKS", ctx, False)
+ with self.assertRaisesRegex(GLib.GError, r"Device name can be at most 127 characters long"):
+ ctx = BlockDev.CryptoKeyslotContext(passphrase=PASSWD)
+ BlockDev.crypto_luks_open(self.loop_devs[0], "a" * 128, ctx, False)
with self.assertRaisesRegex(GLib.GError, r"Incorrect passphrase"):
ctx = BlockDev.CryptoKeyslotContext(passphrase="wrong-passphrase")
@@ -558,12 +586,14 @@ def _remove_key(self, create_fn):
succ = BlockDev.crypto_luks_add_key(self.loop_devs[0], ctx, nctx)
self.assertTrue(succ)
- nctx2 = BlockDev.CryptoKeyslotContext(passphrase=PASSWD3)
+ nctx2 = BlockDev.CryptoKeyslotContext(keyfile=self.keyfile)
succ = BlockDev.crypto_luks_add_key(self.loop_devs[0], ctx, nctx2)
self.assertTrue(succ)
- nctx3 = BlockDev.CryptoKeyslotContext(keyfile=self.keyfile)
- succ = BlockDev.crypto_luks_add_key(self.loop_devs[0], ctx, nctx3)
+ # add key using the previously added keyfile
+ nctx3 = BlockDev.CryptoKeyslotContext(passphrase=PASSWD3)
+ succ = BlockDev.crypto_luks_add_key(self.loop_devs[0], nctx2, nctx3)
+ self.assertTrue(succ)
# remove key, wrong passphrase
with self.assertRaises(GLib.GError):
@@ -658,6 +688,22 @@ def _change_key(self, create_fn):
succ = BlockDev.crypto_luks_close("libblockdevTestLUKS")
self.assertTrue(succ)
+ # try with keyfile as the "old passphrase"
+ kctx = BlockDev.CryptoKeyslotContext(keyfile=self.keyfile)
+ succ = BlockDev.crypto_luks_change_key(self.loop_devs[0], kctx, ctx)
+ self.assertTrue(succ)
+
+ # keyfile should no longer work
+ with self.assertRaises(GLib.GError):
+ BlockDev.crypto_luks_remove_key(self.loop_devs[0], kctx)
+
+ # passphrase should work
+ succ = BlockDev.crypto_luks_open(self.loop_devs[0], "libblockdevTestLUKS", ctx)
+ self.assertTrue(succ)
+
+ succ = BlockDev.crypto_luks_close("libblockdevTestLUKS")
+ self.assertTrue(succ)
+
@tag_test(TestTags.SLOW)
def test_luks_change_key(self):
self._change_key(self._luks_format)
diff --git a/tests/fs_tests/generic_test.py b/tests/fs_tests/generic_test.py
index f5407c86..c08d03ca 100644
--- a/tests/fs_tests/generic_test.py
+++ b/tests/fs_tests/generic_test.py
@@ -313,19 +313,19 @@ def test_can_get_free_space(self):
def test_can_get_info(self):
"""Verify that tooling query works for getting info"""
- avail, util = BlockDev.fs_can_get_free_space("ext4")
+ avail, util = BlockDev.fs_can_get_info("ext4")
self.assertTrue(avail)
self.assertEqual(util, None)
old_path = os.environ.get("PATH", "")
os.environ["PATH"] = ""
- avail, util = BlockDev.fs_can_get_free_space("ext4")
+ avail, util = BlockDev.fs_can_get_info("ext4")
os.environ["PATH"] = old_path
self.assertFalse(avail)
self.assertEqual(util, "dumpe2fs")
with self.assertRaises(GLib.GError):
- BlockDev.fs_can_get_free_space("non-existing-fs")
+ BlockDev.fs_can_get_info("non-existing-fs")
def test_can_get_min_size(self):
"""Verify that tooling query works for getting min size"""
@@ -361,6 +361,25 @@ def test_can_get_min_size(self):
with self.assertRaises(GLib.GError):
BlockDev.fs_can_get_min_size("udf")
+ def test_can_set_uuid(self):
+ """Verify that tooling query works for setting"""
+ avail, util = BlockDev.fs_can_set_uuid("ext4")
+ self.assertTrue(avail)
+ self.assertEqual(util, None)
+
+ old_path = os.environ.get("PATH", "")
+ os.environ["PATH"] = ""
+ avail, util = BlockDev.fs_can_set_uuid("ext4")
+ os.environ["PATH"] = old_path
+ self.assertFalse(avail)
+ self.assertEqual(util, "tune2fs")
+
+ with self.assertRaises(GLib.GError):
+ BlockDev.fs_can_set_uuid("non-existing-fs")
+
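+ # setting UUID is not supported for f2fs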
+ with self.assertRaises(GLib.GError):
+ BlockDev.fs_can_set_uuid("f2fs")
+
class GenericMkfs(GenericTestCase):
@@ -757,22 +776,28 @@ def test_btrfs_generic_repair(self):
class GenericSetLabel(GenericTestCase):
- def _test_generic_set_label(self, mkfs_function, fstype):
+ def _test_generic_set_label(self, mkfs_function, fstype, test_label="new_label", expected_label="new_label", invalid_label=513*"a"):
# clean the device
succ = BlockDev.fs_clean(self.loop_devs[0])
succ = mkfs_function(self.loop_devs[0], None)
self.assertTrue(succ)
- succ = BlockDev.fs_check_label(fstype, "new_label")
+ succ = BlockDev.fs_check_label(fstype, test_label)
self.assertTrue(succ)
+ with self.assertRaises(GLib.GError):
+ BlockDev.fs_check_label(fstype, invalid_label)
+
# set label (expected to succeed)
- succ = BlockDev.fs_set_label(self.loop_devs[0], "new_label")
+ succ = BlockDev.fs_set_label(self.loop_devs[0], test_label)
self.assertTrue(succ)
+ fs_label = check_output(["blkid", "-ovalue", "-sLABEL", "-p", self.loop_devs[0]]).decode().strip()
+ self.assertEqual(fs_label, expected_label)
+
# set label (expected to succeed)
- succ = BlockDev.fs_set_label(self.loop_devs[0], "new_label", fstype)
+ succ = BlockDev.fs_set_label(self.loop_devs[0], test_label, fstype)
self.assertTrue(succ)
def test_ext4_generic_set_label(self):
@@ -821,6 +846,28 @@ def test_udf_generic_set_label(self):
self.skipTest("skipping udf: not available")
self._test_generic_set_label(mkfs_function=BlockDev.fs_udf_mkfs, fstype="udf")
+ def test_vfat_generic_set_label(self):
+ """Test generic set_label function with a vfat file system"""
+ if self._vfat_version < Version("4.2"):
+ self.skipTest("dosfstools >= 4.2 needed to set label")
+
+ def mkfs_vfat(device, options=None):
+ if self._vfat_version >= Version("4.2"):
+ return BlockDev.fs_vfat_mkfs(device, [BlockDev.ExtraArg.new("--mbr=n", "")])
+ else:
+ return BlockDev.fs_vfat_mkfs(device, options)
+
+ self._test_generic_set_label(mkfs_function=mkfs_vfat, fstype="vfat", expected_label="NEW_LABEL")
+
+ def test_fail_generic_check_label(self):
+ """ Test that generic check label fails correctly with unknown/unsupported filesystem """
+
+ with self.assertRaisesRegex(GLib.GError, "Checking label format for filesystem 'non-existing-fs' is not supported"):
+ BlockDev.fs_check_label("non-existing-fs", "label")
+
+ with self.assertRaisesRegex(GLib.GError, "Filesystem type must be specified to check label format"):
+ BlockDev.fs_check_label("", "label")
+
class GenericSetUUID(GenericTestCase):
def _test_generic_set_uuid(self, mkfs_function, fstype, test_uuid="4d7086c4-a4d3-432f-819e-73da03870df9", expected_uuid=None):
@@ -909,6 +956,15 @@ def test_udf_generic_set_uuid(self):
self.skipTest("skipping UDF: not available")
self._test_generic_set_uuid(mkfs_function=BlockDev.fs_udf_mkfs, fstype="udf", test_uuid="5fae9ade7938dfc8")
+ def test_fail_generic_check_uuid(self):
+ """ Test that generic check_uuid fails correctly with unknown/unsupported filesystem """
+
+ with self.assertRaisesRegex(GLib.GError, "Checking UUID format for filesystem 'non-existing-fs' is not supported"):
+ BlockDev.fs_check_uuid("non-existing-fs", "uuid")
+
+ with self.assertRaisesRegex(GLib.GError, "Filesystem type must be specified to check UUID format"):
+ BlockDev.fs_check_uuid("", "uuid")
+
class GenericResize(GenericTestCase):
log = ""
@@ -1003,25 +1059,32 @@ def mkfs_vfat(device, options=None):
self._test_generic_resize(mkfs_function=mkfs_vfat, size_delta=1024**2, fstype="vfat")
- def test_xfs_generic_resize(self):
- """Test generic resize function with an xfs file system"""
-
+ def _test_xfs_generic_resize(self, mount):
lv = self._setup_lvm(vgname="libbd_fs_tests", lvname="generic_test", lvsize="350M")
succ = BlockDev.fs_xfs_mkfs(lv, None)
self.assertTrue(succ)
- with mounted(lv, self.mount_dir):
+ if mount:
+ with mounted(lv, self.mount_dir):
+ fi = BlockDev.fs_xfs_get_info(lv)
+ else:
fi = BlockDev.fs_xfs_get_info(lv)
self.assertTrue(fi)
self.assertEqual(fi.block_size * fi.block_count, 350 * 1024**2)
# no change, nothing should happen
- with mounted(lv, self.mount_dir):
+ if mount:
+ with mounted(lv, self.mount_dir):
+ succ = BlockDev.fs_resize(lv, 0)
+ else:
succ = BlockDev.fs_resize(lv, 0)
self.assertTrue(succ)
- with mounted(lv, self.mount_dir):
+ if mount:
+ with mounted(lv, self.mount_dir):
+ fi = BlockDev.fs_xfs_get_info(lv)
+ else:
fi = BlockDev.fs_xfs_get_info(lv)
self.assertTrue(fi)
self.assertEqual(fi.block_size * fi.block_count, 350 * 1024**2)
@@ -1035,29 +1098,47 @@ def test_xfs_generic_resize(self):
self._lvresize("libbd_fs_tests", "generic_test", "380M")
# should grow to 375 MiB (full size of the LV)
- with mounted(lv, self.mount_dir):
+ if mount:
+ with mounted(lv, self.mount_dir):
+ succ = BlockDev.fs_resize(lv, 0)
+ else:
succ = BlockDev.fs_resize(lv, 0)
self.assertTrue(succ)
- with mounted(lv, self.mount_dir):
+ if mount:
+ with mounted(lv, self.mount_dir):
+ fi = BlockDev.fs_xfs_get_info(lv)
+ else:
fi = BlockDev.fs_xfs_get_info(lv)
self.assertTrue(fi)
self.assertEqual(fi.block_size * fi.block_count, 380 * 1024**2)
self._lvresize("libbd_fs_tests", "generic_test", "400M")
# grow just to 390 MiB
- with mounted(lv, self.mount_dir):
+ if mount:
+ with mounted(lv, self.mount_dir):
+ succ = BlockDev.fs_resize(lv, 390 * 1024**2)
+ else:
succ = BlockDev.fs_resize(lv, 390 * 1024**2)
self.assertTrue(succ)
- with mounted(lv, self.mount_dir):
+ if mount:
+ with mounted(lv, self.mount_dir):
+ fi = BlockDev.fs_xfs_get_info(lv)
+ else:
fi = BlockDev.fs_xfs_get_info(lv)
self.assertTrue(fi)
self.assertEqual(fi.block_size * fi.block_count, 390 * 1024**2)
# should grow to 400 MiB (full size of the LV)
- with mounted(lv, self.mount_dir):
+ if mount:
+ with mounted(lv, self.mount_dir):
+ succ = BlockDev.fs_resize(lv, 0, "xfs")
+ else:
succ = BlockDev.fs_resize(lv, 0, "xfs")
self.assertTrue(succ)
- with mounted(lv, self.mount_dir):
+ if mount:
+ with mounted(lv, self.mount_dir):
+ fi = BlockDev.fs_xfs_get_info(lv)
+ else:
fi = BlockDev.fs_xfs_get_info(lv)
self.assertTrue(fi)
self.assertEqual(fi.block_size * fi.block_count, 400 * 1024**2)
@@ -1066,7 +1147,10 @@ def test_xfs_generic_resize(self):
# unmounted resize (should get automounted)
succ = BlockDev.fs_resize(lv, 0, "xfs")
self.assertTrue(succ)
- with mounted(lv, self.mount_dir):
+ if mount:
+ with mounted(lv, self.mount_dir):
+ fi = BlockDev.fs_xfs_get_info(lv)
+ else:
fi = BlockDev.fs_xfs_get_info(lv)
self.assertTrue(fi)
self.assertEqual(fi.block_size * fi.block_count, 420 * 1024**2)
@@ -1082,7 +1166,10 @@ def test_xfs_generic_resize(self):
succ = BlockDev.fs_resize(lv, 0, "xfs")
self.assertTrue(succ)
- with mounted(lv, self.mount_dir):
+ if mount:
+ with mounted(lv, self.mount_dir):
+ fi = BlockDev.fs_xfs_get_info(lv)
+ else:
fi = BlockDev.fs_xfs_get_info(lv)
self.assertTrue(fi)
self.assertEqual(fi.block_size * fi.block_count, 450 * 1024**2)
@@ -1092,6 +1179,16 @@ def test_xfs_generic_resize(self):
BlockDev.utils_init_logging(None)
+ def test_xfs_generic_resize(self):
+ """Test generic resize function with an xfs file system with automatic mounting"""
+ # FS is not mounted by the test, libblockdev should mount it when needed
+ self._test_xfs_generic_resize(mount=False)
+
+ def test_xfs_generic_resize_mounted(self):
+ """Test generic resize function with an xfs file system with manual mounting"""
+ # FS is mounted by the test, libblockdev should use the existing mountpoint
+ self._test_xfs_generic_resize(mount=True)
+
def _can_resize_f2fs(self):
ret, out, _err = utils.run_command("resize.f2fs -V")
if ret != 0:
diff --git a/tests/library_test.py b/tests/library_test.py
index 86180fa3..c1296798 100644
--- a/tests/library_test.py
+++ b/tests/library_test.py
@@ -77,6 +77,30 @@ def test_require_plugins(self):
self.assertEqual(BlockDev.get_available_plugin_names(), ["swap"])
self.assertTrue(BlockDev.reinit(self.requested_plugins, True, None))
+ # test the same with try_reinit
+ ps = BlockDev.PluginSpec(name=BlockDev.Plugin.SWAP, so_name="")
+ self.assertTrue(BlockDev.try_reinit([ps], True, None))
+ self.assertEqual(BlockDev.get_available_plugin_names(), ["swap"])
+ self.assertTrue(BlockDev.try_reinit(self.requested_plugins, True, None))
+
+ @tag_test(TestTags.CORE)
+ def test_get_plugin_name(self):
+ """Verify that getting plugin name works"""
+ self.assertEqual(BlockDev.get_plugin_name(BlockDev.Plugin.BTRFS), "btrfs")
+ self.assertEqual(BlockDev.get_plugin_name(BlockDev.Plugin.CRYPTO), "crypto")
+ self.assertEqual(BlockDev.get_plugin_name(BlockDev.Plugin.DM), "dm")
+ self.assertEqual(BlockDev.get_plugin_name(BlockDev.Plugin.FS), "fs")
+ self.assertEqual(BlockDev.get_plugin_name(BlockDev.Plugin.LOOP), "loop")
+ self.assertEqual(BlockDev.get_plugin_name(BlockDev.Plugin.LVM), "lvm")
+ self.assertEqual(BlockDev.get_plugin_name(BlockDev.Plugin.MDRAID), "mdraid")
+ self.assertEqual(BlockDev.get_plugin_name(BlockDev.Plugin.MPATH), "mpath")
+ self.assertEqual(BlockDev.get_plugin_name(BlockDev.Plugin.NVDIMM), "nvdimm")
+ self.assertEqual(BlockDev.get_plugin_name(BlockDev.Plugin.NVME), "nvme")
+ self.assertEqual(BlockDev.get_plugin_name(BlockDev.Plugin.PART), "part")
+ self.assertEqual(BlockDev.get_plugin_name(BlockDev.Plugin.S390), "s390")
+ self.assertEqual(BlockDev.get_plugin_name(BlockDev.Plugin.SMART), "smart")
+ self.assertEqual(BlockDev.get_plugin_name(BlockDev.Plugin.SWAP), "swap")
+
@tag_test(TestTags.CORE)
def test_not_implemented(self):
"""Verify that unloaded/unimplemented functions report errors"""
@@ -127,6 +151,20 @@ def test_ensure_init(self):
self.assertTrue(BlockDev.ensure_init(self.requested_plugins, None))
self.assertGreaterEqual(len(BlockDev.get_available_plugin_names()), 6)
+ # check that init and try_init fail correctly when already initialized
+ succ = BlockDev.utils_init_logging(self.my_log_func)
+ self.assertTrue(succ)
+
+ succ = BlockDev.init()
+ self.assertFalse(succ)
+ self.assertIn("bd_init() called more than once", self.log)
+ self.log = ""
+
+ succ, _plugins = BlockDev.try_init()
+ self.assertFalse(succ)
+ self.assertIn("bd_try_init() called more than once", self.log)
+ self.log = ""
+
def test_non_en_init(self):
"""Verify that the library initializes with lang different from en_US"""
diff --git a/tests/loop_test.py b/tests/loop_test.py
index a6521c84..e59dba07 100644
--- a/tests/loop_test.py
+++ b/tests/loop_test.py
@@ -6,7 +6,9 @@
import gi
gi.require_version('BlockDev', '3.0')
+gi.require_version('GLib', '2.0')
from gi.repository import BlockDev
+from gi.repository import GLib
@required_plugins(("loop",))
@@ -43,12 +45,21 @@ class LoopPluginVersionCase(LoopTestCase):
def test_plugin_version(self):
self.assertEqual(BlockDev.get_plugin_soname(BlockDev.Plugin.LOOP), "libbd_loop.so.3")
+ @tag_test(TestTags.NOSTORAGE)
+ def test_tech_available(self):
+ """Verify that checking plugin availability works as expected"""
+ succ = BlockDev.loop_is_tech_avail(BlockDev.LoopTech.LOOP, 0)
+ self.assertTrue(succ)
+
class LoopTestSetupBasic(LoopTestCase):
@tag_test(TestTags.CORE)
def test_loop_setup_teardown_basic(self):
"""Verify that basic loop_setup and loop_teardown work as expected"""
+ with self.assertRaisesRegex(GLib.GError, "Failed to open the backing file"):
+ BlockDev.loop_setup("/non/existing", 10 * 1024**2)
+
succ, self.loop = BlockDev.loop_setup(self.dev_file)
self.assertTrue(succ)
self.assertTrue(self.loop)
@@ -221,6 +232,9 @@ def test_loop_get_set_autoclear(self):
self.assertIsNotNone(info)
self.assertFalse(info.autoclear)
+ with self.assertRaisesRegex(GLib.GError, "Failed to open device"):
+ BlockDev.loop_set_autoclear("/non/existing", True)
+
class LoopTestSetCapacity(LoopTestCase):
def test_loop_set_capacity(self):
@@ -240,3 +254,6 @@ def test_loop_set_capacity(self):
# now the size should be updated
self.assertEqual(self._get_loop_size(), self._loop_size * 2)
+
+ with self.assertRaisesRegex(GLib.GError, "Failed to open device"):
+ BlockDev.loop_set_capacity("/non/existing")
diff --git a/tests/lvm_dbus_tests.py b/tests/lvm_dbus_tests.py
index 89eddb14..6b637657 100644
--- a/tests/lvm_dbus_tests.py
+++ b/tests/lvm_dbus_tests.py
@@ -3,7 +3,7 @@
import _lvm_cases
-from utils import run_command, TestTags, tag_test, required_plugins
+from utils import run_command, TestTags, tag_test, required_plugins, fake_path
import gi
gi.require_version('GLib', '2.0')
@@ -69,6 +69,17 @@ def test_tech_available(self):
with self.assertRaisesRegex(GLib.GError, "Only 'query' supported for thin calculations"):
BlockDev.lvm_is_tech_avail(BlockDev.LVMTech.THIN_CALCS, BlockDev.LVMTechMode.CREATE)
+ with fake_path(all_but=("lvmconfig", "lvmdevices")):
+ self.assertTrue(BlockDev.reinit(self.requested_plugins, True, None))
+
+ # no lvmconfig tool available, should fail
+ with self.assertRaises(GLib.GError):
+ BlockDev.lvm_is_tech_avail(BlockDev.LVMTech.CONFIG, 0)
+
+ # no lvmdevices tool available, should fail
+ with self.assertRaises(GLib.GError):
+ BlockDev.lvm_is_tech_avail(BlockDev.LVMTech.DEVICES, 0)
+
class LvmVDOTest(_lvm_cases.LvmVDOTest, LvmDBusTestCase):
@classmethod
@@ -88,6 +99,13 @@ def setUpClass(cls):
LvmDBusTestCase.setUpClass()
+class LvmTestPVmove(_lvm_cases.LvmTestPVmove, LvmDBusTestCase):
+ @classmethod
+ def setUpClass(cls):
+ _lvm_cases.LvmTestPVmove.setUpClass()
+ LvmDBusTestCase.setUpClass()
+
+
class LvmTestVGs(_lvm_cases.LvmTestVGs, LvmDBusTestCase):
@classmethod
def setUpClass(cls):
diff --git a/tests/lvm_test.py b/tests/lvm_test.py
index 56aba958..bce1ffd5 100644
--- a/tests/lvm_test.py
+++ b/tests/lvm_test.py
@@ -57,6 +57,17 @@ def test_tech_available(self):
avail = BlockDev.lvm_is_tech_avail(BlockDev.LVMTech.BASIC, BlockDev.LVMTechMode.CREATE)
self.assertTrue(avail)
+ with fake_path(all_but=("lvmconfig", "lvmdevices")):
+ self.assertTrue(BlockDev.reinit(self.requested_plugins, True, None))
+
+ # no lvmconfig tool available, should fail
+ with self.assertRaises(GLib.GError):
+ BlockDev.lvm_is_tech_avail(BlockDev.LVMTech.CONFIG, 0)
+
+ # no lvmdevices tool available, should fail
+ with self.assertRaises(GLib.GError):
+ BlockDev.lvm_is_tech_avail(BlockDev.LVMTech.DEVICES, 0)
+
class LvmVDOTest(_lvm_cases.LvmVDOTest, LvmTestCase):
@classmethod
@@ -72,6 +83,13 @@ def setUpClass(cls):
LvmTestCase.setUpClass()
+class LvmTestPVmove(_lvm_cases.LvmTestPVmove, LvmTestCase):
+ @classmethod
+ def setUpClass(cls):
+ _lvm_cases.LvmTestPVmove.setUpClass()
+ LvmTestCase.setUpClass()
+
+
class LvmTestVGs(_lvm_cases.LvmTestVGs, LvmTestCase):
@classmethod
def setUpClass(cls):
diff --git a/tests/mpath_test.py b/tests/mpath_test.py
index 6c87f577..b9db34b4 100644
--- a/tests/mpath_test.py
+++ b/tests/mpath_test.py
@@ -3,7 +3,7 @@
import overrides_hack
import shutil
-from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, fake_utils, fake_path, get_version, TestTags, tag_test, required_plugins
+from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, fake_utils, fake_path, TestTags, tag_test, required_plugins, run_command
import gi
gi.require_version('GLib', '2.0')
@@ -48,6 +48,18 @@ def test_is_mpath_member(self):
# device and no error is reported
self.assertFalse(BlockDev.mpath_is_mpath_member("/dev/loop0"))
+ def test_flush_mpaths(self):
+ """ Verify that mpath_flush_mpaths can be called """
+ # we have no multipath devices, but flush should still work (and do nothing)
+
+ ret, _out, _err = run_command("modprobe dm-multipath")
+ if ret != 0:
+ self.skipTest("cannot load dm-multipath, skipping")
+
+ succ = BlockDev.mpath_flush_mpaths()
+ self.assertTrue(succ)
+
+
class MpathNoDevTestCase(MpathTest):
@tag_test(TestTags.NOSTORAGE)
def test_plugin_version(self):
diff --git a/tests/part_test.py b/tests/part_test.py
index f57da91c..2bea2e05 100644
--- a/tests/part_test.py
+++ b/tests/part_test.py
@@ -72,6 +72,15 @@ class PartNoDevTestCase(PartTest):
def test_plugin_version(self):
self.assertEqual(BlockDev.get_plugin_soname(BlockDev.Plugin.PART), "libbd_part.so.3")
+ @tag_test(TestTags.NOSTORAGE)
+ def test_tech_available(self):
+ """Verify that checking plugin availability works as expected"""
+ succ = BlockDev.part_is_tech_avail(BlockDev.PartTech.MBR, 0)
+ self.assertTrue(succ)
+
+ succ = BlockDev.part_is_tech_avail(BlockDev.PartTech.GPT, 0)
+ self.assertTrue(succ)
+
@tag_test(TestTags.NOSTORAGE)
def test_part_type_str(self):
types = {BlockDev.PartType.NORMAL: 'primary', BlockDev.PartType.LOGICAL: 'logical',
@@ -81,6 +90,13 @@ def test_part_type_str(self):
for key, value in types.items():
self.assertEqual(BlockDev.part_get_type_str(key), value)
+ @tag_test(TestTags.NOSTORAGE)
+ def test_part_table_type_str(self):
+ types = {BlockDev.PartTableType.MSDOS: 'dos', BlockDev.PartTableType.GPT: 'gpt'}
+
+ for key, value in types.items():
+ self.assertEqual(BlockDev.part_get_part_table_type_str(key), value)
+
class PartCreateTableCase(PartTestCase):
_num_devices = 2
diff --git a/tests/skip.yml b/tests/skip.yml
index a7d16f16..a38ce16b 100644
--- a/tests/skip.yml
+++ b/tests/skip.yml
@@ -47,7 +47,8 @@
- test: lvm_dbus_tests.LvmTestPartialLVs
skip_on:
- - reason: "LVM DBus doesn't support LV repair yet"
+ - distro: ["centos", "debian"]
+ reason: "LVM DBus doesn't support LV repair yet"
- test: nvdimm_test
skip_on:
diff --git a/tests/smart_test.py b/tests/smart_test.py
index 13a0f931..f8a7346a 100644
--- a/tests/smart_test.py
+++ b/tests/smart_test.py
@@ -61,6 +61,16 @@ def _clean_loop(self):
def test_plugin_version(self):
self.assertEqual(BlockDev.get_plugin_soname(BlockDev.Plugin.SMART), "libbd_smart.so.3")
+ @tag_test(TestTags.NOSTORAGE)
+ def test_tech_available(self):
+ """Verify that checking availability works as expected"""
+ succ = BlockDev.smart_is_tech_avail(BlockDev.SmartTech.ATA, 0)
+ self.assertTrue(succ)
+
+ # SCSI not available with libatasmart
+ with self.assertRaises(GLib.GError):
+ BlockDev.smart_is_tech_avail(BlockDev.SmartTech.SCSI, 0)
+
@tag_test(TestTags.CORE)
def test_ata_info(self):
"""Test SMART ATA info on LIO, loop and scsi_debug devices"""
diff --git a/tests/smartmontools_test.py b/tests/smartmontools_test.py
index 91c370c9..dbe8f905 100644
--- a/tests/smartmontools_test.py
+++ b/tests/smartmontools_test.py
@@ -3,7 +3,7 @@
import shutil
import overrides_hack
-from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, fake_utils, TestTags, tag_test, required_plugins, setup_scsi_debug, clean_scsi_debug
+from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, fake_utils, fake_path, TestTags, tag_test, required_plugins, setup_scsi_debug, clean_scsi_debug
import gi
gi.require_version('GLib', '2.0')
@@ -61,6 +61,21 @@ def _clean_loop(self):
def test_plugin_version(self):
self.assertEqual(BlockDev.get_plugin_soname(BlockDev.Plugin.SMART), "libbd_smartmontools.so.3")
+ @tag_test(TestTags.NOSTORAGE)
+ def test_tech_available(self):
+ """Verify that checking availability works as expected"""
+ succ = BlockDev.smart_is_tech_avail(BlockDev.SmartTech.ATA, 0)
+ self.assertTrue(succ)
+
+ succ = BlockDev.smart_is_tech_avail(BlockDev.SmartTech.SCSI, 0)
+ self.assertTrue(succ)
+
+ with fake_path(all_but="smartctl"):
+ self.assertTrue(BlockDev.reinit([self.ps, self.ps2], True, None))
+
+ with self.assertRaises(GLib.GError):
+ BlockDev.smart_is_tech_avail(BlockDev.SmartTech.ATA, 0)
+
@tag_test(TestTags.CORE)
def test_ata_info(self):
"""Test SMART ATA info on LIO, loop and scsi_debug devices"""
diff --git a/tests/utils_test.py b/tests/utils_test.py
index 11d0c182..12f4abc5 100644
--- a/tests/utils_test.py
+++ b/tests/utils_test.py
@@ -1,6 +1,7 @@
import unittest
import re
import os
+import glob
import overrides_hack
from utils import fake_utils, create_sparse_tempfile, create_lio_device, delete_lio_device, run_command, TestTags, tag_test, read_file
@@ -47,6 +48,12 @@ def test_initialization(self):
succ = BlockDev.utils_prog_reporting_initialized()
self.assertFalse(succ)
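+ # thread-local progress reporting can be initialized and muted as well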
+ succ = BlockDev.utils_init_prog_reporting_thread(self.my_progress_func)
+ self.assertTrue(succ)
+
+ succ = BlockDev.utils_mute_prog_reporting_thread()
+ self.assertTrue(succ)
+
class UtilsExecLoggingTest(UtilsTestCase):
log = ""
@@ -81,6 +88,9 @@ def test_logging(self):
succ = BlockDev.utils_exec_and_capture_output_no_progress(["true"])
self.assertTrue(succ)
+ succ = BlockDev.utils_exec_and_report_error_no_progress(["true"])
+ self.assertTrue(succ)
+
with self.assertRaisesRegex(GLib.GError, r"Process reported exit code 1"):
succ = BlockDev.utils_exec_and_report_error(["/bin/false"])
@@ -481,7 +491,7 @@ def test_get_device_symlinks(self):
class UtilsLinuxKernelVersionTest(UtilsTestCase):
@tag_test(TestTags.NOSTORAGE, TestTags.CORE)
- def test_initialization(self):
+ def test_kernel_version(self):
""" Test Linux kernel version detection"""
ver = BlockDev.utils_get_linux_version()
@@ -507,6 +517,9 @@ class UtilsKernelModuleTest(UtilsTestCase):
def test_have_kernel_module(self):
""" Test checking for kernel modules """
+ with self.assertRaises(GLib.GError):
+ BlockDev.utils_have_kernel_module("invalid name]")
+
have = BlockDev.utils_have_kernel_module("definitely-not-existing-kernel-module")
self.assertFalse(have)
@@ -522,3 +535,42 @@ def test_have_kernel_module(self):
self.assertTrue(have_fs)
else:
self.assertFalse(have_fs)
+
+ @tag_test(TestTags.NOSTORAGE, TestTags.CORE)
+ def test_load_unload_kernel_module(self):
+ """ Test loading and unloading kernel module """
+ with self.assertRaises(GLib.GError):
+ BlockDev.utils_load_kernel_module("definitely-not-existing-kernel-module")
+ with self.assertRaises(GLib.GError):
+ BlockDev.utils_unload_kernel_module("definitely-not-existing-kernel-module")
+
+ if BlockDev.utils_have_kernel_module("loop") and not glob.glob('/dev/loop[0-9]*'):
+ # we have the loop module and no loop device exists so loading and unloading
+ # the module should be OK, right?
+ succ = BlockDev.utils_load_kernel_module("loop")
+ self.assertTrue(succ)
+
+ succ = BlockDev.utils_unload_kernel_module("loop")
+ self.assertTrue(succ)
+
+
+class UtilsLoggingTest(UtilsTestCase):
+ log = ""
+ handler = 0
+
+ def tearDown(self):
+ if self.handler:
+ GLib.log_remove_handler("", self.handler)
+
+ def log_func(self, domain, level, msg, usrptr):
+ self.log += msg
+
+ @tag_test(TestTags.NOSTORAGE, TestTags.CORE)
+ def test_log_stdout(self):
+ """ Test that logging to stdout works as expected """
+ # we are using GLib g_info, g_warning, etc. functions so we can use GLib logging to
+ # redirect the message here
+ self.handler = GLib.log_set_handler("", GLib.LogLevelFlags.LEVEL_WARNING, self.log_func, None)
+
+ BlockDev.utils_log_stdout(BlockDev.UTILS_LOG_WARNING, "warning")
+ self.assertEqual(self.log, "warning")