Improve vitastor-cli ls - show I/O statistics, allow to sort & limit output

nbd-vmsplice
Vitaliy Filippov 2021-11-11 01:30:29 +03:00
parent 0f3f0a9d29
commit e4efa2c08a
3 changed files with 299 additions and 163 deletions

2
json11

@ -1 +1 @@
Subproject commit 3a5b4477bc011e3224a3b0f9e2883acc78eec14c
Subproject commit 34781ed7ee0b70059eb1c97e2660b7121ae6d1de

View File

@ -36,10 +36,6 @@ json11::Json::object cli_tool_t::parse_args(int narg, const char *args[])
{
cfg["count"] = args[++i];
}
else if (args[i][0] == '-' && args[i][1] == 'i')
{
cfg["interactive"] = "1";
}
else if (args[i][0] == '-' && args[i][1] == 'p')
{
cfg["pool"] = args[++i];
@ -48,11 +44,16 @@ json11::Json::object cli_tool_t::parse_args(int narg, const char *args[])
{
cfg["size"] = args[++i];
}
else if (args[i][0] == '-' && args[i][1] == 'r')
{
cfg["reverse"] = "1";
}
else if (args[i][0] == '-' && args[i][1] == '-')
{
const char *opt = args[i]+2;
cfg[opt] = i == narg-1 || !strcmp(opt, "json") || !strcmp(opt, "wait-list") ||
!strcmp(opt, "long") || !strcmp(opt, "writers-stopped") && strcmp("1", args[i+1]) != 0
!strcmp(opt, "long") || !strcmp(opt, "del") ||
!strcmp(opt, "writers-stopped") && strcmp("1", args[i+1]) != 0
? "1" : args[++i];
}
else
@ -79,46 +80,48 @@ void cli_tool_t::help()
"(c) Vitaliy Filippov, 2019+ (VNPL-1.1)\n"
"\n"
"USAGE:\n"
"%s ls [-l] [--pool|-p <id|name>]\n"
" List existing images from a specified pool or from all pools if not specified.\n"
" Also report allocated size if -l is specified.\n"
"%s ls [-l] [--del] [-p <id|name>] [-w]\n"
" List images.\n"
" -p|--pool POOL Filter images by pool ID or name\n"
" -l|--long Also report allocated size and I/O statistics\n"
" --del Also include delete operation statistics\n"
" --sort <field> Sort by specified field (name, size, used_size, <read|write|delete>_<iops|bps|lat|queue>)\n"
" -r|--reverse Sort in descending order\n"
" --top <n> Only list top <n> items\n"
"\n"
"%s create -s|--size <size> [--pool <id|name>] [--parent <parent_name>[@<snapshot>]] <name>\n"
"%s create -s|--size <size> [-p|--pool <id|name>] [--parent <parent_name>[@<snapshot>]] <name>\n"
" Create an image. You may use K/M/G/T suffixes for <size>. If --parent is specified,\n"
" a copy-on-write image clone is created. Parent must be a snapshot (readonly image).\n"
" Pool must be specified if there is more than one pool.\n"
"\n"
"%s create --snapshot <snapshot> [--pool <id|name>] <image>\n"
"%s snap-create [--pool <id|name>] <image>@<snapshot>\n"
"%s create --snapshot <snapshot> [-p|--pool <id|name>] <image>\n"
"%s snap-create [-p|--pool <id|name>] <image>@<snapshot>\n"
" Create a snapshot of image <name>. May be used live if only a single writer is active.\n"
"\n"
"%s set <name> [-s|--size <size>] [--readonly | --readwrite]\n"
" Resize image or change its readonly status. Images with children can't be made read-write.\n"
"\n"
"%s top [-n <MAX_COUNT>] [-i]\n"
" Disable image list sorted by I/O load, interactive if -i specified.\n"
"\n"
"%s rm [OPTIONS] <from> [<to>] [--writers-stopped]\n"
"%s rm <from> [<to>] [--writers-stopped]\n"
" Remove <from> or all layers between <from> and <to> (<to> must be a child of <from>),\n"
" rebasing all their children accordingly. --writers-stopped allows merging to be a bit\n"
" more effective in case of a single 'slim' read-write child and 'fat' removed parent:\n"
" the child is merged into parent and parent is renamed to child in that case.\n"
" In other cases parent layers are always merged into children.\n"
"\n"
"%s flatten [OPTIONS] <layer>\n"
"%s flatten <layer>\n"
" Flatten a layer, i.e. merge data and detach it from parents.\n"
"\n"
"%s rm-data [OPTIONS] --pool <pool> --inode <inode> [--wait-list]\n"
"%s rm-data --pool <pool> --inode <inode> [--wait-list]\n"
" Remove inode data without changing metadata.\n"
" --wait-list means first retrieve objects listings and then remove it.\n"
" --wait-list requires more memory, but allows to show correct removal progress.\n"
"\n"
"%s merge-data [OPTIONS] <from> <to> [--target <target>]\n"
"%s merge-data <from> <to> [--target <target>]\n"
" Merge layer data without changing metadata. Merge <from>..<to> to <target>.\n"
" <to> must be a child of <from> and <target> may be one of the layers between\n"
" <from> and <to>, including <from> and <to>.\n"
"\n"
"OPTIONS (global):\n"
"GLOBAL OPTIONS:\n"
" --etcd_address <etcd_address>\n"
" --iodepth N Send N operations in parallel to each OSD when possible (default 32)\n"
" --parallel_osds M Work with M osds in parallel when possible (default 4)\n"
@ -126,7 +129,7 @@ void cli_tool_t::help()
" --cas 1|0 Use online CAS writes when possible (default auto)\n"
" --json JSON output\n"
,
exe_name, exe_name, exe_name, exe_name, exe_name, exe_name, exe_name, exe_name, exe_name, exe_name
exe_name, exe_name, exe_name, exe_name, exe_name, exe_name, exe_name, exe_name, exe_name
);
exit(0);
}

View File

@ -1,14 +1,21 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#include <algorithm>
#include "cli.h"
#include "cluster_client.h"
#include "base64.h"
std::string print_table(json11::Json items, json11::Json header);
// FIX: the ternary was inverted — ((a) < (b) ? (b) : (a)) returns the LARGER
// value, i.e. it computed MAX while being named MIN.
#define MIN(a, b) ((a) < (b) ? (a) : (b))
std::string print_table(json11::Json items, json11::Json header, bool use_esc);
std::string format_size(uint64_t size);
std::string format_lat(uint64_t lat);
std::string format_q(double depth);
// List existing images
//
// Again, you can just look into etcd, but this console tool encapsulates it
@ -18,7 +25,10 @@ struct image_lister_t
pool_id_t list_pool_id = 0;
std::string list_pool_name;
bool detailed = false;
std::string sort_field;
int sort_dir = 1;
int max_count = 0;
bool show_stats = false, show_delete = false;
int state = 0;
std::map<inode_t, json11::Json::object> stats;
@ -71,143 +81,167 @@ struct image_lister_t
item["parent_pool_id"] = (uint64_t)INODE_POOL(ic.second.parent_id);
item["parent_inode_num"] = INODE_NO_POOL(ic.second.parent_id);
}
if (!parent->json_output)
{
item["used_size_fmt"] = format_size(0);
item["size_fmt"] = format_size(ic.second.size);
item["ro"] = ic.second.readonly ? "RO" : "-";
}
stats[ic.second.num] = item;
}
}
// Fetch space & I/O statistics for listed images from etcd.
// Two-phase coroutine-style state machine: phase 0 issues one etcd
// transaction reading pool/stats (for pg_real_size) and inode/stats,
// then returns; on re-entry (state == 1) it parses the responses and
// merges per-inode statistics into this->stats.
void get_stats()
{
    if (state == 1)
        goto resume_1;
    // Space statistics
    // inode/stats/<pool>/<inode>::raw_used divided by pool/stats/<pool>::pg_real_size
    // multiplied by 1 or number of data drives
    parent->waiting++;
    parent->cli->st_cli.etcd_txn(json11::Json::object {
        { "success", json11::Json::array {
            json11::Json::object {
                { "request_range", json11::Json::object {
                    { "key", base64_encode(
                        parent->cli->st_cli.etcd_prefix+"/pool/stats"+
                        (list_pool_id ? "/"+std::to_string(list_pool_id) : "")+"/"
                    ) },
                    { "range_end", base64_encode(
                        parent->cli->st_cli.etcd_prefix+"/pool/stats"+
                        (list_pool_id ? "/"+std::to_string(list_pool_id) : "")+"0"
                    ) },
                } },
            },
            json11::Json::object {
                { "request_range", json11::Json::object {
                    { "key", base64_encode(
                        parent->cli->st_cli.etcd_prefix+"/inode/stats"+
                        (list_pool_id ? "/"+std::to_string(list_pool_id) : "")+"/"
                    ) },
                    { "range_end", base64_encode(
                        parent->cli->st_cli.etcd_prefix+"/inode/stats"+
                        (list_pool_id ? "/"+std::to_string(list_pool_id) : "")+"0"
                    ) },
                } },
            },
        } },
    }, ETCD_SLOW_TIMEOUT, [this](std::string err, json11::Json res)
    {
        parent->waiting--;
        if (err != "")
        {
            fprintf(stderr, "Error reading from etcd: %s\n", err.c_str());
            exit(1);
        }
        space_info = res;
    });
    state = 1;
resume_1:
    if (parent->waiting > 0)
        return;
    std::map<pool_id_t, uint64_t> pool_pg_real_size;
    for (auto & kv_item: space_info["responses"][0]["response_range"]["kvs"].array_items())
    {
        auto kv = parent->cli->st_cli.parse_etcd_kv(kv_item);
        // pool ID
        pool_id_t pool_id;
        char null_byte = 0;
        sscanf(kv.key.substr(parent->cli->st_cli.etcd_prefix.length()).c_str(), "/pool/stats/%u%c", &pool_id, &null_byte);
        if (!pool_id || pool_id >= POOL_ID_MAX || null_byte != 0)
        {
            fprintf(stderr, "Invalid key in etcd: %s\n", kv.key.c_str());
            continue;
        }
        // pg_real_size
        pool_pg_real_size[pool_id] = kv.value["pg_real_size"].uint64_value();
    }
    for (auto & kv_item: space_info["responses"][1]["response_range"]["kvs"].array_items())
    {
        auto kv = parent->cli->st_cli.parse_etcd_kv(kv_item);
        // pool ID & inode number
        pool_id_t pool_id;
        inode_t only_inode_num;
        char null_byte = 0;
        // NOTE: %lu assumes inode_t is 64-bit `unsigned long` (LP64) — TODO confirm for other ABIs
        sscanf(kv.key.substr(parent->cli->st_cli.etcd_prefix.length()).c_str(),
            "/inode/stats/%u/%lu%c", &pool_id, &only_inode_num, &null_byte);
        if (!pool_id || pool_id >= POOL_ID_MAX || INODE_POOL(only_inode_num) != 0 || null_byte != 0)
        {
            fprintf(stderr, "Invalid key in etcd: %s\n", kv.key.c_str());
            continue;
        }
        inode_t inode_num = INODE_WITH_POOL(pool_id, only_inode_num);
        uint64_t used_size = kv.value["raw_used"].uint64_value();
        // Convert raw used space to logical used space
        auto pool_it = parent->cli->st_cli.pool_config.find(pool_id);
        // FIX: also guard against pg_real_size == 0 (missing pool stats) to avoid division by zero
        if (pool_it != parent->cli->st_cli.pool_config.end() && pool_pg_real_size[pool_id] != 0)
        {
            auto & pool_cfg = pool_it->second;
            used_size = used_size / pool_pg_real_size[pool_id]
                * (pool_cfg.scheme == POOL_SCHEME_REPLICATED ? 1 : pool_cfg.pg_size-pool_cfg.parity_chunks);
        }
        auto stat_it = stats.find(inode_num);
        if (stat_it == stats.end())
        {
            // Inode has stats but no config entry — synthesize a placeholder row
            stats[inode_num] = json11::Json::object {
                { "name", "Pool:"+std::to_string(pool_id)+",ID:"+std::to_string(only_inode_num) },
                { "size", 0 },
                { "readonly", false },
                { "pool_id", (uint64_t)INODE_POOL(inode_num) },
                // FIX: condition was inverted (`== end()` selected the branch that
                // dereferenced the end iterator — undefined behavior)
                { "pool_name", pool_it != parent->cli->st_cli.pool_config.end()
                    ? (pool_it->second.name == "" ? "<Unnamed>" : pool_it->second.name) : "?" },
                { "inode_num", INODE_NO_POOL(inode_num) },
            };
            stat_it = stats.find(inode_num);
        }
        stat_it->second["used_size"] = used_size;
        stat_it->second["read_iops"] = kv.value["read"]["iops"];
        stat_it->second["read_bps"] = kv.value["read"]["bps"];
        stat_it->second["read_lat"] = kv.value["read"]["lat"];
        // queue depth = iops * latency (us) / 1e6 (Little's law)
        stat_it->second["read_queue"] = kv.value["read"]["iops"].number_value() * kv.value["read"]["lat"].number_value() / 1000000;
        stat_it->second["write_iops"] = kv.value["write"]["iops"];
        stat_it->second["write_bps"] = kv.value["write"]["bps"];
        stat_it->second["write_lat"] = kv.value["write"]["lat"];
        stat_it->second["write_queue"] = kv.value["write"]["iops"].number_value() * kv.value["write"]["lat"].number_value() / 1000000;
        stat_it->second["delete_iops"] = kv.value["delete"]["iops"];
        stat_it->second["delete_bps"] = kv.value["delete"]["bps"];
        stat_it->second["delete_lat"] = kv.value["delete"]["lat"];
        stat_it->second["delete_queue"] = kv.value["delete"]["iops"].number_value() * kv.value["delete"]["lat"].number_value() / 1000000;
    }
}
// Convert the collected per-inode stats map into a list, optionally
// sorted by sort_field (descending if sort_dir < 0) and truncated to
// max_count entries.
json11::Json::array to_list()
{
    json11::Json::array list;
    for (auto & kv: stats)
    {
        list.push_back(kv.second);
    }
    if (sort_field != "")
    {
        // FIX: std::sort requires a bool strict-weak-ordering comparator.
        // The previous comparator returned -1/0/1; converted to bool it
        // claimed "less" for BOTH orders, which is undefined behavior.
        std::sort(list.begin(), list.end(), [this](const json11::Json & a, const json11::Json & b)
        {
            return sort_dir >= 0
                ? (a[sort_field] < b[sort_field])
                : (b[sort_field] < a[sort_field]);
        });
    }
    // FIX: cast avoids a signed/unsigned comparison between int and size_t
    if (max_count > 0 && list.size() > (size_t)max_count)
    {
        list.resize(max_count);
    }
    return list;
}
void loop()
{
if (state == 1)
goto resume_1;
get_list();
if (detailed)
if (show_stats)
{
// Space statistics
// inode/stats/<pool>/<inode>::raw_used divided by pool/stats/<pool>::pg_real_size
// multiplied by 1 or number of data drives
parent->waiting++;
parent->cli->st_cli.etcd_txn(json11::Json::object {
{ "success", json11::Json::array {
json11::Json::object {
{ "request_range", json11::Json::object {
{ "key", base64_encode(
parent->cli->st_cli.etcd_prefix+"/pool/stats"+
(list_pool_id ? "/"+std::to_string(list_pool_id) : "")+"/"
) },
{ "range_end", base64_encode(
parent->cli->st_cli.etcd_prefix+"/pool/stats"+
(list_pool_id ? "/"+std::to_string(list_pool_id) : "")+"0"
) },
} },
},
json11::Json::object {
{ "request_range", json11::Json::object {
{ "key", base64_encode(
parent->cli->st_cli.etcd_prefix+"/inode/stats"+
(list_pool_id ? "/"+std::to_string(list_pool_id) : "")+"/"
) },
{ "range_end", base64_encode(
parent->cli->st_cli.etcd_prefix+"/inode/stats"+
(list_pool_id ? "/"+std::to_string(list_pool_id) : "")+"0"
) },
} },
},
} },
}, ETCD_SLOW_TIMEOUT, [this](std::string err, json11::Json res)
{
parent->waiting--;
if (err != "")
{
fprintf(stderr, "Error reading from etcd: %s\n", err.c_str());
exit(1);
}
space_info = res;
});
state = 1;
resume_1:
get_stats();
if (parent->waiting > 0)
return;
std::map<pool_id_t, uint64_t> pool_pg_real_size;
for (auto & kv_item: space_info["responses"][0]["response_range"]["kvs"].array_items())
{
auto kv = parent->cli->st_cli.parse_etcd_kv(kv_item);
// pool ID
pool_id_t pool_id;
char null_byte = 0;
sscanf(kv.key.substr(parent->cli->st_cli.etcd_prefix.length()).c_str(), "/pool/stats/%u%c", &pool_id, &null_byte);
if (!pool_id || pool_id >= POOL_ID_MAX || null_byte != 0)
{
fprintf(stderr, "Invalid key in etcd: %s\n", kv.key.c_str());
continue;
}
// pg_real_size
pool_pg_real_size[pool_id] = kv.value["pg_real_size"].uint64_value();
}
for (auto & kv_item: space_info["responses"][1]["response_range"]["kvs"].array_items())
{
auto kv = parent->cli->st_cli.parse_etcd_kv(kv_item);
// pool ID & inode number
pool_id_t pool_id;
inode_t only_inode_num;
char null_byte = 0;
sscanf(kv.key.substr(parent->cli->st_cli.etcd_prefix.length()).c_str(),
"/inode/stats/%u/%lu%c", &pool_id, &only_inode_num, &null_byte);
if (!pool_id || pool_id >= POOL_ID_MAX || INODE_POOL(only_inode_num) != 0 || null_byte != 0)
{
fprintf(stderr, "Invalid key in etcd: %s\n", kv.key.c_str());
continue;
}
inode_t inode_num = INODE_WITH_POOL(pool_id, inode_num);
uint64_t used_size = kv.value["raw_used"].uint64_value();
// save stats
auto pool_it = parent->cli->st_cli.pool_config.find(pool_id);
if (pool_it == parent->cli->st_cli.pool_config.end())
{
auto & pool_cfg = pool_it->second;
used_size = used_size / pool_pg_real_size[pool_id]
* (pool_cfg.scheme == POOL_SCHEME_REPLICATED ? 1 : pool_cfg.pg_size-pool_cfg.parity_chunks);
}
auto stat_it = stats.find(inode_num);
if (stat_it == stats.end())
{
stats[inode_num] = json11::Json::object {
{ "name", "UnknownID:"+std::to_string(pool_id)+"/"+std::to_string(only_inode_num)+")" },
{ "size", 0 },
{ "readonly", false },
{ "pool_id", (uint64_t)INODE_POOL(inode_num) },
{ "pool_name", pool_it == parent->cli->st_cli.pool_config.end()
? (pool_it->second.name == "" ? "<Unnamed>" : pool_it->second.name) : "?" },
{ "inode_num", INODE_NO_POOL(inode_num) },
};
if (!parent->json_output)
{
stats[inode_num]["size_fmt"] = format_size(0);
stats[inode_num]["ro"] = "-";
}
stat_it = stats.find(inode_num);
}
stat_it->second["used_size"] = used_size;
if (!parent->json_output)
{
stat_it->second["used_size_fmt"] = format_size(used_size);
}
}
}
json11::Json::array list;
for (auto & kv: stats)
{
list.push_back(kv.second);
}
if (parent->json_output)
{
// JSON output
printf("%s\n", json11::Json(list).dump().c_str());
printf("%s\n", json11::Json(to_list()).dump().c_str());
state = 100;
return;
}
@ -217,26 +251,74 @@ resume_1:
{ "key", "name" },
{ "title", "NAME" },
});
if (!list_pool_id && parent->cli->st_cli.pool_config.size() > 1)
if (!list_pool_id)
{
cols.push_back(json11::Json::object{
{ "key", "used_size_fmt" },
{ "title", "USED" },
{ "right", true },
{ "key", "pool_name" },
{ "title", "POOL" },
});
}
cols.push_back(json11::Json::object{
{ "key", "size_fmt" },
{ "title", "SIZE" },
{ "right", true },
});
if (detailed)
if (show_stats)
{
cols.push_back(json11::Json::object{
{ "key", "used_size_fmt" },
{ "title", "USED" },
{ "right", true },
});
cols.push_back(json11::Json::object{
{ "key", "read_bw" },
{ "title", "READ" },
});
cols.push_back(json11::Json::object{
{ "key", "read_iops" },
{ "title", "IOPS" },
});
cols.push_back(json11::Json::object{
{ "key", "read_q" },
{ "title", "QUEUE" },
});
cols.push_back(json11::Json::object{
{ "key", "read_lat_f" },
{ "title", "LAT" },
});
cols.push_back(json11::Json::object{
{ "key", "write_bw" },
{ "title", "WRITE" },
});
cols.push_back(json11::Json::object{
{ "key", "write_iops" },
{ "title", "IOPS" },
});
cols.push_back(json11::Json::object{
{ "key", "write_q" },
{ "title", "QUEUE" },
});
cols.push_back(json11::Json::object{
{ "key", "write_lat_f" },
{ "title", "LAT" },
});
if (show_delete)
{
cols.push_back(json11::Json::object{
{ "key", "delete_bw" },
{ "title", "DEL" },
});
cols.push_back(json11::Json::object{
{ "key", "delete_iops" },
{ "title", "IOPS" },
});
cols.push_back(json11::Json::object{
{ "key", "delete_q" },
{ "title", "QUEUE" },
});
cols.push_back(json11::Json::object{
{ "key", "delete_lat_f" },
{ "title", "LAT" },
});
}
}
cols.push_back(json11::Json::object{
{ "key", "ro" },
@ -247,12 +329,31 @@ resume_1:
{ "key", "parent_name" },
{ "title", "PARENT" },
});
printf("%s", print_table(list, cols).c_str());
json11::Json::array list;
for (auto & kv: stats)
{
if (show_stats)
{
kv.second["used_size_fmt"] = format_size(kv.second["used_size"].uint64_value());
kv.second["read_bw"] = format_size(kv.second["read_bps"].uint64_value())+"/s";
kv.second["write_bw"] = format_size(kv.second["write_bps"].uint64_value())+"/s";
kv.second["delete_bw"] = format_size(kv.second["delete_bps"].uint64_value())+"/s";
kv.second["read_lat_f"] = format_lat(kv.second["read_lat"].uint64_value());
kv.second["write_lat_f"] = format_lat(kv.second["write_lat"].uint64_value());
kv.second["delete_lat_f"] = format_lat(kv.second["delete_lat"].uint64_value());
kv.second["read_q"] = format_q(kv.second["read_queue"].number_value());
kv.second["write_q"] = format_q(kv.second["write_queue"].number_value());
kv.second["delete_q"] = format_q(kv.second["delete_queue"].number_value());
}
kv.second["size_fmt"] = format_size(kv.second["size"].uint64_value());
kv.second["ro"] = kv.second["readonly"].bool_value() ? "RO" : "-";
}
printf("%s", print_table(to_list(), cols, true).c_str());
state = 100;
}
};
std::string print_table(json11::Json items, json11::Json header)
std::string print_table(json11::Json items, json11::Json header, bool use_esc)
{
std::vector<int> sizes;
for (int i = 0; i < header.array_items().size(); i++)
@ -263,11 +364,11 @@ std::string print_table(json11::Json items, json11::Json header)
{
for (int i = 0; i < header.array_items().size(); i++)
{
int l = item[header[i]["key"].string_value()].string_value().length();
int l = item[header[i]["key"].string_value()].as_string().length();
sizes[i] = sizes[i] < l ? l : sizes[i];
}
}
std::string str = "";
std::string str = use_esc ? "\033[1m" : "";
for (int i = 0; i < header.array_items().size(); i++)
{
if (i > 0)
@ -291,7 +392,8 @@ std::string print_table(json11::Json items, json11::Json header)
str += ' ';
}
}
str += "\n";
if (use_esc)
str += "\033[0m\n";
for (auto & item: items.array_items())
{
for (int i = 0; i < header.array_items().size(); i++)
@ -301,18 +403,18 @@ std::string print_table(json11::Json items, json11::Json header)
// Separator
str += " ";
}
int pad = sizes[i] - item[header[i]["key"].string_value()].string_value().length();
int pad = sizes[i] - item[header[i]["key"].string_value()].as_string().length();
if (header[i]["right"].bool_value())
{
// Align right
for (int j = 0; j < pad; j++)
str += ' ';
str += item[header[i]["key"].string_value()].string_value();
str += item[header[i]["key"].string_value()].as_string();
}
else
{
// Align left
str += item[header[i]["key"].string_value()].string_value();
str += item[header[i]["key"].string_value()].as_string();
for (int j = 0; j < pad; j++)
str += ' ';
}
@ -322,8 +424,8 @@ std::string print_table(json11::Json items, json11::Json header)
return str;
}
static uint64_t size_thresh[] = { 1024l*1024*1024*1024, 1024l*1024*1024, 1024l*1024, 1024 };
static const char *size_unit = "TGMK";
static uint64_t size_thresh[] = { 1024l*1024*1024*1024, 1024l*1024*1024, 1024l*1024, 1024, 0 };
static const char *size_unit = "TGMKB";
std::string format_size(uint64_t size)
{
@ -332,7 +434,7 @@ std::string format_size(uint64_t size)
{
if (size >= size_thresh[i] || i >= sizeof(size_thresh)/sizeof(size_thresh[0])-1)
{
double value = (double)size/size_thresh[i];
double value = size_thresh[i] ? (double)size/size_thresh[i] : size;
int l = snprintf(buf, sizeof(buf), "%.1f", value);
assert(l < sizeof(buf)-2);
if (buf[l-1] == '0')
@ -346,14 +448,45 @@ std::string format_size(uint64_t size)
return std::string(buf);
}
// Format a latency value given in microseconds as a human-readable string:
// below 100 us — integer microseconds, below 0.5 s — milliseconds,
// otherwise seconds (two decimal places).
std::string format_lat(uint64_t lat)
{
    char buf[256];
    int l;
    if (lat < 100)
        // FIX: %lu with uint64_t is UB where `long` is 32-bit; cast + %llu is portable
        l = snprintf(buf, sizeof(buf), "%llu us", (unsigned long long)lat);
    else if (lat < 500000)
        l = snprintf(buf, sizeof(buf), "%.2f ms", (double)lat/1000);
    else
        l = snprintf(buf, sizeof(buf), "%.2f s", (double)lat/1000000);
    assert(l > 0 && (size_t)l < sizeof(buf));
    return std::string(buf);
}
// Render a queue depth with two decimal places, then strip redundant
// trailing zeroes ("2.50" -> "2.5", "1.00" -> "1").
std::string format_q(double depth)
{
    char buf[256];
    int len = snprintf(buf, sizeof(buf), "%.2f", depth);
    assert((size_t)len < sizeof(buf));
    std::string s(buf, len);
    if (s.back() == '0')
    {
        s.pop_back();
        if (s.back() == '0')
        {
            // Both decimals were zero — drop the second zero and the dot too
            s.pop_back();
            s.pop_back();
        }
    }
    return s;
}
std::function<bool(void)> cli_tool_t::start_ls(json11::Json cfg)
{
json11::Json::array cmd = cfg["command"].array_items();
auto lister = new image_lister_t();
lister->parent = this;
lister->list_pool_id = cfg["pool"].uint64_value();
lister->list_pool_name = lister->list_pool_id ? "" : cfg["pool"].string_value();
lister->detailed = cfg["long"].bool_value();
lister->list_pool_name = lister->list_pool_id ? "" : cfg["pool"].as_string();
lister->show_stats = cfg["long"].bool_value();
lister->show_delete = cfg["del"].bool_value();
lister->sort_field = cfg["sort"].string_value();
lister->sort_dir = cfg["reverse"].bool_value() ? -1 : 1;
lister->max_count = cfg["top"].uint64_value();
return [lister]()
{
lister->loop();