2 Commits
0.4.4 ... 0.5.0

SHA1          Message                                                         Date
a4d8de7cc6    Changing CSV argument to --csv or -o                            2025-02-19 19:57:15 +00:00
              All checks were successful (automlst.cli/pipeline/head, automlst.cli/pipeline/tag)
5ef5b6ac08    Updated pyproject.toml to use license text and updated repo    2025-02-19 16:26:59 +00:00
              All checks were successful (automlst.cli/pipeline/head)
2 changed files with 14 additions and 10 deletions

View File: pyproject.toml

@@ -6,7 +6,7 @@ build-backend = "setuptools.build_meta"
 name = "autoBIGS.cli"
 dynamic = ["version"]
 readme = "README.md"
-license = {file = "LICENSE"}
+license = {text = "GPL-3.0-or-later"}
 dependencies = [
     "autoBIGS-engine==0.12.*"
 ]
@@ -14,8 +14,8 @@ requires-python = ">=3.12"
 description = "A CLI tool to rapidly fetch fetch MLST profiles given sequences for various diseases."
 [project.urls]
-Repository = "https://github.com/RealYHD/autoBIGS.cli"
-Issues = "https://github.com/RealYHD/autoBIGS.cli/issues"
+Repository = "https://github.com/Syph-and-VPD-Lab/autoBIGS.cli"
+Issues = "https://github.com/Syph-and-VPD-Lab/autoBIGS.cli/issues"
 [project.scripts]
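
For reference, the license change above replaces a reference to a bundled LICENSE file with an inline license string (here the SPDX identifier "GPL-3.0-or-later") in the package metadata. A minimal sketch of checking the resulting metadata after installation, assuming the distribution is installed under the name declared above (autoBIGS.cli):

    from importlib.metadata import metadata

    # Read the installed distribution's core metadata; with license = {text = "..."}
    # in pyproject.toml, setuptools records that string in the License field.
    meta = metadata("autoBIGS.cli")
    print(meta.get("License"))  # expected: GPL-3.0-or-later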

View File

@@ -27,7 +27,7 @@ def setup_parser(parser: ArgumentParser):
     )
     parser.add_argument(
-        "--csv-prefix", "-o",
+        "--csv", "-o",
         dest="csv_output",
         required=False,
         default=None,
@@ -39,18 +39,21 @@ def setup_parser(parser: ArgumentParser):
 async def run(args: Namespace):
     async with BIGSdbIndex() as bigsdb_index:
+        if args.list_dbs and len(args.list_bigsdb_schemas) > 0:
+            print("Cannot specify both database listing and schema listing, please choose one!")
+            exit(1)
         if args.list_dbs:
             known_seqdef_dbs = await bigsdb_index.get_known_seqdef_dbs(force=False)
             sorted_seqdef_dbs = [(name, source) for name, source in sorted(known_seqdef_dbs.items())]
             print("The following are all known BIGS database names, and their source (sorted alphabetically):")
             print("\n".join(["{0}: {1}".format(name, source) for name, source in sorted_seqdef_dbs]))
             if args.csv_output:
-                dbs_csv_path = path.splitext(args.csv_output)[0] + "_" + "dbs.csv"
-                with open(dbs_csv_path, "w") as csv_out_handle:
+                with open(args.csv_output, "w") as csv_out_handle:
                     writer = csv.writer(csv_out_handle)
                     writer.writerow(("BIGSdb Names", "Source"))
                     writer.writerows(sorted_seqdef_dbs)
-                print("\nDatabase output written to {0}".format(dbs_csv_path))
+                print("\nDatabase output written to {0}".format(args.csv_output))
         for bigsdb_schema_name in args.list_bigsdb_schemas:
             schemas = await bigsdb_index.get_schemas_for_seqdefdb(bigsdb_schema_name)
@@ -58,14 +61,15 @@ async def run(args: Namespace):
             print("The following are the known schemas for \"{0}\", and their associated IDs:".format(bigsdb_schema_name))
             print("\n".join(["{0}: {1}".format(name, id) for name, id in sorted_schemas]))
             if args.csv_output:
-                schema_csv_path = path.splitext(args.csv_output)[0] + "_" + "schemas.csv"
-                with open(schema_csv_path, "w") as csv_out_handle:
+                with open(args.csv_output, "w") as csv_out_handle:
                     writer = csv.writer(csv_out_handle)
                     writer.writerow(("Name", "ID"))
                     writer.writerows(sorted_schemas)
-                print("\nSchema list output written to {0}".format(schema_csv_path))
+                print("\nSchema list output written to {0}".format(args.csv_output))
         if not (args.list_dbs or len(args.list_bigsdb_schemas) > 0):
             print("Nothing to do. Try specifying \"-l\" for a list of known databases, or \"-h\" for more information.")
+            exit(1)

 def run_asynchronously(args: Namespace):
     asyncio.run(run(args))
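
For reference, the option rename above means the value passed to -o/--csv is now used verbatim as the output path, whereas --csv-prefix was previously expanded (via path.splitext) into separate <stem>_dbs.csv and <stem>_schemas.csv files before writing. A minimal, self-contained sketch of the renamed option as this diff defines it (argument names, dest, and defaults are taken from the diff; the help text is an assumption):

    from argparse import ArgumentParser

    # "--csv-prefix" becomes "--csv", still aliased to "-o", still optional,
    # and still stored on args.csv_output as a plain file path.
    parser = ArgumentParser()
    parser.add_argument(
        "--csv", "-o",
        dest="csv_output",
        required=False,
        default=None,
        help="Path to write the requested listing as CSV (assumed help text)",
    )

    args = parser.parse_args(["--csv", "known_dbs.csv"])
    print(args.csv_output)  # known_dbs.csv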