Compare commits

..

No commits in common. "af9c8c70b81942d8c5f03f598dea81152281dd12" and "43a17d698b9b6285f049288cdfcfdabfa89ba000" have entirely different histories.

2 changed files with 6 additions and 30 deletions

View File

@@ -1,7 +1,5 @@
from argparse import ArgumentParser, Namespace from argparse import ArgumentParser, Namespace
import asyncio import asyncio
import csv
from os import path
from autobigs.engine.analysis.bigsdb import BIGSdbIndex from autobigs.engine.analysis.bigsdb import BIGSdbIndex
def setup_parser(parser: ArgumentParser): def setup_parser(parser: ArgumentParser):
@@ -26,14 +24,6 @@ def setup_parser(parser: ArgumentParser):
help="Lists the known schema IDs for a given BIGSdb sequence definition database name. The name, and then the ID of the schema is given." help="Lists the known schema IDs for a given BIGSdb sequence definition database name. The name, and then the ID of the schema is given."
) )
parser.add_argument(
"--csv-prefix", "-o",
dest="csv_output",
required=False,
default=None,
help="Output list as CSV at a given path. A suffix is added depending on the action taken."
)
parser.set_defaults(run=run_asynchronously) parser.set_defaults(run=run_asynchronously)
return parser return parser
@@ -41,29 +31,15 @@ async def run(args: Namespace):
async with BIGSdbIndex() as bigsdb_index: async with BIGSdbIndex() as bigsdb_index:
if args.list_dbs: if args.list_dbs:
known_seqdef_dbs = await bigsdb_index.get_known_seqdef_dbs(force=False) known_seqdef_dbs = await bigsdb_index.get_known_seqdef_dbs(force=False)
sorted_seqdef_dbs = [(name, source) for name, source in sorted(known_seqdef_dbs.items())] print("The following are all known BIGS database names (sorted alphabetically):")
print("The following are all known BIGS database names, and their source (sorted alphabetically):") print("\n".join(sorted(known_seqdef_dbs.keys())))
print("\n".join(["{0}: {1}".format(name, source) for name, source in sorted_seqdef_dbs]))
if args.csv_output:
dbs_csv_path = path.splitext(args.csv_output)[0] + "_" + "dbs.csv"
with open(dbs_csv_path, "w") as csv_out_handle:
writer = csv.writer(csv_out_handle)
writer.writerow(("BIGSdb Names", "Source"))
writer.writerows(sorted_seqdef_dbs)
print("\nDatabase output written to {0}".format(dbs_csv_path))
for bigsdb_schema_name in args.list_bigsdb_schemas: for bigsdb_schema_name in args.list_bigsdb_schemas:
schemas = await bigsdb_index.get_schemas_for_seqdefdb(bigsdb_schema_name) schemas = await bigsdb_index.get_schemas_for_seqdefdb(bigsdb_schema_name)
sorted_schemas = [(name, id) for name, id in sorted(schemas.items())]
print("The following are the known schemas for \"{0}\", and their associated IDs:".format(bigsdb_schema_name)) print("The following are the known schemas for \"{0}\", and their associated IDs:".format(bigsdb_schema_name))
print("\n".join(["{0}: {1}".format(name, id) for name, id in sorted_schemas])) for schema_desc, schema_id in schemas.items():
if args.csv_output: print(f"{schema_desc}: {schema_id}")
schema_csv_path = path.splitext(args.csv_output)[0] + "_" + "schemas.csv"
with open(schema_csv_path, "w") as csv_out_handle:
writer = csv.writer(csv_out_handle)
writer.writerow(("Name", "ID"))
writer.writerows(sorted_schemas)
print("\nSchema list output written to {0}".format(schema_csv_path))
if not (args.list_dbs or len(args.list_bigsdb_schemas) > 0): if not (args.list_dbs or len(args.list_bigsdb_schemas) > 0):
print("Nothing to do. Try specifying \"-l\" for a list of known databases, or \"-h\" for more information.") print("Nothing to do. Try specifying \"-l\" for a list of known databases, or \"-h\" for more information.")

View File

@@ -50,7 +50,7 @@ async def run(args: Namespace):
async with BIGSdbIndex() as bigsdb_index: async with BIGSdbIndex() as bigsdb_index:
gen_strings = read_multiple_fastas(args.fastas) gen_strings = read_multiple_fastas(args.fastas)
async with await bigsdb_index.build_profiler_from_seqdefdb(False, args.seqdefdb, args.schema) as mlst_profiler: async with await bigsdb_index.build_profiler_from_seqdefdb(False, args.seqdefdb, args.schema) as mlst_profiler:
mlst_profiles = mlst_profiler.profile_multiple_strings(gen_strings, args.stop_on_fail) mlst_profiles = mlst_profiler.profile_multiple_strings(gen_strings)
failed = await write_mlst_profiles_as_csv(mlst_profiles, args.out) failed = await write_mlst_profiles_as_csv(mlst_profiles, args.out)
if len(failed) > 0: if len(failed) > 0:
print(f"A total of {len(failed)} IDs failed (no profile found):\n{"\n".join(failed)}") print(f"A total of {len(failed)} IDs failed (no profile found):\n{"\n".join(failed)}")