10 Commits

d9a16de97e  Merge branch 'main' into develop
    All checks were successful (automlst.cli/pipeline/tag, automlst.cli/pipeline/head)
    2025-02-26 15:25:23 +00:00

19b23539b3  Added bioconda and personal conda repos to channels
    Some checks reported errors (automlst.cli/pipeline/head)
    2025-02-26 15:22:32 +00:00

41c05cc521  Increased user friendliness
    Added default MLST scheme.
    Added multiple ways of defining an MLST scheme.
    CSV output now shows the database name for each scheme ID.
    Now uses the native argparse library to enforce mutual exclusivity of arguments and whether one is required.
    Updated to use 0.13.* of the engine.
    Some checks failed (automlst.cli/pipeline/head)
    2025-02-26 07:37:34 +00:00

dbd8238cef  Added recipe patching script
    Some checks failed (automlst.cli/pipeline/head)
    2025-02-21 14:14:02 +00:00

4b0fac0801  Added grayskull and curl to environment.yml
    Some checks failed (automlst.cli/pipeline/head)
    2025-02-21 06:51:20 +00:00

d78ae19c4f  Re-added pytest-cov to conda environment.yml
    Some checks failed (automlst.cli/pipeline/head)
    2025-02-21 06:40:20 +00:00

6b8376c470  Added publishing to personal git repo
    Some checks failed (automlst.cli/pipeline/head)
    2025-02-21 06:33:07 +00:00

a4d8de7cc6  Changing CSV argument to --csv or -o
    All checks were successful (automlst.cli/pipeline/head, automlst.cli/pipeline/tag)
    2025-02-19 19:57:15 +00:00

5ef5b6ac08  Updated pyproject.toml to use license text and updated repo
    All checks were successful (automlst.cli/pipeline/head)
    2025-02-19 16:26:59 +00:00

3aa2916324  Updated pipeline to not publish to system repo if it's a tagged version
    All checks were successful (automlst.cli/pipeline/head, automlst.cli/pipeline/tag)
    2025-02-19 16:02:31 +00:00
11 changed files with 216 additions and 56 deletions

16
.devcontainer/Dockerfile Normal file

@@ -0,0 +1,16 @@
FROM mcr.microsoft.com/devcontainers/miniconda:1-3
# Copy environment.yml (if found) to a temp location so we update the environment. Also
# copy "noop.txt" so the COPY instruction does not fail if no environment.yml exists.
COPY environment.yml* .devcontainer/noop.txt /tmp/conda-tmp/
RUN if [ -f "/tmp/conda-tmp/environment.yml" ]; then umask 0002 && /opt/conda/bin/conda env update -n base -f /tmp/conda-tmp/environment.yml; fi \
    && rm -rf /tmp/conda-tmp

# [Optional] Uncomment to install a different version of Python than the default
# RUN conda install -y python=3.6 \
#     && pip install --no-cache-dir pipx \
#     && pipx reinstall-all

# [Optional] Uncomment this section to install additional OS packages.
# RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
#     && apt-get -y install --no-install-recommends <your-package-list-here>

.devcontainer/devcontainer.json

@@ -1,9 +1,11 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/python
// README at: https://github.com/devcontainers/templates/tree/main/src/miniconda
{
    "name": "Python 3",
    // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
    "image": "mcr.microsoft.com/devcontainers/python:1-3.12-bullseye",
    "name": "Miniconda (Python 3)",
    "build": {
        "context": "..",
        "dockerfile": "Dockerfile"
    },
    // Features to add to the dev container. More info: https://containers.dev/features.
    // "features": {},
@@ -12,7 +14,9 @@
    // "forwardPorts": [],
    // Use 'postCreateCommand' to run commands after the container is created.
    "postCreateCommand": "pip3 install --user -r requirements.txt && pip install -e .",
    "postCreateCommand": "pip install -e .",
    // Configure tool-specific properties.
    "customizations": {
        "vscode": {
            "extensions": [
@@ -20,8 +24,6 @@
            ]
        }
    },
    // Configure tool-specific properties.
    // "customizations": {},
    // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
    // "remoteUser": "root"

3
.devcontainer/noop.txt Normal file

@@ -0,0 +1,3 @@
This file is copied into the container along with environment.yml* from the
parent folder. This is done to prevent the Dockerfile COPY instruction from
failing if no environment.yml is found.

20
Jenkinsfile vendored

@@ -2,14 +2,16 @@ pipeline {
    agent {
        kubernetes {
            cloud 'rsys-devel'
            defaultContainer 'pip'
            inheritFrom 'pip'
            defaultContainer 'miniforge3'
            inheritFrom 'miniforge'
        }
    }
    stages {
        stage("install") {
            steps {
                sh 'python -m pip install -r requirements.txt'
                sh 'conda config --add channels bioconda'
                sh 'conda config --add channels https://git.reslate.systems/api/packages/ydeng/conda'
                sh 'conda env update -n base -f environment.yml'
            }
        }
        stage("unit tests") {
@@ -22,26 +24,34 @@ pipeline {
        stage("build") {
            steps {
                sh "python -m build"
                sh "grayskull pypi dist/*.tar.gz --maintainers 'Harrison Deng'"
                sh "python scripts/patch_recipe.py"
                sh 'conda build autobigs-cli -c bioconda --output-folder conda-bld --verify'
            }
        }
        stage("archive") {
            steps {
                archiveArtifacts artifacts: 'dist/*.tar.gz, dist/*.whl', fingerprint: true, followSymlinks: false, onlyIfSuccessful: true
                archiveArtifacts artifacts: 'dist/*.tar.gz, dist/*.whl, conda-bld/**/*.conda', fingerprint: true, followSymlinks: false, onlyIfSuccessful: true
            }
        }
        stage("publish") {
            parallel {
                stage ("git.reslate.systems") {
                    when {
                        branch '**/main'
                    }
                    environment {
                        CREDS = credentials('username-password-rs-git')
                    }
                    steps {
                        sh script: 'python -m twine upload --repository-url https://git.reslate.systems/api/packages/ydeng/pypi -u ${CREDS_USR} -p ${CREDS_PSW} --non-interactive --disable-progress-bar --verbose dist/*'
                        sh 'curl --user ${CREDS_USR}:${CREDS_PSW} --upload-file conda-bld/**/*.conda https://git.reslate.systems/api/packages/${CREDS_USR}/conda/$(basename conda-bld/**/*.conda)'
                    }
                }
                stage ("pypi.org") {
                    when {
                        tag '*.*'
                        tag '*.*.*'
                    }
                    environment {
                        TOKEN = credentials('pypi.org')
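The new curl step in the git.reslate.systems stage is just an authenticated HTTP PUT of each built .conda file to the Gitea conda package endpoint. A minimal Python sketch of the same upload, for illustration only — requests is an assumption (it is not in environment.yml), and CREDS_USR/CREDS_PSW mirror the Jenkins credentials() binding:

```python
# Rough Python equivalent of the pipeline's curl upload (illustrative sketch).
from glob import glob
from os import environ, path

import requests  # assumed available; not part of environment.yml

for package in glob("conda-bld/**/*.conda", recursive=True):
    with open(package, "rb") as handle:
        response = requests.put(
            "https://git.reslate.systems/api/packages/"
            f"{environ['CREDS_USR']}/conda/{path.basename(package)}",
            data=handle,
            auth=(environ["CREDS_USR"], environ["CREDS_PSW"]),
        )
    response.raise_for_status()  # surface upload errors instead of ignoring them
```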

README.md

@@ -9,7 +9,7 @@ This program is simply a command-line interface for [autoBIGS.engine](https://py
This CLI is capable of exactly what [autoBIGS.engine](https://pypi.org/project/autoBIGS.engine) is capable of:
- Import multiple whole genome FASTA files
- Fetch the available BIGSdb databases that are currently live and available
- Fetch the available BIGSdb database schemas for a given MLST database
- Fetch the available BIGSdb database schemes for a given MLST database
- Retrieve exact/non-exact MLST allele variant IDs based off a sequence
- Retrieve MLST sequence type IDs based off a sequence
- Inexact matches are annotated with an asterisk (\*)
@@ -17,7 +17,7 @@ This CLI is capable of exactly what [autoBIGS.engine](https://pypi.org/project/a
## Planned Features for CLI
- Specifications of multi-threading capacity
- Session authentication for updated database schemas (as required by both PubMLST and Institut Pasteur)
- Session authentication for updated database schemes (as required by both PubMLST and Institut Pasteur)
Please refer to [autoBIGS.engine](https://pypi.org/project/autoBIGS.engine) for more planned features.
@@ -33,11 +33,11 @@ This CLI can be installed with `pip`. Please ensure [pip is installed](https://p
### Example
Let's say you have a fasta called `seq.fasta` which contains several sequences. You know all sequences in `seq.fasta` are Bordetella pertussis sequences, and you know you have the sequences for the necessary targets of your schema in each of them. You want to retrieve MLST profiles for all of them. This can be done by:
Let's say you have a fasta called `seq.fasta` which contains several sequences. You know all sequences in `seq.fasta` are Bordetella pertussis sequences, and you know you have the sequences for the necessary targets of your scheme in each of them. You want to retrieve MLST profiles for all of them. This can be done by:
1. Running `autobigs info -l` to list all available `seqdef` databases and find the database associated with Bordetella (you should see one called `pubmlst_bordetella_seqdef`).
2. Then, run `autobigs info -lschema pubmlst_bordetella_seqdef` to get the available typing schemas and their associated IDs. In this example, let's assume we want a normal MLST scheme. In this case, we would pay attention to the number next to `MLST` (it should be `3`).
2. Then, run `autobigs info -lscheme pubmlst_bordetella_seqdef` to get the available typing schemes and their associated IDs. In this example, let's assume we want a normal MLST scheme. In this case, we would pay attention to the number next to `MLST` (it should be `3`).
3. Then, run `autobigs st -h` and familiarize yourself with the parameters needed for sequence typing.

14
environment.yml Normal file

@@ -0,0 +1,14 @@
name: base
channels:
  - bioconda
  - conda-forge
dependencies:
  - pytest
  - pytest-asyncio
  - pytest-cov
  - python-build
  - conda-build
  - twine==6.0.1
  - setuptools_scm
  - grayskull
  - curl
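Several of these dependencies exist purely for their command-line tools, so a quick sanity check of an updated environment is confirming they resolve on PATH. A purely illustrative snippet (abridged to the tools with console entry points):

```python
# Verify the CI tools from environment.yml are resolvable on PATH.
from shutil import which

for tool in ("pytest", "twine", "grayskull", "conda-build", "curl"):
    print(f"{tool}: {which(tool) or 'missing'}")
```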

pyproject.toml

@@ -6,16 +6,16 @@ build-backend = "setuptools.build_meta"
name = "autoBIGS.cli"
dynamic = ["version"]
readme = "README.md"
license = {file = "LICENSE"}
license = {text = "GPL-3.0-or-later"}
dependencies = [
    "autoBIGS-engine==0.12.*"
    "autoBIGS-engine==0.13.*"
]
requires-python = ">=3.12"
description = "A CLI tool to rapidly fetch MLST profiles given sequences for various diseases."
[project.urls]
Repository = "https://github.com/RealYHD/autoBIGS.cli"
Issues = "https://github.com/RealYHD/autoBIGS.cli/issues"
Repository = "https://github.com/Syph-and-VPD-Lab/autoBIGS.cli"
Issues = "https://github.com/Syph-and-VPD-Lab/autoBIGS.cli/issues"
[project.scripts]

103
scripts/patch_recipe.py Normal file

@@ -0,0 +1,103 @@
#!/usr/bin/env python3

import argparse
from os import fdopen, path
import os
import re
import shutil
from sys import argv
import tempfile

INDENTATION = " "
GRAYSKULL_OUTPUT_PATH = "autoBIGS.cli"
RUN_EXPORTED_VALUE = r'{{ pin_subpackage( name|lower|replace(".", "-"), max_pin="x.x") }}'
LICENSE_SUFFIX = "-or-later"
HOME_PAGE = "https://github.com/Syph-and-VPD-Lab/autoBIGS.cli"

def _calc_indentation(line: str):
    return len(re.findall(INDENTATION, line.split(line.strip())[0])) if line != "\n" else 0

def read_grayskull_output():
    original_recipe = path.abspath(GRAYSKULL_OUTPUT_PATH)
    original_meta = path.join(original_recipe, "meta.yaml")
    meta_file = open(original_meta)
    lines = meta_file.readlines()
    meta_file.close()
    return lines

def update_naming_scheme(lines):
    modified_lines = []
    for line in lines:
        matches = re.finditer(r"\{\{\s*name\|lower()\s+\}\}", line)
        modified_line = line
        for match in matches:
            modified_line = modified_line[:match.start(1)] + r'|replace(".", "-")' + modified_line[match.end(1):]
        modified_lines.append(modified_line)
    return modified_lines

def inject_run_exports(lines: list[str]):
    package_indent = False
    modified_lines = []
    for line in lines:
        indentation_count = _calc_indentation(line)
        if line == "build:\n" and indentation_count == 0:
            package_indent = True
            modified_lines.append(line)
        elif package_indent and indentation_count == 0:
            modified_lines.append(INDENTATION*1 + "run_exports:\n")
            modified_lines.append(INDENTATION*2 + "- " + RUN_EXPORTED_VALUE + "\n")
            package_indent = False
        else:
            modified_lines.append(line)
    return modified_lines

def suffix_license(lines: list[str]):
    about_indent = False
    modified_lines = []
    for line in lines:
        indentation_count = _calc_indentation(line)
        if line == "about:\n" and indentation_count == 0:
            about_indent = True
            modified_lines.append(line)
        elif about_indent and indentation_count == 1 and line.lstrip().startswith("license:"):
            modified_lines.append(line.rstrip() + LICENSE_SUFFIX + "\n")
            about_indent = False
        else:
            modified_lines.append(line)
    return modified_lines

def inject_home_page(lines: list[str]):
    about_indent = False
    modified_lines = []
    for line in lines:
        indentation_count = _calc_indentation(line)
        if line == "about:\n" and indentation_count == 0:
            about_indent = True
            modified_lines.append(line)
        elif about_indent and indentation_count == 0:
            modified_lines.append(INDENTATION + "home: " + HOME_PAGE + "\n")
            about_indent = False
        else:
            modified_lines.append(line)
    return modified_lines

def write_to_original(lines: list[str]):
    original_recipe = path.abspath(GRAYSKULL_OUTPUT_PATH)
    original_meta = path.join(original_recipe, "meta.yaml")
    with open(original_meta, "w") as file:
        file.writelines(lines)

def rename_recipe_dir():
    new_recipe_name = path.abspath(path.join(GRAYSKULL_OUTPUT_PATH.replace(".", "-").lower()))
    shutil.rmtree(new_recipe_name, ignore_errors=True)
    os.replace(path.abspath(GRAYSKULL_OUTPUT_PATH), new_recipe_name)

if __name__ == "__main__":
    original_grayskull_out = read_grayskull_output()
    modified_recipe_meta = None
    modified_recipe_meta = update_naming_scheme(original_grayskull_out)
    modified_recipe_meta = inject_run_exports(modified_recipe_meta)
    modified_recipe_meta = suffix_license(modified_recipe_meta)
    modified_recipe_meta = inject_home_page(modified_recipe_meta)
    write_to_original(modified_recipe_meta)
    rename_recipe_dir()
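The regex in update_naming_scheme targets the Jinja name expression grayskull emits in meta.yaml; a quick sanity check (with a made-up meta.yaml fragment, assuming the function above is importable) shows the rewrite it performs:

```python
# Feed a fabricated grayskull-style fragment through update_naming_scheme.
sample = [
    "package:\n",
    "  name: {{ name|lower }}\n",
]
print("".join(update_naming_scheme(sample)), end="")
# Expected output:
# package:
#   name: {{ name|lower|replace(".", "-") }}
```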

src/autobigs/cli/info.py

@@ -6,7 +6,9 @@ from autobigs.engine.analysis.bigsdb import BIGSdbIndex

def setup_parser(parser: ArgumentParser):
    parser.description = "Fetches the latest BIGSdb MLST database definitions."
    parser.add_argument(
    retrieve_group = parser.add_mutually_exclusive_group(required=True)
    retrieve_group.add_argument(
        "--retrieve-bigsdbs", "-l",
        action="store_true",
        dest="list_dbs",
@@ -15,19 +17,19 @@ def setup_parser(parser: ArgumentParser):
        help="Lists all known BIGSdb MLST databases (fetched from known APIs and cached)."
    )
    parser.add_argument(
        "--retrieve-bigsdb-schemas", "-lschemas",
    retrieve_group.add_argument(
        "--retrieve-bigsdb-schemes", "-lschemes",
        nargs="+",
        action="extend",
        dest="list_bigsdb_schemas",
        dest="list_bigsdb_schemes",
        required=False,
        default=[],
        type=str,
        help="Lists the known schema IDs for a given BIGSdb sequence definition database name. The name, and then the ID of the schema is given."
        help="Lists the known scheme IDs for a given BIGSdb sequence definition database name. The name, and then the ID of the scheme is given."
    )
    parser.add_argument(
        "--csv-prefix", "-o",
        "--csv", "-o",
        dest="csv_output",
        required=False,
        default=None,
@@ -45,27 +47,24 @@ async def run(args: Namespace):
            print("The following are all known BIGS database names, and their source (sorted alphabetically):")
            print("\n".join(["{0}: {1}".format(name, source) for name, source in sorted_seqdef_dbs]))
            if args.csv_output:
                dbs_csv_path = path.splitext(args.csv_output)[0] + "_" + "dbs.csv"
                with open(dbs_csv_path, "w") as csv_out_handle:
                with open(args.csv_output, "w") as csv_out_handle:
                    writer = csv.writer(csv_out_handle)
                    writer.writerow(("BIGSdb Names", "Source"))
                    writer.writerows(sorted_seqdef_dbs)
                print("\nDatabase output written to {0}".format(dbs_csv_path))
                print("\nDatabase output written to {0}".format(args.csv_output))
        for bigsdb_schema_name in args.list_bigsdb_schemas:
            schemas = await bigsdb_index.get_schemas_for_seqdefdb(bigsdb_schema_name)
            sorted_schemas = [(name, id) for name, id in sorted(schemas.items())]
            print("The following are the known schemas for \"{0}\", and their associated IDs:".format(bigsdb_schema_name))
            print("\n".join(["{0}: {1}".format(name, id) for name, id in sorted_schemas]))
            if args.csv_output:
                schema_csv_path = path.splitext(args.csv_output)[0] + "_" + "schemas.csv"
                with open(schema_csv_path, "w") as csv_out_handle:
                    writer = csv.writer(csv_out_handle)
                    writer.writerow(("Name", "ID"))
                    writer.writerows(sorted_schemas)
                print("\nSchema list output written to {0}".format(schema_csv_path))
        if not (args.list_dbs or len(args.list_bigsdb_schemas) > 0):
            print("Nothing to do. Try specifying \"-l\" for a list of known databases, or \"-h\" for more information.")
        csv_scheme_rows = []
        for bigsdb_scheme_name in args.list_bigsdb_schemes:
            schemes = await bigsdb_index.get_schemes_for_seqdefdb(bigsdb_scheme_name)
            csv_scheme_rows.extend([(name, id, bigsdb_scheme_name) for name, id in sorted(schemes.items())])
            print("The following are the known schemes for \"{0}\", and their associated IDs:".format(bigsdb_scheme_name))
            print("\n".join(["{0}: {1}".format(name, id) for name, id, database in csv_scheme_rows]))
        if args.csv_output:
            with open(args.csv_output, "w") as csv_out_handle:
                writer = csv.writer(csv_out_handle)
                writer.writerow(("Name", "ID", "Database Name"))
                writer.writerows(csv_scheme_rows)
            print("\nscheme list output written to {0}".format(args.csv_output))

def run_asynchronously(args: Namespace):
    asyncio.run(run(args))
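The switch from standalone parser.add_argument calls to a required mutually exclusive group lets argparse itself enforce that exactly one of the two listing modes is chosen, replacing the manual "Nothing to do" check the old code needed. A stripped-down sketch of the pattern (argument set abridged for brevity):

```python
# Minimal reproduction of the required mutually-exclusive-group pattern.
from argparse import ArgumentParser

parser = ArgumentParser(prog="info")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--retrieve-bigsdbs", "-l", action="store_true", dest="list_dbs")
group.add_argument("--retrieve-bigsdb-schemes", "-lschemes", nargs="+",
                   action="extend", dest="list_bigsdb_schemes", default=[])

print(parser.parse_args(["-l"]))  # Namespace(list_dbs=True, list_bigsdb_schemes=[])
# Passing both flags, or neither, now exits with an argparse usage error
# instead of silently doing nothing.
```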

src/autobigs/cli/program.py

@@ -7,7 +7,7 @@ from autobigs.cli import info, st
from autobigs.cli.meta import get_module_base_name
import importlib

root_parser = argparse.ArgumentParser(epilog='Use "%(prog)s info -h" to learn how to get available MLST databases, and their available schemas.'
root_parser = argparse.ArgumentParser(epilog='Use "%(prog)s info -h" to learn how to get available MLST databases, and their available schemes.'
    + ' Once that is done, use "%(prog)s st -h" to learn how to retrieve MLST profiles.'
)
subparsers = root_parser.add_subparsers(required=False)
@@ -20,17 +20,17 @@ root_parser.add_argument(
    action="store_true",
    default=False,
    required=False,
    help="Displays the autoBIGS.CLI version, and the autoBIGS.Engine version."
    help="Displays the autoBIGS.cli version, and the autoBIGS.Engine version."
)

def run():
    args = root_parser.parse_args()
    if args.version:
        print(f'autoBIGS.CLI is running version {
            metadata.version("autoBIGS-cli")}.')
        print(f'autoBIGS.Engine is running version {
            metadata.version("autoBIGS-engine")}.')
        print(f'autoBIGS.cli is running version {
            metadata.version("autobigs-cli")}.')
        print(f'autoBIGS.engine is running version {
            metadata.version("autobigs-engine")}.')
    if hasattr(args, "run"):
        args.run(args)
    elif not args.version:
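The version flag resolves installed distribution metadata at runtime rather than hard-coding versions; the commit also normalizes the distribution names to lowercase autobigs-cli/autobigs-engine. A standalone sketch, assuming both distributions are installed:

```python
# Look up installed versions the same way the --version flag does.
from importlib import metadata

for dist in ("autobigs-cli", "autobigs-engine"):
    print(f"{dist} is at version {metadata.version(dist)}.")
```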

src/autobigs/cli/st.py

@@ -4,8 +4,7 @@ import asyncio
import datetime
from autobigs.engine.writing import write_mlst_profiles_as_csv
from autobigs.engine.reading import read_multiple_fastas
from autobigs.engine.analysis.bigsdb import BIGSdbIndex
from autobigs.engine.analysis.bigsdb import BIGSdbIndex, BIGSdbMLSTProfiler

def setup_parser(parser: ArgumentParser):
    parser.description = "Returns MLST exact profile matches."
@@ -23,10 +22,18 @@ def setup_parser(parser: ArgumentParser):
        help="The BIGSdb seqdef database to use for typing."
    )
    parser.add_argument(
        "schema",
    scheme_group = parser.add_mutually_exclusive_group()
    scheme_group.add_argument(
        "--scheme-id", "-sid",
        type=int,
        help="The BIGSdb seqdef database schema ID (integer) to use for typing."
        help="The BIGSdb seqdef database scheme ID (integer) to use for typing."
    )
    scheme_group.add_argument(
        "--scheme-name", "-sn",
        type=str,
        help="The BIGSdb seqdef database scheme name (string) to use for typing. If neither this argument, nor the ID equivalent is defined, a scheme ID with name \"MLST\" will be used."
    )
    parser.add_argument(
@@ -49,12 +56,18 @@ def setup_parser(parser: ArgumentParser):

async def run(args: Namespace):
    async with BIGSdbIndex() as bigsdb_index:
        gen_strings = read_multiple_fastas(args.fastas)
        async with await bigsdb_index.build_profiler_from_seqdefdb(False, args.seqdefdb, args.schema) as mlst_profiler:
        scheme_id_lookup = await bigsdb_index.get_schemes_for_seqdefdb(args.seqdefdb)
        scheme_id = args.scheme_id or args.scheme_name or (scheme_id_lookup)["MLST"]
        scheme_name_lookup = {value: key for key, value in scheme_id_lookup.items()}
        async with await bigsdb_index.build_profiler_from_seqdefdb(False, args.seqdefdb, scheme_id) as mlst_profiler:
            if not isinstance(mlst_profiler, BIGSdbMLSTProfiler):
                raise TypeError("MLST profiler type invalid")
            mlst_profiles = mlst_profiler.profile_multiple_strings(gen_strings, args.stop_on_fail)
            failed = await write_mlst_profiles_as_csv(mlst_profiles, args.out)
            if len(failed) > 0:
                print(f"A total of {len(failed)} IDs failed (no profile found):\n{"\n".join(failed)}")
            print(f"Completed fetching MLSTs for {len(args.fastas)} sequences.")
            print(f"Completed fetching from {args.seqdefdb} for {scheme_name_lookup[scheme_id]}s for {len(args.fastas)} sequences.")

def run_asynchronously(args):
    asyncio.run(run(args))
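The scheme resolution chain (explicit ID, else name, else the scheme literally named "MLST") is the crux of the new defaulting behavior. Note that the diff's one-liner passes args.scheme_name through unresolved; a hypothetical helper making the apparently intended name-to-ID lookup explicit might read:

```python
# Hypothetical helper: resolve a scheme ID from the mutually exclusive
# --scheme-id/--scheme-name arguments, defaulting to the "MLST" scheme.
def resolve_scheme_id(scheme_id: int | None, scheme_name: str | None,
                      scheme_id_lookup: dict[str, int]) -> int:
    if scheme_id is not None:
        return scheme_id
    if scheme_name is not None:
        return scheme_id_lookup[scheme_name]  # KeyError on unknown names
    return scheme_id_lookup["MLST"]
```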