Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 0 additions & 10 deletions geofetch/__init__.py
Original file line number Diff line number Diff line change
@@ -1,18 +1,8 @@
"""Package-level data"""

import coloredlogs
import logmuse

from geofetch._version import __version__
from geofetch.finder import Finder
from geofetch.geofetch import Geofetcher

__author__ = ["Oleksandr Khoroshevskyi", "Vince Reuter", "Nathan Sheffield"]
__all__ = ["Finder", "Geofetcher", "__version__"]

_LOGGER = logmuse.init_logger("geofetch")
coloredlogs.install(
logger=_LOGGER,
datefmt="%H:%M:%S",
fmt="[%(levelname)s] [%(asctime)s] %(message)s",
)
6 changes: 0 additions & 6 deletions geofetch/finder.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
import re
from datetime import datetime, timedelta

import coloredlogs
import requests
import xmltodict

Expand All @@ -19,11 +18,6 @@
__author__ = "Oleksandr Khoroshevskyi"

_LOGGER = logging.getLogger("__name__")
coloredlogs.install(
logger=_LOGGER,
datefmt="%H:%M:%S",
fmt="[%(levelname)s] [%(asctime)s] %(message)s",
)


class Finder:
Expand Down
44 changes: 22 additions & 22 deletions geofetch/geofetch.py
Original file line number Diff line number Diff line change
Expand Up @@ -117,63 +117,63 @@ def __init__(
:param input: GSEnumber or path to the input file
:param name: Specify a project name. Defaults to GSE number or name of accessions file name
:param metadata_root: Specify a parent folder location to store metadata.
The project name will be added as a subfolder [Default: $SRAMETA:]
The project name will be added as a subfolder (Default: $SRAMETA:)
:param metadata_folder: Specify an absolute folder location to store metadata. No subfolder will be added.
Overrides value of --metadata-root [Default: Not used (--metadata-root is used by default)]
Overrides value of --metadata-root (Default: Not used, --metadata-root is used by default)
:param just_metadata: If set, don't actually run downloads, just create metadata
:param refresh_metadata: If set, re-download metadata even if it exists.
:param config_template: Project config yaml file template.
:param pipeline_samples: Specify one or more filepaths to SAMPLES pipeline interface yaml files.
These will be added to the project config file to make it immediately compatible with looper.
[Default: null]
(Default: null)
:param pipeline_project: Specify one or more filepaths to PROJECT pipeline interface yaml files.
These will be added to the project config file to make it immediately compatible with looper.
[Default: null]
(Default: null)
:param acc_anno: Produce annotation sheets for each accession.
Project combined PEP for the whole project won't be produced.
:param discard_soft: Create project without downloading soft files on the disc
:param add_dotfile: Add .pep.yaml file that points .yaml PEP file
:param disable_progressbar: Set true to disable progressbar

:param const_limit_project: Optional: Limit of the number of the constant sample characters
that should not be in project yaml. [Default: 50]
that should not be in project yaml. (Default: 50)
:param const_limit_discard: Optional: Limit of the number of the constant sample characters
that should not be discarded [Default: 250]
that should not be discarded (Default: 250)
:param attr_limit_truncate: Optional: Limit of the number of sample characters.
Any attribute with more than X characters will truncate to the first X, where X is a number of characters
[Default: 500]
(Default: 500)

:param max_soft_size: Optional: Max size of soft file.
Supported input formats : 12B, 12KB, 12MB, 12GB. [Default value: 1GB]
Supported input formats : 12B, 12KB, 12MB, 12GB. (Default: 1GB)

:param processed: Download processed da_soft_sizeta [Default: download raw data].
:param processed: Download processed data (Default: download raw data).
:param data_source: Specifies the source of data on the GEO record to retrieve processed data,
which may be attached to the collective series entity, or to individual samples. Allowable values are:
samples, series or both (all). Ignored unless 'processed' flag is set. [Default: samples]
:param filter: Filter regex for processed filenames [Default: None].Ignored unless 'processed' flag is set.
:param filter_size: Filter size for processed files that are stored as sample repository [Default: None].
samples, series or both (all). Ignored unless 'processed' flag is set. (Default: samples)
:param filter: Filter regex for processed filenames (Default: None). Ignored unless 'processed' flag is set.
:param filter_size: Filter size for processed files that are stored as sample repository (Default: None).
Works only for sample data. Supported input formats : 12B, 12KB, 12MB, 12GB.
Ignored unless 'processed' flag is set.
:param geo_folder: Specify a location to store processed GEO files.
Ignored unless 'processed' flag is set.[Default: $GEODATA:]
Ignored unless 'processed' flag is set. (Default: $GEODATA:)

:param split_experiments: Split SRR runs into individual samples. By default, SRX experiments with multiple SRR
Runs will have a single entry in the annotation table, with each run as a separate row in the
subannotation table. This setting instead treats each run as a separate sample [Works with raw data]
subannotation table. This setting instead treats each run as a separate sample (Works with raw data)
:param bam_folder: Optional: Specify folder of bam files. Geofetch will not download sra files when
corresponding bam files already exist. [Default: $SRABAM:] [Works with raw data]
corresponding bam files already exist. (Default: $SRABAM:) (Works with raw data)
:param fq_folder: Optional: Specify folder of fastq files. Geofetch will not download sra files when corresponding
fastq files already exist. [Default: $SRAFQ:] [Works with raw data]
:param use_key_subset: Use just the keys defined in this module when writing out metadata. [Works with raw data]
fastq files already exist. (Default: $SRAFQ:) (Works with raw data)
:param use_key_subset: Use just the keys defined in this module when writing out metadata. (Works with raw data)
:param sra_folder: Optional: Specify a location to store sra files
[Default: $SRARAW:" + safe_echo("SRARAW") + ]
:param bam_conversion: Optional: set True to convert bam files [Works with raw data]
(Default: $SRARAW:)
:param bam_conversion: Optional: set True to convert bam files (Works with raw data)
:param picard_path: Specify a path to the picard jar, if you want to convert fastq to bam
[Default: $PICARD:" + safe_echo("PICARD") + "] [Works with raw data]
(Default: $PICARD:) (Works with raw data)
:param add_convert_modifier: Add looper SRA convert modifier to config file.

:param skip: Skip some accessions. [Default: no skip].
:param opts: opts object [Optional]
:param skip: Skip some accessions. (Default: no skip).
:param opts: opts object (Optional)
:param str | int max_prefetch_size: argument to prefetch command's --max-size option;
for reference: https://github.com/ncbi/sra-tools/wiki/08.-prefetch-and-fasterq-dump#check-the-maximum-size-limit-of-the-prefetch-tool
:param kwargs: other values
Expand Down
3 changes: 1 addition & 2 deletions requirements/requirements-all.txt
Original file line number Diff line number Diff line change
@@ -1,10 +1,9 @@
colorama>=0.3.9
logmuse>=0.2.6
logmuse>=0.3.0
ubiquerg>=0.6.2
requests>=2.28.1
xmltodict>=0.13.0
pandas>=1.5.3
peppy>=0.40.6
rich>=12.5.1
coloredlogs>=15.0.1
piper>=0.14.4
Loading