diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index c7854d8..9007a7f 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -24,7 +24,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install github_stats_pages run: | - pip install .[test] + pip install -e .[test] - name: Test with pytest run: | echo "Username for unit tests : ${{ github.actor }}" diff --git a/conftest.py b/conftest.py index 2052cdc..3bd7a13 100644 --- a/conftest.py +++ b/conftest.py @@ -1,5 +1,7 @@ import pytest +from github_stats_pages import db + def pytest_addoption(parser): parser.addoption("--username", action="store", default="GitHub username") @@ -22,3 +24,8 @@ def token(request): if name_value is None: pytest.skip() return name_value + + +@pytest.fixture(scope="session") +def test_engine(): + return db.create_db_and_tables(test=True) diff --git a/entrypoint.sh b/entrypoint.sh index d3c6f02..58ae71a 100755 --- a/entrypoint.sh +++ b/entrypoint.sh @@ -21,6 +21,8 @@ else test="--test" fi +migrate_to_sqlite + get_repo_list -u $1 gts_run_all_repos -u $1 -t $2 -c "$1".csv ${test} diff --git a/github_stats_pages/__init__.py b/github_stats_pages/__init__.py index 0479570..72d1f7a 100644 --- a/github_stats_pages/__init__.py +++ b/github_stats_pages/__init__.py @@ -1,5 +1,10 @@ __version__ = "0.4.14" +RENAME_MAPPING = { + "count": "views", # for paths + "unique_visitors/cloners": "unique", # for clones, traffic, referrer + "uniques": "unique", # for paths +} STATS_TYPES = ["clone", "paths", "referrer", "traffic"] diff --git a/github_stats_pages/db.py b/github_stats_pages/db.py new file mode 100644 index 0000000..ef079d1 --- /dev/null +++ b/github_stats_pages/db.py @@ -0,0 +1,168 @@ +from functools import partial +from pathlib import Path +from typing import List, Type, Union + +import pandas as pd +from sqlalchemy.future import Engine +from sqlalchemy.exc import NoResultFound +from sqlmodel import SQLModel, Session, create_engine, select + +from .models import Clone, Referrer, Traffic, Paths +from .logger import app_log as log +from . 
import RENAME_MAPPING, STATS_SORT_DATAFRAME + +SQLITE_FILE_NAME = Path("data/sqlite3.db") + + +def configure(test: bool = False, echo: bool = False) -> Engine: + sqlite_file_name = ( + Path("tests_data/sqlite3.db") if test else SQLITE_FILE_NAME + ) + if not sqlite_file_name.parent.exists(): # pragma: no cover + sqlite_file_name.parent.mkdir() + sqlite_url = f"sqlite:///{sqlite_file_name}" + log.info(f"Configuring SQLite at: {sqlite_url}") + return create_engine(sqlite_url, echo=echo) + + +def create_db_and_tables(test: bool = False, echo: bool = False) -> Engine: + engine = configure(test=test, echo=echo) + SQLModel.metadata.create_all(engine) + return engine + + +def migrate_csv( + filename: Path, + model: Type[SQLModel], + engine: Engine, +): + """Migrate CSV over to SQLite""" + + log.info(f"[yellow]Loading: {filename}") + df = pd.read_csv(filename, na_filter=False) + df.rename(columns=RENAME_MAPPING, inplace=True) + log.info(f"Size of dataframe: {len(df)}") + log.info(f"columns: {df.columns}") + if "merge" not in filename.name: + if model.__name__ == "Referrer": # Add date since this isn't included + file_date = filename.name[:10] + df.insert(loc=0, column="date", value=file_date) + + if model.__name__ == "Paths": + if "repository_name" not in df.columns: + repository_names = [a.split("/")[2] for a in df["path"].values] + df.insert(1, "repository_name", repository_names) + simple_paths = [ + "/".join(a.split("/")[3:]) for a in df["path"].values + ] + df["path"] = simple_paths + else: + log.info( + f"{filename} already updated with repository_name and path" + ) + + sort_columns = STATS_SORT_DATAFRAME[model.__name__.lower()] + log.info(f"sort_columns: {sort_columns}") + df.sort_values(by=sort_columns, inplace=True) + + if model.__name__ == "Paths": + func = partial(query_path, engine=engine, model=model) + query_results = list( + map(func, df["repository_name"], df["date"], df["path"]) + ) + elif model.__name__ == "Referrer": + func = partial(query_referrer, engine=engine, model=model) + query_results = list( + map(func, df["repository_name"], df["date"], df["site"]) + ) + else: # For Clone and Traffic + func = partial(query, engine=engine, model=model) + query_results = list(map(func, df["repository_name"], df["date"])) + + new_df: pd.DataFrame = df.iloc[ + [idx for idx, item in enumerate(query_results) if not item] + ] + if new_df.empty: + log.info("No new records!") + else: + log.info(f"New records found: {len(new_df)}") + log.info("[bold yellow]Adding data") + new_df.to_sql( + model.__name__.lower(), engine, if_exists="append", index=False + ) + if len(new_df) < len(df): # pragma: no cover + log.info("[orange3]Some records exist in db") + + +def query( + repository_name: str, + date: str, + engine: Engine, + model: Union[Type[SQLModel], Clone, Referrer, Paths, Traffic], +) -> Union[SQLModel, Clone, Referrer, Paths, Traffic, None]: + + with Session(engine) as session: + result = session.exec( + select(model).where( + model.repository_name == repository_name, model.date == date + ) + ) + try: + return result.one() + except NoResultFound: + return + + +def query_all( + engine: Engine, + model: Union[Type[SQLModel], Clone, Referrer, Paths, Traffic], +) -> List[Union[SQLModel, Clone, Referrer, Paths, Traffic]]: + """Retrieve an entire table""" + + with Session(engine) as session: + result = session.exec(select(model)) + return result.all() + + +def query_path( + repository_name: str, + date: str, + path: str, + engine: Engine, + model: Union[Type[SQLModel], Paths], +) -> Union[SQLModel, Paths, 
None]: + + with Session(engine) as session: + result = session.exec( + select(model).where( + model.repository_name == repository_name, + model.date == date, + model.path == path, + ) + ) + try: + return result.one() + except NoResultFound: + return + + +def query_referrer( + repository_name: str, + date: str, + site: str, + engine: Engine, + model: Union[Type[SQLModel], Referrer], +) -> Union[SQLModel, Referrer, None]: + + with Session(engine) as session: + result = session.exec( + select(model).where( + model.repository_name == repository_name, + model.date == date, + model.site == site, + ) + ) + try: + return result.one() + except NoResultFound: + return diff --git a/github_stats_pages/models/__init__.py b/github_stats_pages/models/__init__.py new file mode 100644 index 0000000..2090a47 --- /dev/null +++ b/github_stats_pages/models/__init__.py @@ -0,0 +1,4 @@ +from .clone import Clone # noqa: F401 +from .referrer import Referrer # noqa: F401 +from .paths import Paths # noqa: F401 +from .traffic import Traffic # noqa: F401 diff --git a/github_stats_pages/models/clone.py b/github_stats_pages/models/clone.py new file mode 100644 index 0000000..1585695 --- /dev/null +++ b/github_stats_pages/models/clone.py @@ -0,0 +1,11 @@ +from typing import Optional + +from sqlmodel import SQLModel, Field + + +class Clone(SQLModel, table=True): + id: Optional[int] = Field(default=None, primary_key=True) + repository_name: str + date: str + clones: int + unique: int diff --git a/github_stats_pages/models/paths.py b/github_stats_pages/models/paths.py new file mode 100644 index 0000000..25cf34e --- /dev/null +++ b/github_stats_pages/models/paths.py @@ -0,0 +1,13 @@ +from typing import Optional + +from sqlmodel import SQLModel, Field + + +class Paths(SQLModel, table=True): + id: Optional[int] = Field(default=None, primary_key=True) + date: str + repository_name: Optional[str] + path: str + title: str + views: int + unique: int diff --git a/github_stats_pages/models/referrer.py b/github_stats_pages/models/referrer.py new file mode 100644 index 0000000..96c2107 --- /dev/null +++ b/github_stats_pages/models/referrer.py @@ -0,0 +1,12 @@ +from typing import Optional + +from sqlmodel import SQLModel, Field + + +class Referrer(SQLModel, table=True): + id: Optional[int] = Field(default=None, primary_key=True) + repository_name: str + site: str + date: Optional[str] + views: int + unique: int diff --git a/github_stats_pages/models/traffic.py b/github_stats_pages/models/traffic.py new file mode 100644 index 0000000..1842f67 --- /dev/null +++ b/github_stats_pages/models/traffic.py @@ -0,0 +1,11 @@ +from typing import Optional + +from sqlmodel import SQLModel, Field + + +class Traffic(SQLModel, table=True): + id: Optional[int] = Field(default=None, primary_key=True) + repository_name: str + date: str + views: int + unique: int diff --git a/github_stats_pages/stats_plots.py b/github_stats_pages/stats_plots.py index d3814dc..f53eb5f 100644 --- a/github_stats_pages/stats_plots.py +++ b/github_stats_pages/stats_plots.py @@ -15,9 +15,14 @@ import pandas as pd from .logger import app_log as log +from . 
import db +from .models import Clone, Traffic prefix = "merged" stats_type = ["traffic", "clone"] +c_columns = ["repository_name", "date", "total", "unique"] +r_columns = ["repository_name", "date", "source", "total", "unique"] +t_columns = ["repository_name", "date", "views", "unique"] TOOLTIPS = [ ("index", "$index"), @@ -27,21 +32,36 @@ main_p = Path(__file__).parent -def load_data(data_dir: str) -> Dict[str, pd.DataFrame]: +def load_data( + test: bool = False, engine: Optional[db.Engine] = None +) -> Dict[str, pd.DataFrame]: """ - Load stats CSV as dict of pandas DataFrame + Load stats from the SQLite database as dict of pandas DataFrame - :param data_dir: Path containing merged*.csv + :param test: Use the test database (tests_data/sqlite3.db) + :param engine: Existing SQLModel Engine. Created if not provided :return: Dict of pandas DataFrame """ - p = Path(data_dir) / "data" + if not engine: + engine = db.create_db_and_tables(test=test) dict_df = {} - for stats in stats_type: - stat_file = p / f"{prefix}_{stats}.csv" - dict_df[stats] = pd.read_csv(stat_file) + for stats, m in zip(stats_type, [Traffic, Clone]): + records = [i.dict() for i in db.query_all(engine, m)] + if records: + dict_df[stats] = pd.DataFrame.from_records(records, index="id") + else: + log.warning(f"[bold red]No data in {stats} table!") + names = [] + if stats == "clone": + names = c_columns + elif stats == "traffic": + names = t_columns + elif stats == "referrer": + names = r_columns + dict_df[stats] = pd.DataFrame(columns=names) return dict_df @@ -64,8 +82,8 @@ def get_date_range(df_list: List[pd.DataFrame]) -> Optional[Tuple[dt, dt]]: if len(x_min) > 0: return min(x_min) - td(days=1), max(x_max) + td(days=1) - else: - return None + else: # pragma: no cover + return def date_subplots( @@ -211,19 +229,18 @@ def user_readme(username: str, token: str = None) -> str: def make_plots( username: str, - data_dir: str, out_dir: str, csv_file: str, symlink: bool = False, token: str = "", include_repos: str = "", exclude_repos: str = "", + test: bool = False, ): """ Generate HTML pages containing Bokeh plots :param username: GitHub username or organization - :param data_dir: Path to working folder. CSVs are under a 'data' sub-folder :param out_dir: Location of outputted HTML :param csv_file: CSV file containing user or organization repository list :param symlink: Symbolic link styles assets instead of copy. Default: copy @@ -232,6 +249,7 @@ def make_plots( Ignore csv_file inputs. Comma separated for multiples :param exclude_repos: Repositories to exclude from csv_file list. 
Comma separated for more than one + :param test: For CI testing """ if include_repos and exclude_repos: @@ -244,7 +262,7 @@ def make_plots( (~repository_df["fork"]) & (~repository_df["archived"]) ] - dict_df = load_data(data_dir) + dict_df = load_data(test=test) # Add repo folder for all static repo pages p_repos = Path(out_dir) / "repos" diff --git a/scripts/gts_run_all_repos b/scripts/gts_run_all_repos index 5f2c3c5..0798dcf 100755 --- a/scripts/gts_run_all_repos +++ b/scripts/gts_run_all_repos @@ -6,6 +6,7 @@ import pandas as pd from github_stats_pages import gts_run from github_stats_pages.logger import app_log as log +from github_stats_pages import db def read_csv(csv_file: str) -> pd.DataFrame: @@ -28,7 +29,7 @@ if __name__ == "__main__": ) args = parser.parse_args() - log.info("[yellow]Running gts_run_all_repos script") + log.info("[bold yellow]Running gts_run_all_repos script") df = read_csv(args.csv_file) @@ -52,14 +53,17 @@ if __name__ == "__main__": gts_run.run_each_repo(args.user, args.token, repo_name, save_csv=True) gts_run.get_top_paths(args.user, args.token, repo_name, save_csv=True) - # Save files in a data folder - log.info("[yellow]Moving records to data/folder") + log.info("[yellow]Moving CSV records to SQLite") + engine = db.create_db_and_tables() + p_cwd = Path.cwd() p_data = p_cwd / "data" - if not p_data.exists(): - p_data.mkdir() - for f in p_cwd.glob("????-??-??-???-???-*stats.csv"): - f.rename(p_data / f.name) + model_names = ["clone", "traffic", "paths", "referrer"] + models = [db.Clone, db.Traffic, db.Paths, db.Referrer] + for datatype, model in zip(model_names, models): + for f in p_cwd.glob(f"????-??-??-???-???-*{datatype}-stats.csv"): + db.migrate_csv(f, model, engine) + f.rename(p_data / f.name) - log.info("[dark_green]gts_run_all_repos script completed!") + log.info("[bold dark_green]gts_run_all_repos script completed!") diff --git a/scripts/make_stats_plots b/scripts/make_stats_plots index 29084af..0649fad 100755 --- a/scripts/make_stats_plots +++ b/scripts/make_stats_plots @@ -17,13 +17,6 @@ if __name__ == "__main__": parser.add_argument( "-t", "--token", default="", help="GitHub API token" ) # Avoids rate limiting - parser.add_argument( - "-d", - "--data-dir", - default=Path.cwd(), - help="""Folder path containing merge CSV files - 'Default: current working directory""", - ) parser.add_argument( "-o", "--out-dir", @@ -53,7 +46,7 @@ if __name__ == "__main__": args = parser.parse_args() vargs = vars(args) - log.info("[yellow]Running make_stats_plots script") + log.info("[bold yellow]Running make_stats_plots script") if args.include_repos and args.exclude_repos: msg = "Cannot provide include_repos and exclude_repos simultaneously!" 
@@ -61,3 +54,5 @@ if __name__ == "__main__": raise ValueError(msg) stats_plots.make_plots(**vargs) + + log.info("[bold dark_green]make_stats_plots script completed!") diff --git a/scripts/merge_csv b/scripts/merge_csv index 27d4c28..3763e28 100755 --- a/scripts/merge_csv +++ b/scripts/merge_csv @@ -4,15 +4,14 @@ from pathlib import Path import pandas as pd -from github_stats_pages import STATS_TYPES, STATS_COLUMNS, STATS_SORT_DATAFRAME +from github_stats_pages import ( + RENAME_MAPPING, + STATS_TYPES, + STATS_COLUMNS, + STATS_SORT_DATAFRAME, +) from github_stats_pages.logger import app_log as log -rename_mapping = { - "count": "views", # for paths - "unique_visitors/cloners": "unique", # for clones, traffic, referrer - "uniques": "unique", # for paths -} - if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( @@ -32,7 +31,7 @@ if __name__ == "__main__": log.info(f"Number of {stat} files found: {len(files)}") for file in files: df = pd.read_csv(file) - df.rename(columns=rename_mapping, inplace=True) + df.rename(columns=RENAME_MAPPING, inplace=True) if stat == "traffic": df.rename(columns={"total": "views"}, inplace=True) diff --git a/scripts/migrate_to_sqlite b/scripts/migrate_to_sqlite new file mode 100755 index 0000000..777af0f --- /dev/null +++ b/scripts/migrate_to_sqlite @@ -0,0 +1,51 @@ +#!/usr/bin/env python +import os +from pathlib import Path + +import pandas as pd + +from github_stats_pages import db +from github_stats_pages.logger import app_log as log +from github_stats_pages.models import Clone, Traffic, Referrer, Paths + +DROP_DUPLICATES_SUBSET = ["date", "repository_name", "site"] + + +if __name__ == "__main__": + log.info("[bold yellow]Running migrate_to_sqlite script") + + sql_path = db.SQLITE_FILE_NAME + if sql_path.exists(): + log.info("SQLite DB exists!") + engine = db.create_db_and_tables() + + p_data = Path("data") + + # Handle referrer files (missing date field) + referrer_files = list(p_data.glob("*referrer-stats.csv")) + log.info(f"Number of referrer files: {len(referrer_files)}") + referrer_merged_df = pd.DataFrame() + for r_file in referrer_files: + file_date = r_file.name[:10] # filenames begin with YYYY-MM-DD + r_df = pd.read_csv(r_file) + r_df.insert(loc=0, column="date", value=file_date) + referrer_merged_df = pd.concat([referrer_merged_df, r_df], ignore_index=True) + if not referrer_merged_df.empty: + referrer_merged_df.drop_duplicates( + subset=DROP_DUPLICATES_SUBSET, keep="last", inplace=True + ) + log.info(f"Referrer record number: {len(referrer_merged_df)}") + referrer_outfile = p_data / "merged_referrer.csv" + log.info(f"Writing: {referrer_outfile}") + referrer_merged_df.to_csv(referrer_outfile, index=False) + + merged_files = sorted(p_data.glob("merged_*.csv")) + if merged_files: + model_list = [Clone, Paths, Referrer, Traffic] # matches sorted filenames + for file, model in zip(merged_files, model_list): + os.system(f"head -5 {file}") # log a preview of each CSV + db.migrate_csv(file, model=model, engine=engine) + else: + log.info("No merged files to migrate!") + + log.info("[bold dark_green]migrate_to_sqlite script completed!") diff --git a/setup.cfg b/setup.cfg index dbf2704..4b5a167 100644 --- a/setup.cfg +++ b/setup.cfg @@ -37,11 +37,13 @@ install_requires = PyGithub == 1.55 tabulate == 0.8.7 rich >= 12.4.1, <13.0.0 + sqlmodel >= 0.0.6, < 1.0.0 scripts = scripts/get_repo_list scripts/gts_run_all_repos scripts/make_stats_plots scripts/merge_csv + scripts/migrate_to_sqlite #package_dir= # =github_stats_pages packages = diff --git a/tests/test_db.py b/tests/test_db.py new file mode 
100644 index 0000000..ab54525 --- /dev/null +++ b/tests/test_db.py @@ -0,0 +1,37 @@ +from pathlib import Path + +from github_stats_pages import db + +t_data = Path("tests_data/data/") + + +def test_migrate_csv(test_engine): + # This CSV is already present so no new records exist + db.migrate_csv(t_data / "merged_clone.csv", db.Clone, test_engine) + + # This will add new records and ensure Paths testing + db.migrate_csv(t_data / "merged_paths.csv", db.Paths, test_engine) + + +def test_query(test_engine): + + t_query = db.query( + "github-stats-pages", "2021-02-28", test_engine, db.Clone + ) + assert isinstance(t_query, db.Clone) + + t_query = db.query( + "github-stats-pages", "2021-02-28", test_engine, db.Traffic + ) + assert isinstance(t_query, db.Traffic) + + # This returns a None result + assert not db.query( + "github-stats-pages", "2020-01-01", test_engine, db.Clone + ) + + +def test_query_all(test_engine): + + t_query = db.query_all(test_engine, db.Clone) + assert isinstance(t_query, list) diff --git a/tests/test_stats_plots.py b/tests/test_stats_plots.py index 29138df..e2a56a0 100644 --- a/tests/test_stats_plots.py +++ b/tests/test_stats_plots.py @@ -9,8 +9,8 @@ tests_data_folder = Path("tests_data") -def test_load_data(): - dict_df = stats_plots.load_data(tests_data_folder) +def test_load_data(test_engine): + dict_df = stats_plots.load_data(test=True, engine=test_engine) assert isinstance(dict_df, dict) @@ -33,9 +33,9 @@ def html_check(input_list: list, exists=True): d0 = { "username": username, "token": token, - "data_dir": tests_data_folder, "out_dir": tests_data_folder, "csv_file": tests_data_folder / "repository.csv", + "test": True, } html_list = [ "index.html", diff --git a/tests_data/data/merged_paths.csv b/tests_data/data/merged_paths.csv new file mode 100644 index 0000000..74ff2cf --- /dev/null +++ b/tests_data/data/merged_paths.csv @@ -0,0 +1,79 @@ +date,path,title,views,unique +2021-05-13,/astrochun/Evolution-of-Galaxies,astrochun/Evolution-of-Galaxies: Research with Dr. Chun Ly. 
I am working with...,23,3 +2021-05-13,/astrochun/Evolution-of-Galaxies/blob/master/Analysis/emission_line_fit.py,Evolution-of-Galaxies/emission_line_fit.py at master · astrochun/Evolution-of...,4,2 +2021-05-13,/astrochun/Evolution-of-Galaxies/issues,Issues · astrochun/Evolution-of-Galaxies,10,3 +2021-05-13,/astrochun/Evolution-of-Galaxies/issues/55,Bug with saving fitting results · Issue #55 · astrochun/Evolution-of-Galaxies,3,2 +2021-05-13,/astrochun/Evolution-of-Galaxies/pull/56,hotfix/v0.8.2 by astrochun · Pull Request #56 · astrochun/Evolution-of-Galaxies,6,2 +2021-05-13,/astrochun/Evolution-of-Galaxies/pull/56/files,hotfix/v0.8.2 by astrochun · Pull Request #56 · astrochun/Evolution-of-Galaxies,4,2 +2021-05-13,/astrochun/Evolution-of-Galaxies/pull/57,Feature/53 windows uname by astrochun · Pull Request #57 · astrochun/Evolutio...,3,2 +2021-05-13,/astrochun/Evolution-of-Galaxies/pulls,Pull requests · astrochun/Evolution-of-Galaxies,19,3 +2021-05-13,/astrochun/Evolution-of-Galaxies/tree/develop,astrochun/Evolution-of-Galaxies at develop,4,2 +2021-05-13,/astrochun/Evolution-of-Galaxies/tree/master/Analysis,Evolution-of-Galaxies/Analysis at master · astrochun/Evolution-of-Galaxies,5,3 +2021-05-13,/astrochun/GNIRSLongSlit,GitHub - astrochun/GNIRSLongSlit: Python 2.7 codes to reduce Longslit data fr...,2,2 +2021-05-13,/astrochun/MMTtools,GitHub - astrochun/MMTtools: A set of Python 2.7 and 3.x codes to use with da...,2,2 +2021-05-13,/astrochun/Metallicity_Stack_Commons,astrochun/Metallicity_Stack_Commons: Set of common codes used in metallicity ...,19,3 +2021-05-13,/astrochun/Metallicity_Stack_Commons/blob/master/Metallicity_Stack_Commons/__init__.py,Metallicity_Stack_Commons/__init__.py at master · astrochun/Metallicity_Stack...,5,2 +2021-05-13,/astrochun/Metallicity_Stack_Commons/issues,Issues · astrochun/Metallicity_Stack_Commons,18,3 +2021-05-13,/astrochun/Metallicity_Stack_Commons/issues/114,Bug: Change HbHgHd_fit to have same y-axis scale · Issue #114 · astrochun/Met...,4,2 +2021-05-13,/astrochun/Metallicity_Stack_Commons/pull/115,hotfix/1.4.6 by astrochun · Pull Request #115 · astrochun/Metallicity_Stack_C...,4,2 +2021-05-13,/astrochun/Metallicity_Stack_Commons/pull/117,hotfix/v1.4.7 by Reagen · Pull Request #117 · astrochun/Metallicity_Stack_Com...,4,2 +2021-05-13,/astrochun/Metallicity_Stack_Commons/pull/118,hotfix/1.4.7 by Reagen · Pull Request #118 · astrochun/Metallicity_Stack_Comm...,11,2 +2021-05-13,/astrochun/Metallicity_Stack_Commons/pulls,Pull requests · astrochun/Metallicity_Stack_Commons,28,3 +2021-05-13,/astrochun/Metallicity_Stack_Commons/tree/master/Metallicity_Stack_Commons,Metallicity_Stack_Commons/Metallicity_Stack_Commons at master · astrochun/Met...,9,3 +2021-05-13,/astrochun/Metallicity_Stack_Commons/tree/master/Metallicity_Stack_Commons/analysis,Metallicity_Stack_Commons/Metallicity_Stack_Commons/analysis at master · astr...,4,2 +2021-05-13,/astrochun/PyMontage,GitHub - astrochun/PyMontage: Python 2.7 scripts to running the IPAC Montage ...,1,1 +2021-05-13,/astrochun/Zcalbase_gal,astrochun/Zcalbase_gal: Python 3.x codes for Metallicity Calibration Database...,18,2 +2021-05-13,/astrochun/Zcalbase_gal/blob/master/analysis/deep2_r23_o32/zoom_and_gauss_general.py,Zcalbase_gal/zoom_and_gauss_general.py at master · astrochun/Zcalbase_gal,3,1 +2021-05-13,/astrochun/Zcalbase_gal/issues,Issues · astrochun/Zcalbase_gal,13,2 +2021-05-13,/astrochun/Zcalbase_gal/issues/100,Bug: Incorrect use of normalization for fits · Issue #100 · astrochun/Zcalbas...,5,2 
+2021-05-13,/astrochun/Zcalbase_gal/pull/99,hotfix/v0.13.1 by Reagen · Pull Request #99 · astrochun/Zcalbase_gal,14,2 +2021-05-13,/astrochun/Zcalbase_gal/pull/99/commits,hotfix/v0.13.1 by Reagen · Pull Request #99 · astrochun/Zcalbase_gal,4,2 +2021-05-13,/astrochun/Zcalbase_gal/pull/99/files,hotfix/v0.13.1 by Reagen · Pull Request #99 · astrochun/Zcalbase_gal,9,1 +2021-05-13,/astrochun/Zcalbase_gal/pulls,Pull requests · astrochun/Zcalbase_gal,21,2 +2021-05-13,/astrochun/Zcalbase_gal/tree/master/analysis,Zcalbase_gal/analysis at master · astrochun/Zcalbase_gal,4,1 +2021-05-13,/astrochun/Zcalbase_gal/tree/master/analysis/deep2_r23_o32,Zcalbase_gal/analysis/deep2_r23_o32 at master · astrochun/Zcalbase_gal,4,1 +2021-05-13,/astrochun/academic-ads-bibtex/blob/main/setup.py,academic-ads-bibtex/setup.py at main · astrochun/academic-ads-bibtex,1,1 +2021-05-13,/astrochun/figshare_autosync_check,astrochun/figshare_autosync_check,6,1 +2021-05-13,/astrochun/figshare_autosync_check/actions,Actions · astrochun/figshare_autosync_check,3,1 +2021-05-13,/astrochun/figshare_autosync_check/actions/runs/415969321,Add data.csv 2020-08-15 data · astrochun/figshare_autosync_check@1ecfbfa,2,1 +2021-05-13,/astrochun/figshare_autosync_check/actions/workflows/create_release.yml,Actions · astrochun/figshare_autosync_check,3,1 +2021-05-13,/astrochun/figshare_autosync_check/blob/main/.github/workflows/create_release.yml,figshare_autosync_check/create_release.yml at main · astrochun/figshare_autos...,4,1 +2021-05-13,/astrochun/figshare_autosync_check/commit/9829445961cfeef4b203925a3d5dcb073341863d,Add .git dot files · astrochun/figshare_autosync_check@9829445,1,1 +2021-05-13,/astrochun/figshare_autosync_check/commits/main,Commits · astrochun/figshare_autosync_check,1,1 +2021-05-13,/astrochun/figshare_autosync_check/tree/main/.github/workflows,figshare_autosync_check/.github/workflows at main · astrochun/figshare_autosy...,5,1 +2021-05-13,/astrochun/github-stats,astrochun/github-stats: My GitHub stats,14,4 +2021-05-13,/astrochun/github-stats-pages,astrochun/github-stats-pages: Retrieve statistics for a user's repositories a...,37,2 +2021-05-13,/astrochun/github-stats-pages/actions,Actions · astrochun/github-stats-pages,17,1 +2021-05-13,/astrochun/github-stats-pages/graphs/traffic,Traffic · astrochun/github-stats-pages,5,1 +2021-05-13,/astrochun/github-stats-pages/issues,Issues · astrochun/github-stats-pages,24,2 +2021-05-13,/astrochun/github-stats-pages/issues/52,Add popular content · Issue #52 · astrochun/github-stats-pages,10,2 +2021-05-13,/astrochun/github-stats-pages/pull/55,Feature: Add GitHub repo description on repo pages by astrochun · Pull Reques...,8,1 +2021-05-13,/astrochun/github-stats-pages/pull/56,Switch over to use PyGitHub for more capabilities by astrochun · Pull Request...,8,1 +2021-05-13,/astrochun/github-stats-pages/pulls,Pull requests · astrochun/github-stats-pages,20,1 +2021-05-13,/astrochun/github-stats-pages/pulse,Pulse · astrochun/github-stats-pages,6,2 +2021-05-13,/astrochun/github-stats-pages/releases,Releases · astrochun/github-stats-pages,26,2 +2021-05-13,/astrochun/github-stats/blob/gh-pages/repos/Extract1D.html,github-stats/Extract1D.html at gh-pages · astrochun/github-stats,1,1 +2021-05-13,/astrochun/github-stats/blob/gh-pages/repositories.html,github-stats/repositories.html at gh-pages · astrochun/github-stats,1,1 +2021-05-13,/astrochun/github-stats/blob/main/.github/workflows/gh-pages-deploy.yml,github-stats/gh-pages-deploy.yml at main · astrochun/github-stats,1,1 
+2021-05-13,/astrochun/github-stats/network,Network Graph · astrochun/github-stats,1,1 +2021-05-13,/astrochun/github-stats/network/dependencies,Dependencies · astrochun/github-stats,1,1 +2021-05-13,/astrochun/github-stats/tree/gh-pages,astrochun/github-stats at gh-pages,4,2 +2021-05-13,/astrochun/github-stats/tree/gh-pages/repos,github-stats/repos at gh-pages · astrochun/github-stats,2,1 +2021-05-13,/astrochun/github-stats/tree/gh-pages/styles,github-stats/styles at gh-pages · astrochun/github-stats,2,1 +2021-05-13,/astrochun/github-stats/tree/main/.github/workflows,github-stats/.github/workflows at main · astrochun/github-stats,3,1 +2021-05-13,/astrochun/site-hugo-academic,astrochun/site-hugo-academic,2,2 +2021-05-13,/astrochun/test-github-stats,astrochun/test-github-stats: Testing of GitHub action for GitHub pages deploy...,2,1 +2021-05-13,/astrochun/test-github-stats/blob/main/data/2021-01-17-00h-46m-clone-stats.csv,test-github-stats/2021-01-17-00h-46m-clone-stats.csv at main · astrochun/test...,1,1 +2021-05-13,/astrochun/test-github-stats/blob/main/data/2021-01-17-00h-46m-referrer-stats.csv,test-github-stats/2021-01-17-00h-46m-referrer-stats.csv at main · astrochun/t...,3,1 +2021-05-13,/astrochun/test-github-stats/blob/main/data/2021-01-17-00h-46m-traffic-stats.csv,test-github-stats/2021-01-17-00h-46m-traffic-stats.csv at main · astrochun/te...,2,1 +2021-05-13,/astrochun/test-github-stats/blob/main/data/2021-01-17-00h-52m-clone-stats.csv,test-github-stats/2021-01-17-00h-52m-clone-stats.csv at main · astrochun/test...,1,1 +2021-05-13,/astrochun/test-github-stats/blob/main/data/2021-02-20-08h-55m-referrer-stats.csv,test-github-stats/2021-02-20-08h-55m-referrer-stats.csv at main · astrochun/t...,1,1 +2021-05-13,/astrochun/test-github-stats/blob/main/data/2021-02-26-22h-11m-traffic-stats.csv,test-github-stats/2021-02-26-22h-11m-traffic-stats.csv at main · astrochun/te...,1,1 +2021-05-13,/astrochun/test-github-stats/commit/27a681b3ce456710f7db46606566f000e144d6f4,Update data: 2021-05-11 · astrochun/test-github-stats@27a681b,1,1 +2021-05-13,/astrochun/test-github-stats/commit/de2412de46852a7acdfad4db7a10bbd956ef8448,Update data: 2021-05-12 · astrochun/test-github-stats@de2412d,2,1 +2021-05-13,/astrochun/test-github-stats/tree/main/data,test-github-stats/data at main · astrochun/test-github-stats,3,1 +2021-05-13,/astrochun/test-stats,astrochun/test-stats,2,1 +2021-05-13,/astrochun/test-stats/actions,Actions · astrochun/test-stats,1,1 +2021-05-13,/astrochun/test-stats/tree/gh-pages,astrochun/test-stats at gh-pages,1,1 +2021-05-13,/astrochun/test-stats/tree/gh-pages/repos,test-stats/repos at gh-pages · astrochun/test-stats,1,1 +2021-05-13,/astrochun/voxcharta-my-voting-record,GitHub - astrochun/voxcharta-my-voting-record: A Python tool to extract infor...,1,1 diff --git a/tests_data/sqlite3.db b/tests_data/sqlite3.db new file mode 100644 index 0000000..b4540e6 Binary files /dev/null and b/tests_data/sqlite3.db differ
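For reviewers, a minimal sketch of how the pieces added in github_stats_pages/db.py fit together, assuming the models above; the CSV filename and the repository/date values are hypothetical stand-ins:

    from pathlib import Path

    from github_stats_pages import db
    from github_stats_pages.models import Clone, Traffic

    # Creates data/sqlite3.db (and its parent folder) if needed,
    # then ensures all four tables exist
    engine = db.create_db_and_tables()

    # Ingest one dated clone-stats CSV; migrate_csv() first queries each
    # (repository_name, date) pair, so rows already in the table are skipped
    db.migrate_csv(
        Path("data/2021-05-13-00h-00m-clone-stats.csv"), Clone, engine
    )

    # Single-record lookup: returns the model instance, or None if absent
    record = db.query("github-stats-pages", "2021-05-13", engine, Clone)
    if record:
        print(record.clones, record.unique)

    # Whole-table retrieval as a list of model instances
    all_traffic = db.query_all(engine, Traffic)
    print(f"{len(all_traffic)} traffic records")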
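Likewise, a short sketch of the revised plotting path, mirroring tests/test_stats_plots.py: load_data() no longer takes a data_dir and instead pulls the Traffic and Clone tables straight from the engine (test=True points at tests_data/sqlite3.db):

    from github_stats_pages import db, stats_plots

    engine = db.create_db_and_tables(test=True)
    dict_df = stats_plots.load_data(test=True, engine=engine)

    # Keys mirror stats_type; each value is a DataFrame built from the
    # table records, or an empty frame with the fallback column names
    print(dict_df["traffic"].head())
    print(dict_df["clone"].head())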