diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 81959dfbd..8c5e119aa 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -4,7 +4,6 @@ on: paths: - '**.yml' - '**.toml' - - '**.ini' - '**.py' - '**.json' - '**.csv' @@ -14,7 +13,6 @@ on: paths: - '**.yml' - '**.toml' - - '**.ini' - '**.py' - '**.json' - '**.csv' @@ -33,24 +31,14 @@ jobs: uses: actions/checkout@v4 with: persist-credentials: false - - name: Setup Miniconda using Python ${{ matrix.python-version }} - uses: conda-incubator/setup-miniconda@v3 + - name: Install uv + uses: astral-sh/setup-uv@v4 with: - miniconda-version: "latest" - auto-update-conda: true - activate-environment: ogcore-dev - environment-file: environment.yml python-version: ${{ matrix.python-version }} - auto-activate-base: false - - name: Build - shell: bash -l {0} - run: | - pip install -e . + - name: Install package and dependencies + run: uv sync --extra dev - name: Test - shell: bash -l {0} - working-directory: ./ - run: | - python -m pytest -m "not local and not benchmark" --cov=./ --cov-report=xml + run: uv run python -m pytest -m "not local and not benchmark" --cov=./ --cov-report=xml - name: Upload coverage to Codecov if: matrix.os == 'ubuntu-latest' && contains(github.repository, 'PSLmodels/OG-Core') uses: codecov/codecov-action@v4 diff --git a/.github/workflows/check_black.yml b/.github/workflows/check_black.yml deleted file mode 100644 index ca27c0693..000000000 --- a/.github/workflows/check_black.yml +++ /dev/null @@ -1,15 +0,0 @@ - -name: Check Black formatting - -on: [push, pull_request] - -jobs: - lint: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 - - uses: psf/black@stable - with: - options: "-l 79 --check" - src: "." 
\ No newline at end of file diff --git a/.github/workflows/check_ruff.yml b/.github/workflows/check_ruff.yml new file mode 100644 index 000000000..78e94c496 --- /dev/null +++ b/.github/workflows/check_ruff.yml @@ -0,0 +1,14 @@ +name: Check Ruff formatting and linting + +on: [push, pull_request] + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v4 + - name: Check formatting with Ruff + run: uvx ruff format --check . + - name: Check linting with Ruff + run: uvx ruff check . diff --git a/.github/workflows/deploy_docs.yml b/.github/workflows/deploy_docs.yml index dc91edc73..431198dcd 100644 --- a/.github/workflows/deploy_docs.yml +++ b/.github/workflows/deploy_docs.yml @@ -21,21 +21,18 @@ jobs: with: persist-credentials: false - - name: Setup Miniconda - uses: conda-incubator/setup-miniconda@v3 + - name: Install uv + uses: astral-sh/setup-uv@v4 with: - miniconda-version: "latest" - activate-environment: ogcore-dev - environment-file: environment.yml python-version: "3.13" - auto-activate-base: false + + - name: Install package and dependencies + run: uv sync --extra dev --extra docs - name: Build # Build Jupyter Book - shell: bash -l {0} run: | - pip install -e . 
- python -m ipykernel install --user --name=ogcore-dev - make build-docs + uv run python -m ipykernel install --user --name=ogcore-dev + uv run make build-docs - name: Deploy uses: JamesIves/github-pages-deploy-action@v4 diff --git a/.github/workflows/docs_check.yml b/.github/workflows/docs_check.yml index 15a4dd55e..cbc0505e6 100644 --- a/.github/workflows/docs_check.yml +++ b/.github/workflows/docs_check.yml @@ -19,18 +19,15 @@ jobs: with: persist-credentials: false - - name: Setup Miniconda - uses: conda-incubator/setup-miniconda@v3 + - name: Install uv + uses: astral-sh/setup-uv@v4 with: - miniconda-version: "latest" - activate-environment: ogcore-dev - environment-file: environment.yml python-version: "3.13" - auto-activate-base: false + + - name: Install package and dependencies + run: uv sync --extra dev --extra docs - name: Build # Build Jupyter Book - shell: bash -l {0} run: | - pip install -e . - python -m ipykernel install --user --name=ogcore-dev - make build-docs + uv run python -m ipykernel install --user --name=ogcore-dev + uv run make build-docs diff --git a/.github/workflows/publish_to_pypi.yml b/.github/workflows/publish_to_pypi.yml index 8c66f73de..6f074c55a 100644 --- a/.github/workflows/publish_to_pypi.yml +++ b/.github/workflows/publish_to_pypi.yml @@ -13,12 +13,12 @@ jobs: steps: - name: Checkout repo uses: actions/checkout@v4 - - name: Setup Python - uses: actions/setup-python@v5 + - name: Install uv + uses: astral-sh/setup-uv@v4 with: python-version: "3.13" - name: Build package - run: make pip-package + run: uv build - name: Publish a Python distribution to PyPI uses: pypa/gh-action-pypi-publish@release/v1 with: diff --git a/Makefile b/Makefile index cbafe5595..b8e0b0d95 100644 --- a/Makefile +++ b/Makefile @@ -71,10 +71,9 @@ build-docs: @cd ./docs ; python make_params.py; python make_vars.py; jb build ./book format: - black . -l 79 + ruff format . + ruff check . --fix linecheck . 
--fix pip-package: - pip install wheel - pip install setuptools - python setup.py sdist bdist_wheel + uv build diff --git a/README.md b/README.md index c7c57e84f..0912f17b6 100644 --- a/README.md +++ b/README.md @@ -3,8 +3,8 @@ | | | | --- | --- | | Org | [![PSL cataloged](https://img.shields.io/badge/PSL-cataloged-a0a0a0.svg)](https://www.PSLmodels.org) [![OS License: CCO-1.0](https://img.shields.io/badge/OS%20License-CCO%201.0-yellow)](https://github.com/PSLmodels/OG-Core/blob/master/LICENSE) [![Jupyter Book Badge](https://raw.githubusercontent.com/jupyter-book/jupyter-book/next/docs/media/images/badge.svg)](https://pslmodels.github.io/OG-Core/) | -| Package | [![Python 3.12](https://img.shields.io/badge/python-3.12-blue.svg)](https://www.python.org/downloads/release/python-3129/) [![Python 3.13](https://img.shields.io/badge/python-3.13-blue.svg)](https://www.python.org/downloads/release/python-3137/) [![PyPI Latest Release](https://img.shields.io/pypi/v/ogcore.svg)](https://pypi.org/project/ogcore/) [![PyPI Downloads](https://img.shields.io/pypi/dm/ogcore.svg?label=PyPI%20downloads)](https://pypi.org/project/ogcore/) [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) | -| Testing | ![example event parameter](https://github.com/PSLmodels/OG-Core/actions/workflows/build_and_test.yml/badge.svg?branch=master) ![example event parameter](https://github.com/PSLmodels/OG-Core/actions/workflows/deploy_docs.yml/badge.svg?branch=master) ![example event parameter](https://github.com/PSLmodels/OG-Core/actions/workflows/check_black.yml/badge.svg?branch=master) [![Codecov](https://codecov.io/gh/PSLmodels/OG-Core/branch/master/graph/badge.svg)](https://codecov.io/gh/PSLmodels/OG-Core) | +| Package | [![Python 3.12](https://img.shields.io/badge/python-3.12-blue.svg)](https://www.python.org/downloads/release/python-3129/) [![Python 
3.13](https://img.shields.io/badge/python-3.13-blue.svg)](https://www.python.org/downloads/release/python-3137/) [![PyPI Latest Release](https://img.shields.io/pypi/v/ogcore.svg)](https://pypi.org/project/ogcore/) [![PyPI Downloads](https://img.shields.io/pypi/dm/ogcore.svg?label=PyPI%20downloads)](https://pypi.org/project/ogcore/) [![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff) | +| Testing | ![example event parameter](https://github.com/PSLmodels/OG-Core/actions/workflows/build_and_test.yml/badge.svg?branch=master) ![example event parameter](https://github.com/PSLmodels/OG-Core/actions/workflows/deploy_docs.yml/badge.svg?branch=master) ![example event parameter](https://github.com/PSLmodels/OG-Core/actions/workflows/check_ruff.yml/badge.svg?branch=master) [![Codecov](https://codecov.io/gh/PSLmodels/OG-Core/branch/master/graph/badge.svg)](https://codecov.io/gh/PSLmodels/OG-Core) | OG-Core is an overlapping-generations (OG) model core theory, logic, and solution method algorithms that allow for dynamic general equilibrium analysis of fiscal policy. OG-Core provides a general framework and is a dependency of several country-specific OG model caliibrations, as listed in the table belowsuch as [OG-USA](https://github.com/PSLmodels/OG-USA) and [OG-UK](https://github.com/PSLmodels/OG-UK). The model output includes changes in macroeconomic aggregates (GDP, investment, consumption), wages, interest rates, and the stream of tax revenues over time. Regularly updated documentation of the model theory--its output, and solution method--and the Python API is available [here](https://pslmodels.github.io/OG-Core). @@ -24,13 +24,12 @@ The model is constantly under development, and model components could change sig ## Using/contributing to OG-Core -There are two primary methods for installing and running OG-Core on your computer locally. 
The first and simplest method is to download the most recent `ogcore` Python package from the Python Package Index ([PyPI.org]()). A second option is to fork and clone the most recent version of OG-Core from its GitHub repository and create the conda environment for the `ogcore` package. We detail both of these methods below. +There are two primary methods for installing and running OG-Core on your computer locally. The first and simplest method is to download the most recent `ogcore` Python package from the Python Package Index ([PyPI.org](https://pypi.org/project/ogcore/)). A second option is to fork and clone the most recent version of OG-Core from its GitHub repository and install the `ogcore` package with its development dependencies using `uv`. We detail both of these methods below. ### Installing and Running OG-Core from Python Package Index (PyPI.org) -* Open your terminal (or Conda command prompt), and make sure you have the most recent version of `pip` (the Python Index Package manager) by typing on a Unix/macOS machine `python3 -m pip install --upgrade pip` or on a Windows machine `py -m pip install --upgrade pip`. -* Install the [`ogcore`](https://pypi.org/project/ogcore/) package from the Python Package Index by typing `pip install ogcore`. +* Open your terminal and install the [`ogcore`](https://pypi.org/project/ogcore/) package from the Python Package Index by typing `pip install ogcore`. * Navigate to a folder `./YourFolderName/` where you want to save scripts to run OG-Core and output from the simulations in those scripts. * Save the python script [`run_ogcore_example.py`](https://github.com/PSLmodels/OG-Core/blob/master/run_examples/run_ogcore_example.py) from the OG-Core GitHub repository in the folder where you are working on your local machine `./YourFolderName/run_ogcore_example.py`. 
* Run the model with an example reform from terminal/command prompt by typing `python run_ogcore_example.py` @@ -60,11 +59,9 @@ The CSV output file `./ogcore_example_output.csv` can be compared to the [`./run ### Installing and Running OG-Core from GitHub repository -* Install the [Anaconda distribution](https://www.anaconda.com/distribution/) of Python +* Install [`uv`](https://docs.astral.sh/uv/) by following the [installation instructions](https://docs.astral.sh/uv/getting-started/installation/) for your platform (or simply run `pip install uv`) * Clone this repository to a directory on your computer -* From the terminal (or Conda command prompt), navigate to the directory to which you cloned this repository and run `conda env create -f environment.yml` -* Then, `conda activate ogcore-dev` -* Then install by `pip install -e .` +* From the terminal, navigate to the directory to which you cloned this repository and run `uv pip install -e ".[dev]"` to install the package and all development dependencies * Navigate to `./run_examples` * Run the model with an example reform from terminal/command prompt by typing `python run_ogcore_example.py` * You can adjust the `./run_examples/run_ogcore_example.py` script by modifying model parameters specified in the `og_spec` dictionary. 
diff --git a/docs/book/content/theory/images/SS_images.py b/docs/book/content/theory/images/SS_images.py index d65c0e816..552153065 100644 --- a/docs/book/content/theory/images/SS_images.py +++ b/docs/book/content/theory/images/SS_images.py @@ -10,8 +10,6 @@ import os import numpy as np import matplotlib.pyplot as plt -from matplotlib.ticker import MultipleLocator -from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm """ diff --git a/docs/make_params.py b/docs/make_params.py index 05761a204..5afacdbda 100644 --- a/docs/make_params.py +++ b/docs/make_params.py @@ -1,6 +1,5 @@ import numpy as np import pandas as pd -from collections import OrderedDict import ogcore import os import sys diff --git a/docs/make_vars.py b/docs/make_vars.py index c6c7e79d0..4a8bd5e9f 100644 --- a/docs/make_vars.py +++ b/docs/make_vars.py @@ -1,6 +1,5 @@ import numpy as np import pandas as pd -from collections import OrderedDict import ogcore import os import sys diff --git a/environment.yml b/environment.yml index 6062afe39..375342c10 100644 --- a/environment.yml +++ b/environment.yml @@ -5,7 +5,6 @@ dependencies: - python>3.11, <3.14 - numpy - ipython -- setuptools - scipy>=1.7.1 - pandas>=1.2.5 - numba @@ -24,11 +23,10 @@ dependencies: - pytest>=6.0 - pytest-cov - pytest-xdist -- pylint - coverage - requests - openpyxl>=3.1.2 -- black>=24.1.1 +- ruff - pip - pip: - pygam diff --git a/examples/multi_industry_example.py b/examples/multi_industry_example.py index 1c4975515..92bd34aba 100644 --- a/examples/multi_industry_example.py +++ b/examples/multi_industry_example.py @@ -3,7 +3,6 @@ """ # import modules -from asyncio import base_events import multiprocessing from distributed import Client import time diff --git a/examples/run_ogcore_example.py b/examples/run_ogcore_example.py index 6b883abbe..2f1a555e1 100644 --- a/examples/run_ogcore_example.py +++ b/examples/run_ogcore_example.py @@ -14,8 +14,6 @@ from ogcore.parameters import Specifications from ogcore.constants import 
REFORM_DIR, BASELINE_DIR from ogcore.utils import safe_read_pickle -import ogcore -from ogcore import SS import matplotlib.pyplot as plt # Use a custom matplotlib style file for plots diff --git a/ogcore/SS.py b/ogcore/SS.py index f7239f90c..bd0338bc3 100644 --- a/ogcore/SS.py +++ b/ogcore/SS.py @@ -1,12 +1,9 @@ # imports import numpy as np import scipy.optimize as opt -from dask import delayed, compute -import dask.multiprocessing from ogcore import tax, pensions, household, firm, utils, fiscal from ogcore import aggregates as aggr from ogcore.constants import SHOW_RUNTIME, DEV_FACTOR_LIST -from ogcore import config import os import warnings import logging @@ -39,7 +36,8 @@ def euler_equation_solver(guesses, *args): Args: guesses (Numpy array): initial guesses for b and n, length 2S - args (tuple): tuple of arguments (r, w, p_tilde, p_i, bq, TR, factor, j, p) + args (tuple): tuple of arguments (r, w, p_tilde, p_i, bq, TR, + factor, j, p) r (scalar): real interest rate w (scalar): real wage rate p_tilde (scalar): composite good price @@ -290,7 +288,6 @@ def inner_loop(outer_loop_vars, p, client): # from dask.base import dask_sizeof if client: - # Before scattering, temporarily remove unpicklable schema objects schema_backup = {} for attr in ["_defaults_schema", "_validator_schema", "sel"]: @@ -298,7 +295,7 @@ def inner_loop(outer_loop_vars, p, client): schema_backup[attr] = getattr(p, attr) try: delattr(p, attr) - except: + except Exception: pass # Scatter the parameters @@ -308,7 +305,7 @@ def inner_loop(outer_loop_vars, p, client): for attr, value in schema_backup.items(): try: setattr(p, attr, value) - except: + except Exception: pass # Launch in parallel with submit (or map) @@ -1148,7 +1145,8 @@ def SS_solver( euler_savings = euler_errors[: p.S, :] euler_labor_leisure = euler_errors[p.S :, :] logging.info( - f"Maximum error in labor FOC = {np.absolute(euler_labor_leisure).max()}" + "Maximum error in labor FOC = " + f"{np.absolute(euler_labor_leisure).max()}" ) 
logging.info( f"Maximum error in savings FOC = {np.absolute(euler_savings).max()}" @@ -1437,9 +1435,7 @@ def run_SS(p, client=None): if ss_solutions["b_sp1"].shape == ( p.S, p.J, - ) and np.squeeze( - ss_solutions["Y_m"].shape - ) == (p.M): + ) and np.squeeze(ss_solutions["Y_m"].shape) == (p.M): logging.info("Using previous solutions for SS") ( b_guess, @@ -1509,14 +1505,15 @@ def run_SS(p, client=None): logging.warning("KeyError: previous solutions for SS not found") use_new_guesses = True if p.baseline or not p.reform_use_baseline_solution or use_new_guesses: - # Loop over initial guesses of r and TR until find a solution or until have - # gone through all guesses. This should usually solve in the first guess + # Loop over initial guesses of r and TR until find a solution + # or until have gone through all guesses. This should usually + # solve in the first guess SS_solved = False k = 0 while not SS_solved and k < len(DEV_FACTOR_LIST) - 1: for k, v in enumerate(DEV_FACTOR_LIST): logging.info( - f"SS using initial guess factors for r and TR of " + "SS using initial guess factors for r and TR of " + f"{v[0]} and {v[1]} respectively." 
) guesses, b_guess, n_guess = SS_initial_guesses( diff --git a/ogcore/TPI.py b/ogcore/TPI.py index 7b38ad68f..6220b6e7b 100644 --- a/ogcore/TPI.py +++ b/ogcore/TPI.py @@ -13,12 +13,9 @@ import numpy as np import pickle import scipy.optimize as opt -from dask import delayed, compute -import dask.multiprocessing from ogcore import tax, utils, household, firm, fiscal, pensions from ogcore import aggregates as aggr from ogcore.constants import SHOW_RUNTIME -from ogcore import config import os import warnings import logging @@ -267,7 +264,8 @@ def twist_doughnut( tr (Numpy array): government transfer amount theta (Numpy array): retirement replacement rates, length J factor (scalar): scaling factor converting model units to dollars - ubi (Numpy array): length remaining periods of life UBI payout to household + ubi (Numpy array): length remaining periods of life UBI payout + to household j (int): index of ability type s (int): years of life remaining t (int): model period @@ -776,7 +774,7 @@ def run_TPI(p, client=None): schema_backup[attr] = getattr(p, attr) try: delattr(p, attr) - except: + except Exception: pass # Scatter the parameters @@ -786,7 +784,7 @@ def run_TPI(p, client=None): for attr, value in schema_backup.items(): try: setattr(p, attr, value) - except: + except Exception: pass # TPI loop @@ -824,7 +822,8 @@ def run_TPI(p, client=None): if not_done: # Some futures didn't complete in time raise TimeoutError( - f"{len(not_done)} futures did not complete within 600 seconds" + f"{len(not_done)} futures did not complete" + " within 600 seconds" ) results = client.gather(futures) except Exception as e: @@ -1365,7 +1364,7 @@ def run_TPI(p, client=None): I_d = aggr.get_I( bmat_splus1[: p.T], K_d[1 : p.T + 1], K_d[: p.T], p, "TPI" ) - I = aggr.get_I(bmat_splus1[: p.T], K[1 : p.T + 1], K[: p.T], p, "TPI") + I = aggr.get_I(bmat_splus1[: p.T], K[1 : p.T + 1], K[: p.T], p, "TPI") # noqa: E741 # solve resource constraint # foreign debt service costs debt_service_f = 
fiscal.get_debt_service_f(r_p, D_f) diff --git a/ogcore/__init__.py b/ogcore/__init__.py index 7a6e63ec4..a7bebca9c 100644 --- a/ogcore/__init__.py +++ b/ogcore/__init__.py @@ -2,22 +2,22 @@ Specify what is available to import from the ogcore package. """ -from ogcore.SS import * -from ogcore.TPI import * -from ogcore.aggregates import * -from ogcore.constants import * -from ogcore.elliptical_u_est import * -from ogcore.execute import * -from ogcore.firm import * -from ogcore.fiscal import * -from ogcore.household import * -from ogcore.output_plots import * -from ogcore.output_tables import * -from ogcore.parameter_plots import * -from ogcore.parameter_tables import * -from ogcore.parameters import * -from ogcore.tax import * -from ogcore.txfunc import * -from ogcore.utils import * +from ogcore.SS import * # noqa: F403 +from ogcore.TPI import * # noqa: F403 +from ogcore.aggregates import * # noqa: F403 +from ogcore.constants import * # noqa: F403 +from ogcore.elliptical_u_est import * # noqa: F403 +from ogcore.execute import * # noqa: F403 +from ogcore.firm import * # noqa: F403 +from ogcore.fiscal import * # noqa: F403 +from ogcore.household import * # noqa: F403 +from ogcore.output_plots import * # noqa: F403 +from ogcore.output_tables import * # noqa: F403 +from ogcore.parameter_plots import * # noqa: F403 +from ogcore.parameter_tables import * # noqa: F403 +from ogcore.parameters import * # noqa: F403 +from ogcore.tax import * # noqa: F403 +from ogcore.txfunc import * # noqa: F403 +from ogcore.utils import * # noqa: F403 __version__ = "0.15.4" diff --git a/ogcore/constants.py b/ogcore/constants.py index 7b32e624e..558ad02ac 100644 --- a/ogcore/constants.py +++ b/ogcore/constants.py @@ -170,7 +170,8 @@ r"$\mu_{d,t}$", ], "avg_earn_num_years": [ - "Number of years over which compute average earnings for pension benefit", + "Number of years over which compute average earnings for" + " pension benefit", r"$\texttt{avg\_earn\_num\_years}$", ], "AIME_bkt_1": ["First 
AIME bracket threshold", r"$\texttt{AIME\_bkt\_1}$"], diff --git a/ogcore/demographics.py b/ogcore/demographics.py index ce8a3e660..13990f692 100644 --- a/ogcore/demographics.py +++ b/ogcore/demographics.py @@ -71,7 +71,8 @@ def get_un_data( else: # if file not exist, prompt user for token try: UN_TOKEN = input( - "Please enter your UN API token (press return if you do not have one): " + "Please enter your UN API token " + "(press return if you do not have one): " ) # write the UN_TOKEN to a file to find in the future with open(os.path.join("un_api_token.txt"), "w") as file: @@ -103,7 +104,7 @@ def get_un_data( else: # Read from UN GH Repo: print( - f"Failed to retrieve population data from UN. Reading " + "Failed to retrieve population data from UN. Reading " + " from https://github.com/EAPD-DRB/Population-Data " + "instead of UN WPP API" ) @@ -138,7 +139,8 @@ def get_un_data( # Do we still want to keep the status code for failures? # print( - # f"Failed to retrieve population data. HTTP status code: {response.status_code}" + # "Failed to retrieve population data. HTTP status code: " + # f"{response.status_code}" # ) # assert False @@ -445,8 +447,9 @@ def get_pop( "47", country_id=country_id, start_year=start_year, - end_year=end_year - + 2, # note go to + 2 because needed to infer immigration for end_year + # note go to + 2 because needed to infer immigration + # for end_year + end_year=end_year + 2, ) # CLean and rebin data for y in range(start_year, end_year + 2): @@ -991,7 +994,8 @@ def get_pop_objs( assert np.allclose(pop_counter_2D, pop_2D) # """" - # CHANGE - in OG-Core, we are implicitly assuming pre-TP rates of mortality, + # CHANGE - in OG-Core, we are implicitly assuming pre-TP rates of + # mortality, # fertility, and immigration are the same as the period 0 rates. # So let's just infer the pre-pop_dist from those. 
diff --git a/ogcore/firm.py b/ogcore/firm.py index 5d454f679..dec2769d2 100644 --- a/ogcore/firm.py +++ b/ogcore/firm.py @@ -22,9 +22,14 @@ def get_Y(K, K_g, L, p, method, m=-1): .. math:: \hat{Y}_t &= F(\hat{K}_t, \hat{K}_{g,t}, \hat{L}_t) \\ - &\equiv Z_t\biggl[(\gamma)^\frac{1}{\varepsilon}(\hat{K}_t)^\frac{\varepsilon-1}{\varepsilon} + - (\gamma_{g})^\frac{1}{\varepsilon}(\hat{K}_{g,t})^\frac{\varepsilon-1}{\varepsilon} + - (1-\gamma-\gamma_{g})^\frac{1}{\varepsilon}(\hat{L}_t)^\frac{\varepsilon-1}{\varepsilon}\biggr]^\frac{\varepsilon}{\varepsilon-1} + &\equiv Z_t\biggl[ + (\gamma)^\frac{1}{\varepsilon} + (\hat{K}_t)^\frac{\varepsilon-1}{\varepsilon} + + (\gamma_{g})^\frac{1}{\varepsilon} + (\hat{K}_{g,t})^\frac{\varepsilon-1}{\varepsilon} + + (1-\gamma-\gamma_{g})^\frac{1}{\varepsilon} + (\hat{L}_t)^\frac{\varepsilon-1}{\varepsilon} + \biggr]^\frac{\varepsilon}{\varepsilon-1} \quad\forall t Args: @@ -43,7 +48,8 @@ def get_Y(K, K_g, L, p, method, m=-1): if method == "SS": if m is not None: - # Set gamma_g to 0 when K_g=0 and eps=1 to remove K_g from prod func + # Set gamma_g to 0 when K_g=0 and eps=1 to remove K_g + # from prod func if K_g == 0 and p.epsilon[m] <= 1: gamma_g = 0 K_g = 1 @@ -75,7 +81,8 @@ def get_Y(K, K_g, L, p, method, m=-1): ) ) ** (epsilon / (epsilon - 1)) else: - # Set gamma_g to 0 when K_g=0 and eps=1 to remove K_g from prod func + # Set gamma_g to 0 when K_g=0 and eps=1 to remove K_g + # from prod func if K_g == 0 and np.any(p.epsilon) <= 1: gamma_g = p.gamma_g gamma_g[p.epsilon <= 1] = 0 @@ -100,7 +107,8 @@ def get_Y(K, K_g, L, p, method, m=-1): Y[epsilon == 1] = Y2[epsilon == 1] else: # TPI case if m is not None: - # Set gamma_g to 0 when K_g=0 and eps=1 to remove K_g from prod func + # Set gamma_g to 0 when K_g=0 and eps=1 to remove K_g + # from prod func if np.any(K_g == 0) and p.epsilon[m] == 1: gamma_g = 0 K_g[K_g == 0] = 1.0 @@ -132,7 +140,8 @@ def get_Y(K, K_g, L, p, method, m=-1): ) ) ** (epsilon / (epsilon - 1)) else: - # Set 
gamma_g to 0 when K_g=0 and eps=1 to remove K_g from prod func + # Set gamma_g to 0 when K_g=0 and eps=1 to remove K_g + # from prod func if np.any(K_g == 0) and np.any(p.epsilon) == 1: gamma_g = p.gamma_g K_g[K_g == 0] = 1.0 @@ -649,7 +658,7 @@ def solve_L(Y, K, K_g, p, method, m=-1): if K_g == 0: K_g = 1.0 gamma_g = 0 - except: + except Exception: if np.any(K_g == 0): K_g[K_g == 0] = 1.0 gamma_g = 0 @@ -673,7 +682,9 @@ def adj_cost(K, Kp1, p, method): Firm capital adjstment costs ..math:: - \Psi(K_{t}, K_{t+1}) = \frac{\psi}{2}\biggr(\frac{\biggr(\frac{I_{t}}{K_{t}}-\mu\biggl)^{2}}{\frac{I_{t}}{K_{t}}}\biggl) + \Psi(K_{t}, K_{t+1}) = \frac{\psi}{2}\biggr( + \frac{\biggr(\frac{I_{t}}{K_{t}}-\mu\biggl)^{2}} + {\frac{I_{t}}{K_{t}}}\biggl) Args: K (array-like): Current period capital stock diff --git a/ogcore/fiscal.py b/ogcore/fiscal.py index 30bc2bdce..2d9ab2f77 100644 --- a/ogcore/fiscal.py +++ b/ogcore/fiscal.py @@ -23,16 +23,28 @@ def D_G_path(r, dg_fixed_values, p): .. math:: \begin{split} - &e^{g_y}\left(1 + \tilde{g}_{n,t+1}\right)\hat{D}_{t+1} + \hat{Rev}_t = (1 + r_{gov,t})\hat{D}_t + \hat{G}_t + \hat{TR}_t + \hat{UBI}_t \quad\forall t \\ + &e^{g_y}\left(1 + \tilde{g}_{n,t+1}\right)\hat{D}_{t+1} + + \hat{Rev}_t = (1 + r_{gov,t})\hat{D}_t + \hat{G}_t + + \hat{TR}_t + \hat{UBI}_t \quad\forall t \\ &\hat{G}_t = g_{g,t}\:\alpha_{g}\: \hat{Y}_t \\ &\text{where}\quad g_{g,t} = \begin{cases} - 1 \qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\:\:\text{if}\quad t < T_{G1} \\ - \frac{e^{g_y}\left(1 + \tilde{g}_{n,t+1}\right)\left[\rho_{d}\alpha_{D}\hat{Y}_{t} + (1-\rho_{d})\hat{D}_{t}\right] - (1+r_{gov,t})\hat{D}_{t} - \hat{TR}_{t} - \hat{UBI}_t + \hat{Rev}_{t}}{\alpha_g \hat{Y}_t} \quad\text{if}\quad T_{G1}\leq t ltilde).any(): logging.info( diff --git a/ogcore/output_plots.py b/ogcore/output_plots.py index 09c8f2adb..d3bc2836b 100644 --- a/ogcore/output_plots.py +++ b/ogcore/output_plots.py @@ -2,7 +2,6 @@ import os import matplotlib.pyplot as plt import 
matplotlib.ticker as mticker -from mpl_toolkits.mplot3d import Axes3D import matplotlib from ogcore.constants import ( VAR_LABELS, @@ -83,9 +82,9 @@ def plot_aggregates( v, reform_tpi, reform_params ) for i, v in enumerate(var_list): - assert ( - v in VAR_LABELS.keys() - ), "{} is not in the list of variable labels".format(v) + assert v in VAR_LABELS.keys(), ( + "{} is not in the list of variable labels".format(v) + ) if plot_type == "pct_diff": if v in ["r_gov", "r", "r_p"]: # Compute just percentage point changes for rates @@ -248,9 +247,9 @@ def plot_industry_aggregates( assert reform_tpi is not None fig1, ax1 = plt.subplots() for i, v in enumerate(var_list): - assert ( - v in VAR_LABELS.keys() - ), "{} is not in the list of variable labels".format(v) + assert v in VAR_LABELS.keys(), ( + "{} is not in the list of variable labels".format(v) + ) if len(var_list) == 1: var_label = "" else: @@ -463,9 +462,9 @@ def plot_gdp_ratio( start_index = start_year - base_params.start_year fig1, ax1 = plt.subplots() for i, v in enumerate(var_list): - assert ( - v in ToGDP_LABELS.keys() - ), "{} is not in the list of variable labels".format(v) + assert v in ToGDP_LABELS.keys(), ( + "{} is not in the list of variable labels".format(v) + ) if plot_type == "levels": plot_var_base = ( base_tpi[v][: base_params.T] / base_tpi["Y"][: base_params.T] diff --git a/ogcore/parameter_plots.py b/ogcore/parameter_plots.py index 3d66a29dd..6025817b5 100644 --- a/ogcore/parameter_plots.py +++ b/ogcore/parameter_plots.py @@ -210,7 +210,8 @@ def plot_ability_profiles( Args: p (OG-Core Specifications class): parameters object - t (int): model period for year, if None, then plot ability matrix for SS + t (int): model period for year, if None, then plot ability + matrix for SS log_scale (bool): whether to plot in log points include_title (bool): whether to include a title in the plot path (string): path to save figure to @@ -1197,9 +1198,9 @@ def plot_2D_taxfunc( if age is not None: assert 
isinstance(age, int) assert age >= E - s = ( - age - E - ) # Note: assumed age is given in E + model periods (but age below is also assumed to be calendar years) + # Note: assumed age is given in E + model periods (but age + # below is also assumed to be calendar years) + s = age - E else: s = 0 # if not age-specific, all ages have the same values t = year - start_year diff --git a/ogcore/parameter_tables.py b/ogcore/parameter_tables.py index 2a4100377..d107f63ca 100644 --- a/ogcore/parameter_tables.py +++ b/ogcore/parameter_tables.py @@ -108,42 +108,33 @@ def tax_rate_table( len_rates = len(base_etr_rates[start_index : start_index + num_years]) table = { "Year": years[:len_rates], - "Baseline " - + VAR_LABELS["etr"]: base_etr_rates[ + "Baseline " + VAR_LABELS["etr"]: base_etr_rates[ start_index : start_index + num_years ], - "Reform " - + VAR_LABELS["etr"]: reform_etr_rates[ + "Reform " + VAR_LABELS["etr"]: reform_etr_rates[ start_index : start_index + num_years ], - "Differences in " - + VAR_LABELS["etr"]: reform_etr_rates[ + "Differences in " + VAR_LABELS["etr"]: reform_etr_rates[ start_index : start_index + num_years ] - base_etr_rates[start_index : start_index + num_years], - "Baseline " - + VAR_LABELS["mtrx"]: base_mtrx_rates[ + "Baseline " + VAR_LABELS["mtrx"]: base_mtrx_rates[ start_index : start_index + num_years ], - "Reform " - + VAR_LABELS["mtrx"]: reform_mtrx_rates[ + "Reform " + VAR_LABELS["mtrx"]: reform_mtrx_rates[ start_index : start_index + num_years ], - "Differences in " - + VAR_LABELS["mtrx"]: reform_mtrx_rates[ + "Differences in " + VAR_LABELS["mtrx"]: reform_mtrx_rates[ start_index : start_index + num_years ] - base_mtrx_rates[start_index : start_index + num_years], - "Baseline " - + VAR_LABELS["mtry"]: base_mtry_rates[ + "Baseline " + VAR_LABELS["mtry"]: base_mtry_rates[ start_index : start_index + num_years ], - "Reform " - + VAR_LABELS["mtry"]: reform_mtry_rates[ + "Reform " + VAR_LABELS["mtry"]: reform_mtry_rates[ start_index : 
start_index + num_years ], - "Differences in " - + VAR_LABELS["mtry"]: reform_mtry_rates[ + "Differences in " + VAR_LABELS["mtry"]: reform_mtry_rates[ start_index : start_index + num_years ] - base_mtry_rates[start_index : start_index + num_years], @@ -152,12 +143,10 @@ def tax_rate_table( len_rates = len(base_rates[start_index : start_index + num_years]) table = { "Year": years[:len_rates], - "Baseline " - + VAR_LABELS[rate_type.lower()]: base_rates[ + "Baseline " + VAR_LABELS[rate_type.lower()]: base_rates[ start_index : start_index + num_years ], - "Reform " - + VAR_LABELS[rate_type.lower()]: reform_rates[ + "Reform " + VAR_LABELS[rate_type.lower()]: reform_rates[ start_index : start_index + num_years ], "Difference": reform_rates[start_index : start_index + num_years] diff --git a/ogcore/pensions.py b/ogcore/pensions.py index 2af526dc7..0727ba8c2 100644 --- a/ogcore/pensions.py +++ b/ogcore/pensions.py @@ -13,7 +13,8 @@ def replacement_rate_vals(nssmat, wss, factor_ss, j, p): Calculates replacement rate values for the social security system. .. math:: - \theta_{j,R,t+R} = \frac{PIA_{j,R,t+R} \times 12}{factor \times w_{t+R}} + \theta_{j,R,t+R} = + \frac{PIA_{j,R,t+R} \times 12}{factor \times w_{t+R}} Args: nssmat (Numpy array): initial guess at labor supply, size = SxJ @@ -206,7 +207,8 @@ def DB_amount(w, e, n, j, p): .. math:: pension{j,s,t} = \biggl[\frac{\sum_{s=R-ny}^{R-1}w_{t}e_{j,s,t} - n_{j,s,t}}{ny}\biggr]\times Cy \times \alpha_{DB} \quad \forall s > R + n_{j,s,t}}{ny}\biggr]\times Cy \times \alpha_{DB} + \quad \forall s > R Args: w (array_like): real wage rate @@ -296,7 +298,8 @@ def NDC_amount(w, e, n, r, Y, j, p): .. 
math:: pension{j,s,t} = \biggl[\sum_{s=E}^{R-1}\tau^{p}_{t}w_{t} - e_{j,s,t}n_{j,s,t}(1 + g_{NDC,t})^{R-s-1}\biggr]\delta_{R, t} \quad \forall s > R + e_{j,s,t}n_{j,s,t}(1 + g_{NDC,t})^{R-s-1}\biggr] + \delta_{R, t} \quad \forall s > R Args: w (array_like): real wage rate @@ -378,7 +381,8 @@ def PS_amount(w, e, n, j, factor, p): Calculate public pension from a points system. .. math:: - pension{j,s,t} = \sum_{s=E}^{R-1}w_{t}e_{j,s,t}n_{j,s,t}\times v_{t} \quad \forall s > R + pension{j,s,t} = \sum_{s=E}^{R-1}w_{t}e_{j,s,t}n_{j,s,t} + \times v_{t} \quad \forall s > R Args: w (array_like): real wage rate @@ -492,7 +496,8 @@ def deriv_NDC(r, w, e, Y, per_rmn, p): .. math:: \frac{\partial \theta_{j,u,t+u-s}}{\partial n_{j,s,t}} = \begin{cases} - \tau^{p}_{t}w_{t}e_{j,s}(1+g_{NDC,t})^{u - s}\delta_{R,t}, & \text{if}\ s= np.percentile(x[:, 0], plot_start)) & (x[:, 0] <= np.percentile(x[:, 0], plot_end)) @@ -2230,8 +2235,8 @@ def interp(x): wsse_uncstr = None def mono_interp(x_vec): - # replace last point in data with two copies further out to make smooth - # extrapolation + # replace last point in data with two copies further out + # to make smooth extrapolation x_new = np.append( x_binned[:-1], [1.005 * x_binned[-1], 1.01 * x_binned[-1]] ) diff --git a/ogcore/utils.py b/ogcore/utils.py index a58e2d4f2..f743a11ca 100644 --- a/ogcore/utils.py +++ b/ogcore/utils.py @@ -255,7 +255,7 @@ def dict_compare( else: unequal_items = [] for k, v in dict1.items(): - if type(v) == np.ndarray: + if isinstance(v, np.ndarray): check &= comp_array( k, v, @@ -646,9 +646,11 @@ def print_progress( else: sys.stdout.write("Accessing " + source_name + " data files...\n") - sys.stdout.write( - "\r%s |%s| %s%s %s" % (prefix, bar, percents, "%", suffix) - ), + ( + sys.stdout.write( + "\r%s |%s| %s%s %s" % (prefix, bar, percents, "%", suffix) + ), + ) if iteration == total: sys.stdout.write("\n") @@ -1054,9 +1056,10 @@ def depth(L): # for now, just have this work for 3 deep lists since # the only 
OG-Core use case is for tax function parameters assert depth(list_in) == 3, "please give a list that is three lists deep" - assert depth(list_in) == len( - dims - ), "please make sure the depth of nested list is equal to the length of dims to extrapolate" + assert depth(list_in) == len(dims), ( + "please make sure the depth of nested list is equal to the" + " length of dims to extrapolate" + ) # Extrapolate along the first dimension if len(list_in) > T + S: list_in = list_in[: T + S] @@ -1076,8 +1079,12 @@ def depth(L): class CustomHttpAdapter(requests.adapters.HTTPAdapter): """ - The UN Data Portal server doesn't support "RFC 5746 secure renegotiation". This causes and error when the client is using OpenSSL 3, which enforces that standard by default. - The fix is to create a custom SSL context that allows for legacy connections. This defines a function get_legacy_session() that should be used instead of requests(). + The UN Data Portal server doesn't support "RFC 5746 secure + renegotiation". This causes an error when the client is using + OpenSSL 3, which enforces that standard by default. + The fix is to create a custom SSL context that allows for legacy + connections. This defines a function get_legacy_session() that + should be used instead of requests(). """ # "Transport adapter" that allows us to use custom ssl_context. @@ -1096,7 +1103,9 @@ def init_poolmanager(self, connections, maxsize, block=False): def get_legacy_session(): ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) - ctx.options |= 0x4 # OP_LEGACY_SERVER_CONNECT #in Python 3.12 you will be able to switch from 0x4 to ssl.OP_LEGACY_SERVER_CONNECT. + # OP_LEGACY_SERVER_CONNECT; in Python 3.12 you will be able to + # switch from 0x4 to ssl.OP_LEGACY_SERVER_CONNECT. 
+ ctx.options |= 0x4 session = requests.session() session.mount("https://", CustomHttpAdapter(ctx)) return session @@ -1331,9 +1340,7 @@ def params_to_json(p, path=None): ] for v in annual_list: val = getattr(p, v.replace("_annual", "")) - annual_value = (1 + val) ** ( - p.S / ((p.ending_age - p.starting_age)) - ) - 1 + annual_value = (1 + val) ** (p.S / (p.ending_age - p.starting_age)) - 1 if isinstance(annual_value, np.ndarray): converted_data[v] = annual_value.tolist() else: diff --git a/pyproject.toml b/pyproject.toml index bd1d32813..32cd2adde 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,15 +1,111 @@ [build-system] -requires = ["setuptools>=61.0"] +requires = ["setuptools>=64", "wheel"] build-backend = "setuptools.build_meta" -# Configuration for Black. +[project] +name = "ogcore" +version = "0.15.4" +authors = [ + {name = "Jason DeBacker and Richard W. Evans"}, +] +description = "A general equilibrium overlapping generations model for fiscal policy analysis" +readme = "README.md" +license = {text = "CC0 1.0 Universal (CC0 1.0) Public Domain Dedication"} +requires-python = ">=3.11, <3.14" +classifiers = [ + "Development Status :: 2 - Pre-Alpha", + "Intended Audience :: Developers", + "Natural Language :: English", + "License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication", + "Operating System :: OS Independent", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Topic :: Software Development :: Libraries :: Python Modules", +] +dependencies = [ + "numpy>=1.26,<2.4", + "scipy>=1.7.1", + "pandas>=1.2.5", + "numba", + "matplotlib", + "dask>=2.30.0", + "distributed>=2.30.1", + "paramtools>=0.20.0", + "requests", + "pygam", +] -# NOTE: you have to use single-quoted strings in TOML for regular expressions. -# It's the equivalent of r-strings in Python. 
Multiline strings are treated as -# verbose regular expressions by Black. Use [ ] to denote a significant space -# character. +[project.urls] +Homepage = "https://github.com/PSLmodels/OG-Core/" +"Issue Tracker" = "https://github.com/PSLmodels/OG-Core/issues" -[tool.black] +[project.optional-dependencies] +dev = [ + "pytest>=6.0", + "pytest-cov", + "pytest-xdist", + "coverage", + "ruff", + "openpyxl>=3.1.2", + "linecheck", +] +docs = [ + "jupyter-book<2.0.0", + "jupyter", + "ipykernel", + "sphinx>=3.5.4", + "sphinx-argparse", + "sphinxcontrib-bibtex>=2.0.0", + "sphinx-math-dollar", + "pydata-sphinx-theme", +] + +[tool.setuptools.packages.find] +include = ["ogcore*"] + +[tool.setuptools.package-data] +ogcore = [ + "default_parameters.json", + "model_variables.json", + "OGcorePlots.mplstyle", +] + +[tool.ruff] line-length = 79 -target-version = ["py312", "py313"] -include = '\.pyi?$' +target-version = "py311" +exclude = [ + "**/*.ipynb", + "benchmark_root_methods*.py", + "tests/do_not_test_*.py", + "tests/run_benchmarks.py", + "examples/run_ogcore_example_corp_tax_cut_*.py", +] + +[tool.ruff.lint] +select = ["E", "F", "W"] +ignore = ["F841"] + +[tool.ruff.lint.pycodestyle] +max-doc-length = 120 + +[tool.pytest.ini_options] +minversion = "6.0" +testpaths = ["tests"] +filterwarnings = [ + "ignore::RuntimeWarning:.*invalid value encountered.*", + "ignore::RuntimeWarning:.*divide by zero encountered in divide.*", + "ignore::RuntimeWarning:.*invalid value encountered in power.*", +] +markers = [ + "local: marks tests that run locally and not on GH Actions (mostly due to run time)", + "benchmark: marks tests that measure performance and memory usage", + "distributed: marks tests that use distributed Dask clients", + "memory: marks tests focused on memory usage measurement", + "performance: marks tests focused on compute time measurement", + "slow: marks tests that take longer to run", + "real: marks tests using real OG-Core tax function code", + "platform: marks tests for 
platform-specific optimization", +] diff --git a/pytest.ini b/pytest.ini deleted file mode 100644 index 4fdda3c2b..000000000 --- a/pytest.ini +++ /dev/null @@ -1,18 +0,0 @@ -# pytest.ini -[pytest] -filterwarnings = - ignore::RuntimeWarning:.*invalid value encountered.* - ignore::RuntimeWarning:.*divide by zero encountered in divide.* - ignore::RuntimeWarning:.*invalid value encountered in power.* -minversion = 6.0 -testpaths = - ./tests -markers = - local: marks tests that run locally and not on GH Actions (mostly due to run time) - benchmark: marks tests that measure performance and memory usage - distributed: marks tests that use distributed Dask clients - memory: marks tests focused on memory usage measurement - performance: marks tests focused on compute time measurement - slow: marks tests that take longer to run - real: marks tests using real OG-Core tax function code - platform: marks tests for platform-specific optimization diff --git a/setup.py b/setup.py deleted file mode 100755 index d87c9be33..000000000 --- a/setup.py +++ /dev/null @@ -1,56 +0,0 @@ -import setuptools - -with open("README.md", "r", encoding="utf-8") as fh: - longdesc = fh.read() - -setuptools.setup( - name="ogcore", - version="0.15.4", - author="Jason DeBacker and Richard W. 
Evans", - license="CC0 1.0 Universal (CC0 1.0) Public Domain Dedication", - description="A general equilibrium overlapping generations model for fiscal policy analysis", - long_description_content_type="text/markdown", - long_description=longdesc, - url="https://github.com/PSLmodels/OG-Core/", - download_url="https://github.com/PLSmodels/OG-Core/", - project_urls={ - "Issue Tracker": "https://github.com/PSLmodels/OG-Core/issues", - }, - packages=["ogcore"], - package_data={ - "ogcore": [ - "default_parameters.json", - "model_variables.json", - "OGcorePlots.mplstyle", - ] - }, - include_packages=True, - python_requires=">=3.11, <3.14", - install_requires=[ - "numpy", - "scipy>=1.7.1", - "pandas>=1.2.5", - "numba", - "matplotlib", - "dask>=2.30.0", - "distributed>=2.30.1", - "paramtools>=0.20.0", - "requests", - "pip", - "pygam", - ], - classifiers=[ - "Development Status :: 2 - Pre-Alpha", - "Intended Audience :: Developers", - "Natural Language :: English", - "License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication", - "Operating System :: OS Independent", - "Programming Language :: Python", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "Programming Language :: Python :: 3.13", - "Topic :: Software Development :: Libraries :: Python Modules", - ], - tests_require=["pytest"], -) diff --git a/tests/run_benchmarks.py b/tests/run_benchmarks.py index f5b50c4d3..d4b7f3c3b 100644 --- a/tests/run_benchmarks.py +++ b/tests/run_benchmarks.py @@ -21,9 +21,8 @@ import argparse import subprocess import platform -from pathlib import Path from datetime import datetime -from typing import Dict, List, Optional +from typing import Dict, Optional # Add the parent directory to path so we can import test modules sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) diff --git a/tests/test_SS.py b/tests/test_SS.py index f9404a85c..a4039c059 100644 --- a/tests/test_SS.py +++ 
b/tests/test_SS.py @@ -8,7 +8,7 @@ import numpy as np import os import pickle -from ogcore import SS, utils, aggregates, fiscal +from ogcore import SS, utils, aggregates from ogcore.parameters import Specifications from ogcore import firm diff --git a/tests/test_TPI.py b/tests/test_TPI.py index 508df8a21..6ca928eae 100644 --- a/tests/test_TPI.py +++ b/tests/test_TPI.py @@ -19,7 +19,6 @@ import sys import json from ogcore import SS, TPI, utils -import ogcore.aggregates as aggr from ogcore.parameters import Specifications NUM_WORKERS = min(multiprocessing.cpu_count(), 4) diff --git a/tests/test_aggregates.py b/tests/test_aggregates.py index 822a1fdaf..0730dd469 100644 --- a/tests/test_aggregates.py +++ b/tests/test_aggregates.py @@ -217,7 +217,6 @@ def test_get_B(b, p, method, PreTP, expected): "omega": np.ones((160, 40)) / 40, "omega_SS": np.ones(40) / 40, "imm_rates": np.zeros((160, 40)), - "rho": rho_vec.tolist(), } # update parameters instance with new values for test p.update_specifications(new_param_values) diff --git a/tests/test_elliptical_u_est.py b/tests/test_elliptical_u_est.py index b5d150af0..1d56f4ba0 100644 --- a/tests/test_elliptical_u_est.py +++ b/tests/test_elliptical_u_est.py @@ -1,4 +1,3 @@ -import pytest import numpy as np from ogcore import elliptical_u_est as ee diff --git a/tests/test_firm.py b/tests/test_firm.py index 9b42618ef..4d7bc06c2 100644 --- a/tests/test_firm.py +++ b/tests/test_firm.py @@ -1,4 +1,3 @@ -from math import exp import pytest from ogcore import firm import numpy as np diff --git a/tests/test_fiscal.py b/tests/test_fiscal.py index 5155bd857..5ccd04ffd 100644 --- a/tests/test_fiscal.py +++ b/tests/test_fiscal.py @@ -33,7 +33,8 @@ D_f4 = df["D_f4"].values.copy() r_gov1 = ( np.ones_like(D1) * 0.05 - 0.02 -) # 0.02 is the default r_gov_shift parameter and the default scale parameter is 1.0, meaning r_gov1 = 0.05 - 0.02 = 0.03 +) # 0.02 is the default r_gov_shift parameter and the default scale +# parameter is 1.0, meaning 
r_gov1 = 0.05 - 0.02 = 0.03 r_gov2 = r_gov1 r_gov3 = r_gov1 r_gov4 = df["r_gov4"].values.copy() diff --git a/tests/test_output_tables.py b/tests/test_output_tables.py index 3da0614e7..491722580 100644 --- a/tests/test_output_tables.py +++ b/tests/test_output_tables.py @@ -4,7 +4,6 @@ import pytest import os -import sys import pandas as pd import numpy as np from ogcore import utils, output_tables diff --git a/tests/test_parameters.py b/tests/test_parameters.py index ec2645986..e11c99b3c 100644 --- a/tests/test_parameters.py +++ b/tests/test_parameters.py @@ -3,7 +3,6 @@ import pytest import numpy as np from ogcore.parameters import Specifications, revision_warnings_errors -from ogcore import utils # get path to puf if puf.csv in ogcore/ directory CUR_PATH = os.path.abspath(os.path.dirname(__file__)) diff --git a/tests/test_run_example.py b/tests/test_run_example.py index c957f560e..8ccf4e35a 100644 --- a/tests/test_run_example.py +++ b/tests/test_run_example.py @@ -1,6 +1,6 @@ """ -This test tests whether starting a `run_ogcore_example.py` run of the model does -not break down (is still running) after 5 minutes or 300 seconds. +This test tests whether starting a `run_ogcore_example.py` run of the model +does not break down (is still running) after 5 minutes or 300 seconds. 
""" import multiprocessing diff --git a/tests/test_txfunc.py b/tests/test_txfunc.py index 6f0a34869..a8c91e56e 100644 --- a/tests/test_txfunc.py +++ b/tests/test_txfunc.py @@ -1,5 +1,3 @@ -import sys - from ogcore import txfunc from distributed import Client, LocalCluster import pytest @@ -227,7 +225,8 @@ def test_replace_outliers(): @pytest.mark.parametrize( "rate_type,tax_func_type,true_params", [ - # ("etr", "DEP", [6.28E-12, 4.36E-05, 1.04E-23, 7.77E-09, 0.80, 0.80, 0.84, -0.14, -0.15, 0.15, 0.16, -0.15]), + # ("etr", "DEP", [6.28E-12, 4.36E-05, 1.04E-23, 7.77E-09, + # 0.80, 0.80, 0.84, -0.14, -0.15, 0.15, 0.16, -0.15]), ( "etr", "DEP_totalinc", @@ -862,7 +861,7 @@ def test_monotone_spline(): ) # Test whether mono_interp is a function assert hasattr(mono_interp1, "__call__") - # Not sure what baseline should be to add tests here for "correctness" of values + # Not sure what baseline should be to add tests here for "correctness" # Simulate some data N = 100 diff --git a/tests/test_utils.py b/tests/test_utils.py index 669d8f2fb..a5e4efd26 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -5,7 +5,6 @@ import numpy as np import tempfile import os -import io import pickle from ogcore.parameters import Specifications @@ -781,7 +780,8 @@ def test_avg_by_bin(): (4, 3, 2), np.ones((4, 3, 2)) * 2.3, ), - # (np.array([[2.3, 2.3, 2.3], [2.3, 2.3, 2.3]]), (4, 3, 2), np.ones((4, 3, 2)) * 2.3), use this one to test assert error + # (np.array([[2.3, 2.3, 2.3], [2.3, 2.3, 2.3]]), (4, 3, 2), + # np.ones((4, 3, 2)) * 2.3), use this one to test assert error ] @@ -880,9 +880,11 @@ def test_extrapolate_nested_list(list_in, dims): "Immediate start and phase in", "Start one period in, immediate phase in", "Start one period in, phase in over two periods", - "Start one period in, phase in over two periods, partial period effect", + "Start one period in, phase in over two periods, partial period" + " effect", "0 effect", - "Start one period in, phase in over two periods, 
partial period effect > 1", + "Start one period in, phase in over two periods, partial period" + " effect > 1", ], ) def test_shift_bio_clock(start_period, end_period, total_effect, expected):