diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 8ee5e7574..b012d2456 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,5 +1,5 @@ - [ ] Closes # (insert issue number) -- [ ] Executed ``pre-commit run --all-files`` with no errors +- [ ] Executed `pre-commit run --all-files` with no errors - [ ] The change is fully covered by automated unit tests - [ ] Documented in docs/ as appropriate - [ ] Added an entry to the CHANGES file diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml new file mode 100644 index 000000000..e7a1e6914 --- /dev/null +++ b/.github/workflows/bench.yml @@ -0,0 +1,32 @@ +name: codspeed-benchmarks + +on: + push: + branches: + - "master" + - "develop" + pull_request: + # `workflow_dispatch` allows CodSpeed to trigger backtest + # performance analysis in order to generate initial data. + workflow_dispatch: + +jobs: + benchmarks: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v3 + with: + python-version: "3.12" + + - name: Install dependencies + run: pip install "numpy>=1.23,<2.0.0" + + - name: Install bench dependencies + run: pip install .[codspeed] + + - name: Run benchmarks + uses: CodSpeedHQ/action@v2 + with: + token: ${{ secrets.CODSPEED_TOKEN }} + run: pytest . --codspeed diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c74cbacf1..be1afe013 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,54 +1,45 @@ -name: CI +name: Test and release when tagged -on: [push, pull_request] +on: [push] jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: prefix-dev/setup-pixi@v0.8.2 + with: + environments: lint + - run: pixi run --environment lint lint + test-linux: + runs-on: ubuntu-latest strategy: - fail-fast: false matrix: - python-version: [3.8, 3.9, "3.10", "3.11"] - numpy: [null, "numpy>=1.19,<2.0.0"] + environment: + - test-py311 + - test-py312 + - test-py313 + numpy: [null, "numpy>=1.23,<2.0.0", "numpy>=2.0.0rc1"] uncertainties: [null, "uncertainties==3.1.6", "uncertainties>=3.1.6,<4.0.0"] extras: [null] include: - - python-version: 3.8 # Minimal versions - numpy: numpy==1.19.5 - extras: matplotlib==2.2.5 - - python-version: 3.8 + - environment: "test-py311" # Minimal versions + numpy: "numpy>=1.23,<2.0.0" + extras: matplotlib==3.5.3 + - environment: "test-py311" numpy: "numpy" uncertainties: "uncertainties" - extras: "sparse xarray netCDF4 dask[complete] graphviz babel==2.8" - runs-on: ubuntu-latest - - env: - TEST_OPTS: "-rfsxEX -s --cov=pint --cov-config=.coveragerc" - + extras: "sparse xarray netCDF4 dask[complete]==2024.5.1 graphviz babel==2.8 mip>=1.13" + - environment: "test-py311" + numpy: "numpy==1.26.1" + uncertainties: null + extras: "babel==2.15 matplotlib==3.9.0" steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 + - uses: prefix-dev/setup-pixi@v0.8.2 with: - fetch-depth: 100 - - - name: Get tags - run: git fetch --depth=1 origin +refs/tags/*:refs/tags/* - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - - name: Get pip cache dir - id: pip-cache - run: echo "::set-output name=dir::$(pip cache dir)" - - - name: Setup caching - uses: actions/cache@v2 - with: - path: ${{ steps.pip-cache.outputs.dir }} - key: pip-${{ matrix.python-version }} - restore-keys: | - pip-${{ matrix.python-version }} - + environments: ${{ matrix.environment }} - 
name: Install numpy if: ${{ matrix.numpy != null }} run: pip install "${{matrix.numpy}}" @@ -61,178 +52,79 @@ jobs: if: ${{ matrix.extras != null }} run: pip install ${{matrix.extras}} + - name: Install locales + if: ${{ matrix.extras != null }} + run: | + sudo apt-get install language-pack-es language-pack-fr language-pack-ro + sudo localedef -i es_ES -f UTF-8 es_ES + sudo localedef -i fr_FR -f UTF-8 fr_FR + sudo localedef -i ro_RO -f UTF-8 ro_RO + - name: Install dependencies run: | sudo apt install -y graphviz - pip install pytest pytest-cov pytest-subtests packaging - pip install . + pip install packaging - name: Install pytest-mpl if: contains(matrix.extras, 'matplotlib') run: pip install pytest-mpl - - - name: Run Tests - run: | - pytest $TEST_OPTS - - - name: Coverage report - run: coverage report -m - - - name: Coveralls Parallel - env: - COVERALLS_FLAG_NAME: ${{ matrix.test-number }} - COVERALLS_PARALLEL: true - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - COVERALLS_SERVICE_NAME: github - run: | - pip install coveralls - coveralls + - run: pixi run --environment ${{ matrix.environment }} test test-windows: + runs-on: windows-latest strategy: - fail-fast: false matrix: - python-version: [3.8, 3.9, "3.10", "3.11"] - numpy: [ "numpy>=1.19,<2.0.0" ] - runs-on: windows-latest - - env: - TEST_OPTS: "-rfsxEX -s -k issue1498b" - + environment: + - test-py311 + - test-py312 + - test-py313 + numpy: [null, "numpy>=1.23,<2.0.0", "numpy>=2.0.0rc1"] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 + - uses: prefix-dev/setup-pixi@v0.8.2 with: - fetch-depth: 100 - - - name: Get tags - run: git fetch --depth=1 origin +refs/tags/*:refs/tags/* - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - - name: Get pip cache dir - id: pip-cache - run: echo "::set-output name=dir::$(pip cache dir)" - - - name: Setup caching - uses: actions/cache@v2 - with: - path: ${{ steps.pip-cache.outputs.dir }} - key: pip-windows-${{ matrix.python-version }} - restore-keys: | - pip-windows-${{ matrix.python-version }} - + environments: ${{ matrix.environment }} - name: Install numpy if: ${{ matrix.numpy != null }} run: pip install "${{matrix.numpy}}" - - # - name: Install uncertainties - # if: ${{ matrix.uncertainties != null }} - # run: pip install "${{matrix.uncertainties}}" - # - # - name: Install extras - # if: ${{ matrix.extras != null }} - # run: pip install ${{matrix.extras}} - - - name: Install dependencies - run: | - # sudo apt install -y graphviz - pip install pytest pytest-cov pytest-subtests packaging - pip install . 
- - # - name: Install pytest-mpl - # if: contains(matrix.extras, 'matplotlib') - # run: pip install pytest-mpl - - - name: Run tests - run: pytest ${env:TEST_OPTS} + - run: pixi run --environment ${{ matrix.environment }} test test-macos: + runs-on: macos-latest strategy: - fail-fast: false matrix: - python-version: [3.8, 3.9, "3.10", "3.11"] - numpy: [null, "numpy>=1.19,<2.0.0" ] - runs-on: macos-latest - - env: - TEST_OPTS: "-rfsxEX -s --cov=pint --cov-config=.coveragerc" - + environment: + - test-py311 + - test-py312 + - test-py313 + numpy: [null, "numpy>=1.23,<2.0.0", "numpy>=2.0.0rc1"] steps: - - uses: actions/checkout@v2 - with: - fetch-depth: 100 - - - name: Get tags - run: git fetch --depth=1 origin +refs/tags/*:refs/tags/* - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - - name: Get pip cache dir - id: pip-cache - run: echo "::set-output name=dir::$(pip cache dir)" - - - name: Setup caching - uses: actions/cache@v2 + - uses: actions/checkout@v4 + - uses: prefix-dev/setup-pixi@v0.8.2 with: - path: ${{ steps.pip-cache.outputs.dir }} - key: pip-${{ matrix.python-version }} - restore-keys: | - pip-${{ matrix.python-version }} - + environments: ${{ matrix.environment }} - name: Install numpy if: ${{ matrix.numpy != null }} run: pip install "${{matrix.numpy}}" + - run: pixi run --environment ${{ matrix.environment }} test - - name: Install dependencies - run: | - pip install pytest pytest-cov pytest-subtests packaging - pip install . + publish: + if: github.ref_type == 'tag' + needs: [test-linux, test-windows, test-macos, lint] - - name: Run Tests - run: | - pytest $TEST_OPTS - - - name: Coverage report - run: coverage report -m - - - name: Coveralls Parallel - env: - COVERALLS_FLAG_NAME: ${{ matrix.test-number }} - COVERALLS_PARALLEL: true - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - COVERALLS_SERVICE_NAME: github - run: | - pip install coveralls - coveralls - - coveralls: - needs: test-linux runs-on: ubuntu-latest - steps: - - uses: actions/setup-python@v2 - with: - python-version: 3.x - - name: Coveralls Finished - continue-on-error: true - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - COVERALLS_SERVICE_NAME: github - run: | - pip install coveralls - coveralls --finish + environment: + name: pypi + url: https://pypi.org/p/pint + permissions: + id-token: write # for trusted publising to PyPI - # Dummy task to summarize all. 
See https://github.com/bors-ng/bors-ng/issues/1300 - ci-success: - name: ci - if: ${{ success() }} - needs: test-linux - runs-on: ubuntu-latest steps: - - name: CI succeeded - run: exit 0 + - uses: actions/checkout@v4 + - uses: prefix-dev/setup-pixi@v0.8.2 + with: + environments: build + - name: Build the package + run: pixi run --environment build build + - name: Publish to PyPI + run: pixi run --environment build publish diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 234068354..c0f6e09d1 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -5,41 +5,21 @@ on: [push, pull_request] jobs: docbuild: runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - with: - fetch-depth: 100 - - - name: Get tags - run: git fetch --depth=1 origin +refs/tags/*:refs/tags/* - - - name: Set up Python 3.8 - uses: actions/setup-python@v2 + - uses: actions/checkout@v4 + - uses: prefix-dev/setup-pixi@v0.8.2 with: - python-version: 3.8 - - - name: Get pip cache dir - id: pip-cache - run: echo "::set-output name=dir::$(pip cache dir)" - - - name: Setup pip cache - uses: actions/cache@v2 + environments: docs + - run: pixi run --environment docs docbuild + doctest: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: prefix-dev/setup-pixi@v0.8.2 with: - path: ${{ steps.pip-cache.outputs.dir }} - key: pip-docs - restore-keys: pip-docs - - - name: Install dependencies + environments: docs + - name: Install locales run: | - sudo apt install -y pandoc - pip install --upgrade pip setuptools wheel - pip install -r "requirements_docs.txt" - pip install docutils==0.14 commonmark==0.8.1 recommonmark==0.5.0 babel==2.8 - pip install . - - - name: Build documentation - run: sphinx-build -n -j auto -b html -d build/doctrees docs build/html - - - name: Doc Tests - run: sphinx-build -a -j auto -b doctest -d build/doctrees docs build/doctest + sudo apt-get install language-pack-fr + sudo localedef -i fr_FR -f UTF-8 fr_FR + - run: pixi run --environment docs doctest diff --git a/.github/workflows/lint-autoupdate.yml b/.github/workflows/lint-autoupdate.yml deleted file mode 100644 index 3bf4a21cd..000000000 --- a/.github/workflows/lint-autoupdate.yml +++ /dev/null @@ -1,46 +0,0 @@ -name: pre-commit - -on: - schedule: - - cron: "0 0 * * 0" # every Sunday at 00:00 UTC - workflow_dispatch: - - -jobs: - autoupdate: - name: autoupdate - runs-on: ubuntu-latest - if: github.repository == 'hgrecco/pint' - steps: - - name: checkout - uses: actions/checkout@v2 - - name: Cache pip and pre-commit - uses: actions/cache@v2 - with: - path: | - ~/.cache/pre-commit - ~/.cache/pip - key: ${{ runner.os }}-pre-commit-autoupdate - - name: setup python - uses: actions/setup-python@v2 - with: - python-version: 3.x - - name: upgrade pip - run: python -m pip install --upgrade pip - - name: install dependencies - run: python -m pip install --upgrade pre-commit - - name: version info - run: python -m pip list - - name: autoupdate - uses: technote-space/create-pr-action@bfd4392c80dbeb54e0bacbcf4750540aecae6ed4 - with: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - EXECUTE_COMMANDS: | - python -m pre_commit autoupdate - python -m pre_commit run --all-files - COMMIT_MESSAGE: 'pre-commit: autoupdate hook versions' - COMMIT_NAME: 'github-actions[bot]' - COMMIT_EMAIL: 'github-actions[bot]@users.noreply.github.com' - PR_TITLE: 'pre-commit: autoupdate hook versions' - PR_BRANCH_PREFIX: 'pre-commit/' - PR_BRANCH_NAME: 'autoupdate-${PR_ID}' diff --git a/.github/workflows/lint.yml 
b/.github/workflows/lint.yml deleted file mode 100644 index e2d26381c..000000000 --- a/.github/workflows/lint.yml +++ /dev/null @@ -1,17 +0,0 @@ -name: Lint - -on: [push, pull_request] - -jobs: - lint: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 - with: - python-version: 3.x - - name: Lint - uses: pre-commit/action@v2.0.0 - with: - extra_args: --all-files --show-diff-on-failure diff --git a/.gitignore b/.gitignore index b514ccedf..338e93c5a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,19 +1,27 @@ +# ignore all hidden files +.* +# except +!.gitignore +!.gitattributes +!.github/ +!.pre-commit-config.yaml +!.copier-answers.yml + +# Python +__pycache__ +*.py[cod] +*$py.class + *~ __pycache__ *egg-info* -*.pyc -.DS_Store docs/_build/ -.idea -.vscode build/ dist/ MANIFEST -*pytest_cache* -.eggs -.mypy_cache pip-wheel-metadata pint/testsuite/dask-worker-space +venv # WebDAV file system cache files .DAV/ @@ -22,7 +30,6 @@ pint/testsuite/dask-worker-space tags test/ -.coverage* # notebook stuff *.ipynb_checkpoints* @@ -34,7 +41,6 @@ notebooks/pandas_test.csv dask-worker-space # airspeed velocity bechmark -.asv/ benchmarks/hashes.txt # local python environment diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 83587c6ce..b5ddef545 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,20 +1,52 @@ -exclude: '^pint/_vendor' repos: -- repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 - hooks: - - id: trailing-whitespace - - id: end-of-file-fixer - - id: check-yaml -- repo: https://github.com/psf/black - rev: 22.10.0 - hooks: - - id: black -- repo: https://github.com/pycqa/isort - rev: 5.10.1 - hooks: - - id: isort -- repo: https://github.com/pycqa/flake8 - rev: 6.0.0 - hooks: - - id: flake8 +- repo: local + hooks: + # ensure pixi environments are up to date + # workaround for https://github.com/prefix-dev/pixi/issues/1482 + - id: pixi-install + name: Update pixi lint environment + entry: pixi install -e default -e lint + pass_filenames: false + language: system + always_run: true + require_serial: true + + # pre-commit-hooks + - id: trailing-whitespace-fixer + name: Fix trailing whitespace with pre-commit-hooks + entry: pixi run -e lint trailing-whitespace-fixer + language: system + types: [text] + + # pre-commit-hooks + - id: end-of-file-fixer + name: Fix end-of-file with pre-commit-hooks + entry: pixi run -e lint end-of-file-fixer + language: system + types: [text] + + - id: ruff check + name: Lint with ruff + entry: pixi run -e lint ruff check --force-exclude --fix + language: system + types_or: [python, pyi, jupyter] + require_serial: true + + - id: ruff format + name: Format with ruff + entry: pixi run -e lint ruff format --force-exclude + language: system + types_or: [python, pyi, jupyter] + require_serial: true + + - id: mdformat + name: Format markdown with mdformat + entry: pixi run -e lint mdformat + language: system + types: [markdown] + + - id: taplo + name: Format TOML with taplo + entry: pixi run -e lint taplo fmt + language: system + types: [toml] diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 2bda3d495..15309232b 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -1,13 +1,15 @@ version: 2 build: - image: latest + os: ubuntu-22.04 + tools: + python: "3.11" + apt_packages: + - graphviz sphinx: configuration: docs/conf.py fail_on_warning: false python: - version: 3.8 install: - requirements: requirements_docs.txt - method: pip path: . 
- system_packages: false diff --git a/AUTHORS b/AUTHORS index e74dc6744..6eb0b6709 100644 --- a/AUTHORS +++ b/AUTHORS @@ -52,6 +52,7 @@ Other contributors, listed alphabetically, are: * Thomas Kluyver * Tom Nicholas * Tom Ritchford +* Valispace * Virgil Dupras * Zebedee Nicholls diff --git a/CHANGES b/CHANGES index 1cc6b51db..f948d6102 100644 --- a/CHANGES +++ b/CHANGES @@ -1,22 +1,325 @@ Pint Changelog ============== +0.25.main+valispace (2024-10-10) +-------------------------------- +- Sync with original master + +0.25 (unreleased) +----------------- +0.25.0 (unreleased) +------------------- + +- Bump minimum Python version to 3.11. +- Upgrade code to Python 3.11. +- Move to pixi/uv/ruff. +- Refactor compat to make it easier to test. +- Implemented several pixi environments and tasks to simplify development. +- Add docs to the functions in ``pint.testing`` (PR #2070) +- Fix round function returning float instead of int (#2081) +- Fix return type of `PlainQuantity.to` (#2088) +- Update constants to CODATA 2022 recommended values. (#2049) +- Fixed issue with `.to_compact` and Magnitudes with uncertainties / Quantities with units (PR #2069, issue #2044) +- Add conductivity dimension. (#2112) +- Add absorbance unit and dimension. (#2114) +- Add membrane filtration flux and permeability dimensionality, and shorthand "LMH". (#2116) +- Fix find_shortest_path to use breadth-first search (#2146) +- Fix typo in ``pyproject.toml``: rename ``AS_MIP`` to ``HAS_MIP`` so that MIP support is correctly detected. (#2152) + + +0.24.4 (2024-11-07) +------------------- + +- Add error for prefixed non-multiplicative units (#1998) +- build: typing_extensions version +- build: switch from appdirs to platformdirs +- fix GenericPlainRegistry getattr type (#2045) +- Replace references to the deprecated `UnitRegistry.default_format` (#2058) +- fix: upgrade to flexparser>=0.4, exceptions are no longer dataclasses. + (required for Python 3.13) + + +0.24.2 (2024-07-28) +------------------- + +- Fix the default behaviour for pint-convert (cli) for importing uncertainties package (PR #2032, Issue #2016) +- Added mu and mc as alternatives for SI micro prefix +- Added ℓ as alternative for liter +- Support permille units and `‰` symbol (PR #2033, Issue #1963) +- Switch from appdirs to platformdirs. +- Fixes issues related to GenericPlainRegistry.__getattr__ type (PR #2038, Issues #1946 and #1804) +- Removed deprecated references in documentation and tests (PR #2058, Issue #2057) + + +0.24.1 (2024-06-24) +----------------- +------------------- + +- Fix custom formatter needing the registry object. (PR #2011) +- Support Python 3.9 following difficulties installing with NumPy 2. (PR #2019) +- Fix default formatting of dimensionless unit issue. (PR #2012) +- Fix bug preventing custom formatters with modifiers working. (PR #2021) + + +0.24 (2024-06-07) +----------------- + +- Fix detection of invalid conversion between offset and delta units. (PR #1905) +- Added dBW, decibel Watts, which is used in RF high power applications +- NumPy 2.0 support + (PR #1985, #1971) +- Implement numpy roll (Related to issue #981) +- Implement numpy correlate + (PR #1990) +- Add `dim_sort` function to _formatter_helpers. +- Add `dim_order` and `default_sort_func` properties to FullFormatter. + (PR #1926, fixes Issue #1841) +- Minimum version requirement added for typing_extensions>=4.0.0. + (PR #1996) +- Documented packages using pint.
+ (PR #1960) +- Fixed bug causing operations between arrays of quantity scalars and quantity holding + array resulting in incorrect units. + (PR #1677) +- Fix LaTeX siunitx formatting when using non_int_type=decimal.Decimal. + (PR #1977) +- Added refractive index units. + (PR #1816) +- Fix converting to offset units of higher dimension e.g. gauge pressure + (PR #1949) +- Fix unhandled TypeError when auto_reduce_dimensions=True and non_int_type=Decimal + (PR #1853) +- Creating prefixed offset units now raises an error. + (PR #1998) +- Improved error message in `get_dimensionality()` when non-existent units are passed. + (PR #1874, Issue #1716) + + +0.23 (2023-12-08) +----------------- + +- Add _get_conversion_factor to registry with cache. +- Homogenize input and output of internal registry functions to + facilitate typing, subclassing and wrapping. + (_yield_unit_triplets, ) +- Generated downstream_status page to track the + state of downstream projects. +- Improve typing annotation. +- Updated to flexparser 0.2. +- Faster wraps + (PR #1862) +- Add codspeed github action. +- Move benchmarks to pytest-benchmarks. +- Support pytest on python 3.12 wrt Fraction formatting change + (#1818) +- Fixed Transformation type protocol. + (PR #1805, PR #1832) +- Documented to_preferred and added an autoconvert_to_preferred registry option. + (PR #1803) +- Enable Pint to parse uncertainty numbers. + (See #1611, #1614) +- Optimize matplotlib unit conversion for Quantity arrays + (PR #1819) +- Add numpy.linalg.norm implementation. + (PR #1251) + + +0.22 (2023-05-25) +----------------- + +- Drop Python 3.8 compatibility following NEP-29. +- Drop NumPy < 1.21 following NEP-29. +- Improved typing experience. +- Migrated fully to pyproject.toml. +- Migrated to ruff. +- In order to make static typing possible as required by mypy + and similar tools, the way to subclass the registry has been + changed. +- Allow non-quantity atol parameters for isclose and allclose. + (PR #1783) + + +0.21 (2023-05-01) +----------------- + +- Add PEP621/631 support. + (Issue #1647) +- Exposed matplotlib unit formatter (PR #1703) +- Fix error when re-registering a formatter. + (PR #1629) +- Add new SI prefixes: ronna-, ronto-, quetta-, quecto-. + (PR #1652) +- Fix unit check with `atol` using `np.allclose` & `np.isclose`. + (Issue #1658) +- Implementation for numpy.positive added for Quantity. + (PR #1663) +- Changed frequency to angular frequency in the docs. + (PR #1668) +- Remove deprecated `alen` numpy function + (PR #1678) +- Updated URLs for log and offset unit errors. + (PR #1727) +- Patched TYPE_CHECKING import regression. + (PR #1686) +- Parse '°' along with previous text, rather than adding a space, + allowing, e.g., 'Δ°C' as a unit. + (PR #1729) +- Improved escaping of special characters for LaTeX format + (PR #1712) +- Avoid addition of spurious trailing zeros when converting units and non-int-type is + Decimal (PR #1625). +- Implementation for numpy.delete added for Quantity. + (PR #1669) +- Fixed Quantity type returned from `__dask_postcompute__`. + (PR #1722) +- Added Townsend unit + (PR #1738) +- Fix get_compatible_units() in dynamically added units. + (Issue #1725) +- Fix pint-convert script + (Issue #1646) +- Honor non_int_type when dividing.
+ (Issue #1505) +- Fix `trapz`, `dot`, and `cross` to work properly with non-multiplicative units + (Issue #1593) + + 0.21 (unreleased) ----------------- +0.21.dev0+valispace (2023-01-18) +-------------------------------- + +----------------- + +- Fix detection of invalid conversion between offset and delta units. (PR #1905) +- Added dBW, decibel Watts, which is used in RF high power applications +- NumPy 2.0 support + (PR #1985, #1971) +- Implement numpy roll (Related to issue #981) +- Implement numpy correlate + (PR #1990) +- Add `dim_sort` function to _formatter_helpers. +- Add `dim_order` and `default_sort_func` properties to FullFormatter. + (PR #1926, fixes Issue #1841) +- Minimum version requirement added for typing_extensions>=4.0.0. + (PR #1996) +- Documented packages using pint. + (PR #1960) +- Fixed bug causing operations between arrays of quantity scalars and quantity holding + array resulting in incorrect units. + (PR #1677) +- Fix LaTeX siuntix formatting when using non_int_type=decimal.Decimal. + (PR #1977) +- Added refractive index units. + (PR #1816) +- Fix converting to offset units of higher dimension e.g. gauge pressure + (PR #1949) +- Fix unhandled TypeError when auto_reduce_dimensions=True and non_int_type=Decimal + (PR #1853) +- Creating prefixed offset units now raises an error. + (PR #1998) +- Improved error message in `get_dimensionality()` when non existent units are passed. + (PR #1874, Issue #1716) + + +0.23 (2023-12-08) +----------------- + +- Add _get_conversion_factor to registry with cache. +- Homogenize input and ouput of internal regitry functions to + facility typing, subclassing and wrapping. + (_yield_unit_triplets, ) +- Generated downstream_status page to track the + state of downstream projects. +- Improve typing annotation. +- Updated to flexparser 0.2. +- Faster wraps + (PR #1862) +- Add codspeed github action. +- Move benchmarks to pytest-benchmarks. +- Support pytest on python 3.12 wrt Fraction formatting change + (#1818) +- Fixed Transformation type protocol. + (PR #1805, PR #1832) +- Documented to_preferred and created added an autoautoconvert_to_preferred registry option. + (PR #1803) +- Enable Pint to parse uncertainty numbers. + (See #1611, #1614) +- Optimize matplotlib unit conversion for Quantity arrays + (PR #1819) +- Add numpy.linalg.norm implementation. + (PR #1251) + + +0.22 (2023-05-25) +----------------- + +- Drop Python 3.8 compatability following NEP-29. +- Drop NumPy < 1.21 following NEP-29. +- Improved typing experience. +- Migrated fully to pyproject.toml. +- Migrated to ruff. +- In order to make static typing possible as required by mypy + and similar tools, the way to subclass the registry has been + changed. +- Allow non-quantity atol parameters for isclose and allclose. + (PR #1783) + + +0.21 (2023-05-01) +----------------- + +- Add PEP621/631 support. + (Issue #1647) +- Exposed matplotlib unit formatter (PR #1703) - Fix error when when re-registering a formatter. (PR #1629) - Add new SI prefixes: ronna-, ronto-, quetta-, quecto-. (PR #1652) +- Fix unit check with `atol` using `np.allclose` & `np.isclose`. + (Issue #1658) - Implementation for numpy.positive added for Quantity. (PR #1663) - Changed frequency to angular frequency in the docs. (PR #1668) +- Support Δ° symbol for offset units Celsius and Fahrenheit. + (PR #11-valispace) +- Remove deprecated `alen` numpy function + (PR #1678) +- Updated URLs for log and offset unit errors. + (PR #1727) +- Patched TYPE_CHECKING import regression. 
+ (PR #1686) +- Parse '°' along with previous text, rather than adding a space, + allowing, eg 'Δ°C' as a unit. + (PR #1729) +- Improved escaping of special characters for LaTeX format + (PR #1712) +- Avoid addition of spurious trailing zeros when converting units and non-int-type is + Decimal (PR #1625). +- Implementation for numpy.delete added for Quantity. + (PR #1669) +- Fixed Quantity type returned from `__dask_postcompute__`. + (PR #1722) +- Added Townsend unit + (PR #1738) +- Fix get_compatible_units() in dynamically added units. + (Issue #1725) +- Fix pint-convert script + (Issue #1646) +- Honor non_int_type when dividing. + (Issue #1505) +- Fix `trapz`, `dot`, and `cross` to work properly with non-multiplicative units + (Issue #1593) ### Breaking Changes - Support percent and ppm units. Support the `%` symbol. (Issue #1277) +- Fix error when parsing subtraction operator followed by white space. + (PR #1701) +- Removed Td as an alias for denier (within the Textile group) 0.20.1 (2022-10-27) ------------------- @@ -44,15 +347,24 @@ Pint Changelog (Issue #1030, #574) - Added angular frequency documentation page. - Move ASV benchmarks to dedicated folder. (Issue #1542) +- An ndim attribute has been added to Quantity and DataFrame has been added to upcast + types for pint-pandas compatibility. (#1596) +- Fix a recursion error that would be raised when passing quantities to `cond` and `x`. + (Issue #1510, #1530) +- Update test_non_int tests for pytest. +- Better support for uncertainties (See #1611, #1614) - Implement `numpy.broadcast_arrays` (#1607) - An ndim attribute has been added to Quantity and DataFrame has been added to upcast -types for pint-pandas compatibility. (#1596) + types for pint-pandas compatibility. (#1596) - Fix a recursion error that would be raised when passing quantities to `cond` and `x`. (Issue #1510, #1530) - Update test_non_int tests for pytest. - Create NaN-value quantities of appropriate non-int-type (Issue #1570). - New documentation format and organization! - Better support for pandas and dask. +- Fix masked arrays (with multiple values) incorrectly being passed through + setitem (Issue #1584) +- Add Quantity.to_preferred 0.19.2 (2022-04-23) ------------------- diff --git a/MANIFEST.in b/MANIFEST.in index 96c6d4928..d6b725cdd 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,8 +1,9 @@ -include AUTHORS CHANGES LICENSE README.rst BADGES.rst version.txt .coveragerc .readthedocs.yaml +include AUTHORS CHANGES LICENSE README.rst BADGES.rst version.txt .coveragerc .readthedocs.yaml .pre-commit-config.yaml recursive-include pint * recursive-include docs * -recursive-include bench * +recursive-include benchmarks * prune docs/_build prune docs/_themes/.git +prune pint/.pytest_cache exclude .editorconfig bors.toml pull_request_template.md requirements_docs.txt version.py -global-exclude *.pyc *~ .DS_Store *__pycache__* *.pyo .travis-exclude.yml +global-exclude *.pyc *~ .DS_Store *__pycache__* *.pyo .travis-exclude.yml *.lock diff --git a/README.rst b/README.rst index 86c8f77fc..3c16a4541 100644 --- a/README.rst +++ b/README.rst @@ -2,8 +2,13 @@ :target: https://pypi.python.org/pypi/pint :alt: Latest Version -.. image:: https://img.shields.io/badge/code%20style-black-000000.svg - :target: https://github.com/python/black +.. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json + :target: https://github.com/astral-sh/ruff + :alt: Ruff + +.. 
image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/format.json + :target: https://github.com/astral-sh/ruff + :alt: Ruff-Format .. image:: https://readthedocs.org/projects/pint/badge/ :target: https://pint.readthedocs.org/ @@ -43,7 +48,7 @@ and constants. Due to its modular design, you can extend (or even rewrite!) the complete list without changing the source code. It supports a lot of numpy mathematical operations **without monkey patching or wrapping numpy**. -It has a complete test coverage. It runs in Python 3.8+ with no other dependency. +It has a complete test coverage. It runs in Python 3.9+ with no other dependency. It is licensed under BSD. It is extremely easy and natural to use: @@ -153,7 +158,7 @@ see CHANGES_ .. _`NumPy`: http://www.numpy.org/ .. _`PEP 3101`: https://www.python.org/dev/peps/pep-3101/ .. _`Babel`: http://babel.pocoo.org/ -.. _`Pandas Extension Types`: https://pandas.pydata.org/pandas-docs/stable/extending.html#extension-types -.. _`pint-pandas Jupyter notebook`: https://github.com/hgrecco/pint-pandas/blob/master/notebooks/pandas_support.ipynb +.. _`Pandas Extension Types`: https://pandas.pydata.org/pandas-docs/stable/development/extending.html#extension-types +.. _`pint-pandas Jupyter notebook`: https://github.com/hgrecco/pint-pandas/blob/master/notebooks/pint-pandas.ipynb .. _`AUTHORS`: https://github.com/hgrecco/pint/blob/master/AUTHORS .. _`CHANGES`: https://github.com/hgrecco/pint/blob/master/CHANGES diff --git a/benchmarks/asv.conf.json b/benchmarks/asv.conf.json deleted file mode 100644 index b66f5abc1..000000000 --- a/benchmarks/asv.conf.json +++ /dev/null @@ -1,160 +0,0 @@ -{ - // The version of the config file format. Do not change, unless - // you know what you are doing. - "version": 1, - - // The name of the project being benchmarked - "project": "pint", - - // The project's homepage - "project_url": "https://github.com/hgrecco/pint", - - // The URL or local path of the source code repository for the - // project being benchmarked - "repo": ".", - - // The Python project's subdirectory in your repo. If missing or - // the empty string, the project is assumed to be located at the root - // of the repository. - // "repo_subdir": "", - - // Customizable commands for building, installing, and - // uninstalling the project. See asv.conf.json documentation. - // - // "install_command": ["in-dir={env_dir} python -mpip install {wheel_file}"], - // "uninstall_command": ["return-code=any python -mpip uninstall -y {project}"], - // "build_command": [ - // "python setup.py build", - // "PIP_NO_BUILD_ISOLATION=false python -mpip wheel --no-deps --no-index -w {build_cache_dir} {build_dir}" - // ], - - // List of branches to benchmark. If not provided, defaults to "master" - // (for git) or "default" (for mercurial). - // "branches": ["master"], // for git - // "branches": ["default"], // for mercurial - - // The DVCS being used. If not set, it will be automatically - // determined from "repo" by looking at the protocol in the URL - // (if remote), or by looking for special directories, such as - // ".git" (if local). - // "dvcs": "git", - - // The tool to use to create environments. May be "conda", - // "virtualenv" or other value depending on the plugins in use. - // If missing or the empty string, the tool will be automatically - // determined by looking for tools on the PATH environment - // variable. 
- "environment_type": "conda", - - // timeout in seconds for installing any dependencies in environment - // defaults to 10 min - //"install_timeout": 600, - - // the plain URL to show a commit for the project. - "show_commit_url": "http://github.com/hgrecco/pint/commit/", - - // The Pythons you'd like to test against. If not provided, defaults - // to the current version of Python used to run `asv`. - "pythons": ["3.9"], - - // The list of conda channel names to be searched for benchmark - // dependency packages in the specified order - // "conda_channels": ["conda-forge", "defaults"], - - // The matrix of dependencies to test. Each key is the name of a - // package (in PyPI) and the values are version numbers. An empty - // list or empty string indicates to just test against the default - // (latest) version. null indicates that the package is to not be - // installed. If the package to be tested is only available from - // PyPi, and the 'environment_type' is conda, then you can preface - // the package name by 'pip+', and the package will be installed via - // pip (with all the conda available packages installed first, - // followed by the pip installed packages). - - "matrix": { - "numpy": ["1.19"], - // "six": ["", null], // test with and without six installed - // "pip+emcee": [""], // emcee is only available for install with pip. - }, - - // Combinations of libraries/python versions can be excluded/included - // from the set to test. Each entry is a dictionary containing additional - // key-value pairs to include/exclude. - // - // An exclude entry excludes entries where all values match. The - // values are regexps that should match the whole string. - // - // An include entry adds an environment. Only the packages listed - // are installed. The 'python' key is required. The exclude rules - // do not apply to includes. - // - // In addition to package names, the following keys are available: - // - // - python - // Python version, as in the *pythons* variable above. - // - environment_type - // Environment type, as above. - // - sys_platform - // Platform, as in sys.platform. Possible values for the common - // cases: 'linux2', 'win32', 'cygwin', 'darwin'. - // - // "exclude": [ - // {"python": "3.2", "sys_platform": "win32"}, // skip py3.2 on windows - // {"environment_type": "conda", "six": null}, // don't run without six on conda - // ], - // - // "include": [ - // // additional env for python2.7 - // {"python": "2.7", "numpy": "1.8"}, - // // additional env if run on windows+conda - // {"platform": "win32", "environment_type": "conda", "python": "2.7", "libpython": ""}, - // ], - - // The directory (relative to the current directory) that benchmarks are - // stored in. If not provided, defaults to "benchmarks" - // "benchmark_dir": "benchmarks", - - // The directory (relative to the current directory) to cache the Python - // environments in. If not provided, defaults to "env" - "env_dir": ".asv/env", - - // The directory (relative to the current directory) that raw benchmark - // results are stored in. If not provided, defaults to "results". - "results_dir": ".asv/results", - - // The directory (relative to the current directory) that the html tree - // should be written to. If not provided, defaults to "html". - "html_dir": ".asv/html", - - // The number of characters to retain in the commit hashes. - // "hash_length": 8, - - // `asv` will cache results of the recent builds in each - // environment, making them faster to install next time. 
This is - // the number of builds to keep, per environment. - // "build_cache_size": 2, - - // The commits after which the regression search in `asv publish` - // should start looking for regressions. Dictionary whose keys are - // regexps matching to benchmark names, and values corresponding to - // the commit (exclusive) after which to start looking for - // regressions. The default is to start from the first commit - // with results. If the commit is `null`, regression detection is - // skipped for the matching benchmark. - // - // "regressions_first_commits": { - // "some_benchmark": "352cdf", // Consider regressions only after this commit - // "another_benchmark": null, // Skip regression detection altogether - // }, - - // The thresholds for relative change in results, after which `asv - // publish` starts reporting regressions. Dictionary of the same - // form as in ``regressions_first_commits``, with values - // indicating the thresholds. If multiple entries match, the - // maximum is taken. If no entry matches, the default is 5%. - // - // "regressions_thresholds": { - // "some_benchmark": 0.01, // Threshold of 1% - // "another_benchmark": 0.5, // Threshold of 50% - // }, -} diff --git a/benchmarks/benchmarks/00_common.py b/benchmarks/benchmarks/00_common.py deleted file mode 100644 index 69ae2470a..000000000 --- a/benchmarks/benchmarks/00_common.py +++ /dev/null @@ -1,16 +0,0 @@ -import subprocess -import sys - - -class TimeImport: - def time_import(self): - # on py37+ the "-X importtime" usage gives us a more precise - # measurement of the import time we actually care about, - # without the subprocess or interpreter overhead - cmd = [sys.executable, "-X", "importtime", "-c", "import pint"] - p = subprocess.run(cmd, stderr=subprocess.PIPE) - - line = p.stderr.splitlines()[-1] - field = line.split(b"|")[-2].strip() - total = int(field) # microseconds - return total diff --git a/benchmarks/benchmarks/01_registry_creation.py b/benchmarks/benchmarks/01_registry_creation.py deleted file mode 100644 index 29c90101f..000000000 --- a/benchmarks/benchmarks/01_registry_creation.py +++ /dev/null @@ -1,13 +0,0 @@ -import pint - -from . import util - - -def time_create_registry(args): - if len(args) == 2: - pint.UnitRegistry(args[0], cache_folder=args[1]) - else: - pint.UnitRegistry(*args) - - -time_create_registry.params = [[(None,), tuple(), (util.get_tiny_def(),), ("", None)]] diff --git a/benchmarks/benchmarks/10_registry.py b/benchmarks/benchmarks/10_registry.py deleted file mode 100644 index 1019eb50d..000000000 --- a/benchmarks/benchmarks/10_registry.py +++ /dev/null @@ -1,154 +0,0 @@ -import pathlib - -import pint - -from . 
import util - -units = ("meter", "kilometer", "second", "minute", "angstrom") - -other_units = ("meter", "angstrom", "kilometer/second", "angstrom/minute") - -all_values = ("int", "float", "complex") - -ureg = None -data = {} - - -def setup(*args): - - global ureg, data - - data["int"] = 1 - data["float"] = 1.0 - data["complex"] = complex(1, 2) - - ureg = pint.UnitRegistry(util.get_tiny_def()) - - -def my_setup(*args): - global data - setup(*args) - for unit in units + other_units: - data["uc_%s" % unit] = pint.registry.to_units_container(unit, ureg) - - -def time_build_cache(): - ureg._build_cache() - - -def time_getattr(key): - getattr(ureg, key) - - -time_getattr.params = units - - -def time_getitem(key): - ureg[key] - - -time_getitem.params = units - - -def time_parse_unit_name(key): - ureg.parse_unit_name(key) - - -time_parse_unit_name.params = units - - -def time_parse_units(key): - ureg.parse_units(key) - - -time_parse_units.params = units - - -def time_parse_expression(key): - ureg.parse_expression("1.0 " + key) - - -time_parse_expression.params = units - - -def time_base_units(unit): - ureg.get_base_units(unit) - - -time_base_units.params = other_units - - -def time_to_units_container_registry(unit): - pint.registry.to_units_container(unit, ureg) - - -time_to_units_container_registry.params = other_units - - -def time_to_units_container_detached(unit): - pint.registry.to_units_container(unit, ureg) - - -time_to_units_container_detached.params = other_units - - -def time_convert_from_uc(key): - src, dst = key - ureg._convert(1.0, data[src], data[dst]) - - -time_convert_from_uc.setup = my_setup -time_convert_from_uc.params = [ - (("uc_meter", "uc_kilometer"), ("uc_kilometer/second", "uc_angstrom/minute")) -] - - -def time_parse_math_expression(): - ureg.parse_expression("3 + 5 * 2 + value", value=10) - - -# This code is duplicated with other benchmarks but simplify comparison - -CACHE_FOLDER = pathlib.Path(".cache") -CACHE_FOLDER.mkdir(exist_ok=True) -pint.UnitRegistry(cache_folder=CACHE_FOLDER) - - -def time_load_definitions_stage_1(cache_folder): - """empty registry creation""" - # Change this into a single part benchmark using setup - _ = pint.UnitRegistry(None, cache_folder=None) - - -time_load_definitions_stage_1.param_names = [ - "cache_folder", -] -time_load_definitions_stage_1.params = [ - None, - CACHE_FOLDER, -] - - -def time_load_definitions_stage_2(cache_folder, *args, **kwargs): - """empty registry creation + parsing default files + definition object loading""" - - # Change this into a single part benchmark using setup - empty_registry = pint.UnitRegistry(None, cache_folder=cache_folder) - empty_registry.load_definitions("default_en.txt", True) - - -time_load_definitions_stage_2.param_names = time_load_definitions_stage_1.param_names -time_load_definitions_stage_2.params = time_load_definitions_stage_1.params - - -def time_load_definitions_stage_3(cache_folder, *args, **kwargs): - """empty registry creation + parsing default files + definition object loading + cache building""" - - # Change this into a single part benchmark using setup - empty_registry = pint.UnitRegistry(None, cache_folder=cache_folder) - loaded_files = empty_registry.load_definitions("default_en.txt", True) - empty_registry._build_cache(loaded_files) - - -time_load_definitions_stage_3.param_names = time_load_definitions_stage_1.param_names -time_load_definitions_stage_3.params = time_load_definitions_stage_1.params diff --git a/benchmarks/benchmarks/20_quantity.py 
b/benchmarks/benchmarks/20_quantity.py deleted file mode 100644 index 5f6dd418a..000000000 --- a/benchmarks/benchmarks/20_quantity.py +++ /dev/null @@ -1,56 +0,0 @@ -import itertools as it -import operator - -import pint - -from . import util - -units = ("meter", "kilometer", "second", "minute", "angstrom") -all_values = ("int", "float", "complex") -all_values_q = tuple( - "%s_%s" % (a, b) for a, b in it.product(all_values, ("meter", "kilometer")) -) - -op1 = (operator.neg, operator.truth) -op2_cmp = (operator.eq,) # operator.lt) -op2_math = (operator.add, operator.sub, operator.mul, operator.truediv) - -ureg = None -data = {} - - -def setup(*args): - - global ureg, data - - data["int"] = 1 - data["float"] = 1.0 - data["complex"] = complex(1, 2) - - ureg = pint.UnitRegistry(util.get_tiny_def()) - - for key in all_values: - data[key + "_meter"] = data[key] * ureg.meter - data[key + "_kilometer"] = data[key] * ureg.kilometer - - -def time_build_by_mul(key): - data[key] * ureg.meter - - -time_build_by_mul.params = all_values - - -def time_op1(key, op): - op(data[key]) - - -time_op1.params = [all_values_q, op1] - - -def time_op2(keys, op): - key1, key2 = keys - op(data[key1], data[key2]) - - -time_op2.params = [tuple(it.product(all_values_q, all_values_q)), op2_math + op2_cmp] diff --git a/benchmarks/benchmarks/30_numpy.py b/benchmarks/benchmarks/30_numpy.py deleted file mode 100644 index ec838335f..000000000 --- a/benchmarks/benchmarks/30_numpy.py +++ /dev/null @@ -1,97 +0,0 @@ -import itertools as it -import operator - -import numpy as np - -import pint - -from . import util - -lengths = ("short", "mid") -all_values = tuple( - "%s_%s" % (a, b) for a, b in it.product(lengths, ("list", "tuple", "array")) -) -all_arrays = ("short_array", "mid_array") -units = ("meter", "kilometer") -all_arrays_q = tuple("%s_%s" % (a, b) for a, b in it.product(all_arrays, units)) - -ureg = None -data = {} -op1 = (operator.neg,) # operator.truth, -op2_cmp = (operator.eq, operator.lt) -op2_math = (operator.add, operator.sub, operator.mul, operator.truediv) -numpy_op2_cmp = (np.equal, np.less) -numpy_op2_math = (np.add, np.subtract, np.multiply, np.true_divide) - - -def float_range(n): - return (float(x) for x in range(1, n + 1)) - - -def setup(*args): - - global ureg, data - short = list(float_range(3)) - mid = list(float_range(1_000)) - - data["short_list"] = short - data["short_tuple"] = tuple(short) - data["short_array"] = np.asarray(short) - data["mid_list"] = mid - data["mid_tuple"] = tuple(mid) - data["mid_array"] = np.asarray(mid) - - ureg = pint.UnitRegistry(util.get_tiny_def()) - - for key in all_arrays: - data[key + "_meter"] = data[key] * ureg.meter - data[key + "_kilometer"] = data[key] * ureg.kilometer - - -def time_finding_meter_getattr(): - ureg.meter - - -def time_finding_meter_getitem(): - ureg["meter"] - - -def time_base_units(unit): - ureg.get_base_units(unit) - - -time_base_units.params = ["meter", "angstrom", "meter/second", "angstrom/minute"] - - -def time_build_by_mul(key): - data[key] * ureg.meter - - -time_build_by_mul.params = all_arrays - - -def time_op1(key, op): - op(data[key]) - - -time_op1.params = [all_arrays_q, op1 + (np.sqrt, np.square)] - - -def time_op2(keys, op): - key1, key2 = keys - op(data[key1], data[key2]) - - -time_op2.params = [ - ( - ("short_array_meter", "short_array_meter"), - ("short_array_meter", "short_array_kilometer"), - ("short_array_kilometer", "short_array_meter"), - ("short_array_kilometer", "short_array_kilometer"), - ("mid_array_meter", "mid_array_meter"), 
- ("mid_array_meter", "mid_array_kilometer"), - ("mid_array_kilometer", "mid_array_meter"), - ("mid_array_kilometer", "mid_array_kilometer"), - ), - op2_math + op2_cmp + numpy_op2_math + numpy_op2_cmp, -] diff --git a/benchmarks/benchmarks/util.py b/benchmarks/benchmarks/util.py deleted file mode 100644 index 794979268..000000000 --- a/benchmarks/benchmarks/util.py +++ /dev/null @@ -1,38 +0,0 @@ -import io - -SMALL_VEC_LEN = 3 -MID_VEC_LEN = 1_000 -LARGE_VEC_LEN = 1_000_000 - -TINY_DEF = """ -yocto- = 1e-24 = y- -zepto- = 1e-21 = z- -atto- = 1e-18 = a- -femto- = 1e-15 = f- -pico- = 1e-12 = p- -nano- = 1e-9 = n- -micro- = 1e-6 = µ- = μ- = u- -milli- = 1e-3 = m- -centi- = 1e-2 = c- -deci- = 1e-1 = d- -deca- = 1e+1 = da- = deka- -hecto- = 1e2 = h- -kilo- = 1e3 = k- -mega- = 1e6 = M- -giga- = 1e9 = G- -tera- = 1e12 = T- -peta- = 1e15 = P- -exa- = 1e18 = E- -zetta- = 1e21 = Z- -yotta- = 1e24 = Y- - -meter = [length] = m = metre -second = [time] = s = sec - -angstrom = 1e-10 * meter = Å = ångström = Å -minute = 60 * second = min -""" - - -def get_tiny_def(): - return io.StringIO(TINY_DEF) diff --git a/bors.toml b/bors.toml deleted file mode 100644 index 4e9e7be72..000000000 --- a/bors.toml +++ /dev/null @@ -1,8 +0,0 @@ -status = [ - "ci", - "docbuild", - "lint" -] -delete_merged_branches = true -timeout_sec = 10800 -block_labels = [ "do-not-merge-yet" ] diff --git a/docs/_static/style.css b/docs/_static/style.css index b2bc297d6..a2ac3f7fd 100644 --- a/docs/_static/style.css +++ b/docs/_static/style.css @@ -38,8 +38,14 @@ pre, code { .sd-card .sd-card-header { border: none; - color: #150458 !important; + color: #150458; font-size: var(--pst-font-size-h5); font-weight: bold; padding: 2.5rem 0rem 0.5rem 0rem; } + +html[data-theme=dark] { + .sd-card .sd-card-header { + color: #FFF; + } +} diff --git a/docs/advanced/currencies.rst b/docs/advanced/currencies.rst index 26b66b531..addc94785 100644 --- a/docs/advanced/currencies.rst +++ b/docs/advanced/currencies.rst @@ -84,3 +84,16 @@ currency on its own dimension, and then implement transformations:: More sophisticated formulas, e.g. dealing with flat fees and thresholds, can be implemented with arbitrary python code by programmatically defining a context (see :ref:`contexts`). + +Currency Symbols +---------------- + +Many common currency symbols are not supported by the pint parser. A preprocessor can be used as a workaround: + +.. doctest:: + + >>> import pint + >>> ureg = pint.UnitRegistry(preprocessors = [lambda s: s.replace("€", "EUR")]) + >>> ureg.define("euro = [currency] = € = EUR") + >>> print(ureg.Quantity("1 €")) + 1 euro diff --git a/docs/advanced/custom-registry-class.rst b/docs/advanced/custom-registry-class.rst index 31f3d76fe..395f1b00e 100644 --- a/docs/advanced/custom-registry-class.rst +++ b/docs/advanced/custom-registry-class.rst @@ -9,7 +9,7 @@ Pay as you go Pint registry functionality is divided into facets. The default UnitRegistry inherits from all of them, providing a full fledged and feature rich registry. However, in certain cases you might want -to have a simpler and light registry. Just pick what you need +to have a simpler and lighter registry. Just pick what you need and create your own. - FormattingRegistry: adds the capability to format quantities and units into string. @@ -31,15 +31,16 @@ For example: .. doctest:: >>> import pint - >>> class MyRegistry(pint.facets.NonMultiplicativeRegistry, pint.facets.PlainRegistry): + >>> class MyRegistry(pint.facets.NonMultiplicativeRegistry): ... pass -Subclassing ------------ +.. 
note:: + `NonMultiplicativeRegistry` is a subclass of `PlainRegistry`, and therefore + it is not required to add it explicitly to the `MyRegistry` bases. -If you want to add the default registry class some specific functionality, -you can subclass it: + +You can add some specific functionality to your new registry. .. doctest:: @@ -51,13 +52,20 @@ you can subclass it: ... """ -If you want to create your own Quantity class, you must tell -your registry about it: + +Custom Quantity and Unit class +------------------------------ + +You can also create your own Quantity and Unit classes: you must derive +them from Quantity (or Unit) and tell your registry about them. + +For example, if you want to create a new `UnitRegistry` subclass, you +need to derive the Quantity and Unit classes from it. .. doctest:: >>> import pint - >>> class MyQuantity: + >>> class MyQuantity(pint.UnitRegistry.Quantity): ... ... # Notice that subclassing pint.Quantity ... # is not necessary. ... # Pint will inspect the Registry class and create ... # a Quantity class that contains all the ... # required parents. ... ... def to_my_desired_format(self): ... """Do something else ... """ - >>> - >>> class MyRegistry(pint.UnitRegistry): ... - ... _quantity_class = MyQuantity + >>> class MyUnit(pint.UnitRegistry.Unit): + ... + ... # Notice that subclassing pint.Unit + ... # is not necessary. + ... # Pint will inspect the Registry class and create + ... # a Unit class that contains all the + ... # required parents. ... - ... # The same you can be done with - ... # _unit_class - ... # _measurement_class + ... def to_my_desired_format(self): + ... """Do something else + ... """ + +Then, you need to create a custom registry deriving from `GenericUnitRegistry` so that you +can specify the Quantity and Unit types to use. + +.. doctest:: + + >>> from typing_extensions import TypeAlias # Python 3.9 + >>> # from typing import TypeAlias # Python 3.10+ + >>> class MyRegistry(pint.registry.GenericUnitRegistry[MyQuantity, MyUnit]): + ... + ... Quantity: TypeAlias = MyQuantity + ... Unit: TypeAlias = MyUnit + ... While these examples demonstrate how to add functionality to the default -registry class, you can actually subclass just the PlainRegistry or any -combination of facets. +registry class, you can actually subclass just `PlainRegistry` or +`GenericPlainRegistry`. diff --git a/docs/advanced/measurement.rst b/docs/advanced/measurement.rst index a49c8212b..0958d8db8 100644 --- a/docs/advanced/measurement.rst +++ b/docs/advanced/measurement.rst @@ -69,4 +69,4 @@ the `Propagation of uncertainty`_ rules. .. _`Propagation of uncertainty`: http://en.wikipedia.org/wiki/Propagation_of_uncertainty -.. _`Uncertainties package`: https://uncertainties-python-package.readthedocs.io/en/latest/ +.. _`Uncertainties package`: https://uncertainties.readthedocs.io/en/latest/ diff --git a/docs/advanced/performance.rst b/docs/advanced/performance.rst index d7b8a0cd5..998cac681 100644 --- a/docs/advanced/performance.rst +++ b/docs/advanced/performance.rst @@ -120,7 +120,7 @@ If you want to use the default cache folder provided by the OS, use **:auto:** >>> import pint >>> ureg = pint.UnitRegistry(cache_folder=":auto:") # doctest: +SKIP -Pint use an included version of appdirs_ to obtain the correct folder, +Pint uses the external dependency platformdirs_ to obtain the correct folder, for example in macOS is `/Users//Library/Caches/pint` In any case, you can check the location of the cache folder. ..
_`brentq method`: http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brentq.html -.. _appdirs: https://pypi.org/project/appdirs/ +.. _platformdirs: https://pypi.org/project/platformdirs .. _flexcache: https://github.com/hgrecco/flexcache/ diff --git a/docs/advanced/pitheorem.rst b/docs/advanced/pitheorem.rst index cd3716528..06409d8b5 100644 --- a/docs/advanced/pitheorem.rst +++ b/docs/advanced/pitheorem.rst @@ -33,8 +33,10 @@ Which can be pretty printed using the `Pint` formatter: >>> from pint import formatter >>> result = pi_theorem({'V': '[length]/[time]', 'T': '[time]', 'L': '[length]'}) - >>> print(formatter(result[0].items())) - T * V / L + >>> numerator = [item for item in result[0].items() if item[1]>0] + >>> denominator = [item for item in result[0].items() if item[1]<0] + >>> print(formatter(numerator, denominator)) + V * T / L You can also apply the Buckingham π theorem associated to a Registry. In this case, you can use derived dimensions such as speed: diff --git a/docs/api/facets.rst b/docs/api/facets.rst index f4b6a54e8..d835f5cea 100644 --- a/docs/api/facets.rst +++ b/docs/api/facets.rst @@ -16,7 +16,7 @@ The default UnitRegistry inherits from all of them. :members: :exclude-members: Quantity, Unit, Measurement, Group, Context, System -.. automodule:: pint.facets.formatting +.. automodule:: pint.delegates.formatter :members: :exclude-members: Quantity, Unit, Measurement, Group, Context, System diff --git a/docs/changes.rst b/docs/changes.rst new file mode 100644 index 000000000..d6c5f48c7 --- /dev/null +++ b/docs/changes.rst @@ -0,0 +1 @@ +.. include:: ../CHANGES diff --git a/docs/conf.py b/docs/conf.py index ee74481f8..b27bff94a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -10,6 +10,7 @@ # # All configuration values have a default; values that are commented out # serve to show the default. +from __future__ import annotations import datetime from importlib.metadata import version @@ -44,6 +45,7 @@ "sphinx_design", ] + # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] diff --git a/docs/dev/contributing.rst b/docs/dev/contributing.rst index c63381b5a..e70a3757d 100644 --- a/docs/dev/contributing.rst +++ b/docs/dev/contributing.rst @@ -9,7 +9,6 @@ Pint uses (and thanks): - `github actions`_ to test all commits and PRs. - coveralls_ to monitor coverage test coverage - readthedocs_ to host the documentation. -- `bors-ng`_ as a merge bot and therefore every PR is tested before merging. - black_, isort_ and flake8_ as code linters and pre-commit_ to enforce them. - pytest_ to write tests - sphinx_ to write docs. @@ -133,7 +132,6 @@ features that work best as an extension package versus direct inclusion in Pint .. _github: http://github.com/hgrecco/pint .. _`issue tracker`: https://github.com/hgrecco/pint/issues -.. _`bors-ng`: https://github.com/bors-ng/bors-ng .. _`github docs`: https://help.github.com/articles/closing-issues-via-commit-messages/ .. _`github actions`: https://docs.github.com/en/actions .. _coveralls: https://coveralls.io/ diff --git a/docs/dev/pint-convert.rst b/docs/dev/pint-convert.rst index dbb0804f4..4ba0ad888 100644 --- a/docs/dev/pint-convert.rst +++ b/docs/dev/pint-convert.rst @@ -77,36 +77,39 @@ With the `uncertainties` package, the experimental uncertainty in the physical constants is considered, and the result is given in compact notation, with the uncertainty in the last figures in parentheses: +The uncertainty can be enabled with `-U` (by default it is not enabled): + +.. 
code-block:: console + + $ pint-convert -p 20 -U Eh eV + 1 hartree = 27.211386245988(52) eV + .. code-block:: console - $ pint-convert Eh eV + $ pint-convert -U Eh eV 1 hartree = 27.21138624599(5) eV The precision is limited by both the maximum number of significant digits (`-p`) and the maximum number of uncertainty digits (`-u`, 2 by default):: - $ pint-convert -p 20 Eh eV + $ pint-convert -U -p 20 Eh eV 1 hartree = 27.211386245988(52) eV - $ pint-convert -p 20 -u 4 Eh eV + $ pint-convert -U -p 20 -u 4 Eh eV 1 hartree = 27.21138624598847(5207) eV -The uncertainty can be disabled with `-U`): - -.. code-block:: console - - $ pint-convert -p 20 -U Eh eV - 1 hartree = 27.211386245988471444 eV - Correlations between experimental constants are also known, and taken into -account. Use `-C` to disable it: +account if uncertainties `-U` is enabled. Use `-C` to disable it: .. code-block:: console $ pint-convert --sys atomic m_p + 1 proton_mass = 1836.15267344 m_e + + $ pint-convert -U --sys atomic m_p 1 proton_mass = 1836.15267344(11) m_e - $ pint-convert --sys atomic -C m_p + $ pint-convert -U --sys atomic -C m_p 1 proton_mass = 1836.15267344(79) m_e Again, note that results may differ slightly, usually in the last figure, from diff --git a/docs/ecosystem.rst b/docs/ecosystem.rst index 7610fd019..95a73bd45 100644 --- a/docs/ecosystem.rst +++ b/docs/ecosystem.rst @@ -7,5 +7,18 @@ Here is a list of known projects, packages and integrations using pint. Pint integrations: ------------------ +- `ucumvert `_ `UCUM `_ (Unified Code for Units of Measure) integration - `pint-pandas `_ Pandas integration - `pint-xarray `_ Xarray integration + + +Packages using pint: +-------------------- + +- `fluids `_ Practical fluid dynamics calculations +- `ht `_ Practical heat transfer calculations +- `chemicals `_ Chemical property calculations and lookups +- `thermo `_ Thermodynamic equilibrium calculations +- `Taurus `_ Control system UI creation +- `InstrumentKit `_ Interacting with laboratory equipment over various buses. +- `NEMO `_ Electricity production cost model diff --git a/docs/getting/index.rst b/docs/getting/index.rst index 9907aeb29..95de7e5a5 100644 --- a/docs/getting/index.rst +++ b/docs/getting/index.rst @@ -8,7 +8,7 @@ The getting started guide aims to get you using pint productively as quickly as Installation ------------ -Pint has no dependencies except Python itself. In runs on Python 3.8+. +Pint has no dependencies except Python itself. It runs on Python 3.9+. .. grid:: 2 diff --git a/docs/getting/overview.rst b/docs/getting/overview.rst index cd639aaa3..f97ad2999 100644 --- a/docs/getting/overview.rst +++ b/docs/getting/overview.rst @@ -14,7 +14,7 @@ Due to its modular design, you can extend (or even rewrite!) the complete list without changing the source code. It supports a lot of numpy mathematical operations **without monkey patching or wrapping numpy**. -It has a complete test coverage. It runs in Python 3.8+ with no other +It has a complete test coverage. It runs in Python 3.9+ with no other dependencies. It is licensed under a `BSD 3-clause style license`_. It is extremely easy and natural to use: @@ -105,6 +105,7 @@ License ------- .. literalinclude:: ../../LICENSE + :language: none .. _`comprehensive list of physical units, prefixes and constants`: https://github.com/hgrecco/pint/blob/master/pint/default_en.txt .. 
_`uncertainties package`: https://pythonhosted.org/uncertainties/ diff --git a/docs/getting/tutorial.rst b/docs/getting/tutorial.rst index 76ba30d14..a0836fe7e 100644 --- a/docs/getting/tutorial.rst +++ b/docs/getting/tutorial.rst @@ -193,6 +193,38 @@ If you want pint to automatically perform dimensional reduction when producing new quantities, the ``UnitRegistry`` class accepts a parameter ``auto_reduce_dimensions``. Dimensional reduction can be slow, so auto-reducing is disabled by default. +The methods ``to_preferred()`` and ``ito_preferred()`` provide more control over dimensional +reduction by specifying a list of units to combine to get the required dimensionality. + +.. doctest:: + + >>> preferred_units = [ + ... ureg.ft, # distance L + ... ureg.slug, # mass M + ... ureg.s, # duration T + ... ureg.rankine, # temperature Θ + ... ureg.lbf, # force L M T^-2 + ... ureg.W, # power L^2 M T^-3 + ... ] + >>> power = ((1 * ureg.lbf) * (1 * ureg.m / ureg.s)).to_preferred(preferred_units) + >>> print(power) + 4.4482216152605005 watt + +The list of preferred units can also be specified in the unit registry to prevent having to pass it to every call to ``to_preferred()``. + +.. doctest:: + + >>> ureg.default_preferred_units = preferred_units + >>> power = ((1 * ureg.lbf) * (1 * ureg.m / ureg.s)).to_preferred() + >>> print(power) + 4.4482216152605005 watt + +The ``UnitRegistry`` class accepts a parameter ``autoconvert_to_preferred``. If set to ``True``, pint will automatically convert to +preferred units when producing new quantities. This is disabled by default. + +Note when there are multiple good combinations of units to reduce to, to_preferred is not guaranteed to be repeatable. +For example, ``(1 * ureg.lbf * ureg.m).to_preferred(preferred_units)`` may return ``W s`` or ``ft lbf``. + String parsing -------------- @@ -281,7 +313,7 @@ Pint's physical quantities can be easily printed: .. doctest:: - >>> accel = 1.3 * ureg['meter/second**2'] + >>> accel = 1.3 * ureg.parse_units('meter/second**2') >>> # The standard string formatting code >>> print('The str is {!s}'.format(accel)) The str is 1.3 meter / second ** 2 @@ -297,7 +329,7 @@ Pint supports float formatting for numpy arrays as well: .. doctest:: >>> import numpy as np - >>> accel = np.array([-1.1, 1e-6, 1.2505, 1.3]) * ureg['meter/second**2'] + >>> accel = np.array([-1.1, 1e-6, 1.2505, 1.3]) * ureg.parse_units('meter/second**2') >>> # float formatting numpy arrays >>> print('The array is {:.2f}'.format(accel)) The array is [-1.10 0.00 1.25 1.30] meter / second ** 2 @@ -309,7 +341,7 @@ Pint also supports `f-strings`_ from python>=3.6 : .. doctest:: - >>> accel = 1.3 * ureg['meter/second**2'] + >>> accel = 1.3 * ureg.parse_units('meter/second**2') >>> print(f'The str is {accel}') The str is 1.3 meter / second ** 2 >>> print(f'The str is {accel:.3e}') @@ -368,7 +400,7 @@ Pint also supports the LaTeX `siunitx` package: .. doctest:: :skipif: not_installed['uncertainties'] - >>> accel = 1.3 * ureg['meter/second**2'] + >>> accel = 1.3 * ureg.parse_units('meter/second**2') >>> # siunitx Latex print >>> print('The siunitx representation is {:Lx}'.format(accel)) The siunitx representation is \SI[]{1.3}{\meter\per\second\squared} @@ -380,10 +412,10 @@ Additionally, you can specify a default format specification: .. 
doctest:: - >>> accel = 1.3 * ureg['meter/second**2'] + >>> accel = 1.3 * ureg.parse_units('meter/second**2') >>> 'The acceleration is {}'.format(accel) 'The acceleration is 1.3 meter / second ** 2' - >>> ureg.default_format = 'P' + >>> ureg.formatter.default_format = 'P' >>> 'The acceleration is {}'.format(accel) 'The acceleration is 1.3 meter/second²' @@ -395,8 +427,8 @@ If Babel_ is installed you can translate unit names to any language .. doctest:: - >>> accel.format_babel(locale='fr_FR') - '1.3 mètre par seconde²' + >>> ureg.formatter.format_quantity(accel, locale='fr_FR') + '1,3 mètres par seconde²' You can also specify the format locale at the registry level either at creation: @@ -408,20 +440,22 @@ or later: .. doctest:: - >>> ureg.set_fmt_locale('fr_FR') + >>> ureg.formatter.set_locale('fr_FR') and by doing that, string formatting is now localized: .. doctest:: - >>> accel = 1.3 * ureg['meter/second**2'] + >>> ureg.formatter.default_format = 'P' + >>> accel = 1.3 * ureg.parse_units('meter/second**2') >>> str(accel) - '1.3 mètre par seconde²' + '1,3 mètres par seconde²' >>> "%s" % accel - '1.3 mètre par seconde²' + '1,3 mètres par seconde²' >>> "{}".format(accel) - '1.3 mètre par seconde²' + '1,3 mètres par seconde²' +If you want to customize string formatting, take a look at :ref:`formatting`. .. _`default list of units`: https://github.com/hgrecco/pint/blob/master/pint/default_en.txt diff --git a/docs/index.rst b/docs/index.rst index 8c60992b9..a2bc6454c 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -70,6 +70,7 @@ Pint: makes units easy Advanced topics ecosystem API Reference + changes .. toctree:: :maxdepth: 1 diff --git a/docs/user/angular_frequency.rst b/docs/user/angular_frequency.rst index 0bafd05a9..61bdf1614 100644 --- a/docs/user/angular_frequency.rst +++ b/docs/user/angular_frequency.rst @@ -1,12 +1,43 @@ .. _angular_frequency: +Angles and Angular Frequency +============================= + +Angles +------ + +pint treats angle quantities as `dimensionless`, following the conventions of SI. The base unit for angle is the `radian`. +The SI BIPM Brochure (Bureau International des Poids et Mesures) states: + +.. note:: + + Plane and solid angles, when expressed in radians and steradians respectively, are in effect + also treated within the SI as quantities with the unit one (see section 5.4.8). The symbols rad + and sr are written explicitly where appropriate, in order to emphasize that, for radians or + steradians, the quantity being considered is, or involves the plane angle or solid angle + respectively. For steradians it emphasizes the distinction between units of flux and intensity + in radiometry and photometry for example. However, it is a long-established practice in + mathematics and across all areas of science to make use of rad = 1 and sr = 1. + + +This leads to behavior some users may find unintuitive. For example, since angles have no dimensionality, it is not possible to check whether a quantity has an angle dimension. + +.. code-block:: python + + >>> import pint + >>> ureg = pint.UnitRegistry() + >>> angle = ureg('1 rad') + >>> angle.dimensionality + + + Angular Frequency -================= +----------------- `Hertz` is a unit for frequency, that is often also used for angular frequency. For example, a shaft spinning at `60 revolutions per minute` will often be said to spin at `1 Hz`, rather than `1 revolution per second`. -By default, pint treats angle quantities as `dimensionless`, so allows conversions between frequencies and angular frequencies. 
The base unit for angle is the `radian`. This leads to some unintuitive behaviour, as pint will convert angular frequencies into frequencies by converting angles into `radians`, rather than `revolutions`. This leads to converted values `2 * pi` larger than expected: +Since pint treats angle quantities as `dimensionless`, it allows conversions between frequencies and angular frequencies. This leads to some unintuitive behaviour, as pint will convert angular frequencies into frequencies by converting angles into `radians`, rather than `revolutions`. This leads to converted values `2 * pi` larger than expected: .. code-block:: python @@ -16,7 +47,7 @@ By default, pint treats angle quantities as `dimensionless`, so allows conversio >>> angular_frequency.to('Hz') -pint follows the conventions of SI. The SI BIPM Brochure (Bureau International des Poids et Mesures) states: +The SI BIPM Brochure (Bureau International des Poids et Mesures) states: .. note:: @@ -25,7 +56,7 @@ pint follows the conventions of SI. The SI BIPM Brochure (Bureau International d Although it is formally correct to write all three of these units as the reciprocal second, the use of the different names emphasizes the different nature of the quantities concerned. It is especially important to carefully distinguish frequencies from angular frequencies, because - by definition their numerical values differ by a factor1 of 2π. Ignoring this fact may cause + by definition their numerical values differ by a factor of 2π. Ignoring this fact may cause an error of 2π. Note that in some countries, frequency values are conventionally expressed using “cycle/s” or “cps” instead of the SI unit Hz, although “cycle” and “cps” are not units in the SI. Note also that it is common, although not recommended, to use the term diff --git a/docs/user/defining-quantities.rst b/docs/user/defining-quantities.rst index ec574545f..a7405151a 100644 --- a/docs/user/defining-quantities.rst +++ b/docs/user/defining-quantities.rst @@ -72,7 +72,7 @@ Using string parsing Pint includes a powerful parser for detecting magnitudes and units (with or without prefixes) in strings. Calling the ``UnitRegistry()`` directly -invokes the parsing function: +invokes the parsing function ``UnitRegistry.parse_expression``: .. doctest:: @@ -124,7 +124,7 @@ their appropriate objects: >>> Q_('2.54') >>> type(Q_('2.54')) - + .. note:: Pint's rule for parsing strings with a mixture of numbers and units is that **units are treated with the same precedence as numbers**. @@ -134,7 +134,7 @@ For example, the units of .. doctest:: >>> Q_('3 l / 100 km') - + may be unexpected at first but, are a consequence of applying this rule. Use brackets to get the expected result: diff --git a/docs/user/formatting.rst b/docs/user/formatting.rst index 7b0f15b68..0a6e81ec7 100644 --- a/docs/user/formatting.rst +++ b/docs/user/formatting.rst @@ -1,159 +1,148 @@ .. currentmodule:: pint -.. ipython:: python - :suppress: - - import pint - - String formatting specification =============================== -The conversion of :py:class:`Unit` and :py:class:`Quantity` objects to strings (e.g. -through the :py:class:`str` builtin or f-strings) can be customized using :ref:`format -specifications `. The basic format is: +The conversion of :py:class:`Unit`, :py:class:`Quantity` and :py:class:`Measurement` +objects to strings (e.g. through the :py:class:`str` builtin or f-strings) can be +customized using :ref:`format specifications `. The basic format is: .. 
code-block:: none - [magnitude format][modifier][unit format] + [magnitude format][modifier][pint format] where each part is optional and the order of these is arbitrary. -In case the format is omitted, the corresponding value in the object's -``.default_format`` attribute (:py:attr:`Quantity.default_format` or -:py:attr:`Unit.default_format`) is filled in. For example: - -.. ipython:: - - In [1]: ureg = pint.UnitRegistry() - ...: ureg.default_format = "~P" - - In [2]: u = ureg.Unit("m ** 2 / s ** 2") - ...: f"{u}" - - In [3]: u.default_format = "~C" - ...: f"{u}" - - In [4]: u.default_format, ureg.default_format - - In [5]: q = ureg.Quantity(1.25, "m ** 2 / s ** 2") - ...: f"{q}" - - In [6]: q.default_format = ".3fP" - ...: f"{q}" - - In [7]: q.default_format, ureg.default_format +.. doctest:: -.. note:: + >>> import pint + >>> ureg = pint.UnitRegistry() + >>> q = 2.3e-6 * ureg.m ** 3 / (ureg.s ** 2 * ureg.kg) + >>> f"{q:~P}" # short pretty + '2.3×10⁻⁶ m³/kg/s²' + >>> f"{q:~#P}" # compact short pretty + '2.3 mm³/g/s²' + >>> f"{q:P#~}" # also compact short pretty + '2.3 mm³/g/s²' + >>> f"{q:.2f~#P}" # short compact pretty with 2 float digits + '2.30 mm³/g/s²' + >>> f"{q:#~}" # short compact default + '2.3 mm ** 3 / g / s ** 2' - In the future, the magnitude and unit format spec will be evaluated - independently, such that with a global default of - ``ureg.default_format = ".3f"`` and ``f"{q:P}`` the format that - will be used is ``".3fP"``. This behavior can be opted into by - setting :py:attr:`UnitRegistry.separate_format_defaults` to :py:obj:`True`. +In case the format is omitted, the corresponding value in the formatter +``.default_format`` attribute is filled in. For example: -If both are not set, the global default of ``"D"`` and the magnitude's default -format are used instead. +.. doctest:: -.. note:: + >>> ureg.formatter.default_format = "P" + >>> f"{q}" + '2.3×10⁻⁶ meter³/kilogram/second²' - Modifiers may be used without specifying any format: ``"~"`` is a valid format - specification and is equal to ``"~D"``. - - -Unit Format Specifications --------------------------- -The :py:class:`Unit` class ignores the magnitude format part, and the unit format -consists of just the format type. - -Let's look at some examples: - -.. ipython:: python - - ureg = pint.UnitRegistry() - u = ureg.kg * ureg.m / ureg.s ** 2 - - f"{u:P}" # using the pretty format - f"{u:~P}" # short pretty - f"{u:P~}" # also short pretty - - # default format - u.default_format - ureg.default_format - str(u) # default: default - f"{u:~}" # default: short default - ureg.default_format = "C" # registry default to compact - str(u) # default: compact - f"{u}" # default: compact - u.default_format = "P" - f"{u}" # default: pretty - u.default_format = "" # TODO: switch to None - ureg.default_format = "" # TODO: switch to None - f"{u}" # default: default - -Unit Format Types +Pint Format Types ----------------- -``pint`` comes with a variety of unit formats: +``pint`` comes with a variety of unit formats. 
These impact the complete representation: ======= =============== ====================================================================== -Spec Name Example +Spec Name Examples ======= =============== ====================================================================== -``D`` default ``kilogram * meter / second ** 2`` -``P`` pretty ``kilogram·meter/second²`` -``H`` HTML ``kilogram meter/second2`` -``L`` latex ``\frac{\mathrm{kilogram} \cdot \mathrm{meter}}{\mathrm{second}^{2}}`` -``Lx`` latex siunitx ``\si[]{\kilo\gram\meter\per\second\squared}`` -``C`` compact ``kilogram*meter/second**2`` +``D`` default ``3.4e+09 kilogram * meter / second ** 2`` +``P`` pretty ``3.4×10⁹ kilogram·meter/second²`` +``H`` HTML ``3.4×109 kilogram meter/second2`` +``L`` latex ``3.4\\times 10^{9}\\ \\frac{\\mathrm{kilogram} \\cdot \\mathrm{meter}}{\\mathrm{second}^{2}}`` +``Lx`` latex siunitx ``\\SI[]{3.4e+09}{\\kilo\\gram\\meter\\per\\second\\squared}`` +``C`` compact ``3.4e+09 kilogram*meter/second**2`` ======= =============== ====================================================================== -Custom Unit Format Types ------------------------- -Using :py:func:`pint.register_unit_format`, it is possible to add custom -formats: - -.. ipython:: +These examples are using `g`` as numeric modifier. :py:class:`Measurement` are also affected +by these modifiers. - In [1]: u = ureg.Unit("m ** 3 / (s ** 2 * kg)") - In [2]: @pint.register_unit_format("simple") - ...: def format_unit_simple(unit, registry, **options): - ...: return " * ".join(f"{u} ** {p}" for u, p in unit.items()) +Quantity modifiers +------------------ - In [3]: f"{u:~simple}" +======== =================================================== ================================ +Modifier Meaning Example +======== =================================================== ================================ +``#`` Call :py:meth:`Quantity.to_compact` first ``1.0 m·mg/s²`` (``f"{q:#~P}"``) +======== =================================================== ================================ -where ``unit`` is a :py:class:`dict` subclass containing the unit names and -their exponents. +Unit modifiers +-------------- -Quantity Format Specifications ------------------------------- -The magnitude format is forwarded to the magnitude (for a unit-spec of ``H`` the -magnitude's ``_repr_html_`` is called). +======== =================================================== ================================ +Modifier Meaning Example +======== =================================================== ================================ +``~`` Use the unit's symbol instead of its canonical name ``kg·m/s²`` (``f"{u:~P}"``) +======== =================================================== ================================ -Let's look at some more examples: +Magnitude modifiers +------------------- -.. ipython:: python +Pint uses the :ref:`format specifications `. However, it is important to remember +that only the type honors the locale. Using any other numeric format (e.g. `g`, `e`, `f`) +will result in a non-localized representation of the number. - q = 1e-6 * u - # modifiers - f"{q:~P}" # short pretty - f"{q:~#P}" # compact short pretty - f"{q:P#~}" # also compact short pretty +Custom formats +-------------- +Using :py:func:`pint.register_unit_format`, it is possible to add custom +formats: - # additional magnitude format - f"{q:.2f~#P}" # short compact pretty with 2 float digits - f"{q:#~}" # short compact default +.. 
doctest:: -Quantity Format Types ---------------------- -There are no special quantity formats yet. + >>> @pint.register_unit_format("Z") + ... def format_unit_simple(unit, registry, **options): + ... return " * ".join(f"{u} ** {p}" for u, p in unit.items()) + >>> f"{q:Z}" + '2.3e-06 kilogram ** -1 * meter ** 3 * second ** -2' -Modifiers ---------- -======== =================================================== ================================ -Modifier Meaning Example -======== =================================================== ================================ -``~`` Use the unit's symbol instead of its canonical name ``kg·m/s²`` (``f"{u:~P}"``) -``#`` Call :py:meth:`Quantity.to_compact` first ``1.0 m·mg/s²`` (``f"{q:#~P}"``) -======== =================================================== ================================ +where ``unit`` is a :py:class:`dict` subclass containing the unit names and +their exponents, ``registry`` is the current instance of :py:class:``UnitRegistry`` and +``options`` is not yet implemented. + +You can choose to replace the complete formatter. Briefly, the formatter if an object with the +following methods: `format_magnitude`, `format_unit`, `format_quantity`, `format_uncertainty`, +`format_measurement`. The easiest way to create your own formatter is to subclass one that you +like and replace the methods you need. For example, to replace the unit formatting: + +.. doctest:: + + >>> from pint.delegates.formatter.plain import DefaultFormatter + >>> class MyFormatter(DefaultFormatter): + ... + ... default_format = "" + ... + ... def format_unit(self, unit, uspec, sort_func, **babel_kwds) -> str: + ... return "ups!" + ... + >>> ureg.formatter = MyFormatter() + >>> ureg.formatter._registry = ureg + >>> str(q) + '2.3e-06 ups!' + + +By replacing other methods, you can customize the output as much as you need. + +SciForm_ is a library that can be used to format the magnitude of the number. This can be used +in a customer formatter as follows: + +.. doctest:: + + >>> from sciform import Formatter + >>> sciform_formatter = Formatter(round_mode="sig_fig", ndigits=4, exp_mode="engineering") + + >>> class MyFormatter(DefaultFormatter): + ... + ... default_format = "" + ... + ... def format_magnitude(self, value, spec, **options) -> str: + ... return sciform_formatter(value) + ... + >>> ureg.formatter = MyFormatter() + >>> ureg.formatter._registry = ureg + >>> str(q * 10) + '23.00e-06 meter ** 3 / second ** 2 / kilogram' + + +.. _SciForm: https://sciform.readthedocs.io/en/stable/ \ No newline at end of file diff --git a/docs/user/log_units.rst b/docs/user/log_units.rst index 03e007914..117881363 100644 --- a/docs/user/log_units.rst +++ b/docs/user/log_units.rst @@ -17,6 +17,21 @@ as well as some conversions between them and their base units where applicable. These units behave much like those described in :ref:`nonmult`, so many of the recommendations there apply here as well. +Mathematical operations with logarithmic units are often ambiguous. +For example, the sum of two powers with decibel units is a logarithmic quantity of the power squared, thus without obvious meaning and not decibel units. +Therefore the main Pint distribution raises an ``OffsetUnitCalculusError`` as a result of the sum of two quantities with decibel units, +as it does for all other ambiguous mathematical operations. + +Valispace's fork of Pint makes some options. +We distiguish between *absolute logarithmic units* and *relative logarithmic units*. 
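+
+As a reference point for what follows, this is the behaviour of the main distribution described
+above (a minimal sketch with a stock registry; the exact error message may differ between Pint
+versions):
+
+.. doctest::
+
+    >>> import pint
+    >>> ureg = pint.UnitRegistry()
+    >>> Q_ = ureg.Quantity
+    >>> try:
+    ...     Q_(10, 'dBm') + Q_(10, 'dBm')
+    ... except pint.OffsetUnitCalculusError as exc:
+    ...     print(type(exc).__name__)
+    OffsetUnitCalculusError
+
+The ``logarithmic_math`` flag described below changes this behaviour.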
+ +Absolute logarithmic units are the logarithmic units with a constant reference, e.g. `dBW` corresponds to a power change in relation to 1 `Watt`. +We consider general logarithmic units like `dB` as general absolute logarithmic units. + +Relative logarithmic units are the logarithmic units of gains and losses, thus a power change in relation to the previous power level. +In coherence with the default behaviour of subtraction between absolute logarithmic units, +relative logarithmic units are represented by `delta_` before the name of the correspondent absolute logarithmic unit, e.g. `delta_dBu` corresponds to a power change in relation to a power level in `dBu`. + Setting up the ``UnitRegistry()`` --------------------------------- @@ -35,6 +50,16 @@ If you can't pass that flag you will need to define all logarithmic units be restricted in the kinds of operations you can do without explicitly calling `.to_base_units()` first. +The sum of decibel absolute units will raise an error by default. +However, you can set up your ``UnitRegistry()`` with the ``logarithmic_math`` flag, like: + +.. doctest:: + + >>> ureg = UnitRegistry(autoconvert_offset_to_baseunit=True, logarithmic_math=True) + >>> Q_ = ureg.Quantity + +If you switch on this flag, it will convert additions of quantities with logarithmic units into logarithmic additions. + Defining log quantities ----------------------- @@ -49,6 +74,8 @@ you can define simple logarithmic quantities like most others: >>> ureg('20 dB') + >>> ureg('20 delta_dB') + Converting to and from base units @@ -111,16 +138,16 @@ will not work: .. doctest:: >>> -161.0 * ureg('dBm/Hz') == (-161.0 * ureg.dBm / ureg.Hz) - False + np.False_ But this will: .. doctest:: >>> ureg('-161.0 dBm/Hz') == (-161.0 * ureg.dBm / ureg.Hz) - True + np.True_ >>> Q_(-161.0, 'dBm') / ureg.Hz == (-161.0 * ureg.dBm / ureg.Hz) - True + np.True_ To begin using this feature while avoiding problems, define logarithmic units as single-unit quantities and convert them to their base units as quickly as diff --git a/docs/user/nonmult.rst b/docs/user/nonmult.rst index a649d2ad1..905dd0835 100644 --- a/docs/user/nonmult.rst +++ b/docs/user/nonmult.rst @@ -18,7 +18,7 @@ For example, to convert from celsius to fahrenheit: >>> from pint import UnitRegistry >>> ureg = UnitRegistry() - >>> ureg.default_format = '.3f' + >>> ureg.formatter.default_format = '.3f' >>> Q_ = ureg.Quantity >>> home = Q_(25.4, ureg.degC) >>> print(home.to('degF')) diff --git a/docs/user/numpy.ipynb b/docs/user/numpy.ipynb index 25866261b..0b1b22197 100644 --- a/docs/user/numpy.ipynb +++ b/docs/user/numpy.ipynb @@ -33,15 +33,19 @@ "outputs": [], "source": [ "# Import NumPy\n", + "from __future__ import annotations\n", + "\n", "import numpy as np\n", "\n", "# Import Pint\n", "import pint\n", + "\n", "ureg = pint.UnitRegistry()\n", "Q_ = ureg.Quantity\n", "\n", "# Silence NEP 18 warning\n", "import warnings\n", + "\n", "with warnings.catch_warnings():\n", " warnings.simplefilter(\"ignore\")\n", " Q_([])" @@ -68,7 +72,7 @@ }, "outputs": [], "source": [ - "legs1 = Q_(np.asarray([3., 4.]), 'meter')\n", + "legs1 = Q_(np.asarray([3.0, 4.0]), \"meter\")\n", "print(legs1)" ] }, @@ -82,7 +86,7 @@ }, "outputs": [], "source": [ - "legs1 = [3., 4.] 
* ureg.meter\n", + "legs1 = [3.0, 4.0] * ureg.meter\n", "print(legs1)" ] }, @@ -107,7 +111,7 @@ }, "outputs": [], "source": [ - "print(legs1.to('kilometer'))" + "print(legs1.to(\"kilometer\"))" ] }, { @@ -134,7 +138,7 @@ "outputs": [], "source": [ "try:\n", - " legs1.to('joule')\n", + " legs1.to(\"joule\")\n", "except pint.DimensionalityError as exc:\n", " print(exc)" ] @@ -160,7 +164,7 @@ }, "outputs": [], "source": [ - "legs2 = [400., 300.] * ureg.centimeter\n", + "legs2 = [400.0, 300.0] * ureg.centimeter\n", "print(legs2)" ] }, @@ -214,7 +218,7 @@ }, "outputs": [], "source": [ - "angles = np.arccos(legs2/hyps)\n", + "angles = np.arccos(legs2 / hyps)\n", "print(angles)" ] }, @@ -239,7 +243,7 @@ }, "outputs": [], "source": [ - "print(angles.to('degree'))" + "print(angles.to(\"degree\"))" ] }, { @@ -302,6 +306,7 @@ "outputs": [], "source": [ "from pint.facets.numpy.numpy_func import HANDLED_FUNCTIONS\n", + "\n", "print(sorted(list(HANDLED_FUNCTIONS)))" ] }, @@ -374,27 +379,27 @@ "source": [ "from graphviz import Digraph\n", "\n", - "g = Digraph(graph_attr={'size': '8,5'}, node_attr={'fontname': 'courier'})\n", - "g.edge('Dask array', 'NumPy ndarray')\n", - "g.edge('Dask array', 'CuPy ndarray')\n", - "g.edge('Dask array', 'Sparse COO')\n", - "g.edge('Dask array', 'NumPy masked array', style='dashed')\n", - "g.edge('CuPy ndarray', 'NumPy ndarray')\n", - "g.edge('Sparse COO', 'NumPy ndarray')\n", - "g.edge('NumPy masked array', 'NumPy ndarray')\n", - "g.edge('Jax array', 'NumPy ndarray')\n", - "g.edge('Pint Quantity', 'Dask array', style='dashed')\n", - "g.edge('Pint Quantity', 'NumPy ndarray')\n", - "g.edge('Pint Quantity', 'CuPy ndarray', style='dashed')\n", - "g.edge('Pint Quantity', 'Sparse COO')\n", - "g.edge('Pint Quantity', 'NumPy masked array', style='dashed')\n", - "g.edge('xarray Dataset/DataArray/Variable', 'Dask array')\n", - "g.edge('xarray Dataset/DataArray/Variable', 'CuPy ndarray', style='dashed')\n", - "g.edge('xarray Dataset/DataArray/Variable', 'Sparse COO')\n", - "g.edge('xarray Dataset/DataArray/Variable', 'NumPy ndarray')\n", - "g.edge('xarray Dataset/DataArray/Variable', 'NumPy masked array', style='dashed')\n", - "g.edge('xarray Dataset/DataArray/Variable', 'Pint Quantity')\n", - "g.edge('xarray Dataset/DataArray/Variable', 'Jax array', style='dashed')\n", + "g = Digraph(graph_attr={\"size\": \"8,5\"}, node_attr={\"fontname\": \"courier\"})\n", + "g.edge(\"Dask array\", \"NumPy ndarray\")\n", + "g.edge(\"Dask array\", \"CuPy ndarray\")\n", + "g.edge(\"Dask array\", \"Sparse COO\")\n", + "g.edge(\"Dask array\", \"NumPy masked array\", style=\"dashed\")\n", + "g.edge(\"CuPy ndarray\", \"NumPy ndarray\")\n", + "g.edge(\"Sparse COO\", \"NumPy ndarray\")\n", + "g.edge(\"NumPy masked array\", \"NumPy ndarray\")\n", + "g.edge(\"Jax array\", \"NumPy ndarray\")\n", + "g.edge(\"Pint Quantity\", \"Dask array\", style=\"dashed\")\n", + "g.edge(\"Pint Quantity\", \"NumPy ndarray\")\n", + "g.edge(\"Pint Quantity\", \"CuPy ndarray\", style=\"dashed\")\n", + "g.edge(\"Pint Quantity\", \"Sparse COO\")\n", + "g.edge(\"Pint Quantity\", \"NumPy masked array\", style=\"dashed\")\n", + "g.edge(\"xarray Dataset/DataArray/Variable\", \"Dask array\")\n", + "g.edge(\"xarray Dataset/DataArray/Variable\", \"CuPy ndarray\", style=\"dashed\")\n", + "g.edge(\"xarray Dataset/DataArray/Variable\", \"Sparse COO\")\n", + "g.edge(\"xarray Dataset/DataArray/Variable\", \"NumPy ndarray\")\n", + "g.edge(\"xarray Dataset/DataArray/Variable\", \"NumPy masked array\", style=\"dashed\")\n", + "g.edge(\"xarray 
Dataset/DataArray/Variable\", \"Pint Quantity\")\n", + "g.edge(\"xarray Dataset/DataArray/Variable\", \"Jax array\", style=\"dashed\")\n", "g" ] }, @@ -424,10 +429,10 @@ "import xarray as xr\n", "\n", "# Load tutorial data\n", - "air = xr.tutorial.load_dataset('air_temperature')['air'][0]\n", + "air = xr.tutorial.load_dataset(\"air_temperature\")[\"air\"][0]\n", "\n", "# Convert to Quantity\n", - "air.data = Q_(air.data, air.attrs.pop('units', ''))\n", + "air.data = Q_(air.data, air.attrs.pop(\"units\", \"\"))\n", "\n", "print(air)\n", "print()\n", @@ -494,7 +499,7 @@ "m = np.ma.masked_array([2, 3, 5, 7], mask=[False, True, False, True])\n", "\n", "# Must create using Quantity class\n", - "print(repr(ureg.Quantity(m, 'm')))\n", + "print(repr(ureg.Quantity(m, \"m\")))\n", "print()\n", "\n", "# DO NOT create using multiplication until\n", @@ -568,14 +573,14 @@ "x[x < 0.95] = 0\n", "\n", "data = xr.DataArray(\n", - " Q_(x.map_blocks(COO), 'm'),\n", - " dims=('z', 'y', 'x'),\n", + " Q_(x.map_blocks(COO), \"m\"),\n", + " dims=(\"z\", \"y\", \"x\"),\n", " coords={\n", - " 'z': np.arange(100),\n", - " 'y': np.arange(100) - 50,\n", - " 'x': np.arange(100) * 1.5 - 20\n", + " \"z\": np.arange(100),\n", + " \"y\": np.arange(100) - 50,\n", + " \"x\": np.arange(100) * 1.5 - 20,\n", " },\n", - " name='test'\n", + " name=\"test\",\n", ")\n", "\n", "print(data)\n", @@ -627,11 +632,6 @@ } ], "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, "language_info": { "codemirror_mode": { "name": "ipython", @@ -641,8 +641,7 @@ "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.2" + "pygments_lexer": "ipython3" } }, "nbformat": 4, diff --git a/docs/user/plotting.rst b/docs/user/plotting.rst index a008d4559..3c3fc39c1 100644 --- a/docs/user/plotting.rst +++ b/docs/user/plotting.rst @@ -70,6 +70,31 @@ This also allows controlling the actual plotting units for the x and y axes: ax.axhline(26400 * ureg.feet, color='tab:red') ax.axvline(120 * ureg.minutes, color='tab:green') +Users have the possibility to change the format of the units on the plot: + +.. plot:: + :include-source: true + + import matplotlib.pyplot as plt + import numpy as np + import pint + + ureg = pint.UnitRegistry() + ureg.setup_matplotlib(True) + + ureg.mpl_formatter = "{:~P}" + + y = np.linspace(0, 30) * ureg.miles + x = np.linspace(0, 5) * ureg.hours + + fig, ax = plt.subplots() + ax.yaxis.set_units(ureg.inches) + ax.xaxis.set_units(ureg.seconds) + + ax.plot(x, y, 'tab:blue') + ax.axhline(26400 * ureg.feet, color='tab:red') + ax.axvline(120 * ureg.minutes, color='tab:green') + For more information, visit the Matplotlib_ home page. .. _Matplotlib: https://matplotlib.org diff --git a/downstream_status.md b/downstream_status.md new file mode 100644 index 000000000..32dc4e8e4 --- /dev/null +++ b/downstream_status.md @@ -0,0 +1,24 @@ +In Pint, we work hard to avoid breaking projects that depend on us. +If you are the maintainer of one of such projects, you can +help us get ahead of problems in simple way. + +Pint will publish a release candidate (rc) at least a week before each new +version. By default, `pip` does not install these versions unless a +[pre](https://pip.pypa.io/en/stable/cli/pip_install/#cmdoption-pre) option +is used so this will not affect your users. + +In addition to your standard CI routines, create a CI that install Pint's +release candidates. 
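For example, a job step that runs `pip install --pre --upgrade pint` after your regular dependency installation will pick up the latest release candidate (a sketch; adapt the step to your CI system).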
You can also (or alternatively) create CI that install +Pint's master branch in GitHub. + +Take a look at the [Pint Downstream Demo](https://github.com/hgrecco/pint-downstream-demo) +if you need a template. + +Then, add your project badges to this file so it can be used as a Dashboard (always putting the stable first) + +| Project | stable | pre-release | nightly | +| ----------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [Pint Downstream Demo](https://github.com/hgrecco/pint-downstream-demo) | [![CI](https://github.com/hgrecco/pint-downstream-demo/actions/workflows/ci.yml/badge.svg)](https://github.com/hgrecco/pint-downstream-demo/actions/workflows/ci.yml) | [![CI-pint-pre](https://github.com/hgrecco/pint-downstream-demo/actions/workflows/ci-pint-pre.yml/badge.svg)](https://github.com/hgrecco/pint-downstream-demo/actions/workflows/ci-pint-pre.yml) | [![CI-pint-master](https://github.com/hgrecco/pint-downstream-demo/actions/workflows/ci-pint-master.yml/badge.svg)](https://github.com/hgrecco/pint-downstream-demo/actions/workflows/ci-pint-master.yml) | +| [Pint Pandas](https://github.com/hgrecco/pint-pandas) | [![CI](https://github.com/hgrecco/pint-pandas/actions/workflows/ci.yml/badge.svg)](https://github.com/hgrecco/pint-pandas/actions/workflows/ci.yml) | [![CI-pint-pre](https://github.com/hgrecco/pint-pandas/actions/workflows/ci-pint-pre.yml/badge.svg)](https://github.com/hgrecco/pint-pandas/actions/workflows/ci-pint-pre.yml) | [![CI-pint-master](https://github.com/hgrecco/pint-pandas/actions/workflows/ci-pint-master.yml/badge.svg)](https://github.com/hgrecco/pint-pandas/actions/workflows/ci-pint-master.yml) | +| [MetPy](https://github.com/Unidata/MetPy) | [![CI](https://github.com/Unidata/MetPy/actions/workflows/tests-pypi.yml/badge.svg)](https://github.com/Unidata/MetPy/actions/workflows/tests-pypi.yml) | | [![CI-pint-master](https://github.com/Unidata/MetPy/actions/workflows/nightly-builds.yml/badge.svg)](https://github.com/Unidata/MetPy/actions/workflows/nightly-builds.yml) | +| [pint-xarray](https://github.com/xarray-contrib/pint-xarray) | [![CI](https://github.com/xarray-contrib/pint-xarray/actions/workflows/ci.yml/badge.svg)](https://github.com/xarray-contrib/pint-xarray/actions/workflows/ci.yml) | | [![CI-pint-master](https://github.com/xarray-contrib/pint-xarray/actions/workflows/nightly.yml/badge.svg)](https://github.com/xarray-contrib/pint-xarray/actions/workflows/nightly.yml) | diff --git a/pint/__init__.py b/pint/__init__.py index ee80048e1..abfef2703 100644 --- a/pint/__init__.py +++ b/pint/__init__.py @@ -15,6 +15,7 @@ from importlib.metadata import version +from .delegates.formatter._format_helpers import formatter from .errors import ( # noqa: F401 DefinitionSyntaxError, DimensionalityError, @@ -25,7 +26,7 @@ UndefinedUnitError, UnitStrippedWarning, ) -from .formatting import formatter, register_unit_format +from .formatting import register_unit_format from .registry import ApplicationRegistry, 
LazyRegistry, UnitRegistry from .util import logger, pi_theorem # noqa: F401 @@ -35,6 +36,7 @@ Unit = UnitRegistry.Unit Measurement = UnitRegistry.Measurement Context = UnitRegistry.Context +Group = UnitRegistry.Group try: # pragma: no cover __version__ = version("pint") diff --git a/pint/_typing.py b/pint/_typing.py index cfb803be0..241459ef1 100644 --- a/pint/_typing.py +++ b/pint/_typing.py @@ -1,18 +1,52 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, Callable, Tuple, TypeVar, Union +from collections.abc import Callable +from decimal import Decimal +from fractions import Fraction +from typing import TYPE_CHECKING, Any, Protocol, TypeVar, Union +from .compat import Never, TypeAlias + +if TYPE_CHECKING: + from .facets.plain import PlainQuantity as Quantity + from .facets.plain import PlainUnit as Unit + from .util import UnitsContainer + + +HAS_NUMPY = False if TYPE_CHECKING: - from .facets.plain import Quantity, Unit, UnitsContainer + from .compat import HAS_NUMPY + +if HAS_NUMPY: + from .compat import np + + Scalar: TypeAlias = Union[float, int, Decimal, Fraction, np.number[Any]] + Array = np.ndarray[Any, Any] +else: + Scalar: TypeAlias = Union[float, int, Decimal, Fraction] + Array: TypeAlias = Never -UnitLike = Union[str, "UnitsContainer", "Unit"] +# TODO: Change when Python 3.10 becomes minimal version. +Magnitude = Union[Scalar, Array] + +UnitLike = Union[str, dict[str, Scalar], "UnitsContainer", "Unit"] QuantityOrUnitLike = Union["Quantity", UnitLike] -Shape = Tuple[int, ...] +Shape = tuple[int, ...] -_MagnitudeType = TypeVar("_MagnitudeType") S = TypeVar("S") FuncType = Callable[..., Any] F = TypeVar("F", bound=FuncType) + + +# TODO: Improve or delete types +QuantityArgument = Any + +T = TypeVar("T") + + +class Handler(Protocol): + def __getitem__(self, item: type[T]) -> Callable[[T], None]: + ... diff --git a/pint/_vendor/appdirs.py b/pint/_vendor/appdirs.py deleted file mode 100644 index c32636a1a..000000000 --- a/pint/_vendor/appdirs.py +++ /dev/null @@ -1,608 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Copyright (c) 2005-2010 ActiveState Software Inc. -# Copyright (c) 2013 Eddy Petrișor - -"""Utilities for determining application-specific dirs. - -See for details and usage. -""" -# Dev Notes: -# - MSDN on where to store app data files: -# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120 -# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html -# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html - -__version__ = "1.4.4" -__version_info__ = tuple(int(segment) for segment in __version__.split(".")) - - -import sys -import os - -PY3 = sys.version_info[0] == 3 - -if PY3: - unicode = str - -if sys.platform.startswith('java'): - import platform - os_name = platform.java_ver()[3][0] - if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc. - system = 'win32' - elif os_name.startswith('Mac'): # "Mac OS X", etc. - system = 'darwin' - else: # "Linux", "SunOS", "FreeBSD", etc. - # Setting this to "linux2" is not ideal, but only Windows or Mac - # are actually checked for and the rest of the module expects - # *sys.platform* style strings. - system = 'linux2' -else: - system = sys.platform - - - -def user_data_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific data dir for this application. - - "appname" is the name of application. 
- If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - - for a discussion of issues. - - Typical user data directories are: - Mac OS X: ~/Library/Application Support/ - Unix: ~/.local/share/ # or in $XDG_DATA_HOME, if defined - Win XP (not roaming): C:\Documents and Settings\\Application Data\\ - Win XP (roaming): C:\Documents and Settings\\Local Settings\Application Data\\ - Win 7 (not roaming): C:\Users\\AppData\Local\\ - Win 7 (roaming): C:\Users\\AppData\Roaming\\ - - For Unix, we follow the XDG spec and support $XDG_DATA_HOME. - That means, by default "~/.local/share/". - """ - if system == "win32": - if appauthor is None: - appauthor = appname - const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" - path = os.path.normpath(_get_win_folder(const)) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - elif system == 'darwin': - path = os.path.expanduser('~/Library/Application Support/') - if appname: - path = os.path.join(path, appname) - else: - path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def site_data_dir(appname=None, appauthor=None, version=None, multipath=False): - r"""Return full path to the user-shared data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "multipath" is an optional parameter only applicable to *nix - which indicates that the entire list of data dirs should be - returned. By default, the first item from XDG_DATA_DIRS is - returned, or '/usr/local/share/', - if XDG_DATA_DIRS is not set - - Typical site data directories are: - Mac OS X: /Library/Application Support/ - Unix: /usr/local/share/ or /usr/share/ - Win XP: C:\Documents and Settings\All Users\Application Data\\ - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) - Win 7: C:\ProgramData\\ # Hidden, but writeable on Win 7. - - For Unix, this is using the $XDG_DATA_DIRS[0] default. - - WARNING: Do not use this on Windows. See the Vista-Fail note above for why. 
- """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - elif system == 'darwin': - path = os.path.expanduser('/Library/Application Support') - if appname: - path = os.path.join(path, appname) - else: - # XDG default for $XDG_DATA_DIRS - # only first, if multipath is False - path = os.getenv('XDG_DATA_DIRS', - os.pathsep.join(['/usr/local/share', '/usr/share'])) - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.sep.join([x, appname]) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - if appname and version: - path = os.path.join(path, version) - return path - - -def user_config_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific config dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - - for a discussion of issues. - - Typical user config directories are: - Mac OS X: same as user_data_dir - Unix: ~/.config/ # or in $XDG_CONFIG_HOME, if defined - Win *: same as user_data_dir - - For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. - That means, by default "~/.config/". - """ - if system in ["win32", "darwin"]: - path = user_data_dir(appname, appauthor, None, roaming) - else: - path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def site_config_dir(appname=None, appauthor=None, version=None, multipath=False): - r"""Return full path to the user-shared data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "multipath" is an optional parameter only applicable to *nix - which indicates that the entire list of config dirs should be - returned. 
By default, the first item from XDG_CONFIG_DIRS is - returned, or '/etc/xdg/', if XDG_CONFIG_DIRS is not set - - Typical site config directories are: - Mac OS X: same as site_data_dir - Unix: /etc/xdg/ or $XDG_CONFIG_DIRS[i]/ for each value in - $XDG_CONFIG_DIRS - Win *: same as site_data_dir - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) - - For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False - - WARNING: Do not use this on Windows. See the Vista-Fail note above for why. - """ - if system in ["win32", "darwin"]: - path = site_data_dir(appname, appauthor) - if appname and version: - path = os.path.join(path, version) - else: - # XDG default for $XDG_CONFIG_DIRS - # only first, if multipath is False - path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.sep.join([x, appname]) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - -def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific cache dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Cache" to the plain app data dir for Windows. See - discussion below. - - Typical user cache directories are: - Mac OS X: ~/Library/Caches/ - Unix: ~/.cache/ (XDG default) - Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Cache - Vista: C:\Users\\AppData\Local\\\Cache - - On Windows the only suggestion in the MSDN docs is that local settings go in - the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming - app data dir (the default returned by `user_data_dir` above). Apps typically - put cache data somewhere *under* the given dir here. Some examples: - ...\Mozilla\Firefox\Profiles\\Cache - ...\Acme\SuperApp\Cache\1.0 - OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. - This can be disabled with the `opinion=False` option. - """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - if opinion: - path = os.path.join(path, "Cache") - elif system == 'darwin': - path = os.path.expanduser('~/Library/Caches') - if appname: - path = os.path.join(path, appname) - else: - path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def user_state_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific state dir for this application. 
- - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - - for a discussion of issues. - - Typical user state directories are: - Mac OS X: same as user_data_dir - Unix: ~/.local/state/ # or in $XDG_STATE_HOME, if defined - Win *: same as user_data_dir - - For Unix, we follow this Debian proposal - to extend the XDG spec and support $XDG_STATE_HOME. - - That means, by default "~/.local/state/". - """ - if system in ["win32", "darwin"]: - path = user_data_dir(appname, appauthor, None, roaming) - else: - path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def user_log_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific log dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Logs" to the plain app data dir for Windows, and "log" to the - plain cache dir for Unix. See discussion below. - - Typical user log directories are: - Mac OS X: ~/Library/Logs/ - Unix: ~/.cache//log # or under $XDG_CACHE_HOME if defined - Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Logs - Vista: C:\Users\\AppData\Local\\\Logs - - On Windows the only suggestion in the MSDN docs is that local settings - go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in - examples of what some windows apps use for a logs dir.) - - OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` - value for Windows and appends "log" to the user cache dir for Unix. - This can be disabled with the `opinion=False` option. 
- """ - if system == "darwin": - path = os.path.join( - os.path.expanduser('~/Library/Logs'), - appname) - elif system == "win32": - path = user_data_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "Logs") - else: - path = user_cache_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "log") - if appname and version: - path = os.path.join(path, version) - return path - - -class AppDirs(object): - """Convenience wrapper for getting application dirs.""" - def __init__(self, appname=None, appauthor=None, version=None, - roaming=False, multipath=False): - self.appname = appname - self.appauthor = appauthor - self.version = version - self.roaming = roaming - self.multipath = multipath - - @property - def user_data_dir(self): - return user_data_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_data_dir(self): - return site_data_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - @property - def user_config_dir(self): - return user_config_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_config_dir(self): - return site_config_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - @property - def user_cache_dir(self): - return user_cache_dir(self.appname, self.appauthor, - version=self.version) - - @property - def user_state_dir(self): - return user_state_dir(self.appname, self.appauthor, - version=self.version) - - @property - def user_log_dir(self): - return user_log_dir(self.appname, self.appauthor, - version=self.version) - - -#---- internal support stuff - -def _get_win_folder_from_registry(csidl_name): - """This is a fallback technique at best. I'm not sure if using the - registry for this guarantees us the correct answer for all CSIDL_* - names. - """ - if PY3: - import winreg as _winreg - else: - import _winreg - - shell_folder_name = { - "CSIDL_APPDATA": "AppData", - "CSIDL_COMMON_APPDATA": "Common AppData", - "CSIDL_LOCAL_APPDATA": "Local AppData", - }[csidl_name] - - key = _winreg.OpenKey( - _winreg.HKEY_CURRENT_USER, - r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" - ) - dir, type = _winreg.QueryValueEx(key, shell_folder_name) - return dir - - -def _get_win_folder_with_pywin32(csidl_name): - from win32com.shell import shellcon, shell - dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0) - # Try to make this a unicode path because SHGetFolderPath does - # not return unicode strings when there is unicode data in the - # path. - try: - dir = unicode(dir) - - # Downgrade to short path name if have highbit chars. See - # . - has_high_char = False - for c in dir: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - try: - import win32api - dir = win32api.GetShortPathName(dir) - except ImportError: - pass - except UnicodeError: - pass - return dir - - -def _get_win_folder_with_ctypes(csidl_name): - import ctypes - - csidl_const = { - "CSIDL_APPDATA": 26, - "CSIDL_COMMON_APPDATA": 35, - "CSIDL_LOCAL_APPDATA": 28, - }[csidl_name] - - buf = ctypes.create_unicode_buffer(1024) - ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) - - # Downgrade to short path name if have highbit chars. See - # . 
- has_high_char = False - for c in buf: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf2 = ctypes.create_unicode_buffer(1024) - if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): - buf = buf2 - - return buf.value - -def _get_win_folder_with_jna(csidl_name): - import array - from com.sun import jna - from com.sun.jna.platform import win32 - - buf_size = win32.WinDef.MAX_PATH * 2 - buf = array.zeros('c', buf_size) - shell = win32.Shell32.INSTANCE - shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf) - dir = jna.Native.toString(buf.tostring()).rstrip("\0") - - # Downgrade to short path name if have highbit chars. See - # . - has_high_char = False - for c in dir: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf = array.zeros('c', buf_size) - kernel = win32.Kernel32.INSTANCE - if kernel.GetShortPathName(dir, buf, buf_size): - dir = jna.Native.toString(buf.tostring()).rstrip("\0") - - return dir - -if system == "win32": - try: - import win32com.shell - _get_win_folder = _get_win_folder_with_pywin32 - except ImportError: - try: - from ctypes import windll - _get_win_folder = _get_win_folder_with_ctypes - except ImportError: - try: - import com.sun.jna - _get_win_folder = _get_win_folder_with_jna - except ImportError: - _get_win_folder = _get_win_folder_from_registry - - -#---- self test code - -if __name__ == "__main__": - appname = "MyApp" - appauthor = "MyCompany" - - props = ("user_data_dir", - "user_config_dir", - "user_cache_dir", - "user_state_dir", - "user_log_dir", - "site_data_dir", - "site_config_dir") - - print("-- app dirs %s --" % __version__) - - print("-- app dirs (with optional 'version')") - dirs = AppDirs(appname, appauthor, version="1.0") - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (without optional 'version')") - dirs = AppDirs(appname, appauthor) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (without optional 'appauthor')") - dirs = AppDirs(appname) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (with disabled 'appauthor')") - dirs = AppDirs(appname, appauthor=False) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) diff --git a/pint/_vendor/flexcache.py b/pint/_vendor/flexcache.py deleted file mode 100644 index 7b3969846..000000000 --- a/pint/_vendor/flexcache.py +++ /dev/null @@ -1,427 +0,0 @@ -""" - flexcache.flexcache - ~~~~~~~~~~~~~~~~~~~ - - Classes for persistent caching and invalidating cached objects, - which are built from a source object and a (potentially expensive) - conversion function. - - Header - ------ - Contains summary information about the source object that will - be saved together with the cached file. - - It's capabilities are divided in three groups: - - The Header itself which contains the information that will - be saved alongside the cached file - - The Naming logic which indicates how the cached filename is - built. - - The Invalidation logic which indicates whether a cached file - is valid (i.e. truthful to the actual source file). - - DiskCache - --------- - Saves and loads to the cache a transformed versions of a source object. - - :copyright: 2022 by flexcache Authors, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. 
-""" - -from __future__ import annotations - -import abc -import hashlib -import json -import pathlib -import pickle -import platform -import typing -from dataclasses import asdict as dc_asdict -from dataclasses import dataclass -from dataclasses import fields as dc_fields -from typing import Any, Iterable - -######### -# Header -######### - - -@dataclass(frozen=True) -class BaseHeader(abc.ABC): - """Header with no information except the converter_id - - All header files must inherit from this. - """ - - # The actual source of the data (or a reference to it) - # that is going to be converted. - source: Any - - # An identification of the function that is used to - # convert the source into the result object. - converter_id: str - - _source_type = object - - def __post_init__(self): - # TODO: In more modern python versions it would be - # good to check for things like tuple[str]. - if not isinstance(self.source, self._source_type): - raise TypeError( - f"Source must be {self._source_type}, " f"not {type(self.source)}" - ) - - def for_cache_name(self) -> typing.Generator[bytes]: - """The basename for the cache file is a hash hexdigest - built by feeding this collection of values. - - A class can provide it's own set of values by rewriting - `_for_cache_name`. - """ - for el in self._for_cache_name(): - if isinstance(el, str): - yield el.encode("utf-8") - else: - yield el - - def _for_cache_name(self) -> typing.Generator[bytes | str]: - """The basename for the cache file is a hash hexdigest - built by feeding this collection of values. - - Change the behavior by writing your own. - """ - yield self.converter_id - - @abc.abstractmethod - def is_valid(self, cache_path: pathlib.Path) -> bool: - """Return True if the cache_path is an cached version - of the source_object represented by this header. - """ - - -@dataclass(frozen=True) -class BasicPythonHeader(BaseHeader): - """Header with basic Python information.""" - - system: str = platform.system() - python_implementation: str = platform.python_implementation() - python_version: str = platform.python_version() - - -##################### -# Invalidation logic -##################### - - -class InvalidateByExist: - """The cached file is valid if exists and is newer than the source file.""" - - def is_valid(self, cache_path: pathlib.Path) -> bool: - return cache_path.exists() - - -class InvalidateByPathMTime(abc.ABC): - """The cached file is valid if exists and is newer than the source file.""" - - @property - @abc.abstractmethod - def source_path(self) -> pathlib.Path: - ... - - def is_valid(self, cache_path: pathlib.Path): - return ( - cache_path.exists() - and cache_path.stat().st_mtime > self.source_path.stat().st_mtime - ) - - -class InvalidateByMultiPathsMtime(abc.ABC): - """The cached file is valid if exists and is newer than the newest source file.""" - - @property - @abc.abstractmethod - def source_paths(self) -> pathlib.Path: - ... - - @property - def newest_date(self): - return max((t.stat().st_mtime for t in self.source_paths), default=0) - - def is_valid(self, cache_path: pathlib.Path): - return cache_path.exists() and cache_path.stat().st_mtime > self.newest_date - - -############### -# Naming logic -############### - - -class NameByFields: - """Name is built taking into account all fields in the Header - (except the source itself). 
- """ - - def _for_cache_name(self): - yield from super()._for_cache_name() - for field in dc_fields(self): - if field.name not in ("source", "converter_id"): - yield getattr(self, field.name) - - -class NameByFileContent: - """Given a file source object, the name is built from its content.""" - - _source_type = pathlib.Path - - @property - def source_path(self) -> pathlib.Path: - return self.source - - def _for_cache_name(self): - yield from super()._for_cache_name() - yield self.source_path.read_bytes() - - @classmethod - def from_string(cls, s: str, converter_id: str): - return cls(pathlib.Path(s), converter_id) - - -@dataclass(frozen=True) -class NameByObj: - """Given a pickable source object, the name is built from its content.""" - - pickle_protocol: int = pickle.HIGHEST_PROTOCOL - - def _for_cache_name(self): - yield from super()._for_cache_name() - yield pickle.dumps(self.source, protocol=self.pickle_protocol) - - -class NameByPath: - """Given a file source object, the name is built from its resolved path.""" - - _source_type = pathlib.Path - - @property - def source_path(self) -> pathlib.Path: - return self.source - - def _for_cache_name(self): - yield from super()._for_cache_name() - yield bytes(self.source_path.resolve()) - - @classmethod - def from_string(cls, s: str, converter_id: str): - return cls(pathlib.Path(s), converter_id) - - -class NameByMultiPaths: - """Given multiple file source object, the name is built from their resolved path - in ascending order. - """ - - _source_type = tuple - - @property - def source_paths(self) -> tuple[pathlib.Path]: - return self.source - - def _for_cache_name(self): - yield from super()._for_cache_name() - yield from sorted(bytes(p.resolve()) for p in self.source_paths) - - @classmethod - def from_strings(cls, ss: Iterable[str], converter_id: str): - return cls(tuple(pathlib.Path(s) for s in ss), converter_id) - - -class NameByHashIter: - """Given multiple hashes, the name is built from them in ascending order.""" - - _source_type = tuple - - def _for_cache_name(self): - yield from super()._for_cache_name() - yield from sorted(h for h in self.source) - - -class DiskCache: - """A class to store and load cached objects to disk, which - are built from a source object and conversion function. - - The basename for the cache file is a hash hexdigest - built by feeding a collection of values determined by - the Header object. - - Parameters - ---------- - cache_folder - indicates where the cache files will be saved. - """ - - # Maps classes to header class - _header_classes: dict[type, BaseHeader] = None - - # Hasher object constructor (e.g. a member of hashlib) - # must implement update(b: bytes) and hexdigest() methods - _hasher = hashlib.sha1 - - # If True, for each cached file the header is also stored. - _store_header: bool = True - - def __init__(self, cache_folder: str | pathlib.Path): - self.cache_folder = pathlib.Path(cache_folder) - self.cache_folder.mkdir(parents=True, exist_ok=True) - self._header_classes = self._header_classes or {} - - def register_header_class(self, object_class: type, header_class: BaseHeader): - self._header_classes[object_class] = header_class - - def cache_stem_for(self, header: BaseHeader) -> str: - """Generate a hash representing the basename of a memoized file - for a given header. - - The naming strategy is defined by the header class used. 
- """ - hd = self._hasher() - for value in header.for_cache_name(): - hd.update(value) - return hd.hexdigest() - - def cache_path_for(self, header: BaseHeader) -> pathlib.Path: - """Generate a Path representing the location of a memoized file - for a given filepath or object. - - The naming strategy is defined by the header class used. - """ - h = self.cache_stem_for(header) - return self.cache_folder.joinpath(h).with_suffix(".pickle") - - def _get_header_class(self, source_object) -> BaseHeader: - for k, v in self._header_classes.items(): - if isinstance(source_object, k): - return v - raise TypeError(f"Cannot find header class for {type(source_object)}") - - def load(self, source_object, converter=None, pass_hash=False) -> tuple[Any, str]: - """Given a source_object, return the converted value stored - in the cache together with the cached path stem - - When the cache is not found: - - If a converter callable is given, use it on the source - object, store the result in the cache and return it. - - Return None, otherwise. - - Two signatures for the converter are valid: - - source_object -> transformed object - - (source_object, cached_path_stem) -> transformed_object - - To use the second one, use `pass_hash=True`. - - If you want to do the conversion yourself outside this class, - use the converter argument to provide a name for it. This is - important as the cached_path_stem depends on the converter name. - """ - header_class = self._get_header_class(source_object) - - if isinstance(converter, str): - converter_id = converter - converter = None - else: - converter_id = getattr(converter, "__name__", "") - - header = header_class(source_object, converter_id) - - cache_path = self.cache_path_for(header) - - converted_object = self.rawload(header, cache_path) - - if converted_object: - return converted_object, cache_path.stem - if converter is None: - return None, cache_path.stem - - if pass_hash: - converted_object = converter(source_object, cache_path.stem) - else: - converted_object = converter(source_object) - - self.rawsave(header, converted_object, cache_path) - - return converted_object, cache_path.stem - - def save(self, converted_object, source_object, converter_id="") -> str: - """Given a converted_object and its corresponding source_object, - store it in the cache and return the cached_path_stem. - """ - - header_class = self._get_header_class(source_object) - header = header_class(source_object, converter_id) - return self.rawsave(header, converted_object, self.cache_path_for(header)).stem - - def rawload( - self, header: BaseHeader, cache_path: pathlib.Path = None - ) -> Any | None: - """Load the converted_object from the cache if it is valid. - - The invalidating strategy is defined by the header class used. - - The cache_path is optional, it will be calculated from the header - if not given. - """ - if cache_path is None: - cache_path = self.cache_path_for(header) - - if header.is_valid(cache_path): - with cache_path.open(mode="rb") as fi: - return pickle.load(fi) - - def rawsave( - self, header: BaseHeader, converted, cache_path: pathlib.Path = None - ) -> pathlib.Path: - """Save the converted object (in pickle format) and - its header (in json format) to the cache folder. - - The cache_path is optional, it will be calculated from the header - if not given. 
- """ - if cache_path is None: - cache_path = self.cache_path_for(header) - - if self._store_header: - with cache_path.with_suffix(".json").open("w", encoding="utf-8") as fo: - json.dump({k: str(v) for k, v in dc_asdict(header).items()}, fo) - with cache_path.open(mode="wb") as fo: - pickle.dump(converted, fo) - return cache_path - - -class DiskCacheByHash(DiskCache): - """Convenience class used for caching conversions that take a path, - naming by hashing its content. - """ - - @dataclass(frozen=True) - class Header(NameByFileContent, InvalidateByExist, BaseHeader): - pass - - _header_classes = { - pathlib.Path: Header, - str: Header.from_string, - } - - -class DiskCacheByMTime(DiskCache): - """Convenience class used for caching conversions that take a path, - naming by hashing its full path and invalidating by the file - modification time. - """ - - @dataclass(frozen=True) - class Header(NameByPath, InvalidateByPathMTime, BaseHeader): - pass - - _header_classes = { - pathlib.Path: Header, - str: Header.from_string, - } diff --git a/pint/_vendor/flexparser.py b/pint/_vendor/flexparser.py deleted file mode 100644 index 8945b6ed5..000000000 --- a/pint/_vendor/flexparser.py +++ /dev/null @@ -1,1455 +0,0 @@ -""" - flexparser.flexparser - ~~~~~~~~~~~~~~~~~~~~~ - - Classes and functions to create parsers. - - The idea is quite simple. You write a class for every type of content - (called here ``ParsedStatement``) you need to parse. Each class should - have a ``from_string`` constructor. We used extensively the ``typing`` - module to make the output structure easy to use and less error prone. - - For more information, take a look at https://github.com/hgrecco/flexparser - - :copyright: 2022 by flexparser Authors, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. 
-""" - -from __future__ import annotations - -import collections -import dataclasses -import enum -import functools -import hashlib -import hmac -import inspect -import logging -import pathlib -import re -import sys -import typing as ty -from collections.abc import Iterator -from dataclasses import dataclass -from functools import cached_property -from importlib import resources -from typing import Optional, Tuple, Type - -_LOGGER = logging.getLogger("flexparser") - -_SENTINEL = object() - - -################ -# Exceptions -################ - - -@dataclass(frozen=True) -class Statement: - """Base class for parsed elements within a source file.""" - - start_line: int = dataclasses.field(init=False, default=None) - start_col: int = dataclasses.field(init=False, default=None) - - end_line: int = dataclasses.field(init=False, default=None) - end_col: int = dataclasses.field(init=False, default=None) - - raw: str = dataclasses.field(init=False, default=None) - - @classmethod - def from_statement(cls, statement: Statement): - out = cls() - out.set_position(*statement.get_position()) - out.set_raw(statement.raw) - return out - - @classmethod - def from_statement_iterator_element(cls, values: ty.Tuple[int, int, int, int, str]): - out = cls() - out.set_position(*values[:-1]) - out.set_raw(values[-1]) - return out - - @property - def format_position(self): - if self.start_line is None: - return "N/A" - return "%d,%d-%d,%d" % self.get_position() - - @property - def raw_strip(self): - return self.raw.strip() - - def get_position(self): - return self.start_line, self.start_col, self.end_line, self.end_col - - def set_position(self, start_line, start_col, end_line, end_col): - object.__setattr__(self, "start_line", start_line) - object.__setattr__(self, "start_col", start_col) - object.__setattr__(self, "end_line", end_line) - object.__setattr__(self, "end_col", end_col) - return self - - def set_raw(self, raw): - object.__setattr__(self, "raw", raw) - return self - - def set_simple_position(self, line, col, width): - return self.set_position(line, col, line, col + width) - - -@dataclass(frozen=True) -class ParsingError(Statement, Exception): - """Base class for all parsing exceptions in this package.""" - - def __str__(self): - return Statement.__str__(self) - - -@dataclass(frozen=True) -class UnknownStatement(ParsingError): - """A string statement could not bee parsed.""" - - def __str__(self): - return f"Could not parse '{self.raw}' ({self.format_position})" - - -@dataclass(frozen=True) -class UnhandledParsingError(ParsingError): - """Base class for all parsing exceptions in this package.""" - - ex: Exception - - def __str__(self): - return f"Unhandled exception while parsing '{self.raw}' ({self.format_position}): {self.ex}" - - -@dataclass(frozen=True) -class UnexpectedEOF(ParsingError): - """End of file was found within an open block.""" - - -############################# -# Useful methods and classes -############################# - - -@dataclass(frozen=True) -class Hash: - algorithm_name: str - hexdigest: str - - def __eq__(self, other: Hash): - return ( - isinstance(other, Hash) - and self.algorithm_name != "" - and self.algorithm_name == other.algorithm_name - and hmac.compare_digest(self.hexdigest, other.hexdigest) - ) - - @classmethod - def from_bytes(cls, algorithm, b: bytes): - hasher = algorithm(b) - return cls(hasher.name, hasher.hexdigest()) - - @classmethod - def from_file_pointer(cls, algorithm, fp: ty.BinaryIO): - return cls.from_bytes(algorithm, fp.read()) - - @classmethod - def 
nullhash(cls): - return cls("", "") - - -def _yield_types( - obj, valid_subclasses=(object,), recurse_origin=(tuple, list, ty.Union) -): - """Recursively transverse type annotation if the - origin is any of the types in `recurse_origin` - and yield those type which are subclasses of `valid_subclasses`. - - """ - if ty.get_origin(obj) in recurse_origin: - for el in ty.get_args(obj): - yield from _yield_types(el, valid_subclasses, recurse_origin) - else: - if inspect.isclass(obj) and issubclass(obj, valid_subclasses): - yield obj - - -class classproperty: # noqa N801 - """Decorator for a class property - - In Python 3.9+ can be replaced by - - @classmethod - @property - def myprop(self): - return 42 - - """ - - def __init__(self, fget): - self.fget = fget - - def __get__(self, owner_self, owner_cls): - return self.fget(owner_cls) - - -def is_relative_to(self, *other): - """Return True if the path is relative to another path or False. - - In Python 3.9+ can be replaced by - - path.is_relative_to(other) - """ - try: - self.relative_to(*other) - return True - except ValueError: - return False - - -class DelimiterInclude(enum.IntEnum): - """Specifies how to deal with delimiters while parsing.""" - - #: Split at delimiter, not including in any string - SPLIT = enum.auto() - - #: Split after, keeping the delimiter with previous string. - SPLIT_AFTER = enum.auto() - - #: Split before, keeping the delimiter with next string. - SPLIT_BEFORE = enum.auto() - - #: Do not split at delimiter. - DO_NOT_SPLIT = enum.auto() - - -class DelimiterAction(enum.IntEnum): - """Specifies how to deal with delimiters while parsing.""" - - #: Continue parsing normally. - CONTINUE = enum.auto() - - #: Capture everything til end of line as a whole. - CAPTURE_NEXT_TIL_EOL = enum.auto() - - #: Stop parsing line and move to next. - STOP_PARSING_LINE = enum.auto() - - #: Stop parsing content. - STOP_PARSING = enum.auto() - - -DO_NOT_SPLIT_EOL = { - "\r\n": (DelimiterInclude.DO_NOT_SPLIT, DelimiterAction.CONTINUE), - "\n": (DelimiterInclude.DO_NOT_SPLIT, DelimiterAction.CONTINUE), - "\r": (DelimiterInclude.DO_NOT_SPLIT, DelimiterAction.CONTINUE), -} - -SPLIT_EOL = { - "\r\n": (DelimiterInclude.SPLIT, DelimiterAction.CONTINUE), - "\n": (DelimiterInclude.SPLIT, DelimiterAction.CONTINUE), - "\r": (DelimiterInclude.SPLIT, DelimiterAction.CONTINUE), -} - -_EOLs_set = set(DO_NOT_SPLIT_EOL.keys()) - - -@functools.lru_cache -def _build_delimiter_pattern(delimiters: ty.Tuple[str, ...]) -> re.Pattern: - """Compile a tuple of delimiters into a regex expression with a capture group - around the delimiter. - """ - return re.compile("|".join(f"({re.escape(el)})" for el in delimiters)) - - -############ -# Iterators -############ - -DelimiterDictT = ty.Dict[str, ty.Tuple[DelimiterInclude, DelimiterAction]] - - -class Spliter: - """Content iterator splitting according to given delimiters. - - The pattern can be changed dynamically sending a new pattern to the generator, - see DelimiterInclude and DelimiterAction for more information. - - The current scanning position can be changed at any time. - - Parameters - ---------- - content : str - delimiters : ty.Dict[str, ty.Tuple[DelimiterInclude, DelimiterAction]] - - Yields - ------ - start_line : int - line number of the start of the content (zero-based numbering). - start_col : int - column number of the start of the content (zero-based numbering). - end_line : int - line number of the end of the content (zero-based numbering). 
- end_col : int - column number of the end of the content (zero-based numbering). - part : str - part of the text between delimiters. - """ - - _pattern: ty.Optional[re.Pattern] - _delimiters: DelimiterDictT - - __stop_searching_in_line = False - - __pending = "" - __first_line_col = None - - __lines = () - __lineno = 0 - __colno = 0 - - def __init__(self, content: str, delimiters: DelimiterDictT): - self.set_delimiters(delimiters) - self.__lines = content.splitlines(keepends=True) - - def set_position(self, lineno: int, colno: int): - self.__lineno, self.__colno = lineno, colno - - def set_delimiters(self, delimiters: DelimiterDictT): - for k, v in delimiters.items(): - if v == (DelimiterInclude.DO_NOT_SPLIT, DelimiterAction.STOP_PARSING): - raise ValueError( - f"The delimiter action for {k} is not a valid combination ({v})" - ) - # Build a pattern but removing eols - _pat_dlm = tuple(set(delimiters.keys()) - _EOLs_set) - if _pat_dlm: - self._pattern = _build_delimiter_pattern(_pat_dlm) - else: - self._pattern = None - # We add the end of line as delimiters if not present. - self._delimiters = {**DO_NOT_SPLIT_EOL, **delimiters} - - def __iter__(self): - return self - - def __next__(self): - if self.__lineno >= len(self.__lines): - raise StopIteration - - while True: - if self.__stop_searching_in_line: - # There must be part of a line pending to parse - # due to stop - line = self.__lines[self.__lineno] - mo = None - self.__stop_searching_in_line = False - else: - # We get the current line and the find the first delimiter. - line = self.__lines[self.__lineno] - if self._pattern is None: - mo = None - else: - mo = self._pattern.search(line, self.__colno) - - if mo is None: - # No delimiter was found, - # which should happen at end of the content or end of line - for k in DO_NOT_SPLIT_EOL.keys(): - if line.endswith(k): - dlm = line[-len(k) :] - end_col, next_col = len(line) - len(k), 0 - break - else: - # No EOL found, this is end of content - dlm = None - end_col, next_col = len(line), 0 - - next_line = self.__lineno + 1 - - else: - next_line = self.__lineno - end_col, next_col = mo.span() - dlm = mo.group() - - part = line[self.__colno : end_col] - - include, action = self._delimiters.get( - dlm, (DelimiterInclude.SPLIT, DelimiterAction.STOP_PARSING) - ) - - if include == DelimiterInclude.SPLIT: - next_pending = "" - elif include == DelimiterInclude.SPLIT_AFTER: - end_col += len(dlm) - part = part + dlm - next_pending = "" - elif include == DelimiterInclude.SPLIT_BEFORE: - next_pending = dlm - elif include == DelimiterInclude.DO_NOT_SPLIT: - self.__pending += line[self.__colno : end_col] + dlm - next_pending = "" - else: - raise ValueError(f"Unknown action {include}.") - - if action == DelimiterAction.STOP_PARSING: - # this will raise a StopIteration in the next call. 
- next_line = len(self.__lines) - elif action == DelimiterAction.STOP_PARSING_LINE: - next_line = self.__lineno + 1 - next_col = 0 - - start_line = self.__lineno - start_col = self.__colno - end_line = self.__lineno - - self.__lineno = next_line - self.__colno = next_col - - if action == DelimiterAction.CAPTURE_NEXT_TIL_EOL: - self.__stop_searching_in_line = True - - if include == DelimiterInclude.DO_NOT_SPLIT: - self.__first_line_col = start_line, start_col - else: - if self.__first_line_col is None: - out = ( - start_line, - start_col - len(self.__pending), - end_line, - end_col, - self.__pending + part, - ) - else: - out = ( - *self.__first_line_col, - end_line, - end_col, - self.__pending + part, - ) - self.__first_line_col = None - self.__pending = next_pending - return out - - -class StatementIterator: - """Content peekable iterator splitting according to given delimiters. - - The pattern can be changed dynamically sending a new pattern to the generator, - see DelimiterInclude and DelimiterAction for more information. - - Parameters - ---------- - content : str - delimiters : dict[str, ty.Tuple[DelimiterInclude, DelimiterAction]] - - Yields - ------ - Statement - """ - - _cache: ty.Deque[Statement] - - def __init__( - self, content: str, delimiters: DelimiterDictT, strip_spaces: bool = True - ): - self._cache = collections.deque() - self._spliter = Spliter(content, delimiters) - self._strip_spaces = strip_spaces - - def __iter__(self): - return self - - def set_delimiters(self, delimiters: DelimiterDictT): - self._spliter.set_delimiters(delimiters) - if self._cache: - value = self.peek() - # Elements are 1 based indexing, while splitter is 0 based. - self._spliter.set_position(value.start_line - 1, value.start_col) - self._cache.clear() - - def _get_next_strip(self) -> Statement: - part = "" - while not part: - start_line, start_col, end_line, end_col, part = next(self._spliter) - lo = len(part) - part = part.lstrip() - start_col += lo - len(part) - - lo = len(part) - part = part.rstrip() - end_col -= lo - len(part) - - return Statement.from_statement_iterator_element( - (start_line + 1, start_col, end_line + 1, end_col, part) - ) - - def _get_next(self) -> Statement: - if self._strip_spaces: - return self._get_next_strip() - - part = "" - while not part: - start_line, start_col, end_line, end_col, part = next(self._spliter) - - return Statement.from_statement_iterator_element( - (start_line + 1, start_col, end_line + 1, end_col, part) - ) - - def peek(self, default=_SENTINEL) -> Statement: - """Return the item that will be next returned from ``next()``. - - Return ``default`` if there are no items left. If ``default`` is not - provided, raise ``StopIteration``. - - """ - if not self._cache: - try: - self._cache.append(self._get_next()) - except StopIteration: - if default is _SENTINEL: - raise - return default - return self._cache[0] - - def __next__(self) -> Statement: - if self._cache: - return self._cache.popleft() - else: - return self._get_next() - - -########### -# Parsing -########### - -# Configuration type -CT = ty.TypeVar("CT") -PST = ty.TypeVar("PST", bound="ParsedStatement") -LineColStr = Tuple[int, int, str] -FromString = ty.Union[None, PST, ParsingError] -Consume = ty.Union[PST, ParsingError] -NullableConsume = ty.Union[None, PST, ParsingError] - -Single = ty.Union[PST, ParsingError] -Multi = ty.Tuple[ty.Union[PST, ParsingError], ...] - - -@dataclass(frozen=True) -class ParsedStatement(ty.Generic[CT], Statement): - """A single parsed statement. 
- - In order to write your own, you need to subclass it as a - frozen dataclass and implement the parsing logic by overriding - `from_string` classmethod. - - Takes two arguments: the string to parse and an object given - by the parser which can be used to store configuration information. - - It should return an instance of this class if parsing - was successful or None otherwise - """ - - @classmethod - def from_string(cls: Type[PST], s: str) -> FromString[PST]: - """Parse a string into a ParsedStatement. - - Return files and their meaning: - 1. None: the string cannot be parsed with this class. - 2. A subclass of ParsedStatement: the string was parsed successfully - 3. A subclass of ParsingError the string could be parsed with this class but there is - an error. - """ - raise NotImplementedError( - "ParsedStatement subclasses must implement " - "'from_string' or 'from_string_and_config'" - ) - - @classmethod - def from_string_and_config(cls: Type[PST], s: str, config: CT) -> FromString[PST]: - """Parse a string into a ParsedStatement. - - Return files and their meaning: - 1. None: the string cannot be parsed with this class. - 2. A subclass of ParsedStatement: the string was parsed successfully - 3. A subclass of ParsingError the string could be parsed with this class but there is - an error. - """ - return cls.from_string(s) - - @classmethod - def from_statement_and_config( - cls: Type[PST], statement: Statement, config: CT - ) -> FromString[PST]: - try: - out = cls.from_string_and_config(statement.raw, config) - except Exception as ex: - out = UnhandledParsingError(ex) - - if out is None: - return None - - out.set_position(*statement.get_position()) - out.set_raw(statement.raw) - return out - - @classmethod - def consume( - cls: Type[PST], statement_iterator: StatementIterator, config: CT - ) -> NullableConsume[PST]: - """Peek into the iterator and try to parse. - - Return files and their meaning: - 1. None: the string cannot be parsed with this class, the iterator is kept an the current place. - 2. a subclass of ParsedStatement: the string was parsed successfully, advance the iterator. - 3. a subclass of ParsingError: the string could be parsed with this class but there is - an error, advance the iterator. - """ - statement = statement_iterator.peek() - parsed_statement = cls.from_statement_and_config(statement, config) - if parsed_statement is None: - return None - next(statement_iterator) - return parsed_statement - - -OPST = ty.TypeVar("OPST", bound="ParsedStatement") -IPST = ty.TypeVar("IPST", bound="ParsedStatement") -CPST = ty.TypeVar("CPST", bound="ParsedStatement") -BT = ty.TypeVar("BT", bound="Block") -RBT = ty.TypeVar("RBT", bound="RootBlock") - - -@dataclass(frozen=True) -class Block(ty.Generic[OPST, IPST, CPST, CT]): - """A sequence of statements with an opening, body and closing.""" - - opening: Consume[OPST] - body: Tuple[Consume[IPST], ...] 
- closing: Consume[CPST] - - delimiters = {} - - @property - def start_line(self): - return self.opening.start_line - - @property - def start_col(self): - return self.opening.start_col - - @property - def end_line(self): - return self.closing.end_line - - @property - def end_col(self): - return self.closing.end_col - - def get_position(self): - return self.start_line, self.start_col, self.end_line, self.end_col - - @property - def format_position(self): - if self.start_line is None: - return "N/A" - return "%d,%d-%d,%d" % self.get_position() - - @classmethod - def subclass_with(cls, *, opening=None, body=None, closing=None): - @dataclass(frozen=True) - class CustomBlock(Block): - pass - - if opening: - CustomBlock.__annotations__["opening"] = Single[ty.Union[opening]] - if body: - CustomBlock.__annotations__["body"] = Multi[ty.Union[body]] - if closing: - CustomBlock.__annotations__["closing"] = Single[ty.Union[closing]] - - return CustomBlock - - def __iter__(self) -> Iterator[Statement]: - yield self.opening - for el in self.body: - if isinstance(el, Block): - yield from el - else: - yield el - yield self.closing - - def iter_blocks(self) -> Iterator[ty.Union[Block, Statement]]: - yield self.opening - yield from self.body - yield self.closing - - ################################################### - # Convenience methods to iterate parsed statements - ################################################### - - _ElementT = ty.TypeVar("_ElementT", bound=Statement) - - def filter_by(self, *klass: Type[_ElementT]) -> Iterator[_ElementT]: - """Yield elements of a given class or classes.""" - yield from (el for el in self if isinstance(el, klass)) # noqa Bug in pycharm. - - @cached_property - def errors(self) -> ty.Tuple[ParsingError, ...]: - """Tuple of errors found.""" - return tuple(self.filter_by(ParsingError)) - - @property - def has_errors(self) -> bool: - """True if errors were found during parsing.""" - return bool(self.errors) - - #################### - # Statement classes - #################### - - @classproperty - def opening_classes(cls) -> Iterator[Type[OPST]]: - """Classes representing any of the parsed statement that can open this block.""" - opening = ty.get_type_hints(cls)["opening"] - yield from _yield_types(opening, ParsedStatement) - - @classproperty - def body_classes(cls) -> Iterator[Type[IPST]]: - """Classes representing any of the parsed statement that can be in the body.""" - body = ty.get_type_hints(cls)["body"] - yield from _yield_types(body, (ParsedStatement, Block)) - - @classproperty - def closing_classes(cls) -> Iterator[Type[CPST]]: - """Classes representing any of the parsed statement that can close this block.""" - closing = ty.get_type_hints(cls)["closing"] - yield from _yield_types(closing, ParsedStatement) - - ########## - # Consume - ########## - - @classmethod - def consume_opening( - cls: Type[BT], statement_iterator: StatementIterator, config: CT - ) -> NullableConsume[OPST]: - """Peek into the iterator and try to parse with any of the opening classes. - - See `ParsedStatement.consume` for more details. - """ - for c in cls.opening_classes: - el = c.consume(statement_iterator, config) - if el is not None: - return el - return None - - @classmethod - def consume_body( - cls, statement_iterator: StatementIterator, config: CT - ) -> Consume[IPST]: - """Peek into the iterator and try to parse with any of the body classes. - - If the statement cannot be parsed, a UnknownStatement is returned. 
- """ - for c in cls.body_classes: - el = c.consume(statement_iterator, config) - if el is not None: - return el - el = next(statement_iterator) - return UnknownStatement.from_statement(el) - - @classmethod - def consume_closing( - cls: Type[BT], statement_iterator: StatementIterator, config: CT - ) -> NullableConsume[CPST]: - """Peek into the iterator and try to parse with any of the opening classes. - - See `ParsedStatement.consume` for more details. - """ - for c in cls.closing_classes: - el = c.consume(statement_iterator, config) - if el is not None: - return el - return None - - @classmethod - def consume_body_closing( - cls: Type[BT], opening: OPST, statement_iterator: StatementIterator, config: CT - ) -> BT: - body = [] - closing = None - last_line = opening.end_line - while closing is None: - try: - closing = cls.consume_closing(statement_iterator, config) - if closing is not None: - continue - el = cls.consume_body(statement_iterator, config) - body.append(el) - last_line = el.end_line - except StopIteration: - closing = cls.on_stop_iteration(config) - closing.set_position(last_line + 1, 0, last_line + 1, 0) - - return cls(opening, tuple(body), closing) - - @classmethod - def consume( - cls: Type[BT], statement_iterator: StatementIterator, config: CT - ) -> Optional[BT]: - """Try consume the block. - - Possible outcomes: - 1. The opening was not matched, return None. - 2. A subclass of Block, where body and closing migh contain errors. - """ - opening = cls.consume_opening(statement_iterator, config) - if opening is None: - return None - - return cls.consume_body_closing(opening, statement_iterator, config) - - @classmethod - def on_stop_iteration(cls, config): - return UnexpectedEOF() - - -@dataclass(frozen=True) -class BOS(ParsedStatement[CT]): - """Beginning of source.""" - - # Hasher algorithm name and hexdigest - content_hash: Hash - - @classmethod - def from_string_and_config(cls: Type[PST], s: str, config: CT) -> FromString[PST]: - raise RuntimeError("BOS cannot be constructed from_string_and_config") - - @property - def location(self) -> SourceLocationT: - return "" - - -@dataclass(frozen=True) -class BOF(BOS): - """Beginning of file.""" - - path: pathlib.Path - - # Modification time of the file. 
- mtime: float - - @property - def location(self) -> SourceLocationT: - return self.path - - -@dataclass(frozen=True) -class BOR(BOS): - """Beginning of resource.""" - - package: str - resource_name: str - - @property - def location(self) -> SourceLocationT: - return self.package, self.resource_name - - -@dataclass(frozen=True) -class EOS(ParsedStatement[CT]): - """End of sequence.""" - - @classmethod - def from_string_and_config(cls: Type[PST], s: str, config: CT) -> FromString[PST]: - return cls() - - -class RootBlock(ty.Generic[IPST, CT], Block[BOS, IPST, EOS, CT]): - """A sequence of statement flanked by the beginning and ending of stream.""" - - opening: Single[BOS] - closing: Single[EOS] - - @classmethod - def subclass_with(cls, *, body=None): - @dataclass(frozen=True) - class CustomRootBlock(RootBlock): - pass - - if body: - CustomRootBlock.__annotations__["body"] = Multi[ty.Union[body]] - - return CustomRootBlock - - @classmethod - def consume_opening( - cls: Type[RBT], statement_iterator: StatementIterator, config: CT - ) -> NullableConsume[BOS]: - raise RuntimeError( - "Implementation error, 'RootBlock.consume_opening' should never be called" - ) - - @classmethod - def consume( - cls: Type[RBT], statement_iterator: StatementIterator, config: CT - ) -> RBT: - block = super().consume(statement_iterator, config) - if block is None: - raise RuntimeError( - "Implementation error, 'RootBlock.consume' should never return None" - ) - return block - - @classmethod - def consume_closing( - cls: Type[RBT], statement_iterator: StatementIterator, config: CT - ) -> NullableConsume[EOS]: - return None - - @classmethod - def on_stop_iteration(cls, config): - return EOS() - - -################# -# Source parsing -################# - -ResourceT = ty.Tuple[str, str] # package name, resource name -StrictLocationT = ty.Union[pathlib.Path, ResourceT] -SourceLocationT = ty.Union[str, StrictLocationT] - - -@dataclass(frozen=True) -class ParsedSource(ty.Generic[RBT, CT]): - - parsed_source: RBT - - # Parser configuration. - config: CT - - @property - def location(self) -> StrictLocationT: - return self.parsed_source.opening.location - - @cached_property - def has_errors(self) -> bool: - return self.parsed_source.has_errors - - def errors(self): - yield from self.parsed_source.errors - - -@dataclass(frozen=True) -class CannotParseResourceAsFile(Exception): - """The requested python package resource cannot be located as a file - in the file system. - """ - - package: str - resource_name: str - - -class Parser(ty.Generic[RBT, CT]): - """Parser class.""" - - #: class to iterate through statements in a source unit. - _statement_iterator_class: Type[StatementIterator] = StatementIterator - - #: Delimiters. - _delimiters: DelimiterDictT = SPLIT_EOL - - _strip_spaces: bool = True - - #: root block class containing statements and blocks can be parsed. - _root_block_class: Type[RBT] - - #: source file text encoding. - _encoding = "utf-8" - - #: configuration passed to from_string functions. - _config: CT - - #: try to open resources as files. - _prefer_resource_as_file: bool - - #: parser algorithm to us. Must be a callable member of hashlib - _hasher = hashlib.blake2b - - def __init__(self, config: CT, prefer_resource_as_file=True): - self._config = config - self._prefer_resource_as_file = prefer_resource_as_file - - def parse(self, source_location: SourceLocationT) -> ParsedSource[RBT, CT]: - """Parse a file into a ParsedSourceFile or ParsedResource. 
- - Parameters - ---------- - source_location: - if str or pathlib.Path is interpreted as a file. - if (str, str) is interpreted as (package, resource) using the resource python api. - """ - if isinstance(source_location, tuple) and len(source_location) == 2: - if self._prefer_resource_as_file: - try: - return self.parse_resource_from_file(*source_location) - except CannotParseResourceAsFile: - pass - return self.parse_resource(*source_location) - - if isinstance(source_location, str): - return self.parse_file(pathlib.Path(source_location)) - - if isinstance(source_location, pathlib.Path): - return self.parse_file(source_location) - - raise TypeError( - f"Unknown type {type(source_location)}, " - "use str or pathlib.Path for files or " - "(package: str, resource_name: str) tuple " - "for a resource." - ) - - def parse_bytes(self, b: bytes, bos: BOS = None) -> ParsedSource[RBT, CT]: - if bos is None: - bos = BOS(Hash.from_bytes(self._hasher, b)).set_simple_position(0, 0, 0) - - sic = self._statement_iterator_class( - b.decode(self._encoding), self._delimiters, self._strip_spaces - ) - - parsed = self._root_block_class.consume_body_closing(bos, sic, self._config) - - return ParsedSource( - parsed, - self._config, - ) - - def parse_file(self, path: pathlib.Path) -> ParsedSource[RBT, CT]: - """Parse a file into a ParsedSourceFile. - - Parameters - ---------- - path - path of the file. - """ - with path.open(mode="rb") as fi: - content = fi.read() - - bos = BOF( - Hash.from_bytes(self._hasher, content), path, path.stat().st_mtime - ).set_simple_position(0, 0, 0) - return self.parse_bytes(content, bos) - - def parse_resource_from_file( - self, package: str, resource_name: str - ) -> ParsedSource[RBT, CT]: - """Parse a resource into a ParsedSourceFile, opening as a file. - - Parameters - ---------- - package - package name where the resource is located. - resource_name - name of the resource - """ - if sys.version_info < (3, 9): - # Remove when Python 3.8 is dropped - with resources.path(package, resource_name) as p: - path = p.resolve() - else: - with resources.as_file( - resources.files(package).joinpath(resource_name) - ) as p: - path = p.resolve() - - if path.exists(): - return self.parse_file(path) - - raise CannotParseResourceAsFile(package, resource_name) - - def parse_resource(self, package: str, resource_name: str) -> ParsedSource[RBT, CT]: - """Parse a resource into a ParsedResource. - - Parameters - ---------- - package - package name where the resource is located. - resource_name - name of the resource - """ - if sys.version_info < (3, 9): - # Remove when Python 3.8 is dropped - with resources.open_binary(package, resource_name) as fi: - content = fi.read() - else: - with resources.files(package).joinpath(resource_name).open("rb") as fi: - content = fi.read() - - bos = BOR( - Hash.from_bytes(self._hasher, content), package, resource_name - ).set_simple_position(0, 0, 0) - - return self.parse_bytes(content, bos) - - -########## -# Project -########## - - -class IncludeStatement(ParsedStatement): - """ "Include statements allow to merge files.""" - - @property - def target(self) -> str: - raise NotImplementedError( - "IncludeStatement subclasses must implement target property." - ) - - -class ParsedProject( - ty.Dict[ - ty.Optional[ty.Tuple[StrictLocationT, str]], - ParsedSource, - ] -): - """Collection of files, independent or connected via IncludeStatement. - - Keys are either an absolute pathname or a tuple package name, resource name. - - None is the name of the root. 
- - """ - - @cached_property - def has_errors(self) -> bool: - return any(el.has_errors for el in self.values()) - - def errors(self): - for el in self.values(): - yield from el.errors() - - def _iter_statements(self, items, seen, include_only_once): - """Iter all definitions in the order they appear, - going into the included files. - """ - for source_location, parsed in items: - seen.add(source_location) - for parsed_statement in parsed.parsed_source: - if isinstance(parsed_statement, IncludeStatement): - location = parsed.location, parsed_statement.target - if location in seen and include_only_once: - raise ValueError(f"{location} was already included.") - yield from self._iter_statements( - ((location, self[location]),), seen, include_only_once - ) - else: - yield parsed_statement - - def iter_statements(self, include_only_once=True): - """Iter all definitions in the order they appear, - going into the included files. - - Parameters - ---------- - include_only_once - if true, each file cannot be included more than once. - """ - yield from self._iter_statements([(None, self[None])], set(), include_only_once) - - def _iter_blocks(self, items, seen, include_only_once): - """Iter all definitions in the order they appear, - going into the included files. - """ - for source_location, parsed in items: - seen.add(source_location) - for parsed_statement in parsed.parsed_source.iter_blocks(): - if isinstance(parsed_statement, IncludeStatement): - location = parsed.location, parsed_statement.target - if location in seen and include_only_once: - raise ValueError(f"{location} was already included.") - yield from self._iter_blocks( - ((location, self[location]),), seen, include_only_once - ) - else: - yield parsed_statement - - def iter_blocks(self, include_only_once=True): - """Iter all definitions in the order they appear, - going into the included files. - - Parameters - ---------- - include_only_once - if true, each file cannot be included more than once. - """ - yield from self._iter_blocks([(None, self[None])], set(), include_only_once) - - -def default_locator(source_location: StrictLocationT, target: str) -> StrictLocationT: - """Return a new location from current_location and target.""" - - if isinstance(source_location, pathlib.Path): - current_location = pathlib.Path(source_location).resolve() - - if current_location.is_file(): - current_path = current_location.parent - else: - current_path = current_location - - target_path = pathlib.Path(target) - if target_path.is_absolute(): - raise ValueError( - f"Cannot refer to absolute paths in import statements ({source_location}, {target})." - ) - - tmp = (current_path / target_path).resolve() - if not is_relative_to(tmp, current_path): - raise ValueError( - f"Cannot refer to locations above the current location ({source_location}, {target})" - ) - - return tmp.absolute() - - elif isinstance(source_location, tuple) and len(source_location) == 2: - return source_location[0], target - - raise TypeError( - f"Cannot handle type {type(source_location)}, " - "use str or pathlib.Path for files or " - "(package: str, resource_name: str) tuple " - "for a resource." - ) - - -DefinitionT = ty.Union[ty.Type[Block], ty.Type[ParsedStatement]] - -SpecT = ty.Union[ - ty.Type[Parser], - DefinitionT, - ty.Iterable[DefinitionT], - ty.Type[RootBlock], -] - - -def build_parser_class(spec: SpecT, *, strip_spaces: bool = True, delimiters=None): - """Build a custom parser class. - - Parameters - ---------- - spec - specification of the content to parse. 
Can be one of the following things: - - Parser class. - - Block or ParsedStatement derived class. - - Iterable of Block or ParsedStatement derived class. - - RootBlock derived class. - strip_spaces : bool - if True, spaces will be stripped for each statement before calling - ``from_string_and_config``. - delimiters : dict - Specify how the source file is split into statements (See below). - - Delimiters dictionary - --------------------- - The delimiters are specified with the keys of the delimiters dict. - The dict files can be used to further customize the iterator. Each - consist of a tuple of two elements: - 1. A value of the DelimiterMode to indicate what to do with the - delimiter string: skip it, attach keep it with previous or next string - 2. A boolean indicating if parsing should stop after fiSBT - encountering this delimiter. - """ - - if delimiters is None: - delimiters = SPLIT_EOL - - if isinstance(spec, type) and issubclass(spec, Parser): - CustomParser = spec - else: - if isinstance(spec, (tuple, list)): - - for el in spec: - if not issubclass(el, (Block, ParsedStatement)): - raise TypeError( - "Elements in root_block_class must be of type Block or ParsedStatement, " - f"not {el}" - ) - - @dataclass(frozen=True) - class CustomRootBlock(RootBlock): - pass - - CustomRootBlock.__annotations__["body"] = Multi[ty.Union[spec]] - - elif isinstance(spec, type) and issubclass(spec, RootBlock): - - CustomRootBlock = spec - - elif isinstance(spec, type) and issubclass(spec, (Block, ParsedStatement)): - - @dataclass(frozen=True) - class CustomRootBlock(RootBlock): - pass - - CustomRootBlock.__annotations__["body"] = Multi[spec] - - else: - raise TypeError( - "`spec` must be of type RootBlock or tuple of type Block or ParsedStatement, " - f"not {type(spec)}" - ) - - class CustomParser(Parser): - - _delimiters = delimiters - _root_block_class = CustomRootBlock - _strip_spaces = strip_spaces - - return CustomParser - - -def parse( - entry_point: SourceLocationT, - spec: SpecT, - config=None, - *, - strip_spaces: bool = True, - delimiters=None, - locator: ty.Callable[[StrictLocationT, str], StrictLocationT] = default_locator, - prefer_resource_as_file: bool = True, - **extra_parser_kwargs, -) -> ParsedProject: - """Parse sources into a ParsedProject dictionary. - - Parameters - ---------- - entry_point - file or resource, given as (package_name, resource_name). - spec - specification of the content to parse. Can be one of the following things: - - Parser class. - - Block or ParsedStatement derived class. - - Iterable of Block or ParsedStatement derived class. - - RootBlock derived class. - config - a configuration object that will be passed to `from_string_and_config` - classmethod. - strip_spaces : bool - if True, spaces will be stripped for each statement before calling - ``from_string_and_config``. - delimiters : dict - Specify how the source file is split into statements (See below). - locator : Callable - function that takes the current location and a target of an IncludeStatement - and returns a new location. - prefer_resource_as_file : bool - if True, resources will try to be located in the filesystem if - available. - extra_parser_kwargs - extra keyword arguments to be given to the parser. - - Delimiters dictionary - --------------------- - The delimiters are specified with the keys of the delimiters dict. - The dict files can be used to further customize the iterator. Each - consist of a tuple of two elements: - 1. 
A value of the DelimiterMode to indicate what to do with the - delimiter string: skip it, attach keep it with previous or next string - 2. A boolean indicating if parsing should stop after fiSBT - encountering this delimiter. - """ - - CustomParser = build_parser_class( - spec, strip_spaces=strip_spaces, delimiters=delimiters - ) - parser = CustomParser( - config, prefer_resource_as_file=prefer_resource_as_file, **extra_parser_kwargs - ) - - pp = ParsedProject() - - # : ty.List[Optional[ty.Union[LocatorT, str]], ...] - pending: ty.List[ty.Tuple[StrictLocationT, str]] = [] - if isinstance(entry_point, (str, pathlib.Path)): - entry_point = pathlib.Path(entry_point) - if not entry_point.is_absolute(): - entry_point = pathlib.Path.cwd() / entry_point - - elif not (isinstance(entry_point, tuple) and len(entry_point) == 2): - raise TypeError( - f"Cannot handle type {type(entry_point)}, " - "use str or pathlib.Path for files or " - "(package: str, resource_name: str) tuple " - "for a resource." - ) - - pp[None] = parsed = parser.parse(entry_point) - pending.extend( - (parsed.location, el.target) - for el in parsed.parsed_source.filter_by(IncludeStatement) - ) - - while pending: - source_location, target = pending.pop(0) - pp[(source_location, target)] = parsed = parser.parse( - locator(source_location, target) - ) - pending.extend( - (parsed.location, el.target) - for el in parsed.parsed_source.filter_by(IncludeStatement) - ) - - return pp - - -def parse_bytes( - content: bytes, - spec: SpecT, - config=None, - *, - strip_spaces: bool = True, - delimiters=None, - **extra_parser_kwargs, -) -> ParsedProject: - """Parse sources into a ParsedProject dictionary. - - Parameters - ---------- - content - bytes. - spec - specification of the content to parse. Can be one of the following things: - - Parser class. - - Block or ParsedStatement derived class. - - Iterable of Block or ParsedStatement derived class. - - RootBlock derived class. - config - a configuration object that will be passed to `from_string_and_config` - classmethod. - strip_spaces : bool - if True, spaces will be stripped for each statement before calling - ``from_string_and_config``. - delimiters : dict - Specify how the source file is split into statements (See below). 
- """ - - CustomParser = build_parser_class( - spec, strip_spaces=strip_spaces, delimiters=delimiters - ) - parser = CustomParser(config, prefer_resource_as_file=False, **extra_parser_kwargs) - - pp = ParsedProject() - - pp[None] = parsed = parser.parse_bytes(content) - - if any(parsed.parsed_source.filter_by(IncludeStatement)): - raise ValueError("parse_bytes does not support using an IncludeStatement") - - return pp diff --git a/pint/babel_names.py b/pint/babel_names.py index 09fa04601..408ef8f8c 100644 --- a/pint/babel_names.py +++ b/pint/babel_names.py @@ -10,7 +10,7 @@ from .compat import HAS_BABEL -_babel_units = dict( +_babel_units: dict[str, str] = dict( standard_gravity="acceleration-g-force", millibar="pressure-millibar", metric_ton="mass-metric-ton", @@ -141,6 +141,6 @@ if not HAS_BABEL: _babel_units = {} -_babel_systems = dict(mks="metric", imperial="uksystem", US="ussystem") +_babel_systems: dict[str, str] = dict(mks="metric", imperial="uksystem", US="ussystem") -_babel_lengths = ["narrow", "short", "long"] +_babel_lengths: list[str] = ["narrow", "short", "long"] diff --git a/pint/compat.py b/pint/compat.py index 4e0fba86b..31f902e4b 100644 --- a/pint/compat.py +++ b/pint/compat.py @@ -1,26 +1,47 @@ """ - pint.compat - ~~~~~~~~~~~ +pint.compat +~~~~~~~~~~~ - Compatibility layer. +Compatibility layer. - :copyright: 2013 by Pint Authors, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. +:copyright: 2013 by Pint Authors, see AUTHORS for more details. +:license: BSD, see LICENSE for more details. """ from __future__ import annotations import math -import tokenize +import sys +from collections.abc import Callable, Iterable, Mapping from decimal import Decimal -from io import BytesIO +from importlib import import_module from numbers import Number +from typing import ( + Any, + # Remove once all dependent packages change their imports. + Never, # noqa + NoReturn, + Self, # noqa + TypeAlias, # noqa + Unpack, # noqa +) + +if sys.version_info >= (3, 13): + from warnings import deprecated # noqa +else: + from typing_extensions import deprecated # noqa + +def missing_dependency( + package: str, display_name: str | None = None +) -> Callable[..., NoReturn]: + """Return a helper function that raises an exception when used. -def missing_dependency(package, display_name=None): + It provides a way delay a missing dependency exception until it is used. + """ display_name = display_name or package - def _inner(*args, **kwargs): + def _inner(*args: Any, **kwargs: Any) -> NoReturn: raise Exception( "This feature requires %s. 
Please install it by running:\n" "pip install %s" % (display_name, package) @@ -29,180 +50,48 @@ def _inner(*args, **kwargs): return _inner -def tokenizer(input_string): - for tokinfo in tokenize.tokenize(BytesIO(input_string.encode("utf-8")).readline): - if tokinfo.type != tokenize.ENCODING: - yield tokinfo - - -# TODO: remove this warning after v0.10 -class BehaviorChangeWarning(UserWarning): - pass - - -try: - import numpy as np - from numpy import datetime64 as np_datetime64 - from numpy import ndarray - - HAS_NUMPY = True - NUMPY_VER = np.__version__ - NUMERIC_TYPES = (Number, Decimal, ndarray, np.number) - - def _to_magnitude(value, force_ndarray=False, force_ndarray_like=False): - if isinstance(value, (dict, bool)) or value is None: - raise TypeError("Invalid magnitude for Quantity: {0!r}".format(value)) - elif isinstance(value, str) and value == "": - raise ValueError("Quantity magnitude cannot be an empty string.") - elif isinstance(value, (list, tuple)): - return np.asarray(value) - if force_ndarray or ( - force_ndarray_like and not is_duck_array_type(type(value)) - ): - return np.asarray(value) - return value - - def _test_array_function_protocol(): - # Test if the __array_function__ protocol is enabled - try: - - class FakeArray: - def __array_function__(self, *args, **kwargs): - return - - np.concatenate([FakeArray()]) - return True - except ValueError: - return False - - HAS_NUMPY_ARRAY_FUNCTION = _test_array_function_protocol() - - NP_NO_VALUE = np._NoValue - -except ImportError: - - np = None - - class ndarray: - pass - - class np_datetime64: - pass - - HAS_NUMPY = False - NUMPY_VER = "0" - NUMERIC_TYPES = (Number, Decimal) - HAS_NUMPY_ARRAY_FUNCTION = False - NP_NO_VALUE = None - - def _to_magnitude(value, force_ndarray=False, force_ndarray_like=False): - if force_ndarray or force_ndarray_like: - raise ValueError( - "Cannot force to ndarray or ndarray-like when NumPy is not present." - ) - elif isinstance(value, (dict, bool)) or value is None: - raise TypeError("Invalid magnitude for Quantity: {0!r}".format(value)) - elif isinstance(value, str) and value == "": - raise ValueError("Quantity magnitude cannot be an empty string.") - elif isinstance(value, (list, tuple)): - raise TypeError( - "lists and tuples are valid magnitudes for " - "Quantity only when NumPy is present." 
- ) - return value - - -try: - from uncertainties import ufloat +def fully_qualified_name(t: type) -> str: + """Return the fully qualified name of a type.""" + module = t.__module__ + name = t.__qualname__ - HAS_UNCERTAINTIES = True -except ImportError: - ufloat = None - HAS_UNCERTAINTIES = False + if module is None or module == "builtins": + return name -try: - from babel import Locale as Loc - from babel import units as babel_units - - babel_parse = Loc.parse - - HAS_BABEL = hasattr(babel_units, "format_unit") -except ImportError: - HAS_BABEL = False - -# Defines Logarithm and Exponential for Logarithmic Converter -if HAS_NUMPY: - from numpy import exp # noqa: F401 - from numpy import log # noqa: F401 -else: - from math import exp # noqa: F401 - from math import log # noqa: F401 + return f"{module}.{name}" -if not HAS_BABEL: - babel_parse = babel_units = missing_dependency("Babel") # noqa: F811 - -# Define location of pint.Quantity in NEP-13 type cast hierarchy by defining upcast -# types using guarded imports -upcast_types = [] -# pint-pandas (PintArray) -try: - from pint_pandas import PintArray +def check_upcast_type(obj: type) -> bool: + """Check if the type object is an upcast type.""" - upcast_types.append(PintArray) -except ImportError: - pass - -# Pandas (Series) -try: - from pandas import DataFrame, Series - - upcast_types += [DataFrame, Series] -except ImportError: - pass - -# xarray (DataArray, Dataset, Variable) -try: - from xarray import DataArray, Dataset, Variable - - upcast_types += [DataArray, Dataset, Variable] -except ImportError: - pass - -try: - from dask import array as dask_array - from dask.base import compute, persist, visualize - -except ImportError: - compute, persist, visualize = None, None, None - dask_array = None + # TODO: merge or unify name with is_upcast_type + fqn = fully_qualified_name(obj) + if fqn not in upcast_type_map: + return False + else: + module_name, class_name = fqn.rsplit(".", 1) + cls = getattr(import_module(module_name), class_name) -def is_upcast_type(other) -> bool: - """Check if the type object is a upcast type using preset list. + upcast_type_map[fqn] = cls + # This is to check we are importing the same thing. + # and avoid weird problems. Maybe instead of return + # we should raise an error if false. + return obj in upcast_type_map.values() - Parameters - ---------- - other : object - Returns - ------- - bool - """ - return other in upcast_types +def is_upcast_type(other: type) -> bool: + """Check if the type object is an upcast type.""" + # TODO: merge or unify name with check_upcast_type -def is_duck_array_type(cls) -> bool: - """Check if the type object represents a (non-Quantity) duck array type. + if other in upcast_type_map.values(): + return True + return check_upcast_type(other) - Parameters - ---------- - cls : class - Returns - ------- - bool - """ +def is_duck_array_type(cls: type) -> bool: + """Check if the type object represents a (non-Quantity) duck array type.""" # TODO (NEP 30): replace duck array check with hasattr(other, "__duckarray__") return issubclass(cls, ndarray) or ( not hasattr(cls, "_magnitude") @@ -214,20 +103,21 @@ def is_duck_array_type(cls) -> bool: ) -def is_duck_array(obj): +def is_duck_array(obj: type) -> bool: + """Check if an object represents a (non-Quantity) duck array type.""" return is_duck_array_type(type(obj)) -def eq(lhs, rhs, check_all: bool): +def eq(lhs: Any, rhs: Any, check_all: bool) -> bool | Iterable[bool]: """Comparison of scalars and arrays. 
Parameters ---------- - lhs : object + lhs left-hand side - rhs : object + rhs right-hand side - check_all : bool + check_all if True, reduce sequence to single bool; return True if all the elements are equal. @@ -241,56 +131,278 @@ def eq(lhs, rhs, check_all: bool): return out -def isnan(obj, check_all: bool): - """Test for NaN or NaT +def isnan(obj: Any, check_all: bool) -> bool | Iterable[bool]: + """Test for NaN or NaT. Parameters ---------- - obj : object + obj scalar or vector - check_all : bool + check_all if True, reduce sequence to single bool; return True if any of the elements are NaN. Returns ------- bool or array_like of bool. - Always return False for non-numeric types. + Always return False for non-numeric types. """ if is_duck_array_type(type(obj)): - if obj.dtype.kind in "if": + if obj.dtype.kind in "ifc": out = np.isnan(obj) elif obj.dtype.kind in "Mm": out = np.isnat(obj) else: - # Not a numeric or datetime type - out = np.full(obj.shape, False) + if HAS_UNCERTAINTIES: + try: + out = unp.isnan(obj) + except TypeError: + # Not a numeric or UFloat type + out = np.full(obj.shape, False) + else: + # Not a numeric or datetime type + out = np.full(obj.shape, False) return out.any() if check_all else out if isinstance(obj, np_datetime64): return np.isnat(obj) + elif HAS_UNCERTAINTIES and isinstance(obj, UFloat): + return unp.isnan(obj) try: return math.isnan(obj) except TypeError: return False -def zero_or_nan(obj, check_all: bool): - """Test if obj is zero, NaN, or NaT +def zero_or_nan(obj: Any, check_all: bool) -> bool | Iterable[bool]: + """Test if obj is zero, NaN, or NaT. Parameters ---------- - obj : object + obj scalar or vector - check_all : bool + check_all if True, reduce sequence to single bool; return True if all the elements are zero, NaN, or NaT. Returns ------- bool or array_like of bool. - Always return False for non-numeric types. + Always return False for non-numeric types. """ out = eq(obj, 0, False) + isnan(obj, False) if check_all and is_duck_array_type(type(out)): return out.all() return out + + +# TODO: remove this warning after v0.10 +class BehaviorChangeWarning(UserWarning): + pass + + +############## +# try imports +############## + +try: + import babel # noqa: F401 + from babel import units as babel_units + + HAS_BABEL = hasattr(babel_units, "format_unit") +except ImportError: + HAS_BABEL = False + +try: + import uncertainties # noqa: F401 + + HAS_UNCERTAINTIES = True +except ImportError: + HAS_UNCERTAINTIES = False + +try: + import numpy # noqa: F401 + + HAS_NUMPY = True +except ImportError: + HAS_NUMPY = False + +try: + import mip # noqa: F401 + + HAS_MIP = True +except ImportError: + HAS_MIP = False + +try: + import dask # noqa: F401 + + HAS_DASK = True +except ImportError: + HAS_DASK = False + + +############################## +# Imports are handled here +# in order to be able to have +# them as constants +# in mypy configuration. 
+############################## + +if HAS_BABEL: + from babel import Locale + from babel import units as babel_units + + babel_parse = Locale.parse +else: + babel_parse = missing_dependency("Babel") # noqa: F811 # type:ignore + babel_units = babel_parse + Locale = missing_dependency + +if HAS_UNCERTAINTIES: + from uncertainties import UFloat, ufloat + + unp = None +else: + UFloat = ufloat = unp = None + + +if HAS_NUMPY: + import numpy as np + from numpy import datetime64 as np_datetime64 + from numpy import ( + exp, # noqa: F401 + log, # noqa: F401 + ndarray, + ) + + NUMPY_VER = np.__version__ + if HAS_UNCERTAINTIES: + from uncertainties import unumpy as unp + + NUMERIC_TYPES = (Number, Decimal, ndarray, np.number, UFloat) + else: + NUMERIC_TYPES = (Number, Decimal, ndarray, np.number) + + def _to_magnitude(value, force_ndarray=False, force_ndarray_like=False): + if isinstance(value, (dict, bool)) or value is None: + raise TypeError(f"Invalid magnitude for Quantity: {value!r}") + elif isinstance(value, str) and value == "": + raise ValueError("Quantity magnitude cannot be an empty string.") + elif isinstance(value, (list, tuple)): + return np.asarray(value) + elif HAS_UNCERTAINTIES: + from pint.facets.measurement.objects import Measurement + + if isinstance(value, Measurement): + return ufloat(value.value, value.error) + if force_ndarray or ( + force_ndarray_like and not is_duck_array_type(type(value)) + ): + return np.asarray(value) + return value + + def _test_array_function_protocol(): + # Test if the __array_function__ protocol is enabled + try: + + class FakeArray: + def __array_function__(self, *args, **kwargs): + return + + np.concatenate([FakeArray()]) + return True + except ValueError: + return False + + HAS_NUMPY_ARRAY_FUNCTION = _test_array_function_protocol() + + NP_NO_VALUE = np._NoValue + +else: + np = None + + class ndarray: + pass + + class np_datetime64: + pass + + from math import ( + exp, # noqa: F401 + log, # noqa: F401 + ) + + NUMPY_VER = "0" + NUMERIC_TYPES = (Number, Decimal) + HAS_NUMPY_ARRAY_FUNCTION = False + NP_NO_VALUE = None + + def _to_magnitude(value, force_ndarray=False, force_ndarray_like=False): + if force_ndarray or force_ndarray_like: + raise ValueError( + "Cannot force to ndarray or ndarray-like when NumPy is not present." + ) + elif isinstance(value, (dict, bool)) or value is None: + raise TypeError(f"Invalid magnitude for Quantity: {value!r}") + elif isinstance(value, str) and value == "": + raise ValueError("Quantity magnitude cannot be an empty string.") + elif isinstance(value, (list, tuple)): + raise TypeError( + "lists and tuples are valid magnitudes for " + "Quantity only when NumPy is present." 
+ ) + elif HAS_UNCERTAINTIES: + from pint.facets.measurement.objects import Measurement + + if isinstance(value, Measurement): + return ufloat(value.value, value.error) + return value + + +if HAS_MIP: + import mip + + mip_model = mip.model + mip_Model = mip.Model + mip_INF = mip.INF + mip_INTEGER = mip.INTEGER + mip_xsum = mip.xsum + mip_OptimizationStatus = mip.OptimizationStatus +else: + mip_missing = missing_dependency("mip") + mip_model = mip_missing + mip_Model = mip_missing + mip_INF = mip_missing + mip_INTEGER = mip_missing + mip_xsum = mip_missing + mip_OptimizationStatus = mip_missing + + +# Define location of pint.Quantity in NEP-13 type cast hierarchy by defining upcast +# types using guarded imports + +if HAS_DASK: + from dask import array as dask_array + from dask.base import compute, persist, visualize +else: + compute, persist, visualize = None, None, None + dask_array = None + + +# TODO: merge with upcast_type_map + +#: List upcast type names +upcast_type_names = ( + "pint_pandas.pint_array.PintArray", + "xarray.core.dataarray.DataArray", + "xarray.core.dataset.Dataset", + "xarray.core.variable.Variable", + "pandas.core.series.Series", + "pandas.core.frame.DataFrame", + "pandas.Series", + "pandas.DataFrame", + "xarray.core.dataarray.DataArray", +) + +#: Map type name to the actual type (for upcast types). +upcast_type_map: Mapping[str, type | None] = {k: None for k in upcast_type_names} diff --git a/pint/constants_en.txt b/pint/constants_en.txt index 9babc8fa2..2f6fcfb50 100644 --- a/pint/constants_en.txt +++ b/pint/constants_en.txt @@ -46,22 +46,21 @@ wien_wavelength_displacement_law_constant = ℎ * c / (k * wien_x) wien_frequency_displacement_law_constant = wien_u * k / ℎ #### MEASURED CONSTANTS #### -# Recommended CODATA-2018 values +# Recommended CODATA-2022 values # To some extent, what is measured and what is derived is a bit arbitrary. # The choice of measured constants is based on convenience and on available uncertainty. # The uncertainty in the last significant digits is given in parentheses as a comment. newtonian_constant_of_gravitation = 6.67430e-11 m^3/(kg s^2) = _ = gravitational_constant # (15) -rydberg_constant = 1.0973731568160e7 * m^-1 = R_∞ = R_inf # (21) -electron_g_factor = -2.00231930436256 = g_e # (35) -atomic_mass_constant = 1.66053906660e-27 kg = m_u # (50) -electron_mass = 9.1093837015e-31 kg = m_e = atomic_unit_of_mass = a_u_mass # (28) -proton_mass = 1.67262192369e-27 kg = m_p # (51) -neutron_mass = 1.67492749804e-27 kg = m_n # (95) -lattice_spacing_of_Si = 1.920155716e-10 m = d_220 # (32) -K_alpha_Cu_d_220 = 0.80232719 # (22) -K_alpha_Mo_d_220 = 0.36940604 # (19) -K_alpha_W_d_220 = 0.108852175 # (98) +rydberg_constant = 1.0973731568157e7 * m^-1 = R_∞ = R_inf # (12) +electron_g_factor = -2.00231930436092 = g_e # (36) +atomic_mass_constant = 1.66053906892e-27 kg = m_u # (52) +electron_mass = 9.1093837139e-31 kg = m_e = atomic_unit_of_mass = a_u_mass # (28) +proton_mass = 1.67262192595e-27 kg = m_p # (52) +neutron_mass = 1.67492750056e-27 kg = m_n # (85) +x_unit_Cu = 1.00207697e-13 m = Xu_Cu # (28) +x_unit_Mo = 1.00209952e-13 m = Xu_Mo # (53) +angstrom_star = 1.00001495e-10 = Å_star # (90) #### DERIVED CONSTANTS #### diff --git a/pint/context.py b/pint/context.py deleted file mode 100644 index 4839926ea..000000000 --- a/pint/context.py +++ /dev/null @@ -1,20 +0,0 @@ -""" - pint.context - ~~~~~~~~~~~~ - - Functions and classes related to context definitions and application. - - :copyright: 2016 by Pint Authors, see AUTHORS for more details.. 
- :license: BSD, see LICENSE for more details. -""" - -from __future__ import annotations - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - pass - -#: Regex to match the header parts of a context. - -#: Regex to match variable names in an equation. diff --git a/pint/converters.py b/pint/converters.py index 12248a85c..fbe3b5fb0 100644 --- a/pint/converters.py +++ b/pint/converters.py @@ -12,52 +12,54 @@ from dataclasses import dataclass from dataclasses import fields as dc_fields +from typing import Any, ClassVar -from .compat import HAS_NUMPY, exp, log # noqa: F401 +from ._typing import Magnitude +from .compat import HAS_NUMPY, Self, exp, log # noqa: F401 @dataclass(frozen=True) class Converter: """Base class for value converters.""" - _subclasses = [] - _param_names_to_subclass = {} + _subclasses: ClassVar[list[type[Converter]]] = [] + _param_names_to_subclass: ClassVar[dict[frozenset[str], type[Converter]]] = {} @property - def is_multiplicative(self): + def is_multiplicative(self) -> bool: return True @property - def is_logarithmic(self): + def is_logarithmic(self) -> bool: return False - def to_reference(self, value, inplace=False): + def to_reference(self, value: Magnitude, inplace: bool = False) -> Magnitude: return value - def from_reference(self, value, inplace=False): + def from_reference(self, value: Magnitude, inplace: bool = False) -> Magnitude: return value - def __init_subclass__(cls, **kwargs): + def __init_subclass__(cls, **kwargs: Any): # Get constructor parameters super().__init_subclass__(**kwargs) cls._subclasses.append(cls) @classmethod - def get_field_names(cls, new_cls): - return frozenset((p.name for p in dc_fields(new_cls))) + def get_field_names(cls, new_cls: type) -> frozenset[str]: + return frozenset(p.name for p in dc_fields(new_cls)) @classmethod - def preprocess_kwargs(cls, **kwargs): + def preprocess_kwargs(cls, **kwargs: Any) -> dict[str, Any] | None: return None @classmethod - def from_arguments(cls, **kwargs): + def from_arguments(cls, **kwargs: Any) -> Converter: kwk = frozenset(kwargs.keys()) try: new_cls = cls._param_names_to_subclass[kwk] except KeyError: for new_cls in cls._subclasses: - p_names = frozenset((p.name for p in dc_fields(new_cls))) + p_names = frozenset(p.name for p in dc_fields(new_cls)) if p_names == kwk: cls._param_names_to_subclass[kwk] = new_cls break diff --git a/pint/default_en.txt b/pint/default_en.txt index ed4f3d805..8f063333b 100644 --- a/pint/default_en.txt +++ b/pint/default_en.txt @@ -72,7 +72,7 @@ pico- = 1e-12 = p- nano- = 1e-9 = n- # The micro (U+00B5) and Greek mu (U+03BC) are both valid prefixes, # and they often use the same glyph. 
-micro- = 1e-6 = µ- = μ- = u- +micro- = 1e-6 = µ- = μ- = u- = mu- = mc- milli- = 1e-3 = m- centi- = 1e-2 = c- deci- = 1e-1 = d- @@ -150,6 +150,7 @@ byte = 8 * bit = B = octet # Ratios percent = 0.01 = % +permille = 0.001 = ‰ ppm = 1e-6 # Length @@ -161,9 +162,6 @@ astronomical_unit = 149597870700 * meter = au # since Aug 2012 parsec = 1 / tansec * astronomical_unit = pc nautical_mile = 1852 * meter = nmi bohr = hbar / (alpha * m_e * c) = a_0 = a0 = bohr_radius = atomic_unit_of_length = a_u_length -x_unit_Cu = K_alpha_Cu_d_220 * d_220 / 1537.4 = Xu_Cu -x_unit_Mo = K_alpha_Mo_d_220 * d_220 / 707.831 = Xu_Mo -angstrom_star = K_alpha_W_d_220 * d_220 / 0.2090100 = Å_star planck_length = (hbar * gravitational_constant / c ** 3) ** 0.5 # Mass @@ -221,7 +219,7 @@ hectare = 100 * are = ha # Volume [volume] = [length] ** 3 -liter = decimeter ** 3 = l = L = litre +liter = decimeter ** 3 = l = L = ℓ = litre cubic_centimeter = centimeter ** 3 = cc lambda = microliter = λ stere = meter ** 3 @@ -430,6 +428,10 @@ atomic_unit_of_electric_field = e * k_C / a_0 ** 2 = a_u_electric_field # Electric displacement field [electric_displacement_field] = [charge] / [area] +# Reduced electric field +[reduced_electric_field] = [electric_field] * [area] +townsend = 1e-21 * V * m^2 = Td + # Resistance [resistance] = [electric_potential] / [current] ohm = volt / ampere = Ω @@ -446,6 +448,9 @@ conventional_ohm_90 = R_K / R_K90 * ohm = Ω_90 = ohm_90 siemens = ampere / volt = S = mho absiemens = 1e9 * siemens = abS = abmho +# Conductivity +[conductivity] = [conductance]/[length] + # Capacitance [capacitance] = [charge] / [electric_potential] farad = coulomb / volt = F @@ -490,12 +495,28 @@ buckingham = debye * angstrom bohr_magneton = e * hbar / (2 * m_e) = µ_B = mu_B nuclear_magneton = e * hbar / (2 * m_p) = µ_N = mu_N +# Refractive index +[refractive_index] = [] +refractive_index_unit = [] = RIU + +# Absorbance +[absorbance] = [] +absorbance_unit = [] = AU + +# Membrane filtration flux +LMH = L / m**2 / h +[membrane_flux] = [volume] / [area] / [time] + +# Membrane filtration permeability +[membrane_permeability] = [membrane_flux] / [pressure] + # Logaritmic Unit Definition # Unit = scale; logbase; logfactor # x_dB = [logfactor] * log( x_lin / [scale] ) / log( [logbase] ) # Logaritmic Units of dimensionless quantity: [ https://en.wikipedia.org/wiki/Level_(logarithmic_quantity) ] +decibelwatt = watt; logbase: 10; logfactor: 10 = dBW decibelmilliwatt = 1e-3 watt; logbase: 10; logfactor: 10 = dBm decibelmicrowatt = 1e-6 watt; logbase: 10; logfactor: 10 = dBu @@ -668,7 +689,7 @@ neper = 1 ; logbase: 2.71828182845904523536028747135266249775724709369995; logfa @group Textile tex = gram / kilometer = Tt dtex = decitex - denier = gram / (9 * kilometer) = den = Td + denier = gram / (9 * kilometer) = den jute = pound / (14400 * yard) = Tj aberdeen = jute = Ta RKM = gf / tex diff --git a/pint/definitions.py b/pint/definitions.py index 789d9e39a..da884ed95 100644 --- a/pint/definitions.py +++ b/pint/definitions.py @@ -8,8 +8,11 @@ :license: BSD, see LICENSE for more details. """ +from __future__ import annotations + +import flexparser as fp + from . import errors -from ._vendor import flexparser as fp from .delegates import ParserConfig, txt_defparser @@ -17,12 +20,28 @@ class Definition: """This is kept for backwards compatibility""" @classmethod - def from_string(cls, s: str, non_int_type=float): + def from_string(cls, input_string: str, non_int_type: type = float) -> Definition: + """Parse a string into a definition object. 
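+
+        A rough usage sketch (illustrative)::
+
+            definition = Definition.from_string("millimeter = 1e-3 * meter = mm")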
+ + Parameters + ---------- + input_string + Single line string. + non_int_type + Numerical type used for non integer values. + + Raises + ------ + DefinitionSyntaxError + If a syntax error was found. + """ cfg = ParserConfig(non_int_type) parser = txt_defparser.DefParser(cfg, None) - pp = parser.parse_string(s) + pp = parser.parse_string(input_string) for definition in parser.iter_parsed_project(pp): if isinstance(definition, Exception): raise errors.DefinitionSyntaxError(str(definition)) - if not isinstance(definition, (fp.BOS, fp.BOF, fp.BOS)): + if not isinstance(definition, (fp.BOS, fp.BOF)): return definition + + # TODO: What shall we do in this return path. diff --git a/pint/delegates/__init__.py b/pint/delegates/__init__.py index 363ef9cef..dc4699cf9 100644 --- a/pint/delegates/__init__.py +++ b/pint/delegates/__init__.py @@ -7,8 +7,10 @@ :copyright: 2022 by Pint Authors, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ +from __future__ import annotations from . import txt_defparser from .base_defparser import ParserConfig, build_disk_cache_class +from .formatter import Formatter -__all__ = [txt_defparser, ParserConfig, build_disk_cache_class] +__all__ = ["txt_defparser", "ParserConfig", "build_disk_cache_class", "Formatter"] diff --git a/pint/delegates/base_defparser.py b/pint/delegates/base_defparser.py index b2de99998..44170f842 100644 --- a/pint/delegates/base_defparser.py +++ b/pint/delegates/base_defparser.py @@ -14,23 +14,23 @@ import itertools import numbers import pathlib -import typing as ty -from dataclasses import dataclass, field +from dataclasses import dataclass +from typing import Any + +import flexcache as fc +import flexparser as fp from pint import errors from pint.facets.plain.definitions import NotNumeric from pint.util import ParserHelper, UnitsContainer -from .._vendor import flexcache as fc -from .._vendor import flexparser as fp - @dataclass(frozen=True) class ParserConfig: - """Configuration used by the parser.""" + """Configuration used by the parser in Pint.""" #: Indicates the output type of non integer numbers. - non_int_type: ty.Type[numbers.Number] = float + non_int_type: type[numbers.Number] = float def to_scaled_units_container(self, s: str): return ParserHelper.from_string(s, self.non_int_type) @@ -67,37 +67,41 @@ def to_number(self, s: str) -> numbers.Number: return val.scale -@functools.lru_cache() -def build_disk_cache_class(non_int_type: type): +@dataclass(frozen=True) +class PintParsedStatement(fp.ParsedStatement[ParserConfig]): + """A parsed statement for pint, specialized in the actual config.""" + + +@functools.lru_cache +def build_disk_cache_class(chosen_non_int_type: type): """Build disk cache class, taking into account the non_int_type.""" @dataclass(frozen=True) class PintHeader(fc.InvalidateByExist, fc.NameByFields, fc.BasicPythonHeader): - from .. 
import __version__ pint_version: str = __version__ - non_int_type: str = field(default_factory=lambda: non_int_type.__qualname__) + non_int_type: str = chosen_non_int_type.__qualname__ + @dataclass(frozen=True) class PathHeader(fc.NameByFileContent, PintHeader): pass + @dataclass(frozen=True) class ParsedProjecHeader(fc.NameByHashIter, PintHeader): @classmethod - def from_parsed_project(cls, pp: fp.ParsedProject, reader_id): - tmp = [] - for stmt in pp.iter_statements(): - if isinstance(stmt, fp.BOS): - tmp.append( - stmt.content_hash.algorithm_name - + ":" - + stmt.content_hash.hexdigest - ) + def from_parsed_project( + cls, pp: fp.ParsedProject[Any, ParserConfig], reader_id: str + ): + tmp = ( + f"{stmt.content_hash.algorithm_name}:{stmt.content_hash.hexdigest}" + for stmt in pp.iter_statements() + if isinstance(stmt, fp.BOS) + ) return cls(tuple(tmp), reader_id) class PintDiskCache(fc.DiskCache): - _header_classes = { pathlib.Path: PathHeader, str: PathHeader.from_string, diff --git a/pint/delegates/formatter/__init__.py b/pint/delegates/formatter/__init__.py new file mode 100644 index 000000000..5dab6a0f0 --- /dev/null +++ b/pint/delegates/formatter/__init__.py @@ -0,0 +1,26 @@ +""" + pint.delegates.formatter + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Easy to replace and extend string formatting. + + See pint.delegates.formatter.plain.DefaultFormatter for a + description of a formatter. + + :copyright: 2022 by Pint Authors, see AUTHORS for more details. + :license: BSD, see LICENSE for more details. +""" +from __future__ import annotations + +from .full import FullFormatter + + +class Formatter(FullFormatter): + """Default Pint Formatter""" + + pass + + +__all__ = [ + "Formatter", +] diff --git a/pint/delegates/formatter/_compound_unit_helpers.py b/pint/delegates/formatter/_compound_unit_helpers.py new file mode 100644 index 000000000..06a8ac2d3 --- /dev/null +++ b/pint/delegates/formatter/_compound_unit_helpers.py @@ -0,0 +1,328 @@ +""" + pint.delegates.formatter._compound_unit_helpers + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Convenient functions to help organize compount units. + + :copyright: 2022 by Pint Authors, see AUTHORS for more details. + :license: BSD, see LICENSE for more details. +""" + + +from __future__ import annotations + +import functools +import locale +from collections.abc import Callable, Iterable +from functools import partial +from itertools import filterfalse, tee +from typing import ( + TYPE_CHECKING, + Any, + Literal, + TypedDict, + TypeVar, +) + +from ...compat import TypeAlias, babel_parse +from ...util import UnitsContainer + +T = TypeVar("T") +U = TypeVar("U") +V = TypeVar("V") +W = TypeVar("W") + +if TYPE_CHECKING: + from ...compat import Locale, Number + from ...facets.plain import PlainUnit + from ...registry import UnitRegistry + + +class SortKwds(TypedDict): + registry: UnitRegistry + + +SortFunc: TypeAlias = Callable[ + [Iterable[tuple[str, Any, str]], Any], Iterable[tuple[str, Any, str]] +] + + +class BabelKwds(TypedDict): + """Babel related keywords used in formatters.""" + + use_plural: bool + length: Literal["short", "long", "narrow"] | None + locale: Locale | str | None + + +def partition( + predicate: Callable[[T], bool], iterable: Iterable[T] +) -> tuple[filterfalse[T], filter[T]]: + """Partition entries into false entries and true entries. + + If *predicate* is slow, consider wrapping it with functools.lru_cache(). 
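+
+    Sketch of the intended behaviour (illustrative)::
+
+        evens, odds = partition(lambda n: n % 2, range(10))
+        # list(evens) -> [0, 2, 4, 6, 8]; list(odds) -> [1, 3, 5, 7, 9]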
+ """ + # partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9 + t1, t2 = tee(iterable) + return filterfalse(predicate, t1), filter(predicate, t2) + + +def localize_per( + length: Literal["short", "long", "narrow"] = "long", + locale: Locale | str | None = locale.LC_NUMERIC, + default: str | None = None, +) -> str: + """Localized singular and plural form of a unit. + + THIS IS TAKEN FROM BABEL format_unit. But + - No magnitude is returned in the string. + - If the unit is not found, the default is given. + - If the default is None, then the same value is given. + """ + locale = babel_parse(locale) + + patterns = locale._data["compound_unit_patterns"].get("per", None) + if patterns is None: + return default or "{}/{}" + + patterns = patterns.get(length, None) + if patterns is None: + return default or "{}/{}" + + # babel 2.8 + if isinstance(patterns, str): + return patterns + + # babe; 2.15 + return patterns.get("compound", default or "{}/{}") + + +@functools.lru_cache +def localize_unit_name( + measurement_unit: str, + use_plural: bool, + length: Literal["short", "long", "narrow"] = "long", + locale: Locale | str | None = locale.LC_NUMERIC, + default: str | None = None, +) -> str: + """Localized singular and plural form of a unit. + + THIS IS TAKEN FROM BABEL format_unit. But + - No magnitude is returned in the string. + - If the unit is not found, the default is given. + - If the default is None, then the same value is given. + """ + locale = babel_parse(locale) + from babel.units import _find_unit_pattern, get_unit_name + + q_unit = _find_unit_pattern(measurement_unit, locale=locale) + if not q_unit: + return measurement_unit + + unit_patterns = locale._data["unit_patterns"][q_unit].get(length, {}) + + if use_plural: + grammatical_number = "other" + else: + grammatical_number = "one" + + if grammatical_number in unit_patterns: + return unit_patterns[grammatical_number].format("").replace("\xa0", "").strip() + + if default is not None: + return default + + # Fall back to a somewhat bad representation. + # nb: This is marked as no-cover, as the current CLDR seemingly has no way for this to happen. 
+ fallback_name = get_unit_name( + measurement_unit, length=length, locale=locale + ) # pragma: no cover + return f"{fallback_name or measurement_unit}" # pragma: no cover + + +def extract2(element: tuple[str, T, str]) -> tuple[str, T]: + """Extract display name and exponent from a tuple containing display name, exponent and unit name.""" + + return element[:2] + + +def to_name_exponent_name(element: tuple[str, T]) -> tuple[str, T, str]: + """Convert unit name and exponent to unit name as display name, exponent and unit name.""" + + # TODO: write a generic typing + + return element + (element[0],) + + +def to_symbol_exponent_name( + el: tuple[str, T], registry: UnitRegistry +) -> tuple[str, T, str]: + """Convert unit name and exponent to unit symbol as display name, exponent and unit name.""" + return registry._get_symbol(el[0]), el[1], el[0] + + +def localize_display_exponent_name( + element: tuple[str, T, str], + use_plural: bool, + length: Literal["short", "long", "narrow"] = "long", + locale: Locale | str | None = locale.LC_NUMERIC, + default: str | None = None, +) -> tuple[str, T, str]: + """Localize display name in a triplet display name, exponent and unit name.""" + + return ( + localize_unit_name( + element[2], use_plural, length, locale, default or element[0] + ), + element[1], + element[2], + ) + + +##################### +# Sorting functions +##################### + + +def sort_by_unit_name( + items: Iterable[tuple[str, Number, str]], _registry: UnitRegistry | None +) -> Iterable[tuple[str, Number, str]]: + return sorted(items, key=lambda el: el[2]) + + +def sort_by_display_name( + items: Iterable[tuple[str, Number, str]], _registry: UnitRegistry | None +) -> Iterable[tuple[str, Number, str]]: + return sorted(items) + + +def sort_by_dimensionality( + items: Iterable[tuple[str, Number, str]], registry: UnitRegistry | None +) -> Iterable[tuple[str, Number, str]]: + """Sort a list of units by dimensional order (from `registry.formatter.dim_order`). + + Parameters + ---------- + items : tuple + a list of tuples containing (unit names, exponent values). + registry : UnitRegistry | None + the registry to use for looking up the dimensions of each unit. + + Returns + ------- + list + the list of units sorted by most significant dimension first. + + Raises + ------ + KeyError + If unit cannot be found in the registry. + """ + + if registry is None: + return items + + dim_order = registry.formatter.dim_order + + def sort_key(item: tuple[str, Number, str]): + _display_name, _unit_exponent, unit_name = item + cname = registry.get_name(unit_name) + cname_dims = registry.get_dimensionality(cname) or {"[]": None} + for cname_dim in cname_dims: + if cname_dim in dim_order: + return dim_order.index(cname_dim), cname + + raise KeyError(f"Unit {unit_name} (aka {cname}) has no recognized dimensions") + + return sorted(items, key=sort_key) + + +def prepare_compount_unit( + unit: PlainUnit | UnitsContainer | Iterable[tuple[str, T]], + spec: str = "", + sort_func: SortFunc | None = None, + use_plural: bool = True, + length: Literal["short", "long", "narrow"] | None = None, + locale: Locale | str | None = None, + as_ratio: bool = True, + registry: UnitRegistry | None = None, +) -> tuple[Iterable[tuple[str, T]], Iterable[tuple[str, T]]]: + """Format compound unit into unit container given + an spec and locale. 
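+
+    A rough call sketch (illustrative; pairs of unit name and exponent)::
+
+        numerator, denominator = prepare_compount_unit([("meter", 1), ("second", -2)])
+        # once materialized: numerator -> [("meter", 1)], denominator -> [("second", -2)]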
+ + Returns + ------- + iterable of display name, exponent, canonical name + """ + + if isinstance(unit, UnitsContainer): + out = unit.items() + elif hasattr(unit, "_units"): + out = unit._units.items() + else: + out = unit + + # out: unit_name, unit_exponent + + if len(out) == 0: + if "~" in spec: + return ([], []) + else: + return ([("dimensionless", 1)], []) + + if "~" in spec: + if registry is None: + raise ValueError( + f"Can't short format a {type(unit)} without a registry." + " This is usually triggered when formatting a instance" + " of the internal `UnitsContainer`." + ) + _to_symbol_exponent_name = partial(to_symbol_exponent_name, registry=registry) + out = map(_to_symbol_exponent_name, out) + else: + out = map(to_name_exponent_name, out) + + # We keep unit_name because the sort or localizing functions might needed. + # out: display_unit_name, unit_exponent, unit_name + + if as_ratio: + numerator, denominator = partition(lambda el: el[1] < 0, out) + else: + numerator, denominator = out, () + + # numerator: display_unit_name, unit_name, unit_exponent + # denominator: display_unit_name, unit_name, unit_exponent + + if locale is None: + if sort_func is not None: + numerator = sort_func(numerator, registry) + denominator = sort_func(denominator, registry) + + return map(extract2, numerator), map(extract2, denominator) + + if length is None: + length = "short" if "~" in spec else "long" + + mapper = partial( + localize_display_exponent_name, use_plural=False, length=length, locale=locale + ) + + numerator = map(mapper, numerator) + denominator = map(mapper, denominator) + + if sort_func is not None: + numerator = sort_func(numerator, registry) + denominator = sort_func(denominator, registry) + + if use_plural: + if not isinstance(numerator, list): + numerator = list(numerator) + numerator[-1] = localize_display_exponent_name( + numerator[-1], + use_plural, + length=length, + locale=locale, + default=numerator[-1][0], + ) + + return map(extract2, numerator), map(extract2, denominator) diff --git a/pint/delegates/formatter/_format_helpers.py b/pint/delegates/formatter/_format_helpers.py new file mode 100644 index 000000000..8a2f37a59 --- /dev/null +++ b/pint/delegates/formatter/_format_helpers.py @@ -0,0 +1,235 @@ +""" + pint.delegates.formatter._format_helpers + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Convenient functions to help string formatting operations. + + :copyright: 2022 by Pint Authors, see AUTHORS for more details. + :license: BSD, see LICENSE for more details. +""" + + +from __future__ import annotations + +import re +from collections.abc import Callable, Generator, Iterable +from contextlib import contextmanager +from functools import partial +from locale import LC_NUMERIC, getlocale, setlocale +from typing import ( + TYPE_CHECKING, + Any, + TypeVar, +) + +from ...compat import ndarray +from ._spec_helpers import FORMATTER + +try: + from numpy import integer as np_integer +except ImportError: + np_integer = None + +if TYPE_CHECKING: + from ...compat import Locale, Number + +T = TypeVar("T") +U = TypeVar("U") +V = TypeVar("V") +W = TypeVar("W") + +_PRETTY_EXPONENTS = "⁰¹²³⁴⁵⁶⁷⁸⁹" +_JOIN_REG_EXP = re.compile(r"{\d*}") + + +def format_number(value: Any, spec: str = "") -> str: + """Format number + + This function might disapear in the future. + Right now is aiding backwards compatible migration. 
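+
+    Illustrative behaviour (assuming the default "C" numeric locale)::
+
+        format_number(3)           # '3'
+        format_number(0.5, ".2f")  # '0.50'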
+ """ + if isinstance(value, float): + return format(value, spec or ".16n") + + elif isinstance(value, int): + return format(value, spec or "n") + + elif isinstance(value, ndarray) and value.ndim == 0: + if issubclass(value.dtype.type, np_integer): + return format(value, spec or "n") + else: + return format(value, spec or ".16n") + else: + return str(value) + + +def builtin_format(value: Any, spec: str = "") -> str: + """A keyword enabled replacement for builtin format + + format has positional only arguments + and this cannot be partialized + and np requires a callable. + """ + return format(value, spec) + + +@contextmanager +def override_locale( + spec: str, locale: str | Locale | None +) -> Generator[Callable[[Any], str], Any, None]: + """Given a spec a locale, yields a function to format a number. + + IMPORTANT: When the locale is not None, this function uses setlocale + and therefore is not thread safe. + """ + + if locale is None: + # If locale is None, just return the builtin format function. + yield ("{:" + spec + "}").format + else: + # If locale is not None, change it and return the backwards compatible + # format_number. + prev_locale_string = getlocale(LC_NUMERIC) + if isinstance(locale, str): + setlocale(LC_NUMERIC, locale) + else: + setlocale(LC_NUMERIC, str(locale)) + yield partial(format_number, spec=spec) + setlocale(LC_NUMERIC, prev_locale_string) + + +def pretty_fmt_exponent(num: Number) -> str: + """Format an number into a pretty printed exponent.""" + # unicode dot operator (U+22C5) looks like a superscript decimal + ret = f"{num:n}".replace("-", "⁻").replace(".", "\u22C5") + for n in range(10): + ret = ret.replace(str(n), _PRETTY_EXPONENTS[n]) + return ret + + +def join_u(fmt: str, iterable: Iterable[Any]) -> str: + """Join an iterable with the format specified in fmt. + + The format can be specified in two ways: + - PEP3101 format with two replacement fields (eg. '{} * {}') + - The concatenating string (eg. ' * ') + """ + if not iterable: + return "" + if not _JOIN_REG_EXP.search(fmt): + return fmt.join(iterable) + miter = iter(iterable) + first = next(miter) + for val in miter: + ret = fmt.format(first, val) + first = ret + return first + + +def join_mu(joint_fstring: str, mstr: str, ustr: str) -> str: + """Join magnitude and units. + + This avoids that `3 and `1 / m` becomes `3 1 / m` + """ + if ustr == "": + return mstr + if ustr.startswith("1 / "): + return joint_fstring.format(mstr, ustr[2:]) + return joint_fstring.format(mstr, ustr) + + +def join_unc(joint_fstring: str, lpar: str, rpar: str, mstr: str, ustr: str) -> str: + """Join uncertainty magnitude and units. + + Uncertainty magnitudes might require extra parenthesis when joined to units. + - YES: 3 +/- 1 + - NO : 3(1) + - NO : (3 +/ 1)e-9 + + This avoids that `(3 + 1)` and `meter` becomes ((3 +/- 1) meter) + """ + if mstr.startswith(lpar) or mstr.endswith(rpar): + return joint_fstring.format(mstr, ustr) + return joint_fstring.format(lpar + mstr + rpar, ustr) + + +def formatter( + numerator: Iterable[tuple[str, Number]], + denominator: Iterable[tuple[str, Number]], + as_ratio: bool = True, + single_denominator: bool = False, + product_fmt: str = " * ", + division_fmt: str = " / ", + power_fmt: str = "{} ** {}", + parentheses_fmt: str = "({0})", + exp_call: FORMATTER = "{:n}".format, +) -> str: + """Format a list of (name, exponent) pairs. + + Parameters + ---------- + items : list + a list of (name, exponent) pairs. + as_ratio : bool, optional + True to display as ratio, False as negative powers. 
(Default value = True) + single_denominator : bool, optional + all with terms with negative exponents are + collected together. (Default value = False) + product_fmt : str + the format used for multiplication. (Default value = " * ") + division_fmt : str + the format used for division. (Default value = " / ") + power_fmt : str + the format used for exponentiation. (Default value = "{} ** {}") + parentheses_fmt : str + the format used for parenthesis. (Default value = "({0})") + exp_call : callable + (Default value = lambda x: f"{x:n}") + + Returns + ------- + str + the formula as a string. + + """ + + if as_ratio: + fun = lambda x: exp_call(abs(x)) + else: + fun = exp_call + + pos_terms: list[str] = [] + for key, value in numerator: + if value == 1: + pos_terms.append(key) + else: + pos_terms.append(power_fmt.format(key, fun(value))) + + neg_terms: list[str] = [] + for key, value in denominator: + if value == -1 and as_ratio: + neg_terms.append(key) + else: + neg_terms.append(power_fmt.format(key, fun(value))) + + if not pos_terms and not neg_terms: + return "" + + if not as_ratio: + # Show as Product: positive * negative terms ** -1 + return join_u(product_fmt, pos_terms + neg_terms) + + # Show as Ratio: positive terms / negative terms + pos_ret = join_u(product_fmt, pos_terms) or "1" + + if not neg_terms: + return pos_ret + + if single_denominator: + neg_ret = join_u(product_fmt, neg_terms) + if len(neg_terms) > 1: + neg_ret = parentheses_fmt.format(neg_ret) + else: + neg_ret = join_u(division_fmt, neg_terms) + + return join_u(division_fmt, [pos_ret, neg_ret]) diff --git a/pint/delegates/formatter/_spec_helpers.py b/pint/delegates/formatter/_spec_helpers.py new file mode 100644 index 000000000..5f52b5ee0 --- /dev/null +++ b/pint/delegates/formatter/_spec_helpers.py @@ -0,0 +1,147 @@ +""" +pint.delegates.formatter._spec_helpers +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Convenient functions to deal with format specifications. + +:copyright: 2022 by Pint Authors, see AUTHORS for more details. +:license: BSD, see LICENSE for more details. +""" + +from __future__ import annotations + +import functools +import re +import warnings +from collections.abc import Callable +from typing import Any + +FORMATTER = Callable[ + [ + Any, + ], + str, +] + +# Extract just the type from the specification mini-language: see +# http://docs.python.org/2/library/string.html#format-specification-mini-language +# We also add uS for uncertainties. +_BASIC_TYPES = frozenset("bcdeEfFgGnosxX%uS") + +REGISTERED_FORMATTERS: dict[str, Any] = {} + + +def parse_spec(spec: str) -> str: + """Parse and return spec. + + If an unknown item is found, raise a ValueError. + + This function still needs work: + - what happens if two distinct values are found? 
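+
+    Illustrative behaviour (assuming the default formatters are registered)::
+
+        parse_spec("~P")   # 'P'
+        parse_spec(".3f")  # ''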
+ + """ + + result = "" + for ch in reversed(spec): + if ch == "~" or ch in _BASIC_TYPES: + continue + elif ch in list(REGISTERED_FORMATTERS.keys()) + ["~"]: + if result: + raise ValueError("expected ':' after format specifier") + else: + result = ch + elif ch.isalpha(): + raise ValueError("Unknown conversion specified " + ch) + else: + break + return result + + +def extract_custom_flags(spec: str) -> str: + """Return custom flags present in a format specification + + (i.e those not part of Python's formatting mini language) + """ + + if not spec: + return "" + + # sort by length, with longer items first + known_flags = sorted(REGISTERED_FORMATTERS.keys(), key=len, reverse=True) + + flag_re = re.compile("(" + "|".join(known_flags + ["~"]) + ")") + custom_flags = flag_re.findall(spec) + + return "".join(custom_flags) + + +def remove_custom_flags(spec: str) -> str: + """Remove custom flags present in a format specification + + (i.e those not part of Python's formatting mini language) + """ + + for flag in sorted(REGISTERED_FORMATTERS.keys(), key=len, reverse=True) + ["~"]: + if flag: + spec = spec.replace(flag, "") + return spec + + +########## +# This weird way of defining split format +# is the only reasonable way I foudn to use +# lru_cache in a function that might emit warning +# and do it every time. +# TODO: simplify it when there are no warnings. + + +@functools.lru_cache +def _split_format( + spec: str, default: str, separate_format_defaults: bool = True +) -> tuple[str, str, list[str]]: + """Split format specification into magnitude and unit format.""" + mspec = remove_custom_flags(spec) + uspec = extract_custom_flags(spec) + + default_mspec = remove_custom_flags(default) + default_uspec = extract_custom_flags(default) + + warns = [] + if separate_format_defaults in (False, None): + # should we warn always or only if there was no explicit choice? + # Given that we want to eventually remove the flag again, I'd say yes? + if spec and separate_format_defaults is None: + if not uspec and default_uspec: + warns.append( + "The given format spec does not contain a unit formatter." + " Falling back to the builtin defaults, but in the future" + " the unit formatter specified in the `default_format`" + " attribute will be used instead." + ) + if not mspec and default_mspec: + warns.append( + "The given format spec does not contain a magnitude formatter." + " Falling back to the builtin defaults, but in the future" + " the magnitude formatter specified in the `default_format`" + " attribute will be used instead." + ) + elif not spec: + mspec, uspec = default_mspec, default_uspec + else: + mspec = mspec or default_mspec + uspec = uspec or default_uspec + + return mspec, uspec, warns + + +def split_format( + spec: str, default: str, separate_format_defaults: bool = True +) -> tuple[str, str]: + """Split format specification into magnitude and unit format.""" + + mspec, uspec, warns = _split_format(spec, default, separate_format_defaults) + + for warn_msg in warns: + warnings.warn(warn_msg, DeprecationWarning) + + return mspec, uspec diff --git a/pint/delegates/formatter/_to_register.py b/pint/delegates/formatter/_to_register.py new file mode 100644 index 000000000..d808640d6 --- /dev/null +++ b/pint/delegates/formatter/_to_register.py @@ -0,0 +1,132 @@ +""" + pint.delegates.formatter.base_formatter + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + Common class and function for all formatters. + :copyright: 2022 by Pint Authors, see AUTHORS for more details. 
+ :license: BSD, see LICENSE for more details. +""" + +from __future__ import annotations + +from collections.abc import Callable, Iterable +from typing import TYPE_CHECKING, Any + +from ..._typing import Magnitude +from ...compat import Unpack, ndarray, np +from ...util import UnitsContainer +from ._compound_unit_helpers import BabelKwds, prepare_compount_unit +from ._format_helpers import join_mu, override_locale +from ._spec_helpers import REGISTERED_FORMATTERS, split_format +from .plain import BaseFormatter + +if TYPE_CHECKING: + from ...facets.plain import MagnitudeT, PlainQuantity, PlainUnit + from ...registry import UnitRegistry + + +def register_unit_format(name: str): + """register a function as a new format for units + + The registered function must have a signature of: + + .. code:: python + + def new_format(unit, registry, **options): + pass + + Parameters + ---------- + name : str + The name of the new format (to be used in the format mini-language). A error is + raised if the new format would overwrite a existing format. + + Examples + -------- + .. code:: python + + @pint.register_unit_format("custom") + def format_custom(unit, registry, **options): + result = "" # do the formatting + return result + + + ureg = pint.UnitRegistry() + u = ureg.m / ureg.s ** 2 + f"{u:custom}" + """ + + # TODO: kwargs missing in typing + def wrapper(func: Callable[[PlainUnit, UnitRegistry], str]): + if name in REGISTERED_FORMATTERS: + raise ValueError(f"format {name!r} already exists") # or warn instead + + class NewFormatter(BaseFormatter): + spec = name + + def format_magnitude( + self, + magnitude: Magnitude, + mspec: str = "", + **babel_kwds: Unpack[BabelKwds], + ) -> str: + with override_locale( + mspec, babel_kwds.get("locale", None) + ) as format_number: + if isinstance(magnitude, ndarray) and magnitude.ndim > 0: + # Use custom ndarray text formatting--need to handle scalars differently + # since they don't respond to printoptions + with np.printoptions(formatter={"float_kind": format_number}): + mstr = format(magnitude).replace("\n", "") + else: + mstr = format_number(magnitude) + + return mstr + + def format_unit( + self, + unit: PlainUnit | Iterable[tuple[str, Any]], + uspec: str = "", + **babel_kwds: Unpack[BabelKwds], + ) -> str: + numerator, _denominator = prepare_compount_unit( + unit, + uspec, + **babel_kwds, + as_ratio=False, + registry=self._registry, + ) + + if self._registry is None: + units = UnitsContainer(numerator) + else: + units = self._registry.UnitsContainer(numerator) + + return func(units, registry=self._registry) + + def format_quantity( + self, + quantity: PlainQuantity[MagnitudeT], + qspec: str = "", + **babel_kwds: Unpack[BabelKwds], + ) -> str: + registry = self._registry + + if registry is None: + mspec, uspec = split_format(qspec, "", True) + else: + mspec, uspec = split_format( + qspec, + registry.formatter.default_format, + registry.separate_format_defaults, + ) + + joint_fstring = "{} {}" + return join_mu( + joint_fstring, + self.format_magnitude(quantity.magnitude, mspec, **babel_kwds), + self.format_unit(quantity.unit_items(), uspec, **babel_kwds), + ) + + REGISTERED_FORMATTERS[name] = NewFormatter() + + return wrapper diff --git a/pint/delegates/formatter/full.py b/pint/delegates/formatter/full.py new file mode 100644 index 000000000..f017483bf --- /dev/null +++ b/pint/delegates/formatter/full.py @@ -0,0 +1,269 @@ +""" + pint.delegates.formatter.full + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Implements: + - Full: dispatch to other formats, accept defaults. 
+ + :copyright: 2022 by Pint Authors, see AUTHORS for more details. + :license: BSD, see LICENSE for more details. +""" + +from __future__ import annotations + +import locale + +from collections.abc import Iterable +from typing import TYPE_CHECKING, Any, Literal + +from ..._typing import Magnitude +from ...compat import Unpack, babel_parse +from ...util import iterable +from ._compound_unit_helpers import BabelKwds, SortFunc, sort_by_unit_name +from ._to_register import REGISTERED_FORMATTERS +from .html import HTMLFormatter +from .latex import LatexFormatter, SIunitxFormatter +from .plain import ( + BaseFormatter, + CompactFormatter, + DefaultFormatter, + PrettyFormatter, + RawFormatter, +) + +if TYPE_CHECKING: + from ...compat import Locale + from ...facets.measurement import Measurement + from ...facets.plain import ( + MagnitudeT, + PlainQuantity, + PlainUnit, + ) + from ...registry import UnitRegistry + + +class FullFormatter(BaseFormatter): + """A formatter that dispatch to other formatters. + + Has a default format, locale and babel_length + """ + + _formatters: dict[str, Any] = {} + + default_format: str = "" + + # TODO: This can be over-riden by the registry definitions file + dim_order: tuple[str, ...] = ( + "[substance]", + "[mass]", + "[current]", + "[luminosity]", + "[length]", + "[]", + "[time]", + "[temperature]", + ) + + default_sort_func: SortFunc | None = staticmethod(sort_by_unit_name) + + locale: Locale | None = None + + def __init__(self, registry: UnitRegistry | None = None): + super().__init__(registry) + + self._formatters = {} + self._formatters["raw"] = RawFormatter(registry) + self._formatters["D"] = DefaultFormatter(registry) + self._formatters["H"] = HTMLFormatter(registry) + self._formatters["P"] = PrettyFormatter(registry) + self._formatters["Lx"] = SIunitxFormatter(registry) + self._formatters["L"] = LatexFormatter(registry) + self._formatters["C"] = CompactFormatter(registry) + + def set_locale(self, loc: str | None) -> None: + """Change the locale used by default by `format_babel`. + + Parameters + ---------- + loc : str or None + None (do not translate), 'sys' (detect the system locale) or a locale id string. 
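+
+        Example (illustrative, with ``ureg`` a ``UnitRegistry``)::
+
+            ureg.formatter.set_locale("fr_FR")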
+ """ + if isinstance(loc, str): + if loc == "sys": + loc = locale.getdefaultlocale()[0] + + # We call babel parse to fail here and not in the formatting operation + babel_parse(loc) + + self.locale = loc + + def get_formatter(self, spec: str): + if spec == "": + return self._formatters["D"] + for k, v in self._formatters.items(): + if k in spec: + return v + + for k, v in REGISTERED_FORMATTERS.items(): + if k in spec: + orphan_fmt = REGISTERED_FORMATTERS[k] + break + else: + return self._formatters["D"] + + try: + fmt = orphan_fmt.__class__(self._registry) + spec = getattr(fmt, "spec", spec) + self._formatters[spec] = fmt + return fmt + except Exception: + return orphan_fmt + + def format_magnitude( + self, magnitude: Magnitude, mspec: str = "", **babel_kwds: Unpack[BabelKwds] + ) -> str: + mspec = mspec or self.default_format + return self.get_formatter(mspec).format_magnitude( + magnitude, mspec, **babel_kwds + ) + + def format_unit( + self, + unit: PlainUnit | Iterable[tuple[str, Any]], + uspec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + uspec = uspec or self.default_format + sort_func = sort_func or self.default_sort_func + return self.get_formatter(uspec).format_unit( + unit, uspec, sort_func=sort_func, **babel_kwds + ) + + def format_quantity( + self, + quantity: PlainQuantity[MagnitudeT], + spec: str = "", + **babel_kwds: Unpack[BabelKwds], + ) -> str: + spec = spec or self.default_format + # If Compact is selected, do it at the beginning + if "#" in spec: + spec = spec.replace("#", "") + obj = quantity.to_compact() + else: + obj = quantity + + del quantity + + locale = babel_kwds.get("locale", self.locale) + + if locale: + if "use_plural" in babel_kwds: + use_plural = babel_kwds["use_plural"] + else: + use_plural = obj.magnitude > 1 + if iterable(use_plural): + use_plural = True + else: + use_plural = False + + return self.get_formatter(spec).format_quantity( + obj, + spec, + sort_func=self.default_sort_func, + use_plural=use_plural, + length=babel_kwds.get("length", None), + locale=locale, + ) + + def format_measurement( + self, + measurement: Measurement, + meas_spec: str = "", + **babel_kwds: Unpack[BabelKwds], + ) -> str: + meas_spec = meas_spec or self.default_format + # If Compact is selected, do it at the beginning + if "#" in meas_spec: + meas_spec = meas_spec.replace("#", "") + obj = measurement.to_compact() + else: + obj = measurement + + del measurement + + use_plural = obj.magnitude.nominal_value > 1 + if iterable(use_plural): + use_plural = True + + return self.get_formatter(meas_spec).format_measurement( + obj, + meas_spec, + sort_func=self.default_sort_func, + use_plural=babel_kwds.get("use_plural", use_plural), + length=babel_kwds.get("length", None), + locale=babel_kwds.get("locale", self.locale), + ) + + ####################################### + # This is for backwards compatibility + ####################################### + + def format_unit_babel( + self, + unit: PlainUnit | Iterable[tuple[str, Any]], + spec: str = "", + length: Literal["short", "long", "narrow"] | None = None, + locale: Locale | None = None, + ) -> str: + if self.locale is None and locale is None: + raise ValueError( + "format_babel requires a locale argumente if the Formatter locale is not set." 
+ ) + + return self.format_unit( + unit, + spec or self.default_format, + sort_func=self.default_sort_func, + use_plural=False, + length=length, + locale=locale or self.locale, + ) + + def format_quantity_babel( + self, + quantity: PlainQuantity[MagnitudeT], + spec: str = "", + length: Literal["short", "long", "narrow"] | None = None, + locale: Locale | None = None, + ) -> str: + if self.locale is None and locale is None: + raise ValueError( + "format_babel requires a locale argumente if the Formatter locale is not set." + ) + + use_plural = quantity.magnitude > 1 + if iterable(use_plural): + use_plural = True + + return self.format_quantity( + quantity, + spec or self.default_format, + sort_func=self.default_sort_func, + use_plural=use_plural, + length=length, + locale=locale or self.locale, + ) + + +################################################################ +# This allows to format units independently of the registry +# +REGISTERED_FORMATTERS["raw"] = RawFormatter() +REGISTERED_FORMATTERS["D"] = DefaultFormatter() +REGISTERED_FORMATTERS["H"] = HTMLFormatter() +REGISTERED_FORMATTERS["P"] = PrettyFormatter() +REGISTERED_FORMATTERS["Lx"] = SIunitxFormatter() +REGISTERED_FORMATTERS["L"] = LatexFormatter() +REGISTERED_FORMATTERS["C"] = CompactFormatter() diff --git a/pint/delegates/formatter/html.py b/pint/delegates/formatter/html.py new file mode 100644 index 000000000..5201dc724 --- /dev/null +++ b/pint/delegates/formatter/html.py @@ -0,0 +1,190 @@ +""" + pint.delegates.formatter.html + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Implements: + - HTML: suitable for web/jupyter notebook outputs. + + :copyright: 2022 by Pint Authors, see AUTHORS for more details. + :license: BSD, see LICENSE for more details. +""" + +from __future__ import annotations + +import re + +from collections.abc import Iterable +from typing import TYPE_CHECKING, Any + +from ..._typing import Magnitude +from ...compat import Unpack, ndarray, np +from ...util import iterable +from ._compound_unit_helpers import ( + BabelKwds, + SortFunc, + localize_per, + prepare_compount_unit, +) +from ._format_helpers import ( + formatter, + join_mu, + join_unc, + override_locale, +) +from ._spec_helpers import ( + remove_custom_flags, + split_format, +) +from .plain import BaseFormatter + +if TYPE_CHECKING: + from ...facets.measurement import Measurement + from ...facets.plain import MagnitudeT, PlainQuantity, PlainUnit + +_EXP_PATTERN = re.compile(r"([0-9]\.?[0-9]*)e(-?)\+?0*([0-9]+)") + + +class HTMLFormatter(BaseFormatter): + """HTML localizable text formatter.""" + + def format_magnitude( + self, magnitude: Magnitude, mspec: str = "", **babel_kwds: Unpack[BabelKwds] + ) -> str: + with override_locale(mspec, babel_kwds.get("locale", None)) as format_number: + if hasattr(magnitude, "_repr_html_"): + # If magnitude has an HTML repr, nest it within Pint's + mstr = magnitude._repr_html_() # type: ignore + assert isinstance(mstr, str) + else: + if isinstance(magnitude, ndarray): + # Need to override for scalars, which are detected as iterable, + # and don't respond to printoptions. + if magnitude.ndim == 0: + mstr = format_number(magnitude) + else: + with np.printoptions(formatter={"float_kind": format_number}): + mstr = ( + "
" + format(magnitude).replace("\n", "") + "
" + ) + elif not iterable(magnitude): + # Use plain text for scalars + mstr = format_number(magnitude) + else: + # Use monospace font for other array-likes + mstr = ( + "
"
+                        + format_number(magnitude).replace("\n", "
") + + "
" + ) + + m = _EXP_PATTERN.match(mstr) + _exp_formatter = lambda s: f"{s}" + + if m: + exp = int(m.group(2) + m.group(3)) + mstr = _EXP_PATTERN.sub(r"\1×10" + _exp_formatter(exp), mstr) + + return mstr + + def format_unit( + self, + unit: PlainUnit | Iterable[tuple[str, Any]], + uspec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + numerator, denominator = prepare_compount_unit( + unit, + uspec, + sort_func=sort_func, + **babel_kwds, + registry=self._registry, + ) + + if babel_kwds.get("locale", None): + length = babel_kwds.get("length") or ("short" if "~" in uspec else "long") + division_fmt = localize_per(length, babel_kwds.get("locale"), "{}/{}") + else: + division_fmt = "{}/{}" + + return formatter( + numerator, + denominator, + as_ratio=True, + single_denominator=True, + product_fmt=r" ", + division_fmt=division_fmt, + power_fmt=r"{}{}", + parentheses_fmt=r"({})", + ) + + def format_quantity( + self, + quantity: PlainQuantity[MagnitudeT], + qspec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + registry = self._registry + + mspec, uspec = split_format( + qspec, registry.formatter.default_format, registry.separate_format_defaults + ) + + if iterable(quantity.magnitude): + # Use HTML table instead of plain text template for array-likes + joint_fstring = ( + "" + "" + "" + "" + "
Magnitude{}
Units{}
" + ) + else: + joint_fstring = "{} {}" + + return join_mu( + joint_fstring, + self.format_magnitude(quantity.magnitude, mspec, **babel_kwds), + self.format_unit(quantity.unit_items(), uspec, sort_func, **babel_kwds), + ) + + def format_uncertainty( + self, + uncertainty, + unc_spec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + unc_str = format(uncertainty, unc_spec).replace("+/-", " ± ") + + unc_str = re.sub(r"\)e\+0?(\d+)", r")×10\1", unc_str) + unc_str = re.sub(r"\)e-0?(\d+)", r")×10-\1", unc_str) + return unc_str + + def format_measurement( + self, + measurement: Measurement, + meas_spec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + registry = self._registry + + mspec, uspec = split_format( + meas_spec, + registry.formatter.default_format, + registry.separate_format_defaults, + ) + + unc_spec = remove_custom_flags(meas_spec) + + joint_fstring = "{} {}" + + return join_unc( + joint_fstring, + "(", + ")", + self.format_uncertainty(measurement.magnitude, unc_spec, **babel_kwds), + self.format_unit(measurement.units, uspec, sort_func, **babel_kwds), + ) diff --git a/pint/delegates/formatter/latex.py b/pint/delegates/formatter/latex.py new file mode 100644 index 000000000..468a65fa4 --- /dev/null +++ b/pint/delegates/formatter/latex.py @@ -0,0 +1,421 @@ +""" + pint.delegates.formatter.latex + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Implements: + - Latex: uses vainilla latex. + - SIunitx: uses latex siunitx package format. + + :copyright: 2022 by Pint Authors, see AUTHORS for more details. + :license: BSD, see LICENSE for more details. +""" + + +from __future__ import annotations + +import functools +import re +from collections.abc import Iterable +from typing import TYPE_CHECKING, Any + +from ..._typing import Magnitude +from ...compat import Number, Unpack, ndarray +from ._compound_unit_helpers import ( + BabelKwds, + SortFunc, + prepare_compount_unit, +) +from ._format_helpers import ( + FORMATTER, + formatter, + join_mu, + join_unc, + override_locale, +) +from ._spec_helpers import ( + remove_custom_flags, + split_format, +) +from .plain import BaseFormatter + +if TYPE_CHECKING: + from ...facets.measurement import Measurement + from ...facets.plain import MagnitudeT, PlainQuantity, PlainUnit + from ...registry import UnitRegistry + from ...util import ItMatrix + + +def vector_to_latex( + vec: Iterable[Any], fmtfun: FORMATTER | str = "{:.2n}".format +) -> str: + """Format a vector into a latex string.""" + return matrix_to_latex([vec], fmtfun) + + +def matrix_to_latex(matrix: ItMatrix, fmtfun: FORMATTER | str = "{:.2n}".format) -> str: + """Format a matrix into a latex string.""" + + ret: list[str] = [] + + for row in matrix: + ret += [" & ".join(fmtfun(f) for f in row)] + + return r"\begin{pmatrix}%s\end{pmatrix}" % "\\\\ \n".join(ret) + + +def ndarray_to_latex_parts( + ndarr: ndarray, fmtfun: FORMATTER = "{:.2n}".format, dim: tuple[int, ...] = tuple() +) -> list[str]: + """Convert an numpy array into an iterable of elements to be print. + + e.g. + - if the array is 2d, it will return an iterable of rows. + - if the array is 3d, it will return an iterable of matrices. 
+ """ + + if isinstance(fmtfun, str): + fmtfun = fmtfun.format + + if ndarr.ndim == 0: + _ndarr = ndarr.reshape(1) + return [vector_to_latex(_ndarr, fmtfun)] + if ndarr.ndim == 1: + return [vector_to_latex(ndarr, fmtfun)] + if ndarr.ndim == 2: + return [matrix_to_latex(ndarr, fmtfun)] + else: + ret = [] + if ndarr.ndim == 3: + header = ("arr[%s," % ",".join("%d" % d for d in dim)) + "%d,:,:]" + for elno, el in enumerate(ndarr): + ret += [header % elno + " = " + matrix_to_latex(el, fmtfun)] + else: + for elno, el in enumerate(ndarr): + ret += ndarray_to_latex_parts(el, fmtfun, dim + (elno,)) + + return ret + + +def ndarray_to_latex( + ndarr: ndarray, + fmtfun: FORMATTER | str = "{:.2n}".format, + dim: tuple[int, ...] = tuple(), +) -> str: + """Format a numpy array into string.""" + return "\n".join(ndarray_to_latex_parts(ndarr, fmtfun, dim)) + + +def latex_escape(string: str) -> str: + """Prepend characters that have a special meaning in LaTeX with a backslash.""" + return functools.reduce( + lambda s, m: re.sub(m[0], m[1], s), + ( + (r"[\\]", r"\\textbackslash "), + (r"[~]", r"\\textasciitilde "), + (r"[\^]", r"\\textasciicircum "), + (r"([&%$#_{}])", r"\\\1"), + ), + str(string), + ) + + +def siunitx_format_unit( + units: Iterable[tuple[str, Number]], registry: UnitRegistry +) -> str: + """Returns LaTeX code for the unit that can be put into an siunitx command.""" + + def _tothe(power) -> str: + if power == int(power): + if power == 1: + return "" + elif power == 2: + return r"\squared" + elif power == 3: + return r"\cubed" + else: + return rf"\tothe{{{int(power):d}}}" + else: + # limit float powers to 3 decimal places + return rf"\tothe{{{power:.3f}}}".rstrip("0") + + lpos = [] + lneg = [] + # loop through all units in the container + for unit, power in sorted(units): + # remove unit prefix if it exists + # siunitx supports \prefix commands + + lpick = lpos if power >= 0 else lneg + prefix = None + # TODO: fix this to be fore efficient and detect also aliases. 
+ for p in registry._prefixes.values(): + p = str(p.name) + if len(p) > 0 and unit.find(p) == 0: + prefix = p + unit = unit.replace(prefix, "", 1) + + if power < 0: + lpick.append(r"\per") + if prefix is not None: + lpick.append(rf"\{prefix}") + lpick.append(rf"\{unit}") + lpick.append(rf"{_tothe(abs(power))}") + + return "".join(lpos) + "".join(lneg) + + +_EXP_PATTERN = re.compile(r"([0-9]\.?[0-9]*)e(-?)\+?0*([0-9]+)") + + +class LatexFormatter(BaseFormatter): + """Latex localizable text formatter.""" + + def format_magnitude( + self, magnitude: Magnitude, mspec: str = "", **babel_kwds: Unpack[BabelKwds] + ) -> str: + with override_locale(mspec, babel_kwds.get("locale", None)) as format_number: + if isinstance(magnitude, ndarray): + mstr = ndarray_to_latex(magnitude, mspec) + else: + mstr = format_number(magnitude) + + mstr = _EXP_PATTERN.sub(r"\1\\times 10^{\2\3}", mstr) + + return mstr + + def format_unit( + self, + unit: PlainUnit | Iterable[tuple[str, Any]], + uspec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + numerator, denominator = prepare_compount_unit( + unit, + uspec, + sort_func=sort_func, + **babel_kwds, + registry=self._registry, + ) + + numerator = ((rf"\mathrm{{{latex_escape(u)}}}", p) for u, p in numerator) + denominator = ((rf"\mathrm{{{latex_escape(u)}}}", p) for u, p in denominator) + + # Localized latex + # if babel_kwds.get("locale", None): + # length = babel_kwds.get("length") or ("short" if "~" in uspec else "long") + # division_fmt = localize_per(length, babel_kwds.get("locale"), "{}/{}") + # else: + # division_fmt = "{}/{}" + + # division_fmt = r"\frac" + division_fmt.format("[{}]", "[{}]") + + formatted = formatter( + numerator, + denominator, + as_ratio=True, + single_denominator=True, + product_fmt=r" \cdot ", + division_fmt=r"\frac[{}][{}]", + power_fmt="{}^[{}]", + parentheses_fmt=r"\left({}\right)", + ) + + return formatted.replace("[", "{").replace("]", "}") + + def format_quantity( + self, + quantity: PlainQuantity[MagnitudeT], + qspec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + registry = self._registry + + mspec, uspec = split_format( + qspec, registry.formatter.default_format, registry.separate_format_defaults + ) + + joint_fstring = r"{}\ {}" + + return join_mu( + joint_fstring, + self.format_magnitude(quantity.magnitude, mspec, **babel_kwds), + self.format_unit(quantity.unit_items(), uspec, sort_func, **babel_kwds), + ) + + def format_uncertainty( + self, + uncertainty, + unc_spec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + # uncertainties handles everythin related to latex. + unc_str = format(uncertainty, unc_spec) + + if unc_str.startswith(r"\left"): + return unc_str + + return unc_str.replace("(", r"\left(").replace(")", r"\right)") + + def format_measurement( + self, + measurement: Measurement, + meas_spec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + registry = self._registry + + mspec, uspec = split_format( + meas_spec, + registry.formatter.default_format, + registry.separate_format_defaults, + ) + + unc_spec = remove_custom_flags(meas_spec) + + # TODO: ugly. 
uncertainties recognizes L
+        if "L" not in unc_spec:
+            unc_spec += "L"
+
+        joint_fstring = r"{}\ {}"
+
+        return join_unc(
+            joint_fstring,
+            r"\left(",
+            r"\right)",
+            self.format_uncertainty(measurement.magnitude, unc_spec, **babel_kwds),
+            self.format_unit(measurement.units, uspec, sort_func, **babel_kwds),
+        )
+
+
+class SIunitxFormatter(BaseFormatter):
+    """Latex localizable text formatter with siunitx format.
+
+    See: https://ctan.org/pkg/siunitx
+    """
+
+    def format_magnitude(
+        self,
+        magnitude: Magnitude,
+        mspec: str = "",
+        sort_func: SortFunc | None = None,
+        **babel_kwds: Unpack[BabelKwds],
+    ) -> str:
+        with override_locale(mspec, babel_kwds.get("locale", None)) as format_number:
+            if isinstance(magnitude, ndarray):
+                mstr = ndarray_to_latex(magnitude, mspec)
+            else:
+                mstr = format_number(magnitude)
+
+            # TODO: Why is this not needed in siunitx?
+            # mstr = _EXP_PATTERN.sub(r"\1\\times 10^{\2\3}", mstr)
+
+        return mstr
+
+    def format_unit(
+        self,
+        unit: PlainUnit | Iterable[tuple[str, Any]],
+        uspec: str = "",
+        sort_func: SortFunc | None = None,
+        **babel_kwds: Unpack[BabelKwds],
+    ) -> str:
+        registry = self._registry
+        if registry is None:
+            raise ValueError(
+                "Can't format as siunitx without a registry."
+                " This is usually triggered when formatting an instance"
+                ' of the internal `UnitsContainer` with a spec of `"Lx"`'
+                " and might indicate a bug in `pint`."
+            )
+
+        # TODO: not sure if I should call format_compound_unit here.
+        # siunitx_format_unit requires certain specific names?
+        # should unit names be translated?
+        # should unit names be shortened?
+        # units = format_compound_unit(unit, uspec, **babel_kwds)
+
+        try:
+            units = unit._units.items()
+        except Exception:
+            units = unit
+
+        formatted = siunitx_format_unit(units, registry)
+
+        if "~" in uspec:
+            formatted = formatted.replace(r"\percent", r"\%")
+
+        # TODO: is this the right behaviour? Should we return the \si[] when only
+        # the units are returned?
+        return rf"\si[]{{{formatted}}}"
+
+    def format_quantity(
+        self,
+        quantity: PlainQuantity[MagnitudeT],
+        qspec: str = "",
+        sort_func: SortFunc | None = None,
+        **babel_kwds: Unpack[BabelKwds],
+    ) -> str:
+        registry = self._registry
+
+        mspec, uspec = split_format(
+            qspec, registry.formatter.default_format, registry.separate_format_defaults
+        )
+
+        joint_fstring = "{}{}"
+
+        mstr = self.format_magnitude(quantity.magnitude, mspec, **babel_kwds)
+        ustr = self.format_unit(quantity.unit_items(), uspec, sort_func, **babel_kwds)[
+            len(r"\si[]") :
+        ]
+        return r"\SI[]" + join_mu(joint_fstring, "{%s}" % mstr, ustr)
+
+    def format_uncertainty(
+        self,
+        uncertainty,
+        unc_spec: str = "",
+        sort_func: SortFunc | None = None,
+        **babel_kwds: Unpack[BabelKwds],
+    ) -> str:
+        # SIunitx requires space between "+-" (or "\pm") and the nominal value
+        # and uncertainty, and doesn't accept "+/-"
+        # SIunitx doesn't accept parentheses, which uncs uses with
+        # scientific notation ('e' or 'E' and sometimes 'g' or 'G').
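+        # Rough example (assumed uncertainties output, not verified here):
+        # "(1.00+/-0.05)e+03" would become "1.00 +- 0.05 e+03" after the
+        # replacements below.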
+ return ( + format(uncertainty, unc_spec) + .replace("+/-", r" +- ") + .replace("(", "") + .replace(")", " ") + ) + + def format_measurement( + self, + measurement: Measurement, + meas_spec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + registry = self._registry + + mspec, uspec = split_format( + meas_spec, + registry.formatter.default_format, + registry.separate_format_defaults, + ) + + unc_spec = remove_custom_flags(meas_spec) + + joint_fstring = "{}{}" + + return r"\SI" + join_unc( + joint_fstring, + r"", + r"", + "{%s}" + % self.format_uncertainty(measurement.magnitude, unc_spec, **babel_kwds), + self.format_unit(measurement.units, uspec, sort_func, **babel_kwds)[ + len(r"\si[]") : + ], + ) diff --git a/pint/delegates/formatter/plain.py b/pint/delegates/formatter/plain.py new file mode 100644 index 000000000..744cbb402 --- /dev/null +++ b/pint/delegates/formatter/plain.py @@ -0,0 +1,487 @@ +""" + pint.delegates.formatter.plain + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Implements plain text formatters: + - Raw: as simple as it gets (no locale aware, no unit formatter.) + - Default: used when no string spec is given. + - Compact: like default but with less spaces. + - Pretty: pretty printed formatter. + + :copyright: 2022 by Pint Authors, see AUTHORS for more details. + :license: BSD, see LICENSE for more details. +""" + +from __future__ import annotations + +import itertools +import re +from collections.abc import Iterable +from typing import TYPE_CHECKING, Any + +from ..._typing import Magnitude +from ...compat import Unpack, ndarray, np +from ._compound_unit_helpers import ( + BabelKwds, + SortFunc, + localize_per, + prepare_compount_unit, +) +from ._format_helpers import ( + formatter, + join_mu, + join_unc, + override_locale, + pretty_fmt_exponent, +) +from ._spec_helpers import ( + remove_custom_flags, + split_format, +) + +if TYPE_CHECKING: + from ...facets.measurement import Measurement + from ...facets.plain import MagnitudeT, PlainQuantity, PlainUnit + from ...registry import UnitRegistry + + +_EXP_PATTERN = re.compile(r"([0-9]\.?[0-9]*)e(-?)\+?0*([0-9]+)") + + +class BaseFormatter: + def __init__(self, registry: UnitRegistry | None = None): + self._registry = registry + + +class DefaultFormatter(BaseFormatter): + """Simple, localizable plain text formatter. + + A formatter is a class with methods to format into string each of the objects + that appear in pint (magnitude, unit, quantity, uncertainty, measurement) + """ + + def format_magnitude( + self, magnitude: Magnitude, mspec: str = "", **babel_kwds: Unpack[BabelKwds] + ) -> str: + """Format scalar/array into string + given a string formatting specification and locale related arguments. + """ + with override_locale(mspec, babel_kwds.get("locale", None)) as format_number: + if isinstance(magnitude, ndarray) and magnitude.ndim > 0: + # Use custom ndarray text formatting--need to handle scalars differently + # since they don't respond to printoptions + with np.printoptions(formatter={"float_kind": format_number}): + mstr = format(magnitude).replace("\n", "") + else: + mstr = format_number(magnitude) + + return mstr + + def format_unit( + self, + unit: PlainUnit | Iterable[tuple[str, Any]], + uspec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + """Format a unit (can be compound) into string + given a string formatting specification and locale related arguments. 
+ """ + + numerator, denominator = prepare_compount_unit( + unit, + uspec, + sort_func=sort_func, + **babel_kwds, + registry=self._registry, + ) + + if babel_kwds.get("locale", None): + length = babel_kwds.get("length") or ("short" if "~" in uspec else "long") + division_fmt = localize_per(length, babel_kwds.get("locale"), "{} / {}") + else: + division_fmt = "{} / {}" + + return formatter( + numerator, + denominator, + as_ratio=True, + single_denominator=False, + product_fmt="{} * {}", + division_fmt=division_fmt, + power_fmt="{} ** {}", + parentheses_fmt=r"({})", + ) + + def format_quantity( + self, + quantity: PlainQuantity[MagnitudeT], + qspec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + """Format a quantity (magnitude and unit) into string + given a string formatting specification and locale related arguments. + """ + + registry = self._registry + + mspec, uspec = split_format( + qspec, registry.formatter.default_format, registry.separate_format_defaults + ) + + joint_fstring = "{} {}" + return join_mu( + joint_fstring, + self.format_magnitude(quantity.magnitude, mspec, **babel_kwds), + self.format_unit(quantity.unit_items(), uspec, sort_func, **babel_kwds), + ) + + def format_uncertainty( + self, + uncertainty, + unc_spec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + """Format an uncertainty magnitude (nominal value and stdev) into string + given a string formatting specification and locale related arguments. + """ + + return format(uncertainty, unc_spec).replace("+/-", " +/- ") + + def format_measurement( + self, + measurement: Measurement, + meas_spec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + """Format an measurement (uncertainty and units) into string + given a string formatting specification and locale related arguments. + """ + + registry = self._registry + + mspec, uspec = split_format( + meas_spec, + registry.formatter.default_format, + registry.separate_format_defaults, + ) + + unc_spec = remove_custom_flags(meas_spec) + + joint_fstring = "{} {}" + + return join_unc( + joint_fstring, + "(", + ")", + self.format_uncertainty(measurement.magnitude, unc_spec, **babel_kwds), + self.format_unit(measurement.units, uspec, sort_func, **babel_kwds), + ) + + +class CompactFormatter(BaseFormatter): + """Simple, localizable plain text formatter without extra spaces.""" + + def format_magnitude( + self, magnitude: Magnitude, mspec: str = "", **babel_kwds: Unpack[BabelKwds] + ) -> str: + with override_locale(mspec, babel_kwds.get("locale", None)) as format_number: + if isinstance(magnitude, ndarray) and magnitude.ndim > 0: + # Use custom ndarray text formatting--need to handle scalars differently + # since they don't respond to printoptions + with np.printoptions(formatter={"float_kind": format_number}): + mstr = format(magnitude).replace("\n", "") + else: + mstr = format_number(magnitude) + + return mstr + + def format_unit( + self, + unit: PlainUnit | Iterable[tuple[str, Any]], + uspec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + numerator, denominator = prepare_compount_unit( + unit, + uspec, + sort_func=sort_func, + **babel_kwds, + registry=self._registry, + ) + + # Division format in compact formatter is not localized. 
+ division_fmt = "{}/{}" + + return formatter( + numerator, + denominator, + as_ratio=True, + single_denominator=False, + product_fmt="*", # TODO: Should this just be ''? + division_fmt=division_fmt, + power_fmt="{}**{}", + parentheses_fmt=r"({})", + ) + + def format_quantity( + self, + quantity: PlainQuantity[MagnitudeT], + qspec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + registry = self._registry + + mspec, uspec = split_format( + qspec, registry.formatter.default_format, registry.separate_format_defaults + ) + + joint_fstring = "{} {}" + + return join_mu( + joint_fstring, + self.format_magnitude(quantity.magnitude, mspec, **babel_kwds), + self.format_unit(quantity.unit_items(), uspec, sort_func, **babel_kwds), + ) + + def format_uncertainty( + self, + uncertainty, + unc_spec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + return format(uncertainty, unc_spec).replace("+/-", "+/-") + + def format_measurement( + self, + measurement: Measurement, + meas_spec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + registry = self._registry + + mspec, uspec = split_format( + meas_spec, + registry.formatter.default_format, + registry.separate_format_defaults, + ) + + unc_spec = remove_custom_flags(meas_spec) + + joint_fstring = "{} {}" + + return join_unc( + joint_fstring, + "(", + ")", + self.format_uncertainty(measurement.magnitude, unc_spec, **babel_kwds), + self.format_unit(measurement.units, uspec, sort_func, **babel_kwds), + ) + + +class PrettyFormatter(BaseFormatter): + """Pretty printed localizable plain text formatter without extra spaces.""" + + def format_magnitude( + self, magnitude: Magnitude, mspec: str = "", **babel_kwds: Unpack[BabelKwds] + ) -> str: + with override_locale(mspec, babel_kwds.get("locale", None)) as format_number: + if isinstance(magnitude, ndarray) and magnitude.ndim > 0: + # Use custom ndarray text formatting--need to handle scalars differently + # since they don't respond to printoptions + with np.printoptions(formatter={"float_kind": format_number}): + mstr = format(magnitude).replace("\n", "") + else: + mstr = format_number(magnitude) + + m = _EXP_PATTERN.match(mstr) + + if m: + exp = int(m.group(2) + m.group(3)) + mstr = _EXP_PATTERN.sub(r"\1×10" + pretty_fmt_exponent(exp), mstr) + + return mstr + + def format_unit( + self, + unit: PlainUnit | Iterable[tuple[str, Any]], + uspec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + numerator, denominator = prepare_compount_unit( + unit, + uspec, + sort_func=sort_func, + **babel_kwds, + registry=self._registry, + ) + + if babel_kwds.get("locale", None): + length = babel_kwds.get("length") or ("short" if "~" in uspec else "long") + division_fmt = localize_per(length, babel_kwds.get("locale"), "{}/{}") + else: + division_fmt = "{}/{}" + + return formatter( + numerator, + denominator, + as_ratio=True, + single_denominator=False, + product_fmt="·", + division_fmt=division_fmt, + power_fmt="{}{}", + parentheses_fmt="({})", + exp_call=pretty_fmt_exponent, + ) + + def format_quantity( + self, + quantity: PlainQuantity[MagnitudeT], + qspec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + registry = self._registry + + mspec, uspec = split_format( + qspec, registry.formatter.default_format, registry.separate_format_defaults + ) + + joint_fstring = "{} {}" + + return join_mu( + 
joint_fstring, + self.format_magnitude(quantity.magnitude, mspec, **babel_kwds), + self.format_unit(quantity.unit_items(), uspec, sort_func, **babel_kwds), + ) + + def format_uncertainty( + self, + uncertainty, + unc_spec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + return format(uncertainty, unc_spec).replace("±", " ± ") + + def format_measurement( + self, + measurement: Measurement, + meas_spec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + registry = self._registry + + mspec, uspec = split_format( + meas_spec, + registry.formatter.default_format, + registry.separate_format_defaults, + ) + + unc_spec = meas_spec + joint_fstring = "{} {}" + + return join_unc( + joint_fstring, + "(", + ")", + self.format_uncertainty(measurement.magnitude, unc_spec, **babel_kwds), + self.format_unit(measurement.units, uspec, sort_func, **babel_kwds), + ) + + +class RawFormatter(BaseFormatter): + """Very simple non-localizable plain text formatter. + + Ignores all pint custom string formatting specification. + """ + + def format_magnitude( + self, magnitude: Magnitude, mspec: str = "", **babel_kwds: Unpack[BabelKwds] + ) -> str: + return str(magnitude) + + def format_unit( + self, + unit: PlainUnit | Iterable[tuple[str, Any]], + uspec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + numerator, denominator = prepare_compount_unit( + unit, + uspec, + sort_func=sort_func, + **babel_kwds, + registry=self._registry, + ) + + return " * ".join( + k if v == 1 else f"{k} ** {v}" + for k, v in itertools.chain(numerator, denominator) + ) + + def format_quantity( + self, + quantity: PlainQuantity[MagnitudeT], + qspec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + registry = self._registry + + mspec, uspec = split_format( + qspec, registry.formatter.default_format, registry.separate_format_defaults + ) + + joint_fstring = "{} {}" + return join_mu( + joint_fstring, + self.format_magnitude(quantity.magnitude, mspec, **babel_kwds), + self.format_unit(quantity.unit_items(), uspec, sort_func, **babel_kwds), + ) + + def format_uncertainty( + self, + uncertainty, + unc_spec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + return format(uncertainty, unc_spec) + + def format_measurement( + self, + measurement: Measurement, + meas_spec: str = "", + sort_func: SortFunc | None = None, + **babel_kwds: Unpack[BabelKwds], + ) -> str: + registry = self._registry + + mspec, uspec = split_format( + meas_spec, + registry.formatter.default_format, + registry.separate_format_defaults, + ) + + unc_spec = remove_custom_flags(meas_spec) + + joint_fstring = "{} {}" + + return join_unc( + joint_fstring, + "(", + ")", + self.format_uncertainty(measurement.magnitude, unc_spec, **babel_kwds), + self.format_unit(measurement.units, uspec, sort_func, **babel_kwds), + ) diff --git a/pint/delegates/txt_defparser/__init__.py b/pint/delegates/txt_defparser/__init__.py index 5572ca12c..ba0dbbf65 100644 --- a/pint/delegates/txt_defparser/__init__.py +++ b/pint/delegates/txt_defparser/__init__.py @@ -7,8 +7,10 @@ :copyright: 2022 by Pint Authors, see AUTHORS for more details. :license: BSD, see LICENSE for more details. 
""" - +from __future__ import annotations from .defparser import DefParser -__all__ = [DefParser] +__all__ = [ + "DefParser", +] diff --git a/pint/delegates/txt_defparser/block.py b/pint/delegates/txt_defparser/block.py index 20ebcbac2..6e8d18968 100644 --- a/pint/delegates/txt_defparser/block.py +++ b/pint/delegates/txt_defparser/block.py @@ -16,30 +16,39 @@ from __future__ import annotations from dataclasses import dataclass +from typing import Generic, TypeVar -from ..._vendor import flexparser as fp +import flexparser as fp + +from ..base_defparser import ParserConfig, PintParsedStatement @dataclass(frozen=True) -class EndDirectiveBlock(fp.ParsedStatement): +class EndDirectiveBlock(PintParsedStatement): """An EndDirectiveBlock is simply an "@end" statement.""" @classmethod - def from_string(cls, s: str) -> fp.FromString[EndDirectiveBlock]: + def from_string(cls, s: str) -> fp.NullableParsedResult[EndDirectiveBlock]: if s == "@end": return cls() return None +OPST = TypeVar("OPST", bound="PintParsedStatement") +IPST = TypeVar("IPST", bound="PintParsedStatement") + +DefT = TypeVar("DefT") + + @dataclass(frozen=True) -class DirectiveBlock(fp.Block): +class DirectiveBlock( + Generic[DefT, OPST, IPST], fp.Block[OPST, IPST, EndDirectiveBlock, ParserConfig] +): """Directive blocks have beginning statement starting with a @ character. and ending with a "@end" (captured using a EndDirectiveBlock). Subclass this class for convenience. """ - closing: EndDirectiveBlock - - def derive_definition(self): - pass + def derive_definition(self) -> DefT: + ... diff --git a/pint/delegates/txt_defparser/common.py b/pint/delegates/txt_defparser/common.py index 961f111b6..def901d88 100644 --- a/pint/delegates/txt_defparser/common.py +++ b/pint/delegates/txt_defparser/common.py @@ -12,13 +12,14 @@ from __future__ import annotations -from dataclasses import dataclass, field +from dataclasses import dataclass + +import flexparser as fp from ... import errors -from ..._vendor import flexparser as fp +from ..base_defparser import ParserConfig -@dataclass(frozen=True) class DefinitionSyntaxError(errors.DefinitionSyntaxError, fp.ParsingError): """A syntax error was found in a definition. Combines: @@ -28,9 +29,13 @@ class DefinitionSyntaxError(errors.DefinitionSyntaxError, fp.ParsingError): and an extra location attribute in which the filename or reseource is stored. 
""" - location: str = field(init=False, default="") + msg: str - def __str__(self): + def __init__(self, msg: str, location: str = ""): + self.msg = msg + self.location = location + + def __str__(self) -> str: msg = ( self.msg + "\n " + (self.format_position or "") + " " + (self.raw or "") ) @@ -38,21 +43,20 @@ def __str__(self): msg += "\n " + self.location return msg - def set_location(self, value): + def set_location(self, value: str) -> None: super().__setattr__("location", value) @dataclass(frozen=True) -class ImportDefinition(fp.IncludeStatement): - +class ImportDefinition(fp.IncludeStatement[ParserConfig]): value: str @property - def target(self): + def target(self) -> str: return self.value @classmethod - def from_string(cls, s: str) -> fp.FromString[ImportDefinition]: + def from_string(cls, s: str) -> fp.NullableParsedResult[ImportDefinition]: if s.startswith("@import"): return ImportDefinition(s[len("@import") :].strip()) return None diff --git a/pint/delegates/txt_defparser/context.py b/pint/delegates/txt_defparser/context.py index 5c54b4c43..029b60445 100644 --- a/pint/delegates/txt_defparser/context.py +++ b/pint/delegates/txt_defparser/context.py @@ -20,36 +20,37 @@ import re import typing as ty from dataclasses import dataclass -from typing import Dict, Tuple +from typing import Union + +import flexparser as fp -from ..._vendor import flexparser as fp from ...facets.context import definitions -from ..base_defparser import ParserConfig +from ..base_defparser import ParserConfig, PintParsedStatement from . import block, common, plain +# TODO check syntax +T = ty.TypeVar("T", bound="Union[ForwardRelation, BidirectionalRelation]") -@dataclass(frozen=True) -class Relation(definitions.Relation): - @classmethod - def _from_string_and_context_sep( - cls, s: str, config: ParserConfig, separator: str - ) -> fp.FromString[Relation]: - if separator not in s: - return None - if ":" not in s: - return None - rel, eq = s.split(":") +def _from_string_and_context_sep( + cls: type[T], s: str, config: ParserConfig, separator: str +) -> T | None: + if separator not in s: + return None + if ":" not in s: + return None - parts = rel.split(separator) + rel, eq = s.split(":") - src, dst = (config.to_dimension_container(s) for s in parts) + parts = rel.split(separator) - return cls(src, dst, eq.strip()) + src, dst = (config.to_dimension_container(s) for s in parts) + + return cls(src, dst, eq.strip()) @dataclass(frozen=True) -class ForwardRelation(fp.ParsedStatement, definitions.ForwardRelation, Relation): +class ForwardRelation(PintParsedStatement, definitions.ForwardRelation): """A relation connecting a dimension to another via a transformation function. -> : @@ -58,14 +59,12 @@ class ForwardRelation(fp.ParsedStatement, definitions.ForwardRelation, Relation) @classmethod def from_string_and_config( cls, s: str, config: ParserConfig - ) -> fp.FromString[ForwardRelation]: - return super()._from_string_and_context_sep(s, config, "->") + ) -> fp.NullableParsedResult[ForwardRelation]: + return _from_string_and_context_sep(cls, s, config, "->") @dataclass(frozen=True) -class BidirectionalRelation( - fp.ParsedStatement, definitions.BidirectionalRelation, Relation -): +class BidirectionalRelation(PintParsedStatement, definitions.BidirectionalRelation): """A bidirectional relation connecting a dimension to another via a simple transformation function. 
@@ -76,12 +75,12 @@ class BidirectionalRelation( @classmethod def from_string_and_config( cls, s: str, config: ParserConfig - ) -> fp.FromString[BidirectionalRelation]: - return super()._from_string_and_context_sep(s, config, "<->") + ) -> fp.NullableParsedResult[BidirectionalRelation]: + return _from_string_and_context_sep(cls, s, config, "<->") @dataclass(frozen=True) -class BeginContext(fp.ParsedStatement): +class BeginContext(PintParsedStatement): """Being of a context directive. @context[(defaults)] [= ] [= ] @@ -92,13 +91,13 @@ class BeginContext(fp.ParsedStatement): ) name: str - aliases: Tuple[str, ...] - defaults: Dict[str, numbers.Number] + aliases: tuple[str, ...] + defaults: dict[str, numbers.Number] @classmethod def from_string_and_config( cls, s: str, config: ParserConfig - ) -> fp.FromString[BeginContext]: + ) -> fp.NullableParsedResult[BeginContext]: try: r = cls._header_re.search(s) if r is None: @@ -131,7 +130,18 @@ def from_string_and_config( @dataclass(frozen=True) -class ContextDefinition(block.DirectiveBlock): +class ContextDefinition( + block.DirectiveBlock[ + definitions.ContextDefinition, + BeginContext, + ty.Union[ + plain.CommentDefinition, + BidirectionalRelation, + ForwardRelation, + plain.UnitDefinition, + ], + ] +): """Definition of a Context @context[(defaults)] [= ] [= ] @@ -160,37 +170,34 @@ class ContextDefinition(block.DirectiveBlock): @end """ - opening: fp.Single[BeginContext] - body: fp.Multi[ - ty.Union[ - plain.CommentDefinition, - BidirectionalRelation, - ForwardRelation, - plain.UnitDefinition, - ] - ] - - def derive_definition(self): + def derive_definition(self) -> definitions.ContextDefinition: return definitions.ContextDefinition( self.name, self.aliases, self.defaults, self.relations, self.redefinitions ) @property - def name(self): + def name(self) -> str: + assert isinstance(self.opening, BeginContext) return self.opening.name @property - def aliases(self): + def aliases(self) -> tuple[str, ...]: + assert isinstance(self.opening, BeginContext) return self.opening.aliases @property - def defaults(self): + def defaults(self) -> dict[str, numbers.Number]: + assert isinstance(self.opening, BeginContext) return self.opening.defaults @property - def relations(self): - return tuple(r for r in self.body if isinstance(r, Relation)) + def relations(self) -> tuple[BidirectionalRelation | ForwardRelation, ...]: + return tuple( + r + for r in self.body + if isinstance(r, (ForwardRelation, BidirectionalRelation)) + ) @property - def redefinitions(self): + def redefinitions(self) -> tuple[plain.UnitDefinition, ...]: return tuple(r for r in self.body if isinstance(r, plain.UnitDefinition)) diff --git a/pint/delegates/txt_defparser/defaults.py b/pint/delegates/txt_defparser/defaults.py index af6e31f29..669daddb4 100644 --- a/pint/delegates/txt_defparser/defaults.py +++ b/pint/delegates/txt_defparser/defaults.py @@ -16,27 +16,38 @@ import typing as ty from dataclasses import dataclass, fields -from ..._vendor import flexparser as fp +import flexparser as fp + from ...facets.plain import definitions +from ..base_defparser import PintParsedStatement from . import block, plain @dataclass(frozen=True) -class BeginDefaults(fp.ParsedStatement): +class BeginDefaults(PintParsedStatement): """Being of a defaults directive. 
@defaults """ @classmethod - def from_string(cls, s: str) -> fp.FromString[BeginDefaults]: + def from_string(cls, s: str) -> fp.NullableParsedResult[BeginDefaults]: if s.strip() == "@defaults": return cls() return None @dataclass(frozen=True) -class DefaultsDefinition(block.DirectiveBlock): +class DefaultsDefinition( + block.DirectiveBlock[ + definitions.DefaultsDefinition, + BeginDefaults, + ty.Union[ + plain.CommentDefinition, + plain.Equality, + ], + ] +): """Directive to store values. @defaults @@ -46,19 +57,11 @@ class DefaultsDefinition(block.DirectiveBlock): See Equality and Comment for more parsing related information. """ - opening: fp.Single[BeginDefaults] - body: fp.Multi[ - ty.Union[ - plain.CommentDefinition, - plain.Equality, - ] - ] - @property - def _valid_fields(self): + def _valid_fields(self) -> tuple[str, ...]: return tuple(f.name for f in fields(definitions.DefaultsDefinition)) - def derive_definition(self): + def derive_definition(self) -> definitions.DefaultsDefinition: for definition in self.filter_by(plain.Equality): if definition.lhs not in self._valid_fields: raise ValueError( @@ -70,7 +73,7 @@ def derive_definition(self): *tuple(self.get_key(key) for key in self._valid_fields) ) - def get_key(self, key): + def get_key(self, key: str) -> str: for stmt in self.body: if isinstance(stmt, plain.Equality) and stmt.lhs == key: return stmt.rhs diff --git a/pint/delegates/txt_defparser/defparser.py b/pint/delegates/txt_defparser/defparser.py index 6112690e1..8c57ac306 100644 --- a/pint/delegates/txt_defparser/defparser.py +++ b/pint/delegates/txt_defparser/defparser.py @@ -3,15 +3,15 @@ import pathlib import typing as ty -from ..._vendor import flexcache as fc -from ..._vendor import flexparser as fp -from .. import base_defparser -from . import block, common, context, defaults, group, plain, system +import flexcache as fc +import flexparser as fp +from ..base_defparser import ParserConfig +from . import block, common, context, defaults, group, plain, system -class PintRootBlock(fp.RootBlock): - body: fp.Multi[ +class PintRootBlock( + fp.RootBlock[ ty.Union[ plain.CommentDefinition, common.ImportDefinition, @@ -24,15 +24,14 @@ class PintRootBlock(fp.RootBlock): plain.DimensionDefinition, plain.PrefixDefinition, plain.UnitDefinition, - ] + ], + ParserConfig, ] - - -class HashTuple(tuple): +): pass -class _PintParser(fp.Parser): +class _PintParser(fp.Parser[PintRootBlock, ParserConfig]): """Parser for the original Pint definition file, with cache.""" _delimiters = { @@ -45,40 +44,56 @@ class _PintParser(fp.Parser): _root_block_class = PintRootBlock _strip_spaces = True - _diskcache: fc.DiskCache + _diskcache: fc.DiskCache | None - def __init__(self, config: base_defparser.ParserConfig, *args, **kwargs): + def __init__(self, config: ParserConfig, *args: ty.Any, **kwargs: ty.Any): self._diskcache = kwargs.pop("diskcache", None) super().__init__(config, *args, **kwargs) - def parse_file(self, path: pathlib.Path) -> fp.ParsedSource: + def parse_file( + self, path: pathlib.Path + ) -> fp.ParsedSource[PintRootBlock, ParserConfig]: if self._diskcache is None: return super().parse_file(path) - content, basename = self._diskcache.load(path, super().parse_file) + content, _basename = self._diskcache.load(path, super().parse_file) return content class DefParser: - - skip_classes = (fp.BOF, fp.BOR, fp.BOS, fp.EOS, plain.CommentDefinition) - - def __init__(self, default_config, diskcache): + skip_classes: tuple[type, ...] 
= ( + fp.BOF, + fp.BOR, + fp.BOS, + fp.EOS, + plain.CommentDefinition, + ) + + def __init__(self, default_config: ParserConfig, diskcache: fc.DiskCache): self._default_config = default_config self._diskcache = diskcache - def iter_parsed_project(self, parsed_project: fp.ParsedProject): + def iter_parsed_project( + self, parsed_project: fp.ParsedProject[PintRootBlock, ParserConfig] + ) -> ty.Generator[fp.ParsedStatement[ParserConfig], None, None]: last_location = None for stmt in parsed_project.iter_blocks(): - if isinstance(stmt, fp.BOF): - last_location = str(stmt.path) - elif isinstance(stmt, fp.BOR): - last_location = ( - f"[package: {stmt.package}, resource: {stmt.resource_name}]" - ) + if isinstance(stmt, fp.BOS): + if isinstance(stmt, fp.BOF): + last_location = str(stmt.path) + continue + elif isinstance(stmt, fp.BOR): + last_location = ( + f"[package: {stmt.package}, resource: {stmt.resource_name}]" + ) + continue + else: + last_location = "orphan string" + continue if isinstance(stmt, self.skip_classes): continue + assert isinstance(last_location, str) if isinstance(stmt, common.DefinitionSyntaxError): stmt.set_location(last_location) raise stmt @@ -103,18 +118,26 @@ def iter_parsed_project(self, parsed_project: fp.ParsedProject): else: yield stmt - def parse_file(self, filename: pathlib.Path, cfg=None): + def parse_file( + self, filename: pathlib.Path | str, cfg: ParserConfig | None = None + ) -> fp.ParsedProject[PintRootBlock, ParserConfig]: return fp.parse( filename, _PintParser, cfg or self._default_config, diskcache=self._diskcache, + strip_spaces=True, + delimiters=_PintParser._delimiters, ) - def parse_string(self, content: str, cfg=None): + def parse_string( + self, content: str, cfg: ParserConfig | None = None + ) -> fp.ParsedProject[PintRootBlock, ParserConfig]: return fp.parse_bytes( content.encode("utf-8"), _PintParser, cfg or self._default_config, diskcache=self._diskcache, + strip_spaces=True, + delimiters=_PintParser._delimiters, ) diff --git a/pint/delegates/txt_defparser/group.py b/pint/delegates/txt_defparser/group.py index 5be42ac24..120438a83 100644 --- a/pint/delegates/txt_defparser/group.py +++ b/pint/delegates/txt_defparser/group.py @@ -20,13 +20,15 @@ import typing as ty from dataclasses import dataclass -from ..._vendor import flexparser as fp +import flexparser as fp + from ...facets.group import definitions +from ..base_defparser import PintParsedStatement from . import block, common, plain @dataclass(frozen=True) -class BeginGroup(fp.ParsedStatement): +class BeginGroup(PintParsedStatement): """Being of a group directive. @group [using , ..., ] @@ -39,7 +41,7 @@ class BeginGroup(fp.ParsedStatement): using_group_names: ty.Tuple[str, ...] @classmethod - def from_string(cls, s: str) -> fp.FromString[BeginGroup]: + def from_string(cls, s: str) -> fp.NullableParsedResult[BeginGroup]: if not s.startswith("@group"): return None @@ -59,7 +61,16 @@ def from_string(cls, s: str) -> fp.FromString[BeginGroup]: @dataclass(frozen=True) -class GroupDefinition(block.DirectiveBlock): +class GroupDefinition( + block.DirectiveBlock[ + definitions.GroupDefinition, + BeginGroup, + ty.Union[ + plain.CommentDefinition, + plain.UnitDefinition, + ], + ] +): """Definition of a group. 
@group [using , ..., ] @@ -80,27 +91,21 @@ class GroupDefinition(block.DirectiveBlock): """ - opening: fp.Single[BeginGroup] - body: fp.Multi[ - ty.Union[ - plain.CommentDefinition, - plain.UnitDefinition, - ] - ] - - def derive_definition(self): + def derive_definition(self) -> definitions.GroupDefinition: return definitions.GroupDefinition( self.name, self.using_group_names, self.definitions ) @property - def name(self): + def name(self) -> str: + assert isinstance(self.opening, BeginGroup) return self.opening.name @property - def using_group_names(self): + def using_group_names(self) -> tuple[str, ...]: + assert isinstance(self.opening, BeginGroup) return self.opening.using_group_names @property - def definitions(self) -> ty.Tuple[plain.UnitDefinition, ...]: + def definitions(self) -> tuple[plain.UnitDefinition, ...]: return tuple(el for el in self.body if isinstance(el, plain.UnitDefinition)) diff --git a/pint/delegates/txt_defparser/plain.py b/pint/delegates/txt_defparser/plain.py index 428df105f..ac4230bcb 100644 --- a/pint/delegates/txt_defparser/plain.py +++ b/pint/delegates/txt_defparser/plain.py @@ -25,23 +25,24 @@ from dataclasses import dataclass -from ..._vendor import flexparser as fp +import flexparser as fp + from ...converters import Converter from ...facets.plain import definitions from ...util import UnitsContainer -from ..base_defparser import ParserConfig +from ..base_defparser import ParserConfig, PintParsedStatement from . import common @dataclass(frozen=True) -class Equality(fp.ParsedStatement, definitions.Equality): +class Equality(PintParsedStatement, definitions.Equality): """An equality statement contains a left and right hand separated lhs and rhs should be space stripped. """ @classmethod - def from_string(cls, s: str) -> fp.FromString[Equality]: + def from_string(cls, s: str) -> fp.NullableParsedResult[Equality]: if "=" not in s: return None parts = [p.strip() for p in s.split("=")] @@ -53,7 +54,7 @@ def from_string(cls, s: str) -> fp.FromString[Equality]: @dataclass(frozen=True) -class CommentDefinition(fp.ParsedStatement, definitions.CommentDefinition): +class CommentDefinition(PintParsedStatement, definitions.CommentDefinition): """Comments start with a # character. # This is a comment. @@ -63,14 +64,14 @@ class CommentDefinition(fp.ParsedStatement, definitions.CommentDefinition): """ @classmethod - def from_string(cls, s: str) -> fp.FromString[fp.ParsedStatement]: + def from_string(cls, s: str) -> fp.NullableParsedResult[CommentDefinition]: if not s.startswith("#"): return None return cls(s[1:].strip()) @dataclass(frozen=True) -class PrefixDefinition(fp.ParsedStatement, definitions.PrefixDefinition): +class PrefixDefinition(PintParsedStatement, definitions.PrefixDefinition): """Definition of a prefix:: - = [= ] [= ] [ = ] [...] @@ -83,7 +84,7 @@ class PrefixDefinition(fp.ParsedStatement, definitions.PrefixDefinition): @classmethod def from_string_and_config( cls, s: str, config: ParserConfig - ) -> fp.FromString[PrefixDefinition]: + ) -> fp.NullableParsedResult[PrefixDefinition]: if "=" not in s: return None @@ -119,7 +120,7 @@ def from_string_and_config( @dataclass(frozen=True) -class UnitDefinition(fp.ParsedStatement, definitions.UnitDefinition): +class UnitDefinition(PintParsedStatement, definitions.UnitDefinition): """Definition of a unit:: = [= ] [= ] [ = ] [...] 
@@ -140,7 +141,7 @@ class UnitDefinition(fp.ParsedStatement, definitions.UnitDefinition): @classmethod def from_string_and_config( cls, s: str, config: ParserConfig - ) -> fp.FromString[UnitDefinition]: + ) -> fp.NullableParsedResult[UnitDefinition]: if "=" not in s: return None @@ -159,10 +160,10 @@ def from_string_and_config( [converter, modifiers] = value.split(";", 1) try: - modifiers = dict( - (key.strip(), config.to_number(value)) + modifiers = { + key.strip(): config.to_number(value) for key, value in (part.split(":") for part in modifiers.split(";")) - ) + } except definitions.NotNumeric as ex: return common.DefinitionSyntaxError( f"Unit definition ('{name}') must contain only numbers in modifier, not {ex.value}" @@ -194,7 +195,7 @@ def from_string_and_config( @dataclass(frozen=True) -class DimensionDefinition(fp.ParsedStatement, definitions.DimensionDefinition): +class DimensionDefinition(PintParsedStatement, definitions.DimensionDefinition): """Definition of a root dimension:: [dimension name] @@ -205,23 +206,18 @@ class DimensionDefinition(fp.ParsedStatement, definitions.DimensionDefinition): """ @classmethod - def from_string(cls, s: str) -> fp.FromString[DimensionDefinition]: + def from_string(cls, s: str) -> fp.NullableParsedResult[DimensionDefinition]: s = s.strip() if not (s.startswith("[") and "=" not in s): return None - try: - s = definitions.check_dim(s) - except common.DefinitionSyntaxError as ex: - return ex - return cls(s) @dataclass(frozen=True) class DerivedDimensionDefinition( - fp.ParsedStatement, definitions.DerivedDimensionDefinition + PintParsedStatement, definitions.DerivedDimensionDefinition ): """Definition of a derived dimension:: @@ -235,7 +231,7 @@ class DerivedDimensionDefinition( @classmethod def from_string_and_config( cls, s: str, config: ParserConfig - ) -> fp.FromString[DerivedDimensionDefinition]: + ) -> fp.NullableParsedResult[DerivedDimensionDefinition]: if not (s.startswith("[") and "=" in s): return None @@ -261,7 +257,7 @@ def from_string_and_config( @dataclass(frozen=True) -class AliasDefinition(fp.ParsedStatement, definitions.AliasDefinition): +class AliasDefinition(PintParsedStatement, definitions.AliasDefinition): """Additional alias(es) for an already existing unit:: @alias = [ = ] [...] @@ -272,7 +268,7 @@ class AliasDefinition(fp.ParsedStatement, definitions.AliasDefinition): """ @classmethod - def from_string(cls, s: str) -> fp.FromString[AliasDefinition]: + def from_string(cls, s: str) -> fp.NullableParsedResult[AliasDefinition]: if not s.startswith("@alias "): return None name, *aliases = s[len("@alias ") :].split("=") diff --git a/pint/delegates/txt_defparser/system.py b/pint/delegates/txt_defparser/system.py index b21fd7a1d..8c45b0b0b 100644 --- a/pint/delegates/txt_defparser/system.py +++ b/pint/delegates/txt_defparser/system.py @@ -12,15 +12,17 @@ import typing as ty from dataclasses import dataclass -from ..._vendor import flexparser as fp +import flexparser as fp + from ...facets.system import definitions +from ..base_defparser import PintParsedStatement from . 
import block, common, plain @dataclass(frozen=True) -class BaseUnitRule(fp.ParsedStatement, definitions.BaseUnitRule): +class BaseUnitRule(PintParsedStatement, definitions.BaseUnitRule): @classmethod - def from_string(cls, s: str) -> fp.FromString[BaseUnitRule]: + def from_string(cls, s: str) -> fp.NullableParsedResult[BaseUnitRule]: if ":" not in s: return cls(s.strip()) parts = [p.strip() for p in s.split(":")] @@ -32,7 +34,7 @@ def from_string(cls, s: str) -> fp.FromString[BaseUnitRule]: @dataclass(frozen=True) -class BeginSystem(fp.ParsedStatement): +class BeginSystem(PintParsedStatement): """Being of a system directive. @system [using , ..., ] @@ -45,7 +47,7 @@ class BeginSystem(fp.ParsedStatement): using_group_names: ty.Tuple[str, ...] @classmethod - def from_string(cls, s: str) -> fp.FromString[BeginSystem]: + def from_string(cls, s: str) -> fp.NullableParsedResult[BeginSystem]: if not s.startswith("@system"): return None @@ -67,7 +69,13 @@ def from_string(cls, s: str) -> fp.FromString[BeginSystem]: @dataclass(frozen=True) -class SystemDefinition(block.DirectiveBlock): +class SystemDefinition( + block.DirectiveBlock[ + definitions.SystemDefinition, + BeginSystem, + ty.Union[plain.CommentDefinition, BaseUnitRule], + ] +): """Definition of a System: @system [using , ..., ] @@ -89,22 +97,21 @@ class SystemDefinition(block.DirectiveBlock): If the new_unit_name and the old_unit_name, the later and the colon can be omitted. """ - opening: fp.Single[BeginSystem] - body: fp.Multi[ty.Union[plain.CommentDefinition, BaseUnitRule]] - - def derive_definition(self): + def derive_definition(self) -> definitions.SystemDefinition: return definitions.SystemDefinition( self.name, self.using_group_names, self.rules ) @property - def name(self): + def name(self) -> str: + assert isinstance(self.opening, BeginSystem) return self.opening.name @property - def using_group_names(self): + def using_group_names(self) -> tuple[str, ...]: + assert isinstance(self.opening, BeginSystem) return self.opening.using_group_names @property - def rules(self): + def rules(self) -> tuple[BaseUnitRule, ...]: return tuple(el for el in self.body if isinstance(el, BaseUnitRule)) diff --git a/pint/errors.py b/pint/errors.py index 0cd35907d..d1882dbdd 100644 --- a/pint/errors.py +++ b/pint/errors.py @@ -11,10 +11,9 @@ from __future__ import annotations import typing as ty -from dataclasses import dataclass, fields -OFFSET_ERROR_DOCS_HTML = "https://pint.readthedocs.io/en/latest/nonmult.html" -LOG_ERROR_DOCS_HTML = "https://pint.readthedocs.io/en/latest/nonmult.html" +OFFSET_ERROR_DOCS_HTML = "https://pint.readthedocs.io/en/stable/user/nonmult.html" +LOG_ERROR_DOCS_HTML = "https://pint.readthedocs.io/en/stable/user/log_units.html" MSG_INVALID_UNIT_NAME = "is not a valid unit name (must follow Python identifier rules)" MSG_INVALID_UNIT_SYMBOL = "is not a valid unit symbol (must not contain spaces)" @@ -36,18 +35,21 @@ ) -def is_dim(name): +def is_dim(name: str) -> bool: + """Return True if the name is flanked by square brackets `[` and `]`.""" return name[0] == "[" and name[-1] == "]" -def is_valid_prefix_name(name): +def is_valid_prefix_name(name: str) -> bool: + """Return True if the name is a valid python identifier or empty.""" return str.isidentifier(name) or name == "" is_valid_unit_name = is_valid_system_name = is_valid_context_name = str.isidentifier -def _no_space(name): +def _no_space(name: str) -> bool: + """Return False if the name contains a space in any position.""" return name.strip() == name and " " not in name @@ 
-58,7 +60,14 @@ def _no_space(name): ) = is_valid_unit_symbol = is_valid_prefix_symbol = _no_space -def is_valid_dimension_name(name): +def is_valid_dimension_name(name: str) -> bool: + """Return True if the name is consistent with a dimension name. + + - flanked by square brackets. + - empty dimension name or identifier. + """ + + # TODO: shall we check also fro spaces? return name == "[]" or ( len(name) > 1 and is_dim(name) and str.isidentifier(name[1:-1]) ) @@ -67,86 +76,91 @@ def is_valid_dimension_name(name): class WithDefErr: """Mixing class to make some classes more readable.""" - def def_err(self, msg): - return DefinitionError(self.name, self.__class__.__name__, msg) + def def_err(self, msg: str): + return DefinitionError(self.name, self.__class__, msg) -@dataclass(frozen=False) class PintError(Exception): """Base exception for all Pint errors.""" -@dataclass(frozen=False) class DefinitionError(ValueError, PintError): """Raised when a definition is not properly constructed.""" name: str - definition_type: ty.Type + definition_type: type msg: str + def __init__(self, name: str, definition_type: type, msg: str): + self.name = name + self.definition_type = definition_type + self.msg = msg + def __str__(self): msg = f"Cannot define '{self.name}' ({self.definition_type}): {self.msg}" return msg def __reduce__(self): - return self.__class__, tuple(getattr(self, f.name) for f in fields(self)) + return self.__class__, (self.name, self.definition_type, self.msg) -@dataclass(frozen=False) class DefinitionSyntaxError(ValueError, PintError): """Raised when a textual definition has a syntax error.""" msg: str + def __init__(self, msg: str): + self.msg = msg + def __str__(self): return self.msg def __reduce__(self): - return self.__class__, tuple(getattr(self, f.name) for f in fields(self)) + return self.__class__, (self.msg,) -@dataclass(frozen=False) class RedefinitionError(ValueError, PintError): """Raised when a unit or prefix is redefined.""" name: str - definition_type: ty.Type + definition_type: type + + def __init__(self, name: str, definition_type: type): + self.name = name + self.definition_type = definition_type def __str__(self): msg = f"Cannot redefine '{self.name}' ({self.definition_type})" return msg def __reduce__(self): - return self.__class__, tuple(getattr(self, f.name) for f in fields(self)) + return self.__class__, (self.name, self.definition_type) -@dataclass(frozen=False) class UndefinedUnitError(AttributeError, PintError): """Raised when the units are not defined in the unit registry.""" - unit_names: ty.Union[str, ty.Tuple[str, ...]] + unit_names: tuple[str, ...] 
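+    # Editorial note: __init__ below normalizes the argument, so a single unit
+    # name is stored as a one-element tuple and __str__ only deals with tuples.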
+ + def __init__(self, unit_names: str | ty.Iterable[str]): + if isinstance(unit_names, str): + self.unit_names = (unit_names,) + else: + self.unit_names = tuple(unit_names) def __str__(self): - if isinstance(self.unit_names, str): - return f"'{self.unit_names}' is not defined in the unit registry" - if ( - isinstance(self.unit_names, (tuple, list, set)) - and len(self.unit_names) == 1 - ): + if len(self.unit_names) == 1: return f"'{tuple(self.unit_names)[0]}' is not defined in the unit registry" return f"{tuple(self.unit_names)} are not defined in the unit registry" def __reduce__(self): - return self.__class__, tuple(getattr(self, f.name) for f in fields(self)) + return self.__class__, (self.unit_names,) -@dataclass(frozen=False) class PintTypeError(TypeError, PintError): - def __reduce__(self): - return self.__class__, tuple(getattr(self, f.name) for f in fields(self)) + pass -@dataclass(frozen=False) class DimensionalityError(PintTypeError): """Raised when trying to convert between incompatible units.""" @@ -156,6 +170,20 @@ class DimensionalityError(PintTypeError): dim2: str = "" extra_msg: str = "" + def __init__( + self, + units1: ty.Any, + units2: ty.Any, + dim1: str = "", + dim2: str = "", + extra_msg: str = "", + ) -> None: + self.units1 = units1 + self.units2 = units2 + self.dim1 = dim1 + self.dim2 = dim2 + self.extra_msg = extra_msg + def __str__(self): if self.dim1 or self.dim2: dim1 = f" ({self.dim1})" @@ -170,16 +198,25 @@ def __str__(self): ) def __reduce__(self): - return self.__class__, tuple(getattr(self, f.name) for f in fields(self)) + return self.__class__, ( + self.units1, + self.units2, + self.dim1, + self.dim2, + self.extra_msg, + ) -@dataclass(frozen=False) class OffsetUnitCalculusError(PintTypeError): """Raised on ambiguous operations with offset units.""" units1: ty.Any units2: ty.Optional[ty.Any] = None + def __init__(self, units1: ty.Any, units2: ty.Optional[ty.Any] = None) -> None: + self.units1 = units1 + self.units2 = units2 + def yield_units(self): yield self.units1 if self.units2: @@ -195,16 +232,19 @@ def __str__(self): ) def __reduce__(self): - return self.__class__, tuple(getattr(self, f.name) for f in fields(self)) + return self.__class__, (self.units1, self.units2) -@dataclass(frozen=False) class LogarithmicUnitCalculusError(PintTypeError): """Raised on inappropriate operations with logarithmic units.""" units1: ty.Any units2: ty.Optional[ty.Any] = None + def __init__(self, units1: ty.Any, units2: ty.Optional[ty.Any] = None) -> None: + self.units1 = units1 + self.units2 = units2 + def yield_units(self): yield self.units1 if self.units2: @@ -220,19 +260,28 @@ def __str__(self): ) def __reduce__(self): - return self.__class__, tuple(getattr(self, f.name) for f in fields(self)) + return self.__class__, (self.units1, self.units2) -@dataclass(frozen=False) class UnitStrippedWarning(UserWarning, PintError): - msg: str + def __init__(self, msg: str): + self.msg = msg + def __reduce__(self): - return self.__class__, tuple(getattr(self, f.name) for f in fields(self)) + return self.__class__, (self.msg,) -@dataclass(frozen=False) class UnexpectedScaleInContainer(Exception): + pass + + +class UndefinedBehavior(UserWarning, PintError): + msg: str + + def __init__(self, msg: str): + self.msg = msg + def __reduce__(self): - return self.__class__, tuple(getattr(self, f.name) for f in fields(self)) + return self.__class__, (self.msg,) diff --git a/pint/facets/__init__.py b/pint/facets/__init__.py index d669b9ff7..12729289c 100644 --- a/pint/facets/__init__.py +++ 
b/pint/facets/__init__.py @@ -7,7 +7,7 @@ keeping each part small enough to be hackable. Each facet contains one or more of the following modules: - - definitions: classes describing an specific unit related definiton. + - definitions: classes describing specific unit-related definitons. These objects must be immutable, pickable and not reference the registry (e.g. ContextDefinition) - objects: classes and functions that encapsulate behavior (e.g. Context) - registry: implements a subclass of PlainRegistry or class that can be @@ -30,8 +30,8 @@ class NumpyRegistry: - _quantity_class = NumpyQuantity - _unit_class = NumpyUnit + Quantity = NumpyQuantity + Unit = NumpyUnit This tells pint that it should use NumpyQuantity as base class for a quantity class that belongs to a registry that has NumpyRegistry as one of its bases. @@ -41,8 +41,6 @@ class that belongs to a registry that has NumpyRegistry as one of its bases. - plain: basic manipulation and calculation with multiplicative dimensions, units and quantities (e.g. length, time, mass, etc). - - formatting: pretty printing and formatting modifiers. - - nonmultiplicative: manipulation and calculation with offset and log units and quantities (e.g. temperature and decibel). @@ -71,24 +69,38 @@ class that belongs to a registry that has NumpyRegistry as one of its bases. from __future__ import annotations -from .context import ContextRegistry -from .dask import DaskRegistry -from .formatting import FormattingRegistry -from .group import GroupRegistry -from .measurement import MeasurementRegistry -from .nonmultiplicative import NonMultiplicativeRegistry -from .numpy import NumpyRegistry -from .plain import PlainRegistry -from .system import SystemRegistry +from .context import ContextRegistry, GenericContextRegistry +from .dask import DaskRegistry, GenericDaskRegistry +from .group import GenericGroupRegistry, GroupRegistry +from .measurement import GenericMeasurementRegistry, MeasurementRegistry +from .nonmultiplicative import ( + GenericNonMultiplicativeRegistry, + NonMultiplicativeRegistry, +) +from .numpy import GenericNumpyRegistry, NumpyRegistry +from .plain import GenericPlainRegistry, MagnitudeT, PlainRegistry, QuantityT, UnitT +from .system import GenericSystemRegistry, SystemRegistry __all__ = [ - ContextRegistry, - DaskRegistry, - FormattingRegistry, - GroupRegistry, - MeasurementRegistry, - NonMultiplicativeRegistry, - NumpyRegistry, - PlainRegistry, - SystemRegistry, + "ContextRegistry", + "DaskRegistry", + "FormattingRegistry", + "GroupRegistry", + "MeasurementRegistry", + "NonMultiplicativeRegistry", + "NumpyRegistry", + "PlainRegistry", + "SystemRegistry", + "GenericContextRegistry", + "GenericDaskRegistry", + "GenericFormattingRegistry", + "GenericGroupRegistry", + "GenericMeasurementRegistry", + "GenericNonMultiplicativeRegistry", + "GenericNumpyRegistry", + "GenericPlainRegistry", + "GenericSystemRegistry", + "QuantityT", + "UnitT", + "MagnitudeT", ] diff --git a/pint/facets/context/__init__.py b/pint/facets/context/__init__.py index db2843648..28c7b5ced 100644 --- a/pint/facets/context/__init__.py +++ b/pint/facets/context/__init__.py @@ -13,6 +13,6 @@ from .definitions import ContextDefinition from .objects import Context -from .registry import ContextRegistry +from .registry import ContextRegistry, GenericContextRegistry -__all__ = ["ContextDefinition", "Context", "ContextRegistry"] +__all__ = ["ContextDefinition", "Context", "ContextRegistry", "GenericContextRegistry"] diff --git a/pint/facets/context/definitions.py 
b/pint/facets/context/definitions.py index a24977b67..76f84d63d 100644 --- a/pint/facets/context/definitions.py +++ b/pint/facets/context/definitions.py @@ -11,14 +11,15 @@ import itertools import numbers import re +from collections.abc import Callable, Iterable from dataclasses import dataclass -from typing import TYPE_CHECKING, Any, Callable, Dict, Set, Tuple +from typing import TYPE_CHECKING from ... import errors from ..plain import UnitDefinition if TYPE_CHECKING: - from pint import Quantity, UnitsContainer + from ..._typing import Quantity, UnitsContainer @dataclass(frozen=True) @@ -41,12 +42,12 @@ class Relation: # could be used. @property - def variables(self) -> Set[str, ...]: + def variables(self) -> set[str]: """Find all variables names in the equation.""" return set(self._varname_re.findall(self.equation)) @property - def transformation(self) -> Callable[..., Quantity[Any]]: + def transformation(self) -> Callable[..., Quantity]: """Return a transformation callable that uses the registry to parse the transformation equation. """ @@ -55,7 +56,7 @@ def transformation(self) -> Callable[..., Quantity[Any]]: ) @property - def bidirectional(self): + def bidirectional(self) -> bool: raise NotImplementedError @@ -67,7 +68,7 @@ class ForwardRelation(Relation): """ @property - def bidirectional(self): + def bidirectional(self) -> bool: return False @@ -81,7 +82,7 @@ class BidirectionalRelation(Relation): """ @property - def bidirectional(self): + def bidirectional(self) -> bool: return True @@ -92,18 +93,18 @@ class ContextDefinition(errors.WithDefErr): #: name of the context name: str #: other na - aliases: Tuple[str, ...] - defaults: Dict[str, numbers.Number] - relations: Tuple[Relation, ...] - redefinitions: Tuple[UnitDefinition, ...] + aliases: tuple[str, ...] + defaults: dict[str, numbers.Number] + relations: tuple[Relation, ...] + redefinitions: tuple[UnitDefinition, ...] @property - def variables(self) -> Set[str, ...]: + def variables(self) -> set[str]: """Return all variable names in all transformations.""" return set().union(*(r.variables for r in self.relations)) @classmethod - def from_lines(cls, lines, non_int_type): + def from_lines(cls, lines: Iterable[str], non_int_type: type): # TODO: this is to keep it backwards compatible from ...delegates import ParserConfig, txt_defparser diff --git a/pint/facets/context/objects.py b/pint/facets/context/objects.py index 6f2307a26..edd1dfb2a 100644 --- a/pint/facets/context/objects.py +++ b/pint/facets/context/objects.py @@ -10,12 +10,38 @@ import weakref from collections import ChainMap, defaultdict -from typing import Optional, Tuple +from collections.abc import Callable, Iterable +from typing import TYPE_CHECKING, Any, Generic, Protocol -from ...facets.plain import UnitDefinition +from ..._typing import Magnitude +from ...facets.plain import MagnitudeT, PlainQuantity, PlainUnit, UnitDefinition from ...util import UnitsContainer, to_units_container from .definitions import ContextDefinition +if TYPE_CHECKING: + from ...registry import UnitRegistry + + +class Transformation(Protocol): + def __call__( + self, ureg: UnitRegistry, value: PlainQuantity, **kwargs: Any + ) -> PlainQuantity: + ... 
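The `Transformation` protocol above is the shape of the callables stored in `Context.funcs`: they receive the active registry, the quantity to convert, and the context defaults as keyword arguments. The documented context API uses them like this (a small sketch; the context name is arbitrary):

    import pint

    def _length_to_time(ureg, value, **kwargs):
        # [length] -> [time] via the speed of light
        return value / ureg.speed_of_light

    ctx = pint.Context("light_travel")
    ctx.add_transformation("[length]", "[time]", _length_to_time)

    ureg = pint.UnitRegistry()
    ureg.add_context(ctx)
    with ureg.context("light_travel"):
        ureg.Quantity(1, "lightyear").to("year")   # roughly 1 year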
+ + +from ..._typing import UnitLike + +ToBaseFunc = Callable[[UnitsContainer], UnitsContainer] +SrcDst = tuple[UnitsContainer, UnitsContainer] + + +class ContextQuantity(Generic[MagnitudeT], PlainQuantity[MagnitudeT]): + pass + + +class ContextUnit(PlainUnit): + pass + class Context: """A specialized container that defines transformation functions from one @@ -70,32 +96,34 @@ class Context: def __init__( self, - name: Optional[str] = None, - aliases: Tuple[str, ...] = (), - defaults: Optional[dict] = None, + name: str | None = None, + aliases: tuple[str, ...] = tuple(), + defaults: dict[str, Any] | None = None, ) -> None: - - self.name = name - self.aliases = aliases + self.name: str | None = name + self.aliases: tuple[str, ...] = aliases #: Maps (src, dst) -> transformation function - self.funcs = {} + self.funcs: dict[SrcDst, Transformation] = {} #: Maps defaults variable names to values - self.defaults = defaults or {} + self.defaults: dict[str, Any] = defaults or {} # Store Definition objects that are context-specific - self.redefinitions = [] + # TODO: narrow type this if possible. + self.redefinitions: list[Any] = [] # Flag set to True by the Registry the first time the context is enabled self.checked = False #: Maps (src, dst) -> self #: Used as a convenience dictionary to be composed by ContextChain - self.relation_to_context = weakref.WeakValueDictionary() + self.relation_to_context: weakref.WeakValueDictionary[ + SrcDst, Context + ] = weakref.WeakValueDictionary() @classmethod - def from_context(cls, context: Context, **defaults) -> Context: + def from_context(cls, context: Context, **defaults: Any) -> Context: """Creates a new context that shares the funcs dictionary with the original context. The default values are copied from the original context and updated with the new defaults. @@ -124,12 +152,23 @@ def from_context(cls, context: Context, **defaults) -> Context: return context @classmethod - def from_lines(cls, lines, to_base_func=None, non_int_type=float) -> Context: - cd = ContextDefinition.from_lines(lines, non_int_type) - return cls.from_definition(cd, to_base_func) + def from_lines( + cls, + lines: Iterable[str], + to_base_func: ToBaseFunc | None = None, + non_int_type: type = float, + ) -> Context: + context_definition = ContextDefinition.from_lines(lines, non_int_type) + + if context_definition is None: + raise ValueError(f"Could not define Context from from {lines}") + + return cls.from_definition(context_definition, to_base_func) @classmethod - def from_definition(cls, cd: ContextDefinition, to_base_func=None) -> Context: + def from_definition( + cls, cd: ContextDefinition, to_base_func: ToBaseFunc | None = None + ) -> Context: ctx = cls(cd.name, cd.aliases, cd.defaults) for definition in cd.redefinitions: @@ -137,6 +176,7 @@ def from_definition(cls, cd: ContextDefinition, to_base_func=None) -> Context: for relation in cd.relations: try: + # TODO: check to_base_func. Is it a good API idea? 
if to_base_func: src = to_base_func(relation.src) dst = to_base_func(relation.dst) @@ -152,14 +192,16 @@ def from_definition(cls, cd: ContextDefinition, to_base_func=None) -> Context: return ctx - def add_transformation(self, src, dst, func) -> None: + def add_transformation( + self, src: UnitLike, dst: UnitLike, func: Transformation + ) -> None: """Add a transformation function to the context.""" _key = self.__keytransform__(src, dst) self.funcs[_key] = func self.relation_to_context[_key] = self - def remove_transformation(self, src, dst) -> None: + def remove_transformation(self, src: UnitLike, dst: UnitLike) -> None: """Add a transformation function to the context.""" _key = self.__keytransform__(src, dst) @@ -167,14 +209,17 @@ def remove_transformation(self, src, dst) -> None: del self.relation_to_context[_key] @staticmethod - def __keytransform__(src, dst) -> Tuple[UnitsContainer, UnitsContainer]: + def __keytransform__(src: UnitLike, dst: UnitLike) -> SrcDst: return to_units_container(src), to_units_container(dst) - def transform(self, src, dst, registry, value): + def transform( + self, src: UnitLike, dst: UnitLike, registry: Any, value: Magnitude + ) -> Magnitude: """Transform a value.""" _key = self.__keytransform__(src, dst) - return self.funcs[_key](registry, value, **self.defaults) + func = self.funcs[_key] + return func(registry, value, **self.defaults) def redefine(self, definition: str) -> None: """Override the definition of a unit in the registry. @@ -200,7 +245,13 @@ def _redefine(self, definition: UnitDefinition): def hashable( self, - ) -> Tuple[Optional[str], Tuple[str, ...], frozenset, frozenset, tuple]: + ) -> tuple[ + str | None, + tuple[str, ...], + frozenset[tuple[SrcDst, int]], + frozenset[tuple[str, Any]], + tuple[Any, ...], + ]: """Generate a unique hashable and comparable representation of self, which can be used as a key in a dict. This class cannot define ``__hash__`` because it is mutable, and the Python interpreter does cache the output of ``__hash__``. @@ -218,18 +269,18 @@ def hashable( ) -class ContextChain(ChainMap): +class ContextChain(ChainMap[SrcDst, Context]): """A specialized ChainMap for contexts that simplifies finding rules to transform from one dimension to another. """ def __init__(self): super().__init__() - self.contexts = [] + self.contexts: list[Context] = [] self.maps.clear() # Remove default empty map - self._graph = None + self._graph: dict[SrcDst, set[UnitsContainer]] | None = None - def insert_contexts(self, *contexts): + def insert_contexts(self, *contexts: Context): """Insert one or more contexts in reversed order the chained map. (A rule in last context will take precedence) @@ -241,7 +292,7 @@ def insert_contexts(self, *contexts): self.maps = [ctx.relation_to_context for ctx in reversed(contexts)] + self.maps self._graph = None - def remove_contexts(self, n: int = None): + def remove_contexts(self, n: int | None = None): """Remove the last n inserted contexts from the chain. Parameters @@ -255,7 +306,7 @@ def remove_contexts(self, n: int = None): self._graph = None @property - def defaults(self): + def defaults(self) -> dict[str, Any]: for ctx in self.values(): return ctx.defaults return {} @@ -269,13 +320,16 @@ def graph(self): self._graph[fr_].add(to_) return self._graph - def transform(self, src, dst, registry, value): + # TODO: type registry + def transform( + self, src: UnitsContainer, dst: UnitsContainer, registry: Any, value: Magnitude + ): """Transform the value, finding the rule in the chained context. 
(A rule in last context will take precedence) """ return self[(src, dst)].transform(src, dst, registry, value) - def hashable(self): + def hashable(self) -> tuple[Any, ...]: """Generate a unique hashable and comparable representation of self, which can be used as a key in a dict. This class cannot define ``__hash__`` because it is mutable, and the Python interpreter does cache the output of ``__hash__``. diff --git a/pint/facets/context/registry.py b/pint/facets/context/registry.py index 2b5629937..c91f289b8 100644 --- a/pint/facets/context/registry.py +++ b/pint/facets/context/registry.py @@ -10,15 +10,17 @@ import functools from collections import ChainMap +from collections.abc import Callable, Generator from contextlib import contextmanager -from typing import Any, Callable, ContextManager, Dict, Union +from typing import Any, Generic -from ..._typing import F +from ..._typing import F, Magnitude +from ...compat import TypeAlias from ...errors import UndefinedUnitError -from ...util import find_connected_nodes, find_shortest_path, logger -from ..plain import PlainRegistry, UnitDefinition +from ...util import UnitsContainer, find_connected_nodes, find_shortest_path, logger +from ..plain import GenericPlainRegistry, QuantityT, UnitDefinition, UnitT +from . import objects from .definitions import ContextDefinition -from .objects import Context, ContextChain # TODO: Put back annotation when possible # registry_cache: "RegistryCache" @@ -34,9 +36,12 @@ def __init__(self, registry_cache) -> None: self.root_units = {} self.dimensionality = registry_cache.dimensionality self.parse_unit = registry_cache.parse_unit + self.conversion_factor = {} -class ContextRegistry(PlainRegistry): +class GenericContextRegistry( + Generic[QuantityT, UnitT], GenericPlainRegistry[QuantityT, UnitT] +): """Handle of Contexts. Conversion between units with different dimensions according @@ -50,13 +55,13 @@ class ContextRegistry(PlainRegistry): - Parse @context directive. """ - Context = Context + Context: type[objects.Context] = objects.Context def __init__(self, **kwargs: Any) -> None: # Map context name (string) or abbreviation to context. - self._contexts: Dict[str, Context] = {} + self._contexts: dict[str, objects.Context] = {} # Stores active contexts. - self._active_ctx = ContextChain() + self._active_ctx = objects.ContextChain() # Map context chain to cache self._caches = {} # Map context chain to units override @@ -65,13 +70,13 @@ def __init__(self, **kwargs: Any) -> None: super().__init__(**kwargs) # Allow contexts to add override layers to the units - self._units = ChainMap(self._units) + self._units: ChainMap[str, UnitDefinition] = ChainMap(self._units) def _register_definition_adders(self) -> None: super()._register_definition_adders() self._register_adder(ContextDefinition, self.add_context) - def add_context(self, context: Union[Context, ContextDefinition]) -> None: + def add_context(self, context: objects.Context | ContextDefinition) -> None: """Add a context object to the registry. The context will be accessible by its name and aliases. @@ -80,7 +85,7 @@ def add_context(self, context: Union[Context, ContextDefinition]) -> None: see :meth:`enable_contexts`. 
""" if isinstance(context, ContextDefinition): - context = Context.from_definition(context, self.get_dimensionality) + context = objects.Context.from_definition(context, self.get_dimensionality) if not context.name: raise ValueError("Can't add unnamed context to registry") @@ -97,7 +102,7 @@ def add_context(self, context: Union[Context, ContextDefinition]) -> None: ) self._contexts[alias] = context - def remove_context(self, name_or_alias: str) -> Context: + def remove_context(self, name_or_alias: str) -> objects.Context: """Remove a context from the registry and return it. Notice that this methods will not disable the context; @@ -194,7 +199,7 @@ def _redefine(self, definition: UnitDefinition) -> None: self.define(definition) def enable_contexts( - self, *names_or_contexts: Union[str, Context], **kwargs + self, *names_or_contexts: str | objects.Context, **kwargs: Any ) -> None: """Enable contexts provided by name or by object. @@ -235,13 +240,13 @@ def enable_contexts( ctx.checked = True # and create a new one with the new defaults. - contexts = tuple(Context.from_context(ctx, **kwargs) for ctx in ctxs) + contexts = tuple(objects.Context.from_context(ctx, **kwargs) for ctx in ctxs) # Finally we add them to the active context. self._active_ctx.insert_contexts(*contexts) self._switch_context_cache_and_units() - def disable_contexts(self, n: int = None) -> None: + def disable_contexts(self, n: int | None = None) -> None: """Disable the last n enabled contexts. Parameters @@ -253,7 +258,9 @@ def disable_contexts(self, n: int = None) -> None: self._switch_context_cache_and_units() @contextmanager - def context(self, *names, **kwargs) -> ContextManager[Context]: + def context( + self: GenericContextRegistry[QuantityT, UnitT], *names: str, **kwargs: Any + ) -> Generator[GenericContextRegistry[QuantityT, UnitT]]: """Used as a context manager, this function enables to activate a context which is removed after usage. @@ -309,7 +316,7 @@ def context(self, *names, **kwargs) -> ContextManager[Context]: # the added contexts are removed from the active one. self.disable_contexts(len(names)) - def with_context(self, name, **kwargs) -> Callable[[F], F]: + def with_context(self, name: str, **kwargs: Any) -> Callable[[F], F]: """Decorator to wrap a function call in a Pint context. Use it to ensure that a certain context is active when @@ -351,7 +358,13 @@ def wrapper(*values, **wrapper_kwargs): return decorator - def _convert(self, value, src, dst, inplace=False): + def _convert( + self, + value: Magnitude, + src: UnitsContainer, + dst: UnitsContainer, + inplace: bool = False, + ) -> Magnitude: """Convert value from some source to destination units. In addition to what is done by the PlainRegistry, @@ -378,7 +391,6 @@ def _convert(self, value, src, dst, inplace=False): # destination dimensionality. If it exists, we transform the source value # by applying sequentially each transformation of the path. 
if self._active_ctx: - src_dim = self._get_dimensionality(src) dst_dim = self._get_dimensionality(dst) @@ -392,7 +404,9 @@ def _convert(self, value, src, dst, inplace=False): return super()._convert(value, src, dst, inplace) - def _get_compatible_units(self, input_units, group_or_system): + def _get_compatible_units( + self, input_units: UnitsContainer, group_or_system: str | None = None + ): src_dim = self._get_dimensionality(input_units) ret = super()._get_compatible_units(input_units, group_or_system) @@ -405,3 +419,10 @@ def _get_compatible_units(self, input_units, group_or_system): ret |= self._cache.dimensional_equivalents[node] return ret + + +class ContextRegistry( + GenericContextRegistry[objects.ContextQuantity[Any], objects.ContextUnit] +): + Quantity: TypeAlias = objects.ContextQuantity[Any] + Unit: TypeAlias = objects.ContextUnit diff --git a/pint/facets/dask/__init__.py b/pint/facets/dask/__init__.py index f99e8a2fd..c3133bc31 100644 --- a/pint/facets/dask/__init__.py +++ b/pint/facets/dask/__init__.py @@ -12,9 +12,17 @@ from __future__ import annotations import functools +from typing import Any, Generic -from ...compat import compute, dask_array, persist, visualize -from ..plain import PlainRegistry +from ...compat import TypeAlias, compute, dask_array, persist, visualize +from ..plain import ( + GenericPlainRegistry, + MagnitudeT, + PlainQuantity, + PlainUnit, + QuantityT, + UnitT, +) def check_dask_array(f): @@ -31,14 +39,13 @@ def wrapper(self, *args, **kwargs): return wrapper -class DaskQuantity: - +class DaskQuantity(Generic[MagnitudeT], PlainQuantity[MagnitudeT]): # Dask.array.Array ducking def __dask_graph__(self): if isinstance(self._magnitude, dask_array.Array): return self._magnitude.__dask_graph__() - else: - return None + + return None def __dask_keys__(self): return self._magnitude.__dask_keys__() @@ -46,10 +53,7 @@ def __dask_keys__(self): def __dask_tokenize__(self): from dask.base import tokenize - from pint import UnitRegistry - - # TODO: Check if this is the right class as first argument - return (UnitRegistry.Quantity, tokenize(self._magnitude), self.units) + return (type(self), tokenize(self._magnitude), self.units) @property def __dask_optimize__(self): @@ -67,14 +71,9 @@ def __dask_postpersist__(self): func, args = self._magnitude.__dask_postpersist__() return self._dask_finalize, (func, args, self.units) - @staticmethod - def _dask_finalize(results, func, args, units): + def _dask_finalize(self, results, func, args, units): values = func(results, *args) - - from pint import Quantity - - # TODO: Check if this is the right class as first argument - return Quantity(values, units) + return type(self)(values, units) @check_dask_array def compute(self, **kwargs): @@ -128,6 +127,16 @@ def visualize(self, **kwargs): visualize(self, **kwargs) -class DaskRegistry(PlainRegistry): +class DaskUnit(PlainUnit): + pass + + +class GenericDaskRegistry( + Generic[QuantityT, UnitT], GenericPlainRegistry[QuantityT, UnitT] +): + pass + - _quantity_class = DaskQuantity +class DaskRegistry(GenericDaskRegistry[DaskQuantity[Any], DaskUnit]): + Quantity: TypeAlias = DaskQuantity[Any] + Unit: TypeAlias = DaskUnit diff --git a/pint/facets/formatting/__init__.py b/pint/facets/formatting/__init__.py deleted file mode 100644 index e3f43816e..000000000 --- a/pint/facets/formatting/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -""" - pint.facets.formatting - ~~~~~~~~~~~~~~~~~~~~~~ - - Adds pint the capability to format quantities and units into string. 
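For the Dask facet above, tokenizing with `type(self)` and finalizing through `type(self)(values, units)` means a lazy quantity round-trips through the usual dask workflow and comes back as a quantity of the same class. A minimal sketch (requires `dask[array]`):

    import dask.array as da
    import pint

    ureg = pint.UnitRegistry()
    lazy = ureg.Quantity(da.ones((1000, 1000), chunks=(100, 100)), "meter")
    eager = (2 * lazy).compute()   # concrete ndarray-backed Quantity, still in meters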
- - :copyright: 2022 by Pint Authors, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" - -from __future__ import annotations - -from .objects import FormattingQuantity, FormattingUnit -from .registry import FormattingRegistry - -__all__ = ["FormattingQuantity", "FormattingUnit", "FormattingRegistry"] diff --git a/pint/facets/formatting/objects.py b/pint/facets/formatting/objects.py deleted file mode 100644 index 7435c3725..000000000 --- a/pint/facets/formatting/objects.py +++ /dev/null @@ -1,226 +0,0 @@ -""" - pint.facets.formatting.objects - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - :copyright: 2022 by Pint Authors, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" - -from __future__ import annotations - -import re -from typing import Any - -from ...compat import babel_parse, ndarray, np -from ...formatting import ( - _pretty_fmt_exponent, - extract_custom_flags, - format_unit, - ndarray_to_latex, - remove_custom_flags, - siunitx_format_unit, - split_format, -) -from ...util import UnitsContainer, iterable - - -class FormattingQuantity: - - _exp_pattern = re.compile(r"([0-9]\.?[0-9]*)e(-?)\+?0*([0-9]+)") - - def __format__(self, spec: str) -> str: - if self._REGISTRY.fmt_locale is not None: - return self.format_babel(spec) - - mspec, uspec = split_format( - spec, self.default_format, self._REGISTRY.separate_format_defaults - ) - - # If Compact is selected, do it at the beginning - if "#" in spec: - # TODO: don't replace '#' - mspec = mspec.replace("#", "") - uspec = uspec.replace("#", "") - obj = self.to_compact() - else: - obj = self - - if "L" in uspec: - allf = plain_allf = r"{}\ {}" - elif "H" in uspec: - allf = plain_allf = "{} {}" - if iterable(obj.magnitude): - # Use HTML table instead of plain text template for array-likes - allf = ( - "" - "" - "" - "" - "
<table><tbody>" - "<tr><th>Magnitude</th>" - "<td style='text-align:left;'>{}</td></tr>" - "<tr><th>Units</th>" - "<td style='text-align:left;'>{}</td></tr>" - "</tbody></table>
" - ) - else: - allf = plain_allf = "{} {}" - - if "Lx" in uspec: - # the LaTeX siunitx code - # TODO: add support for extracting options - opts = "" - ustr = siunitx_format_unit(obj.units._units, obj._REGISTRY) - allf = r"\SI[%s]{{{}}}{{{}}}" % opts - else: - # Hand off to unit formatting - # TODO: only use `uspec` after completing the deprecation cycle - ustr = format(obj.units, mspec + uspec) - - # mspec = remove_custom_flags(spec) - if "H" in uspec: - # HTML formatting - if hasattr(obj.magnitude, "_repr_html_"): - # If magnitude has an HTML repr, nest it within Pint's - mstr = obj.magnitude._repr_html_() - else: - if isinstance(self.magnitude, ndarray): - # Use custom ndarray text formatting with monospace font - formatter = "{{:{}}}".format(mspec) - # Need to override for scalars, which are detected as iterable, - # and don't respond to printoptions. - if self.magnitude.ndim == 0: - allf = plain_allf = "{} {}" - mstr = formatter.format(obj.magnitude) - else: - with np.printoptions( - formatter={"float_kind": formatter.format} - ): - mstr = ( - "
"
-                                + format(obj.magnitude).replace("\n", "
") - + "
" - ) - elif not iterable(obj.magnitude): - # Use plain text for scalars - mstr = format(obj.magnitude, mspec) - else: - # Use monospace font for other array-likes - mstr = ( - "
"
-                        + format(obj.magnitude, mspec).replace("\n", "
") - + "
" - ) - elif isinstance(self.magnitude, ndarray): - if "L" in uspec: - # Use ndarray LaTeX special formatting - mstr = ndarray_to_latex(obj.magnitude, mspec) - else: - # Use custom ndarray text formatting--need to handle scalars differently - # since they don't respond to printoptions - formatter = "{{:{}}}".format(mspec) - if obj.magnitude.ndim == 0: - mstr = formatter.format(obj.magnitude) - else: - with np.printoptions(formatter={"float_kind": formatter.format}): - mstr = format(obj.magnitude).replace("\n", "") - else: - mstr = format(obj.magnitude, mspec).replace("\n", "") - - if "L" in uspec and "Lx" not in uspec: - mstr = self._exp_pattern.sub(r"\1\\times 10^{\2\3}", mstr) - elif "H" in uspec or "P" in uspec: - m = self._exp_pattern.match(mstr) - _exp_formatter = ( - _pretty_fmt_exponent if "P" in uspec else lambda s: f"{s}" - ) - if m: - exp = int(m.group(2) + m.group(3)) - mstr = self._exp_pattern.sub(r"\1×10" + _exp_formatter(exp), mstr) - - if allf == plain_allf and ustr.startswith("1 /"): - # Write e.g. "3 / s" instead of "3 1 / s" - ustr = ustr[2:] - return allf.format(mstr, ustr).strip() - - def _repr_pretty_(self, p, cycle): - if cycle: - super()._repr_pretty_(p, cycle) - else: - p.pretty(self.magnitude) - p.text(" ") - p.pretty(self.units) - - def format_babel(self, spec: str = "", **kwspec: Any) -> str: - spec = spec or self.default_format - - # standard cases - if "#" in spec: - spec = spec.replace("#", "") - obj = self.to_compact() - else: - obj = self - kwspec = dict(kwspec) - if "length" in kwspec: - kwspec["babel_length"] = kwspec.pop("length") - - loc = kwspec.get("locale", self._REGISTRY.fmt_locale) - if loc is None: - raise ValueError("Provide a `locale` value to localize translation.") - - kwspec["locale"] = babel_parse(loc) - kwspec["babel_plural_form"] = kwspec["locale"].plural_form(obj.magnitude) - return "{} {}".format( - format(obj.magnitude, remove_custom_flags(spec)), - obj.units.format_babel(spec, **kwspec), - ).replace("\n", "") - - def __str__(self) -> str: - if self._REGISTRY.fmt_locale is not None: - return self.format_babel() - - return format(self) - - -class FormattingUnit: - def __str__(self): - return format(self) - - def __format__(self, spec) -> str: - _, uspec = split_format( - spec, self.default_format, self._REGISTRY.separate_format_defaults - ) - if "~" in uspec: - if not self._units: - return "" - units = UnitsContainer( - dict( - (self._REGISTRY._get_symbol(key), value) - for key, value in self._units.items() - ) - ) - uspec = uspec.replace("~", "") - else: - units = self._units - - return format_unit(units, uspec, registry=self._REGISTRY) - - def format_babel(self, spec="", locale=None, **kwspec: Any) -> str: - spec = spec or extract_custom_flags(self.default_format) - - if "~" in spec: - if self.dimensionless: - return "" - units = UnitsContainer( - dict( - (self._REGISTRY._get_symbol(key), value) - for key, value in self._units.items() - ) - ) - spec = spec.replace("~", "") - else: - units = self._units - - locale = self._REGISTRY.fmt_locale if locale is None else locale - - if locale is None: - raise ValueError("Provide a `locale` value to localize translation.") - else: - kwspec["locale"] = babel_parse(locale) - - return units.format_babel(spec, registry=self._REGISTRY, **kwspec) diff --git a/pint/facets/formatting/registry.py b/pint/facets/formatting/registry.py deleted file mode 100644 index 246cc43c3..000000000 --- a/pint/facets/formatting/registry.py +++ /dev/null @@ -1,18 +0,0 @@ -""" - pint.facets.formatting.registry - 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - :copyright: 2022 by Pint Authors, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" - -from __future__ import annotations - -from ..plain import PlainRegistry -from .objects import FormattingQuantity, FormattingUnit - - -class FormattingRegistry(PlainRegistry): - - _quantity_class = FormattingQuantity - _unit_class = FormattingUnit diff --git a/pint/facets/group/__init__.py b/pint/facets/group/__init__.py index e1fad043d..db488deac 100644 --- a/pint/facets/group/__init__.py +++ b/pint/facets/group/__init__.py @@ -11,7 +11,14 @@ from __future__ import annotations from .definitions import GroupDefinition -from .objects import Group -from .registry import GroupRegistry +from .objects import Group, GroupQuantity, GroupUnit +from .registry import GenericGroupRegistry, GroupRegistry -__all__ = ["GroupDefinition", "Group", "GroupRegistry"] +__all__ = [ + "GroupDefinition", + "Group", + "GroupRegistry", + "GenericGroupRegistry", + "GroupQuantity", + "GroupUnit", +] diff --git a/pint/facets/group/definitions.py b/pint/facets/group/definitions.py index c0abced3c..bec7d8ac0 100644 --- a/pint/facets/group/definitions.py +++ b/pint/facets/group/definitions.py @@ -8,10 +8,11 @@ from __future__ import annotations -import typing as ty +from collections.abc import Iterable from dataclasses import dataclass from ... import errors +from ...compat import Self from .. import plain @@ -22,12 +23,14 @@ class GroupDefinition(errors.WithDefErr): #: name of the group name: str #: unit groups that will be included within the group - using_group_names: ty.Tuple[str, ...] + using_group_names: tuple[str, ...] #: definitions for the units existing within the group - definitions: ty.Tuple[plain.UnitDefinition, ...] + definitions: tuple[plain.UnitDefinition, ...] @classmethod - def from_lines(cls, lines, non_int_type): + def from_lines( + cls: type[Self], lines: Iterable[str], non_int_type: type + ) -> Self | None: # TODO: this is to keep it backwards compatible from ...delegates import ParserConfig, txt_defparser @@ -39,10 +42,10 @@ def from_lines(cls, lines, non_int_type): return definition @property - def unit_names(self) -> ty.Tuple[str, ...]: + def unit_names(self) -> tuple[str, ...]: return tuple(el.name for el in self.definitions) - def __post_init__(self): + def __post_init__(self) -> None: if not errors.is_valid_group_name(self.name): raise self.def_err(errors.MSG_INVALID_GROUP_NAME) diff --git a/pint/facets/group/objects.py b/pint/facets/group/objects.py index 67fa136c7..a1767e666 100644 --- a/pint/facets/group/objects.py +++ b/pint/facets/group/objects.py @@ -8,9 +8,37 @@ from __future__ import annotations +from collections.abc import Callable, Generator, Iterable +from typing import TYPE_CHECKING, Any, Generic + from ...util import SharedRegistryObject, getattr_maybe_raise +from ..plain import MagnitudeT, PlainQuantity, PlainUnit from .definitions import GroupDefinition +if TYPE_CHECKING: + from ..plain import UnitDefinition + + DefineFunc = Callable[ + [ + Any, + ], + None, + ] + AddUnitFunc = Callable[ + [ + UnitDefinition, + ], + None, + ] + + +class GroupQuantity(Generic[MagnitudeT], PlainQuantity[MagnitudeT]): + pass + + +class GroupUnit(PlainUnit): + pass + class Group(SharedRegistryObject): """A group is a set of units. @@ -23,32 +51,26 @@ class Group(SharedRegistryObject): The group belongs to one Registry. See GroupDefinition for the definition file syntax. - """ - def __init__(self, name): - """ - :param name: Name of the group. 
If not given, a root Group will be created. - :type name: str - :param groups: dictionary like object groups and system. - The newly created group will be added after creation. - :type groups: dict[str | Group] - """ + Parameters + ---------- + name + If not given, a root Group will be created. + """ + def __init__(self, name: str): # The name of the group. - #: type: str self.name = name #: Names of the units in this group. #: :type: set[str] - self._unit_names = set() + self._unit_names: set[str] = set() #: Names of the groups in this group. - #: :type: set[str] - self._used_groups = set() + self._used_groups: set[str] = set() #: Names of the groups in which this group is contained. - #: :type: set[str] - self._used_by = set() + self._used_by: set[str] = set() # Add this group to the group dictionary self._REGISTRY._groups[self.name] = self @@ -59,34 +81,33 @@ def __init__(self, name): #: A cache of the included units. #: None indicates that the cache has been invalidated. - #: :type: frozenset[str] | None - self._computed_members = None + self._computed_members: frozenset[str] | None = None @property - def members(self): + def members(self) -> frozenset[str]: """Names of the units that are members of the group. Calculated to include to all units in all included _used_groups. """ if self._computed_members is None: - self._computed_members = set(self._unit_names) + tmp = set(self._unit_names) for _, group in self.iter_used_groups(): - self._computed_members |= group.members + tmp |= group.members - self._computed_members = frozenset(self._computed_members) + self._computed_members = frozenset(tmp) return self._computed_members - def invalidate_members(self): + def invalidate_members(self) -> None: """Invalidate computed members in this Group and all parent nodes.""" self._computed_members = None d = self._REGISTRY._groups for name in self._used_by: d[name].invalidate_members() - def iter_used_groups(self): + def iter_used_groups(self) -> Generator[tuple[str, Group]]: pending = set(self._used_groups) d = self._REGISTRY._groups while pending: @@ -95,13 +116,13 @@ def iter_used_groups(self): pending |= group._used_groups yield name, d[name] - def is_used_group(self, group_name): + def is_used_group(self, group_name: str) -> bool: for name, _ in self.iter_used_groups(): if name == group_name: return True return False - def add_units(self, *unit_names): + def add_units(self, *unit_names: str) -> None: """Add units to group.""" for unit_name in unit_names: self._unit_names.add(unit_name) @@ -109,21 +130,20 @@ def add_units(self, *unit_names): self.invalidate_members() @property - def non_inherited_unit_names(self): + def non_inherited_unit_names(self) -> frozenset[str]: return frozenset(self._unit_names) - def remove_units(self, *unit_names): + def remove_units(self, *unit_names: str) -> None: """Remove units from group.""" for unit_name in unit_names: self._unit_names.remove(unit_name) self.invalidate_members() - def add_groups(self, *group_names): + def add_groups(self, *group_names: str) -> None: """Add groups to group.""" d = self._REGISTRY._groups for group_name in group_names: - grp = d[group_name] if grp.is_used_group(self.name): @@ -137,7 +157,7 @@ def add_groups(self, *group_names): self.invalidate_members() - def remove_groups(self, *group_names): + def remove_groups(self, *group_names: str) -> None: """Remove groups from group.""" d = self._REGISTRY._groups for group_name in group_names: @@ -149,7 +169,9 @@ def remove_groups(self, *group_names): self.invalidate_members() @classmethod 
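As a usage sketch of the `Group` API above (the group name is illustrative; groups are created on demand through the registry):

    import pint

    ureg = pint.UnitRegistry()
    grp = ureg.get_group("my_units")     # created because create_if_needed defaults to True
    grp.add_units("meter", "second")
    assert {"meter", "second"} <= grp.members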
- def from_lines(cls, lines, define_func, non_int_type=float) -> Group: + def from_lines( + cls, lines: Iterable[str], define_func: DefineFunc, non_int_type: type = float + ) -> Group: """Return a Group object parsing an iterable of lines. Parameters @@ -165,11 +187,17 @@ def from_lines(cls, lines, define_func, non_int_type=float) -> Group: """ group_definition = GroupDefinition.from_lines(lines, non_int_type) + + if group_definition is None: + raise ValueError(f"Could not define group from {lines}") + return cls.from_definition(group_definition, define_func) @classmethod def from_definition( - cls, group_definition: GroupDefinition, add_unit_func=None + cls, + group_definition: GroupDefinition, + add_unit_func: AddUnitFunc | None = None, ) -> Group: grp = cls(group_definition.name) @@ -191,6 +219,6 @@ def from_definition( return grp - def __getattr__(self, item): + def __getattr__(self, item: str): getattr_maybe_raise(self, item) return self._REGISTRY diff --git a/pint/facets/group/registry.py b/pint/facets/group/registry.py index c4ed0be2e..33f78c645 100644 --- a/pint/facets/group/registry.py +++ b/pint/facets/group/registry.py @@ -8,20 +8,28 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Dict, FrozenSet +from typing import TYPE_CHECKING, Any, Generic from ... import errors +from ...compat import TypeAlias if TYPE_CHECKING: - from pint import Unit - -from ...util import build_dependent_class, create_class_with_registry -from ..plain import PlainRegistry, UnitDefinition + from ..._typing import Unit, UnitsContainer + +from ...util import create_class_with_registry, to_units_container +from ..plain import ( + GenericPlainRegistry, + QuantityT, + UnitDefinition, + UnitT, +) +from . import objects from .definitions import GroupDefinition -from .objects import Group -class GroupRegistry(PlainRegistry): +class GenericGroupRegistry( + Generic[QuantityT, UnitT], GenericPlainRegistry[QuantityT, UnitT] +): """Handle of Groups. Group units @@ -34,23 +42,18 @@ class GroupRegistry(PlainRegistry): # TODO: Change this to Group: Group to specify class # and use introspection to get system class as a way # to enjoy typing goodies - _group_class = Group + Group = type[objects.Group] def __init__(self, **kwargs): super().__init__(**kwargs) #: Map group name to group. - #: :type: dict[ str | Group] - self._groups: Dict[str, Group] = {} + self._groups: dict[str, objects.Group] = {} self._groups["root"] = self.Group("root") - def __init_subclass__(cls, **kwargs): - super().__init_subclass__() - cls.Group = build_dependent_class(cls, "Group", "_group_class") - def _init_dynamic_classes(self) -> None: """Generate subclasses on the fly and attach them to self""" super()._init_dynamic_classes() - self.Group = create_class_with_registry(self, self.Group) + self.Group = create_class_with_registry(self, objects.Group) def _after_init(self) -> None: """Invoked at the end of ``__init__``. @@ -84,7 +87,6 @@ def _add_unit(self, definition: UnitDefinition): self.get_group("root").add_units(definition.name) def _add_group(self, gd: GroupDefinition): - if gd.name in self._groups: raise ValueError(f"Group {gd.name} already present in registry") try: @@ -94,7 +96,7 @@ def _add_group(self, gd: GroupDefinition): except KeyError as e: raise errors.DefinitionSyntaxError(f"unknown dimension {e} in context") - def get_group(self, name: str, create_if_needed: bool = True) -> Group: + def get_group(self, name: str, create_if_needed: bool = True) -> objects.Group: """Return a Group. 
Parameters @@ -118,9 +120,23 @@ def get_group(self, name: str, create_if_needed: bool = True) -> Group: return self.Group(name) - def _get_compatible_units(self, input_units, group) -> FrozenSet["Unit"]: + def get_compatible_units( + self, input_units: UnitsContainer, group: str | None = None + ) -> frozenset[Unit]: + """ """ + if group is None: + return super().get_compatible_units(input_units) + + input_units = to_units_container(input_units) - ret = super()._get_compatible_units(input_units, group) + equiv = self._get_compatible_units(input_units, group) + + return frozenset(self.Unit(eq) for eq in equiv) + + def _get_compatible_units( + self, input_units: UnitsContainer, group: str | None = None + ) -> frozenset[str]: + ret = super()._get_compatible_units(input_units) if not group: return ret @@ -130,3 +146,10 @@ def _get_compatible_units(self, input_units, group) -> FrozenSet["Unit"]: else: raise ValueError("Unknown Group with name '%s'" % group) return frozenset(ret & members) + + +class GroupRegistry( + GenericGroupRegistry[objects.GroupQuantity[Any], objects.GroupUnit] +): + Quantity: TypeAlias = objects.GroupQuantity[Any] + Unit: TypeAlias = objects.GroupUnit diff --git a/pint/facets/measurement/__init__.py b/pint/facets/measurement/__init__.py index 21539dcd5..0b241ea1d 100644 --- a/pint/facets/measurement/__init__.py +++ b/pint/facets/measurement/__init__.py @@ -11,6 +11,11 @@ from __future__ import annotations from .objects import Measurement, MeasurementQuantity -from .registry import MeasurementRegistry +from .registry import GenericMeasurementRegistry, MeasurementRegistry -__all__ = ["Measurement", "MeasurementQuantity", "MeasurementRegistry"] +__all__ = [ + "Measurement", + "MeasurementQuantity", + "MeasurementRegistry", + "GenericMeasurementRegistry", +] diff --git a/pint/facets/measurement/objects.py b/pint/facets/measurement/objects.py index 88fad0a73..4240a91d2 100644 --- a/pint/facets/measurement/objects.py +++ b/pint/facets/measurement/objects.py @@ -10,21 +10,20 @@ import copy import re +from typing import Generic from ...compat import ufloat -from ...formatting import _FORMATS, extract_custom_flags, siunitx_format_unit -from ..plain import PlainQuantity +from ..plain import MagnitudeT, PlainQuantity, PlainUnit MISSING = object() -class MeasurementQuantity: - +class MeasurementQuantity(Generic[MagnitudeT], PlainQuantity[MagnitudeT]): # Measurement support def plus_minus(self, error, relative=False): if isinstance(error, self.__class__): if relative: - raise ValueError("{} is not a valid relative error.".format(error)) + raise ValueError(f"{error} is not a valid relative error.") error = error.to(self._units).magnitude else: if relative: @@ -33,6 +32,10 @@ def plus_minus(self, error, relative=False): return self._REGISTRY.Measurement(copy.copy(self.magnitude), error, self._units) +class MeasurementUnit(PlainUnit): + pass + + class Measurement(PlainQuantity): """Implements a class to describe a quantity with uncertainty. 
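In practice the `Measurement` changes above are exercised through `plus_minus` or the registry's `Measurement` constructor (both require the `uncertainties` package); a short sketch:

    import pint

    ureg = pint.UnitRegistry()
    length = (20.0 * ureg.centimeter).plus_minus(2.0)
    print(length.value, length.error)    # 20.0 centimeter, 2.0 centimeter
    same = ureg.Measurement(20.0, 2.0, "centimeter")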
@@ -48,7 +51,7 @@ class Measurement(PlainQuantity): """ - def __new__(cls, value, error, units=MISSING): + def __new__(cls, value, error=MISSING, units=MISSING): if units is MISSING: try: value, units = value.magnitude, value.units @@ -60,17 +63,18 @@ def __new__(cls, value, error, units=MISSING): error = MISSING # used for check below else: units = "" - try: - error = error.to(units).magnitude - except AttributeError: - pass - if error is MISSING: + # We've already extracted the units from the Quantity above mag = value - elif error < 0: - raise ValueError("The magnitude of the error cannot be negative") else: - mag = ufloat(value, error) + try: + error = error.to(units).magnitude + except AttributeError: + pass + if error < 0: + raise ValueError("The magnitude of the error cannot be negative") + else: + mag = ufloat(value, error) inst = super().__new__(cls, mag, units) return inst @@ -99,11 +103,15 @@ def __repr__(self): ) def __str__(self): - return "{}".format(self) + return f"{self}" def __format__(self, spec): + spec = spec or self._REGISTRY.default_format + return self._REGISTRY.formatter.format_measurement(self, spec) - spec = spec or self.default_format + def old_format(self, spec): + # TODO: provisional + from ...formatting import _FORMATS, extract_custom_flags, siunitx_format_unit # special cases if "Lx" in spec: # the LaTeX siunitx code @@ -134,8 +142,8 @@ def __format__(self, spec): # Also, SIunitx doesn't accept parentheses, which uncs uses with # scientific notation ('e' or 'E' and sometimes 'g' or 'G'). mstr = mstr.replace("(", "").replace(")", " ") - ustr = siunitx_format_unit(self.units._units, self._REGISTRY) - return r"\SI%s{%s}{%s}" % (opts, mstr, ustr) + ustr = siunitx_format_unit(self.units._units.items(), self._REGISTRY) + return rf"\SI{opts}{{{mstr}}}{{{ustr}}}" # standard cases if "L" in spec: diff --git a/pint/facets/measurement/registry.py b/pint/facets/measurement/registry.py index f5c962171..905de7ab7 100644 --- a/pint/facets/measurement/registry.py +++ b/pint/facets/measurement/registry.py @@ -9,23 +9,18 @@ from __future__ import annotations -from ...compat import ufloat -from ...util import build_dependent_class, create_class_with_registry -from ..plain import PlainRegistry -from .objects import Measurement, MeasurementQuantity +from typing import Any, Generic +from ...compat import TypeAlias, ufloat +from ...util import create_class_with_registry +from ..plain import GenericPlainRegistry, QuantityT, UnitT +from . 
import objects -class MeasurementRegistry(PlainRegistry): - _quantity_class = MeasurementQuantity - _measurement_class = Measurement - - def __init_subclass__(cls, **kwargs): - super().__init_subclass__() - - cls.Measurement = build_dependent_class( - cls, "Measurement", "_measurement_class" - ) +class GenericMeasurementRegistry( + Generic[QuantityT, UnitT], GenericPlainRegistry[QuantityT, UnitT] +): + Measurement = objects.Measurement def _init_dynamic_classes(self) -> None: """Generate subclasses on the fly and attach them to self""" @@ -41,3 +36,12 @@ def no_uncertainties(*args, **kwargs): ) self.Measurement = no_uncertainties + + +class MeasurementRegistry( + GenericMeasurementRegistry[ + objects.MeasurementQuantity[Any], objects.MeasurementUnit + ] +): + Quantity: TypeAlias = objects.MeasurementQuantity[Any] + Unit: TypeAlias = objects.MeasurementUnit diff --git a/pint/facets/nonmultiplicative/__init__.py b/pint/facets/nonmultiplicative/__init__.py index cbba4100c..a338dc34a 100644 --- a/pint/facets/nonmultiplicative/__init__.py +++ b/pint/facets/nonmultiplicative/__init__.py @@ -15,8 +15,6 @@ # This import register LogarithmicConverter and OffsetConverter to be usable # (via subclassing) from .definitions import LogarithmicConverter, OffsetConverter # noqa: F401 -from .registry import NonMultiplicativeRegistry +from .registry import GenericNonMultiplicativeRegistry, NonMultiplicativeRegistry -__all__ = [ - "NonMultiplicativeRegistry", -] +__all__ = ["NonMultiplicativeRegistry", "GenericNonMultiplicativeRegistry"] diff --git a/pint/facets/nonmultiplicative/definitions.py b/pint/facets/nonmultiplicative/definitions.py index dbfc0ffb9..f795cf046 100644 --- a/pint/facets/nonmultiplicative/definitions.py +++ b/pint/facets/nonmultiplicative/definitions.py @@ -10,6 +10,7 @@ from dataclasses import dataclass +from ..._typing import Magnitude from ...compat import HAS_NUMPY, exp, log from ..plain import ScaleConverter @@ -24,7 +25,7 @@ class OffsetConverter(ScaleConverter): def is_multiplicative(self): return self.offset == 0 - def to_reference(self, value, inplace=False): + def to_reference(self, value: Magnitude, inplace: bool = False) -> Magnitude: if inplace: value *= self.scale value += self.offset @@ -33,7 +34,7 @@ def to_reference(self, value, inplace=False): return value - def from_reference(self, value, inplace=False): + def from_reference(self, value: Magnitude, inplace: bool = False) -> Magnitude: if inplace: value -= self.offset value /= self.scale @@ -66,6 +67,7 @@ class LogarithmicConverter(ScaleConverter): controls if computation is done in place """ + # TODO: Can I use PintScalar here? 
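    # At the registry level, ScaleConverter/OffsetConverter/LogarithmicConverter
    # are what drive conversions such as the following sketch (assuming the
    # stock degC and dBm definitions):
    #
    #     import pint
    #     ureg = pint.UnitRegistry()
    #     ureg.Quantity(25.0, "degC").to("kelvin")        # 298.15 kelvin
    #     ureg.Quantity(10.0, "delta_degC").to("kelvin")  # 10.0 kelvin
    #     ureg.Quantity(0.0, "dBm").to("milliwatt")       # 1.0 milliwatt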
logbase: float logfactor: float @@ -77,7 +79,7 @@ def is_multiplicative(self): def is_logarithmic(self): return True - def from_reference(self, value, inplace=False): + def from_reference(self, value: Magnitude, inplace: bool = False) -> Magnitude: """Converts value from the reference unit to the logarithmic unit dBm <------ mW @@ -95,7 +97,7 @@ def from_reference(self, value, inplace=False): return value - def to_reference(self, value, inplace=False): + def to_reference(self, value: Magnitude, inplace: bool = False) -> Magnitude: """Converts value to the reference unit from the logarithmic unit dBm ------> mW diff --git a/pint/facets/nonmultiplicative/objects.py b/pint/facets/nonmultiplicative/objects.py index 1708e3218..114a256af 100644 --- a/pint/facets/nonmultiplicative/objects.py +++ b/pint/facets/nonmultiplicative/objects.py @@ -8,16 +8,18 @@ from __future__ import annotations -from typing import List +from typing import Generic +from ..plain import MagnitudeT, PlainQuantity, PlainUnit -class NonMultiplicativeQuantity: + +class NonMultiplicativeQuantity(Generic[MagnitudeT], PlainQuantity[MagnitudeT]): @property def _is_multiplicative(self) -> bool: """Check if the PlainQuantity object has only multiplicative units.""" return not self._get_non_multiplicative_units() - def _get_non_multiplicative_units(self) -> List[str]: + def _get_non_multiplicative_units(self) -> list[str]: """Return a list of the of non-multiplicative units of the PlainQuantity object.""" return [ unit @@ -25,7 +27,7 @@ def _get_non_multiplicative_units(self) -> List[str]: if not self._get_unit_definition(unit).is_multiplicative ] - def _get_delta_units(self) -> List[str]: + def _get_delta_units(self) -> list[str]: """Return list of delta units ot the PlainQuantity object.""" return [u for u in self._units if u.startswith("delta_")] @@ -40,7 +42,7 @@ def _has_compatible_delta(self, unit: str) -> bool: self._get_unit_definition(d).reference == offset_unit_dim for d in deltas ) - def _ok_for_muldiv(self, no_offset_units=None) -> bool: + def _ok_for_muldiv(self, no_offset_units: int | None = None) -> bool: """Checks if PlainQuantity object can be multiplied or divided""" is_ok = True @@ -59,3 +61,7 @@ def _ok_for_muldiv(self, no_offset_units=None) -> bool: if next(iter(self._units.values())) != 1: is_ok = False return is_ok + + +class NonMultiplicativeUnit(PlainUnit): + pass diff --git a/pint/facets/nonmultiplicative/registry.py b/pint/facets/nonmultiplicative/registry.py index fc71bc5ea..39e5d829e 100644 --- a/pint/facets/nonmultiplicative/registry.py +++ b/pint/facets/nonmultiplicative/registry.py @@ -1,23 +1,28 @@ """ - pint.facets.nonmultiplicative.registry - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +pint.facets.nonmultiplicative.registry +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - :copyright: 2022 by Pint Authors, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. +:copyright: 2022 by Pint Authors, see AUTHORS for more details. +:license: BSD, see LICENSE for more details. """ from __future__ import annotations -from typing import Any, Optional +from typing import Any, Generic, TypeVar +from ...compat import TypeAlias from ...errors import DimensionalityError, UndefinedUnitError from ...util import UnitsContainer, logger -from ..plain import PlainRegistry, UnitDefinition -from .definitions import OffsetConverter, ScaleConverter -from .objects import NonMultiplicativeQuantity +from ..plain import GenericPlainRegistry, QuantityT, UnitDefinition, UnitT +from . 
import objects +from .definitions import LogarithmicConverter, OffsetConverter, ScaleConverter +T = TypeVar("T") -class NonMultiplicativeRegistry(PlainRegistry): + +class GenericNonMultiplicativeRegistry( + Generic[QuantityT, UnitT], GenericPlainRegistry[QuantityT, UnitT] +): """Handle of non multiplicative units (e.g. Temperature). Capabilities: @@ -32,15 +37,17 @@ class NonMultiplicativeRegistry(PlainRegistry): autoconvert_offset_to_baseunit : bool If True, non-multiplicative units are converted to plain units in multiplications. + logarithmic_math : bool + If True, logarithmic units are + added as logarithmic additions. """ - _quantity_class = NonMultiplicativeQuantity - def __init__( self, default_as_delta: bool = True, autoconvert_offset_to_baseunit: bool = False, + logarithmic_math: bool = False, **kwargs: Any, ) -> None: super().__init__(**kwargs) @@ -53,43 +60,61 @@ def __init__( # plain units on multiplication and division. self.autoconvert_offset_to_baseunit = autoconvert_offset_to_baseunit - def _parse_units( + # When performing addition of logarithmic units, interpret + # the addition as a logarithmic addition + self.logarithmic_math = logarithmic_math + + def parse_units_as_container( self, input_string: str, - as_delta: Optional[bool] = None, - case_sensitive: Optional[bool] = None, - ): + as_delta: bool | None = None, + case_sensitive: bool | None = None, + ) -> UnitsContainer: """ """ if as_delta is None: as_delta = self.default_as_delta - return super()._parse_units(input_string, as_delta, case_sensitive) + return super().parse_units_as_container(input_string, as_delta, case_sensitive) - def _add_unit(self, definition: UnitDefinition): + def _add_unit(self, definition: UnitDefinition) -> None: super()._add_unit(definition) if definition.is_multiplicative: return - if definition.is_logarithmic: + # Issue #2 (valispace fork): delta versions are added for logarithmic units + # if logarithmic math is activated. + if definition.is_logarithmic and not self.logarithmic_math: return - if not isinstance(definition.converter, OffsetConverter): + if not isinstance(definition.converter, OffsetConverter) and not isinstance( + definition.converter, LogarithmicConverter + ): logger.debug( "Cannot autogenerate delta version for a unit in " - "which the converter is not an OffsetConverter" + "which the converter is not an OffsetConverter " + "or a LogarithmicConverter" ) return delta_name = "delta_" + definition.name if definition.symbol: delta_symbol = "Δ" + definition.symbol + # Issue #2 (valispace fork): delta versions need an additional symbol alias for logaritmic units, also useful for offset units + symbol_alias = ( + "delta_" + definition.symbol + if definition.symbol != definition.name + else "" + ) else: delta_symbol = None + symbol_alias = "" delta_aliases = tuple("Δ" + alias for alias in definition.aliases) + tuple( "delta_" + alias for alias in definition.aliases ) + if symbol_alias: + delta_aliases += (symbol_alias,) delta_reference = self.UnitsContainer( {ref: value for ref, value in definition.reference.items()} @@ -104,22 +129,60 @@ def _add_unit(self, definition: UnitDefinition): ) super()._add_unit(delta_def) - def _is_multiplicative(self, u) -> bool: - if u in self._units: - return self._units[u].is_multiplicative + def _is_multiplicative(self, unit_name: str) -> bool: + """True if the unit is multiplicative. + + Parameters + ---------- + unit_name + Name of the unit to check. 
+ Can be prefixed, pluralized or even an alias + + Raises + ------ + UndefinedUnitError + If the unit is not in the registry. + """ + if unit_name in self._units: + return self._units[unit_name].is_multiplicative # If the unit is not in the registry might be because it is not # registered with its prefixed version. # TODO: Might be better to register them. - names = self.parse_unit_name(u) + names = self.parse_unit_name(unit_name) assert len(names) == 1 _, base_name, _ = names[0] try: return self._units[base_name].is_multiplicative except KeyError: - raise UndefinedUnitError(u) + raise UndefinedUnitError(unit_name) + + def _validate_and_extract(self, units: UnitsContainer) -> str | None: + """Used to check if a given units is suitable for a simple + conversion. + + Return None if all units are non-multiplicative + Return the unit name if a single non-multiplicative unit is found + and is raised to a power equals to 1. + + Otherwise, raise an Exception. + + Parameters + ---------- + units + Compound dictionary. + + Raises + ------ + ValueError + If the more than a single non-multiplicative unit is present, + or a single one is present but raised to a power different from 1. + + """ + + # TODO: document what happens if autoconvert_offset_to_baseunit + # TODO: Clarify docs - def _validate_and_extract(self, units): # u is for unit, e is for exponent nonmult_units = [ (u, e) for u, e in units.items() if not self._is_multiplicative(u) @@ -147,22 +210,33 @@ def _validate_and_extract(self, units): return None - def _add_ref_of_log_or_offset_unit(self, offset_unit, all_units): - + def _add_ref_of_log_or_offset_unit( + self, offset_unit: str, all_units: UnitsContainer + ) -> UnitsContainer: slct_unit = self._units[offset_unit] - if slct_unit.is_logarithmic or (not slct_unit.is_multiplicative): + if slct_unit.is_logarithmic: # Extract reference unit slct_ref = slct_unit.reference + + # TODO: Check that reference is None + # If reference unit is not dimensionless if slct_ref != UnitsContainer(): # Extract reference unit (u, e) = [(u, e) for u, e in slct_ref.items()].pop() # Add it back to the unit list return all_units.add(u, e) + + if not slct_unit.is_multiplicative: # is offset unit + # Extract reference unit + return slct_unit.reference + # Otherwise, return the units unmodified return all_units - def _convert(self, value, src, dst, inplace=False): + def _convert( + self, value: T, src: UnitsContainer, dst: UnitsContainer, inplace: bool = False + ) -> T: """Convert value from some source to destination units. 
In addition to what is done by the PlainRegistry, @@ -202,6 +276,7 @@ def _convert(self, value, src, dst, inplace=False): src, dst, extra_msg=f" - In destination units, {ex}" ) + # convert if no offset units are present if not (src_offset_unit or dst_offset_unit): return super()._convert(value, src, dst, inplace) @@ -215,6 +290,8 @@ def _convert(self, value, src, dst, inplace=False): # clean src from offset units by converting to reference if src_offset_unit: + if any(u.startswith("delta_") for u in dst): + raise DimensionalityError(src, dst) value = self._units[src_offset_unit].converter.to_reference(value, inplace) src = src.remove([src_offset_unit]) # Add reference unit for multiplicative section @@ -222,6 +299,8 @@ def _convert(self, value, src, dst, inplace=False): # clean dst units from offset units if dst_offset_unit: + if any(u.startswith("delta_") for u in src): + raise DimensionalityError(src, dst) dst = dst.remove([dst_offset_unit]) # Add reference unit for multiplicative section dst = self._add_ref_of_log_or_offset_unit(dst_offset_unit, dst) @@ -236,3 +315,12 @@ def _convert(self, value, src, dst, inplace=False): ) return value + + +class NonMultiplicativeRegistry( + GenericNonMultiplicativeRegistry[ + objects.NonMultiplicativeQuantity[Any], objects.NonMultiplicativeUnit + ] +): + Quantity: TypeAlias = objects.NonMultiplicativeQuantity[Any] + Unit: TypeAlias = objects.NonMultiplicativeUnit diff --git a/pint/facets/numpy/__init__.py b/pint/facets/numpy/__init__.py index aad9508bc..477c09579 100644 --- a/pint/facets/numpy/__init__.py +++ b/pint/facets/numpy/__init__.py @@ -10,6 +10,6 @@ from __future__ import annotations -from .registry import NumpyRegistry +from .registry import GenericNumpyRegistry, NumpyRegistry -__all__ = ["NumpyRegistry"] +__all__ = ["NumpyRegistry", "GenericNumpyRegistry"] diff --git a/pint/facets/numpy/numpy_func.py b/pint/facets/numpy/numpy_func.py index 7bce41e97..b79700f9f 100644 --- a/pint/facets/numpy/numpy_func.py +++ b/pint/facets/numpy/numpy_func.py @@ -13,7 +13,7 @@ from itertools import chain from ...compat import is_upcast_type, np, zero_or_nan -from ...errors import DimensionalityError, UnitStrippedWarning +from ...errors import DimensionalityError, OffsetUnitCalculusError, UnitStrippedWarning from ...util import iterable, sized HANDLED_UFUNCS = {} @@ -52,6 +52,10 @@ def _is_sequence_with_quantity_elements(obj): ------- True if obj is a sequence and at least one element is a Quantity; False otherwise """ + if np is not None and isinstance(obj, np.ndarray) and not obj.dtype.hasobject: + # If obj is a numpy array, avoid looping on all elements + # if dtype does not have objects + return False return ( iterable(obj) and sized(obj) @@ -220,7 +224,7 @@ def get_op_output_unit(unit_op, first_input_units, all_args=None, size=None): product /= x.units result_unit = product**-1 else: - raise ValueError("Output unit method {} not understood".format(unit_op)) + raise ValueError(f"Output unit method {unit_op} not understood") return result_unit @@ -237,7 +241,7 @@ def decorator(func): elif func_type == "ufunc": HANDLED_UFUNCS[numpy_func_string] = func else: - raise ValueError("Invalid func_type {}".format(func_type)) + raise ValueError(f"Invalid func_type {func_type}") return func return decorator @@ -284,6 +288,17 @@ def implement_func(func_type, func_str, input_units=None, output_unit=None): @implements(func_str, func_type) def implementation(*args, **kwargs): + if func_str in ["multiply", "true_divide", "divide", "floor_divide"] and any( + [ + not 
_is_quantity(arg) and _is_sequence_with_quantity_elements(arg) + for arg in args + ] + ): + # the sequence may contain different units, so fall back to element-wise + return np.array( + [func(*func_args) for func_args in zip(*args)], dtype=object + ) + first_input_units = _get_first_input_units(args, kwargs) if input_units == "all_consistent": # Match all input args/kwargs to same units @@ -311,7 +326,7 @@ def implementation(*args, **kwargs): return result_magnitude elif output_unit == "match_input": result_unit = first_input_units - elif output_unit in [ + elif output_unit in ( "sum", "mul", "delta", @@ -324,7 +339,7 @@ def implementation(*args, **kwargs): "cbrt", "reciprocal", "size", - ]: + ): result_unit = get_op_output_unit( output_unit, first_input_units, tuple(chain(args, kwargs.values())) ) @@ -413,6 +428,7 @@ def implementation(*args, **kwargs): "take", "trace", "transpose", + "roll", "ceil", "floor", "hypot", @@ -499,8 +515,8 @@ def _frexp(x, *args, **kwargs): def _power(x1, x2): if _is_quantity(x1): return x1**x2 - else: - return x2.__rpow__(x1) + + return x2.__rpow__(x1) @implements("add", "ufunc") @@ -527,22 +543,16 @@ def _meshgrid(*xi, **kwargs): @implements("full_like", "function") -def _full_like(a, fill_value, dtype=None, order="K", subok=True, shape=None): +def _full_like(a, fill_value, **kwargs): # Make full_like by multiplying with array from ones_like in a # non-multiplicative-unit-safe way if hasattr(fill_value, "_REGISTRY"): return fill_value._REGISTRY.Quantity( - ( - np.ones_like(a, dtype=dtype, order=order, subok=subok, shape=shape) - * fill_value.m - ), + np.ones_like(a, **kwargs) * fill_value.m, fill_value.units, ) - else: - return ( - np.ones_like(a, dtype=dtype, order=order, subok=subok, shape=shape) - * fill_value - ) + + return np.ones_like(a, **kwargs) * fill_value @implements("interp", "function") @@ -677,8 +687,8 @@ def _any(a, *args, **kwargs): # Only valid when multiplicative unit/no offset if a._is_multiplicative: return np.any(a._magnitude, *args, **kwargs) - else: - raise ValueError("Boolean value of Quantity with offset unit is ambiguous.") + + raise ValueError("Boolean value of Quantity with offset unit is ambiguous.") @implements("all", "function") @@ -731,10 +741,77 @@ def _prod(a, *args, **kwargs): return registry.Quantity(result, units) -for name in ["prod", "nanprod"]: +for name in ("prod", "nanprod"): implement_prod_func(name) +# Handle mutliplicative functions separately to deal with non-multiplicative units +def _base_unit_if_needed(a): + if a._is_multiplicative: + return a + else: + if a.units._REGISTRY.autoconvert_offset_to_baseunit: + return a.to_base_units() + else: + raise OffsetUnitCalculusError(a.units) + + +# NP2 Can remove trapz wrapping when we only support numpy>=2 +@implements("trapz", "function") +@implements("trapezoid", "function") +def _trapz(y, x=None, dx=1.0, **kwargs): + trapezoid = np.trapezoid if hasattr(np, "trapezoid") else np.trapz + y = _base_unit_if_needed(y) + units = y.units + if x is not None: + if hasattr(x, "units"): + x = _base_unit_if_needed(x) + units *= x.units + x = x._magnitude + ret = trapezoid(y._magnitude, x, **kwargs) + else: + if hasattr(dx, "units"): + dx = _base_unit_if_needed(dx) + units *= dx.units + dx = dx._magnitude + ret = trapezoid(y._magnitude, dx=dx, **kwargs) + + return y.units._REGISTRY.Quantity(ret, units) + + +@implements("correlate", "function") +def _correlate(a, v, mode="valid", **kwargs): + a = _base_unit_if_needed(a) + v = _base_unit_if_needed(v) + units = a.units * v.units + ret 
= np.correlate(a._magnitude, v._magnitude, mode=mode, **kwargs) + return a.units._REGISTRY.Quantity(ret, units) + + +def implement_mul_func(func): + # If NumPy is not available, do not attempt implement that which does not exist + if np is None: + return + + func = getattr(np, func_str) + + @implements(func_str, "function") + def implementation(a, b, **kwargs): + a = _base_unit_if_needed(a) + units = a.units + if hasattr(b, "units"): + b = _base_unit_if_needed(b) + units *= b.units + b = b._magnitude + + mag = func(a._magnitude, b, **kwargs) + return a.units._REGISTRY.Quantity(mag, units) + + +for func_str in ("cross", "dot"): + implement_mul_func(func_str) + + # Implement simple matching-unit or stripped-unit functions based on signature @@ -781,11 +858,11 @@ def implementation(*args, **kwargs): # Conditionally wrap output if wrap_output: return output_wrap(ret) - else: - return ret + return ret -for func_str, unit_arguments, wrap_output in [ + +for func_str, unit_arguments, wrap_output in ( ("expand_dims", "a", True), ("squeeze", "a", True), ("rollaxis", "a", True), @@ -796,10 +873,12 @@ def implementation(*args, **kwargs): ("ptp", "a", True), ("ravel", "a", True), ("round_", "a", True), + ("round", "a", True), ("sort", "a", True), ("median", "a", True), ("nanmedian", "a", True), ("transpose", "a", True), + ("roll", "a", True), ("copy", "a", True), ("average", "a", True), ("nanmean", "a", True), @@ -816,8 +895,9 @@ def implementation(*args, **kwargs): ("broadcast_to", ["array"], True), ("amax", ["a", "initial"], True), ("amin", ["a", "initial"], True), + ("max", ["a", "initial"], True), + ("min", ["a", "initial"], True), ("searchsorted", ["a", "v"], False), - ("isclose", ["a", "b"], False), ("nan_to_num", ["x", "nan", "posinf", "neginf"], True), ("clip", ["a", "a_min", "a_max"], True), ("append", ["arr", "values"], True), @@ -827,14 +907,46 @@ def implementation(*args, **kwargs): ("lib.stride_tricks.sliding_window_view", "x", True), ("rot90", "m", True), ("insert", ["arr", "values"], True), + ("delete", ["arr"], True), ("resize", "a", True), ("reshape", "a", True), - ("allclose", ["a", "b"], False), ("intersect1d", ["ar1", "ar2"], True), -]: +): implement_consistent_units_by_argument(func_str, unit_arguments, wrap_output) +# implement isclose and allclose +def implement_close(func_str): + if np is None: + return + + func = getattr(np, func_str) + + @implements(func_str, "function") + def implementation(*args, **kwargs): + bound_args = signature(func).bind(*args, **kwargs) + labels = ["a", "b"] + arrays = {label: bound_args.arguments[label] for label in labels} + if "atol" in bound_args.arguments: + atol = bound_args.arguments["atol"] + a = arrays["a"] + if not hasattr(atol, "_REGISTRY") and hasattr(a, "_REGISTRY"): + # always use the units of `a` + atol_ = a._REGISTRY.Quantity(atol, a.units) + else: + atol_ = atol + arrays["atol"] = atol_ + + args, _ = unwrap_and_wrap_consistent_units(*arrays.values()) + for label, value in zip(arrays.keys(), args): + bound_args.arguments[label] = value + + return func(*bound_args.args, **bound_args.kwargs) + + +for func_str in ("isclose", "allclose"): + implement_close(func_str) + # Handle atleast_nd functions @@ -861,7 +973,7 @@ def implementation(*arrays): return output_unit._REGISTRY.Quantity(arrays_magnitude, output_unit) -for func_str in ["atleast_1d", "atleast_2d", "atleast_3d"]: +for func_str in ("atleast_1d", "atleast_2d", "atleast_3d"): implement_atleast_nd(func_str) @@ -882,24 +994,24 @@ def implementation(a, *args, **kwargs): return 
a._REGISTRY.Quantity(func(a_stripped, *args, **kwargs)) -for func_str in ["cumprod", "cumproduct", "nancumprod"]: +for func_str in ("cumprod", "nancumprod"): implement_single_dimensionless_argument_func(func_str) # Handle single-argument consistent unit functions -for func_str in [ +for func_str in ( "block", "hstack", "vstack", "dstack", "column_stack", "broadcast_arrays", -]: +): implement_func( "function", func_str, input_units="all_consistent", output_unit="match_input" ) # Handle functions that ignore units on input and output -for func_str in [ +for func_str in ( "size", "isreal", "iscomplex", @@ -910,28 +1022,33 @@ def implementation(a, *args, **kwargs): "argsort", "argmin", "argmax", - "alen", "ndim", "nanargmax", "nanargmin", "count_nonzero", "nonzero", "result_type", -]: +): implement_func("function", func_str, input_units=None, output_unit=None) # Handle functions with output unit defined by operation -for func_str in ["std", "nanstd", "sum", "nansum", "cumsum", "nancumsum"]: +for func_str in ( + "std", + "nanstd", + "sum", + "nansum", + "cumsum", + "nancumsum", + "linalg.norm", +): implement_func("function", func_str, input_units=None, output_unit="sum") -for func_str in ["cross", "trapz", "dot"]: - implement_func("function", func_str, input_units=None, output_unit="mul") -for func_str in ["diff", "ediff1d"]: +for func_str in ("diff", "ediff1d"): implement_func("function", func_str, input_units=None, output_unit="delta") -for func_str in ["gradient"]: +for func_str in ("gradient",): implement_func("function", func_str, input_units=None, output_unit="delta,div") -for func_str in ["linalg.solve"]: +for func_str in ("linalg.solve",): implement_func("function", func_str, input_units=None, output_unit="invdiv") -for func_str in ["var", "nanvar"]: +for func_str in ("var", "nanvar"): implement_func("function", func_str, input_units=None, output_unit="variance") @@ -947,7 +1064,7 @@ def numpy_wrap(func_type, func, args, kwargs, types): # ufuncs do not have func.__module__ name = func.__name__ else: - raise ValueError("Invalid func_type {}".format(func_type)) + raise ValueError(f"Invalid func_type {func_type}") if name not in handled or any(is_upcast_type(t) for t in types): return NotImplemented diff --git a/pint/facets/numpy/quantity.py b/pint/facets/numpy/quantity.py index 243610033..75dccec54 100644 --- a/pint/facets/numpy/quantity.py +++ b/pint/facets/numpy/quantity.py @@ -11,11 +11,12 @@ import functools import math import warnings -from typing import Any +from typing import Any, Generic -from ..._typing import Shape, _MagnitudeType -from ...compat import _to_magnitude, np +from ..._typing import Shape +from ...compat import HAS_NUMPY, _to_magnitude, np from ...errors import DimensionalityError, PintTypeError, UnitStrippedWarning +from ..plain import MagnitudeT, PlainQuantity from .numpy_func import ( HANDLED_UFUNCS, copy_units_output_ufuncs, @@ -27,6 +28,16 @@ set_units_ufuncs, ) +try: + import uncertainties.unumpy as unp + from uncertainties import UFloat, ufloat + + HAS_UNCERTAINTIES = True +except ImportError: + unp = np + ufloat = Ufloat = None + HAS_UNCERTAINTIES = False + def method_wraps(numpy_func): if isinstance(numpy_func, str): @@ -40,7 +51,7 @@ def wrapper(func): return wrapper -class NumpyQuantity: +class NumpyQuantity(Generic[MagnitudeT], PlainQuantity[MagnitudeT]): """ """ # NumPy function/ufunc support @@ -52,11 +63,11 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): return NotImplemented # Replicate types from __array_function__ - types = set( + 
types = { type(arg) for arg in list(inputs) + list(kwargs.values()) if hasattr(arg, "__array_ufunc__") - ) + } return numpy_wrap("ufunc", ufunc, inputs, kwargs, types) @@ -99,19 +110,19 @@ def _numpy_method_wrap(self, func, *args, **kwargs): if output_unit is not None: return self.__class__(value, output_unit) - else: - return value + + return value def __array__(self, t=None) -> np.ndarray: - warnings.warn( - "The unit of the quantity is stripped when downcasting to ndarray.", - UnitStrippedWarning, - stacklevel=2, - ) + if HAS_NUMPY and isinstance(self._magnitude, np.ndarray): + warnings.warn( + "The unit of the quantity is stripped when downcasting to ndarray.", + UnitStrippedWarning, + stacklevel=2, + ) return _to_magnitude(self._magnitude, force_ndarray=True) def clip(self, min=None, max=None, out=None, **kwargs): - if min is not None: if isinstance(min, self.__class__): min = min.to(self).magnitude @@ -129,11 +140,11 @@ def clip(self, min=None, max=None, out=None, **kwargs): raise DimensionalityError("dimensionless", self._units) return self.__class__(self.magnitude.clip(min, max, out, **kwargs), self._units) - def fill(self: NumpyQuantity[np.ndarray], value) -> None: + def fill(self: NumpyQuantity, value) -> None: self._units = value._units return self.magnitude.fill(value.magnitude) - def put(self: NumpyQuantity[np.ndarray], indices, values, mode="raise") -> None: + def put(self: NumpyQuantity, indices, values, mode="raise") -> None: if isinstance(values, self.__class__): values = values.to(self).magnitude elif self.dimensionless: @@ -143,11 +154,11 @@ def put(self: NumpyQuantity[np.ndarray], indices, values, mode="raise") -> None: self.magnitude.put(indices, values, mode) @property - def real(self) -> NumpyQuantity[_MagnitudeType]: + def real(self) -> NumpyQuantity: return self.__class__(self._magnitude.real, self._units) @property - def imag(self) -> NumpyQuantity[_MagnitudeType]: + def imag(self) -> NumpyQuantity: return self.__class__(self._magnitude.imag, self._units) @property @@ -163,6 +174,10 @@ def flat(self): def shape(self) -> Shape: return self._magnitude.shape + @property + def dtype(self): + return self._magnitude.dtype + @shape.setter def shape(self, value): self._magnitude.shape = value @@ -223,6 +238,11 @@ def __getattr__(self, item) -> Any: ) else: raise exc + elif ( + HAS_UNCERTAINTIES and item == "ndim" and isinstance(self._magnitude, UFloat) + ): + # Dimensionality of a single UFloat is 0, like any other scalar + return 0 try: return getattr(self._magnitude, item) @@ -245,7 +265,12 @@ def __getitem__(self, key): def __setitem__(self, key, value): try: - if np.ma.is_masked(value) or math.isnan(value): + # If we're dealing with a masked single value or a nan, set it + if ( + isinstance(self._magnitude, np.ma.MaskedArray) + and np.ma.is_masked(value) + and getattr(value, "size", 0) == 1 + ) or (getattr(value, "ndim", 0) == 0 and math.isnan(value)): self._magnitude[key] = value return except TypeError: diff --git a/pint/facets/numpy/registry.py b/pint/facets/numpy/registry.py index 8ae6088fc..e1128f383 100644 --- a/pint/facets/numpy/registry.py +++ b/pint/facets/numpy/registry.py @@ -9,12 +9,20 @@ from __future__ import annotations -from ..plain import PlainRegistry +from typing import Any, Generic + +from ...compat import TypeAlias +from ..plain import GenericPlainRegistry, QuantityT, UnitT from .quantity import NumpyQuantity from .unit import NumpyUnit -class NumpyRegistry(PlainRegistry): +class GenericNumpyRegistry( + Generic[QuantityT, UnitT], 
GenericPlainRegistry[QuantityT, UnitT] +): + pass + - _quantity_class = NumpyQuantity - _unit_class = NumpyUnit +class NumpyRegistry(GenericPlainRegistry[NumpyQuantity[Any], NumpyUnit]): + Quantity: TypeAlias = NumpyQuantity[Any] + Unit: TypeAlias = NumpyUnit diff --git a/pint/facets/numpy/unit.py b/pint/facets/numpy/unit.py index fc948534a..d6bf140a2 100644 --- a/pint/facets/numpy/unit.py +++ b/pint/facets/numpy/unit.py @@ -9,10 +9,10 @@ from __future__ import annotations from ...compat import is_upcast_type +from ..plain import PlainUnit -class NumpyUnit: - +class NumpyUnit(PlainUnit): __array_priority__ = 17 def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): @@ -21,11 +21,11 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): return NotImplemented # Check types and return NotImplemented when upcast type encountered - types = set( + types = { type(arg) for arg in list(inputs) + list(kwargs.values()) if hasattr(arg, "__array_ufunc__") - ) + } if any(is_upcast_type(other) for other in types): return NotImplemented @@ -39,5 +39,5 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ), **kwargs, ) - else: - return NotImplemented + + return NotImplemented diff --git a/pint/facets/plain/__init__.py b/pint/facets/plain/__init__.py index 211d01781..f84dd68f3 100644 --- a/pint/facets/plain/__init__.py +++ b/pint/facets/plain/__init__.py @@ -19,9 +19,11 @@ UnitDefinition, ) from .objects import PlainQuantity, PlainUnit -from .registry import PlainRegistry +from .quantity import MagnitudeT +from .registry import GenericPlainRegistry, PlainRegistry, QuantityT, UnitT __all__ = [ + "GenericPlainRegistry", "PlainUnit", "PlainQuantity", "PlainRegistry", @@ -31,4 +33,7 @@ "PrefixDefinition", "ScaleConverter", "UnitDefinition", + "QuantityT", + "UnitT", + "MagnitudeT", ] diff --git a/pint/facets/plain/definitions.py b/pint/facets/plain/definitions.py index 11a309515..a43ce0dbc 100644 --- a/pint/facets/plain/definitions.py +++ b/pint/facets/plain/definitions.py @@ -13,9 +13,10 @@ import typing as ty from dataclasses import dataclass from functools import cached_property -from typing import Callable, Optional +from typing import Any from ... import errors +from ..._typing import Magnitude from ...converters import Converter from ...util import UnitsContainer @@ -23,7 +24,7 @@ class NotNumeric(Exception): """Internal exception. Do not expose outside Pint""" - def __init__(self, value): + def __init__(self, value: Any): self.value = value @@ -68,15 +69,19 @@ def items(self): @dataclass(frozen=True) -class PrefixDefinition(errors.WithDefErr): - """Definition of a prefix.""" - +class NamedDefinition: #: name of the prefix name: str + + +@dataclass(frozen=True) +class PrefixDefinition(NamedDefinition, errors.WithDefErr): + """Definition of a prefix.""" + #: scaling value for this prefix value: numbers.Number #: canonical symbol - defined_symbol: Optional[str] = "" + defined_symbol: str | None = "" #: additional names for the same prefix aliases: ty.Tuple[str, ...] 
= () @@ -89,8 +94,8 @@ def has_symbol(self) -> bool: return bool(self.defined_symbol) @cached_property - def converter(self): - return Converter.from_arguments(scale=self.value) + def converter(self) -> ScaleConverter: + return ScaleConverter(self.value) def __post_init__(self): if not errors.is_valid_prefix_name(self.name): @@ -109,24 +114,29 @@ def __post_init__(self): @dataclass(frozen=True) -class UnitDefinition(errors.WithDefErr): +class UnitDefinition(NamedDefinition, errors.WithDefErr): """Definition of a unit.""" - #: canonical name of the unit - name: str #: canonical symbol - defined_symbol: ty.Optional[str] + defined_symbol: str | None #: additional names for the same unit - aliases: ty.Tuple[str, ...] + aliases: tuple[str, ...] #: A functiont that converts a value in these units into the reference units - converter: ty.Optional[ty.Union[Callable, Converter]] + # TODO: this has changed as converter is now annotated as converter. + # Briefly, in several places converter attributes like as_multiplicative were + # accesed. So having a generic function is a no go. + # I guess this was never used as errors where not raised. + converter: Converter | None #: Reference units. - reference: ty.Optional[UnitsContainer] + reference: UnitsContainer | None def __post_init__(self): if not errors.is_valid_unit_name(self.name): raise self.def_err(errors.MSG_INVALID_UNIT_NAME) + # TODO: check why reference: Optional[UnitsContainer] + assert isinstance(self.reference, UnitsContainer) + if not any(map(errors.is_dim, self.reference.keys())): invalid = tuple( itertools.filterfalse(errors.is_valid_unit_name, self.reference.keys()) @@ -180,14 +190,20 @@ def __post_init__(self): @property def is_base(self) -> bool: """Indicates if it is a base unit.""" + + # TODO: This is set in __post_init__ return self._is_base @property def is_multiplicative(self) -> bool: + # TODO: Check how to avoid this check + assert isinstance(self.converter, Converter) return self.converter.is_multiplicative @property def is_logarithmic(self) -> bool: + # TODO: Check how to avoid this check + assert isinstance(self.converter, Converter) return self.converter.is_logarithmic @property @@ -200,17 +216,14 @@ def has_symbol(self) -> bool: @dataclass(frozen=True) -class DimensionDefinition(errors.WithDefErr): +class DimensionDefinition(NamedDefinition, errors.WithDefErr): """Definition of a root dimension""" - #: name of the dimension - name: str - @property - def is_base(self): + def is_base(self) -> bool: return True - def __post_init__(self): + def __post_init__(self) -> None: if not errors.is_valid_dimension_name(self.name): raise self.def_err(errors.MSG_INVALID_DIMENSION_NAME) @@ -223,7 +236,7 @@ class DerivedDimensionDefinition(DimensionDefinition): reference: UnitsContainer @property - def is_base(self): + def is_base(self) -> bool: return False def __post_init__(self): @@ -272,7 +285,7 @@ class ScaleConverter(Converter): scale: float - def to_reference(self, value, inplace=False): + def to_reference(self, value: Magnitude, inplace: bool = False) -> Magnitude: if inplace: value *= self.scale else: @@ -280,7 +293,7 @@ def to_reference(self, value, inplace=False): return value - def from_reference(self, value, inplace=False): + def from_reference(self, value: Magnitude, inplace: bool = False) -> Magnitude: if inplace: value /= self.scale else: diff --git a/pint/facets/plain/objects.py b/pint/facets/plain/objects.py index 5b2837bb4..a868c7f95 100644 --- a/pint/facets/plain/objects.py +++ b/pint/facets/plain/objects.py @@ -11,4 
+11,4 @@ from .quantity import PlainQuantity from .unit import PlainUnit, UnitsContainer -__all__ = [PlainUnit, PlainQuantity, UnitsContainer] +__all__ = ["PlainUnit", "PlainQuantity", "UnitsContainer"] diff --git a/pint/facets/plain/qto.py b/pint/facets/plain/qto.py new file mode 100644 index 000000000..9d8b7f611 --- /dev/null +++ b/pint/facets/plain/qto.py @@ -0,0 +1,424 @@ +from __future__ import annotations + +import bisect +import math +import numbers +import warnings +from typing import TYPE_CHECKING + +from ...compat import ( + mip_INF, + mip_INTEGER, + mip_Model, + mip_model, + mip_OptimizationStatus, + mip_xsum, +) +from ...errors import UndefinedBehavior +from ...util import infer_base_unit + +if TYPE_CHECKING: + from ..._typing import UnitLike + from ...util import UnitsContainer + from .quantity import PlainQuantity + + +def _get_reduced_units( + quantity: PlainQuantity, units: UnitsContainer +) -> UnitsContainer: + # loop through individual units and compare to each other unit + # can we do better than a nested loop here? + for unit1, exp in units.items(): + # make sure it wasn't already reduced to zero exponent on prior pass + if unit1 not in units: + continue + for unit2 in units: + # get exponent after reduction + exp = units[unit1] + if unit1 != unit2: + power = quantity._REGISTRY._get_dimensionality_ratio(unit1, unit2) + if power: + units = units.add(unit2, exp / power).remove([unit1]) + break + return units + + +def ito_reduced_units(quantity: PlainQuantity) -> None: + """Return PlainQuantity scaled in place to reduced units, i.e. one unit per + dimension. This will not reduce compound units (e.g., 'J/kg' will not + be reduced to m**2/s**2), nor can it make use of contexts at this time. + """ + + # shortcuts in case we're dimensionless or only a single unit + if quantity.dimensionless: + return quantity.ito({}) + if len(quantity._units) == 1: + return None + + units = quantity._units.copy() + new_units = _get_reduced_units(quantity, units) + + return quantity.ito(new_units) + + +def to_reduced_units( + quantity: PlainQuantity, +) -> PlainQuantity: + """Return PlainQuantity scaled in place to reduced units, i.e. one unit per + dimension. This will not reduce compound units (intentionally), nor + can it make use of contexts at this time. + """ + + # shortcuts in case we're dimensionless or only a single unit + if quantity.dimensionless: + return quantity.to({}) + if len(quantity._units) == 1: + return quantity + + units = quantity._units.copy() + new_units = _get_reduced_units(quantity, units) + + return quantity.to(new_units) + + +def to_compact( + quantity: PlainQuantity, unit: UnitsContainer | None = None +) -> PlainQuantity: + """ "Return PlainQuantity rescaled to compact, human-readable units. + + To get output in terms of a different unit, use the unit parameter. 
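As a quick usage sketch of the reduced-units helpers being moved into `qto` above (a default `UnitRegistry` is assumed; printed values are approximate):

```python
import pint

ureg = pint.UnitRegistry()

# to_reduced_units collapses to one unit per dimension, without
# breaking compound units apart (J/kg stays J/kg).
area = ureg.Quantity(10, "ft") * ureg.Quantity(1, "m")
print(area.to_reduced_units())    # e.g. 3.048 meter ** 2

energy = ureg.Quantity(1.0, "J / kg")
print(energy.to_reduced_units())  # stays 1.0 joule / kilogram
```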
+ + + Examples + -------- + + >>> import pint + >>> ureg = pint.UnitRegistry() + >>> (200e-9*ureg.s).to_compact() + + >>> (1e-2*ureg('kg m/s^2')).to_compact('N') + + """ + + if not isinstance(quantity.magnitude, numbers.Number) and not hasattr( + quantity.magnitude, "nominal_value" + ): + warnings.warn( + "to_compact applied to non numerical types has an undefined behavior.", + UndefinedBehavior, + stacklevel=2, + ) + return quantity + + qm = ( + quantity.magnitude + if not hasattr(quantity.magnitude, "nominal_value") + else quantity.magnitude.nominal_value + ) + if quantity.unitless or qm == 0 or math.isnan(qm) or math.isinf(qm): + return quantity + + SI_prefixes: dict[int, str] = {} + for prefix in quantity._REGISTRY._prefixes.values(): + try: + scale = prefix.converter.scale + # Kludgy way to check if this is an SI prefix + log10_scale = int(math.log10(scale)) + if log10_scale == math.log10(scale): + SI_prefixes[log10_scale] = prefix.name + except Exception: + SI_prefixes[0] = "" + + SI_prefixes_list = sorted(SI_prefixes.items()) + SI_powers = [item[0] for item in SI_prefixes_list] + SI_bases = [item[1] for item in SI_prefixes_list] + + if unit is None: + unit = infer_base_unit(quantity, registry=quantity._REGISTRY) + else: + unit = infer_base_unit(quantity.__class__(1, unit), registry=quantity._REGISTRY) + + q_base = quantity.to(unit) + + magnitude = q_base.magnitude + # Support uncertainties + if hasattr(magnitude, "nominal_value"): + magnitude = magnitude.nominal_value + + units = list(q_base._units.items()) + units_numerator = [a for a in units if a[1] > 0] + + if len(units_numerator) > 0: + unit_str, unit_power = units_numerator[0] + else: + unit_str, unit_power = units[0] + + if unit_power > 0: + power = math.floor(math.log10(abs(magnitude)) / float(unit_power) / 3) * 3 + else: + power = math.ceil(math.log10(abs(magnitude)) / float(unit_power) / 3) * 3 + + index = bisect.bisect_left(SI_powers, power) + + if index >= len(SI_bases): + index = -1 + + prefix_str = SI_bases[index] + + new_unit_str = prefix_str + unit_str + new_unit_container = q_base._units.rename(unit_str, new_unit_str) + + return quantity.to(new_unit_container) + + +def to_preferred( + quantity: PlainQuantity, preferred_units: list[UnitLike] | None = None +) -> PlainQuantity: + """Return Quantity converted to a unit composed of the preferred units. + + Examples + -------- + + >>> import pint + >>> ureg = pint.UnitRegistry() + >>> (1*ureg.acre).to_preferred([ureg.meters]) + + >>> (1*(ureg.force_pound*ureg.m)).to_preferred([ureg.W]) + + """ + + units = _get_preferred(quantity, preferred_units) + return quantity.to(units) + + +def ito_preferred( + quantity: PlainQuantity, preferred_units: list[UnitLike] | None = None +) -> PlainQuantity: + """Return Quantity converted to a unit composed of the preferred units. + + Examples + -------- + + >>> import pint + >>> ureg = pint.UnitRegistry() + >>> (1*ureg.acre).to_preferred([ureg.meters]) + + >>> (1*(ureg.force_pound*ureg.m)).to_preferred([ureg.W]) + + """ + + units = _get_preferred(quantity, preferred_units) + return quantity.ito(units) + + +def _get_preferred( + quantity: PlainQuantity, preferred_units: list[UnitLike] | None = None +) -> PlainQuantity: + if preferred_units is None: + preferred_units = quantity._REGISTRY.default_preferred_units + + if not quantity.dimensionality: + return quantity._units.copy() + + # The optimizer isn't perfect, and will sometimes miss obvious solutions. + # This sub-algorithm is less powerful, but always finds the very simple solutions. 
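And a corresponding sketch for `to_compact` and `to_preferred`, mirroring the docstring examples above. Note that `to_preferred` may fall back to the optional `mip` solver when the simple search cannot match the target dimensionality (as in the last line below), so that example assumes `mip` is installed:

```python
import pint

ureg = pint.UnitRegistry()

print((200e-9 * ureg.s).to_compact())                # 200.0 nanosecond
print((1e-2 * ureg("kg m/s^2")).to_compact("N"))     # 10.0 millinewton

print((1 * ureg.acre).to_preferred([ureg.meters]))   # about 4.05e3 meter ** 2
print((1 * (ureg.force_pound * ureg.m)).to_preferred([ureg.W]))  # about 4.45 watt * second
```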
+ def find_simple(): + best_ratio = None + best_unit = None + self_dims = sorted(quantity.dimensionality) + self_exps = [quantity.dimensionality[d] for d in self_dims] + s_exps_head, *s_exps_tail = self_exps + n = len(s_exps_tail) + for preferred_unit in preferred_units: + dims = sorted(preferred_unit.dimensionality) + if dims == self_dims: + p_exps_head, *p_exps_tail = ( + preferred_unit.dimensionality[d] for d in dims + ) + if all( + s_exps_tail[i] * p_exps_head == p_exps_tail[i] ** s_exps_head + for i in range(n) + ): + ratio = p_exps_head / s_exps_head + ratio = max(ratio, 1 / ratio) + if best_ratio is None or ratio < best_ratio: + best_ratio = ratio + best_unit = preferred_unit ** (s_exps_head / p_exps_head) + return best_unit + + simple = find_simple() + if simple is not None: + return simple + + # For each dimension (e.g. T(ime), L(ength), M(ass)), assign a default base unit from + # the collection of base units + + unit_selections = { + base_unit.dimensionality: base_unit + for base_unit in map(quantity._REGISTRY.Unit, quantity._REGISTRY._base_units) + } + + # Override the default unit of each dimension with the 1D-units used in this Quantity + unit_selections.update( + { + unit.dimensionality: unit + for unit in map(quantity._REGISTRY.Unit, quantity._units.keys()) + } + ) + + # Determine the preferred unit for each dimensionality from the preferred_units + # (A prefered unit doesn't have to be only one dimensional, e.g. Watts) + preferred_dims = { + preferred_unit.dimensionality: preferred_unit + for preferred_unit in map(quantity._REGISTRY.Unit, preferred_units) + } + + # Combine the defaults and preferred, favoring the preferred + unit_selections.update(preferred_dims) + + # This algorithm has poor asymptotic time complexity, so first reduce the considered + # dimensions and units to only those that are useful to the problem + + # The dimensions (without powers) of this Quantity + dimension_set = set(quantity.dimensionality) + + # Getting zero exponents in dimensions not in dimension_set can be facilitated + # by units that interact with that dimension and one or more dimension_set members. + # For example MT^1 * LT^-1 lets you get MLT^0 when T is not in dimension_set. + # For each candidate unit that interacts with a dimension_set member, add the + # candidate unit's other dimensions to dimension_set, and repeat until no more + # dimensions are selected. 
+ + discovery_done = False + while not discovery_done: + discovery_done = True + for d in unit_selections: + unit_dimensions = set(d) + intersection = unit_dimensions.intersection(dimension_set) + if 0 < len(intersection) < len(unit_dimensions): + # there are dimensions in this unit that are in dimension set + # and others that are not in dimension set + dimension_set = dimension_set.union(unit_dimensions) + discovery_done = False + break + + # filter out dimensions and their unit selections that don't interact with any + # dimension_set members + unit_selections = { + dimensionality: unit + for dimensionality, unit in unit_selections.items() + if set(dimensionality).intersection(dimension_set) + } + + # update preferred_units with the selected units that were originally preferred + preferred_units = list( + {u for d, u in unit_selections.items() if d in preferred_dims} + ) + preferred_units.sort(key=str) # for determinism + + # and unpreferred_units are the selected units that weren't originally preferred + unpreferred_units = list( + {u for d, u in unit_selections.items() if d not in preferred_dims} + ) + unpreferred_units.sort(key=str) # for determinism + + # for indexability + dimensions = list(dimension_set) + dimensions.sort() # for determinism + + # the powers for each elemet of dimensions (the list) for this Quantity + dimensionality = [quantity.dimensionality[dimension] for dimension in dimensions] + + # Now that the input data is minimized, setup the optimization problem + + # use mip to select units from preferred units + + model = mip_Model() + model.verbose = 0 + + # Make one variable for each candidate unit + + vars = [ + model.add_var(str(unit), lb=-mip_INF, ub=mip_INF, var_type=mip_INTEGER) + for unit in (preferred_units + unpreferred_units) + ] + + # where [u1 ... uN] are powers of N candidate units (vars) + # and [d1(uI) ... dK(uI)] are the K dimensional exponents of candidate unit I + # and [t1 ... tK] are the dimensional exponents of the quantity (quantity) + # create the following constraints + # + # ⎡ d1(u1) ⋯ dK(u1) ⎤ + # [ u1 ⋯ uN ] * ⎢ ⋮ ⋱ ⎢ = [ t1 ⋯ tK ] + # ⎣ d1(uN) dK(uN) ⎦ + # + # in English, the units we choose, and their exponents, when combined, must have the + # target dimensionality + + matrix = [ + [preferred_unit.dimensionality[dimension] for dimension in dimensions] + for preferred_unit in (preferred_units + unpreferred_units) + ] + + # Do the matrix multiplication with mip_model.xsum for performance and create constraints + for i in range(len(dimensions)): + dot = mip_model.xsum([var * vector[i] for var, vector in zip(vars, matrix)]) + # add constraint to the model + model += dot == dimensionality[i] + + # where [c1 ... cN] are costs, 1 when a preferred variable, and a large value when not + # minimize sum(abs(u1) * c1 ... abs(uN) * cN) + + # linearize the optimization variable via a proxy + objective = model.add_var("objective", lb=0, ub=mip_INF, var_type=mip_INTEGER) + + # Constrain the objective to be equal to the sums of the absolute values of the preferred + # unit powers. Do this by making a separate constraint for each permutation of signedness. 
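To make the constraint system described above concrete, here is a tiny worked instance. It uses a plain least-squares solve purely for illustration; the code above uses an integer program instead, so that the chosen exponents are integers and the cost terms can steer the result toward the preferred units:

```python
import numpy as np

# Target quantity: 1 N*m, dimensionality (M, L, T) = (1, 2, -2)
# Candidate units:  W -> (1, 2, -3),  s -> (0, 0, 1)
A = np.array([
    [1, 2, -3],   # dimensional exponents of W
    [0, 0,  1],   # dimensional exponents of s
])
target = np.array([1, 2, -2])

# Solve  [u_W, u_s] @ A == target  for the unit exponents.
powers, *_ = np.linalg.lstsq(A.T, target, rcond=None)
print(np.round(powers, 6))  # [1. 1.]  ->  N*m is expressed as W**1 * s**1
```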
+ # Also apply the cost coefficient, which causes the output to prefer the preferred units + + # prefer units that interact with fewer dimensions + cost = [len(p.dimensionality) for p in preferred_units] + + # set the cost for non preferred units to a higher number + bias = ( + max(map(abs, dimensionality)) * max((1, *cost)) * 10 + ) # arbitrary, just needs to be larger + cost.extend([bias] * len(unpreferred_units)) + + for i in range(1 << len(vars)): + sum = mip_xsum( + [ + (-1 if i & 1 << (len(vars) - j - 1) else 1) * cost[j] * var + for j, var in enumerate(vars) + ] + ) + model += objective >= sum + + model.objective = objective + + # run the mips minimizer and extract the result if successful + if model.optimize() == mip_OptimizationStatus.OPTIMAL: + optimal_units = [] + min_objective = float("inf") + for i in range(model.num_solutions): + if model.objective_values[i] < min_objective: + min_objective = model.objective_values[i] + optimal_units.clear() + elif model.objective_values[i] > min_objective: + continue + + temp_unit = quantity._REGISTRY.Unit("") + for var in vars: + if var.xi(i): + temp_unit *= quantity._REGISTRY.Unit(var.name) ** var.xi(i) + optimal_units.append(temp_unit) + + sorting_keys = {tuple(sorted(unit._units)): unit for unit in optimal_units} + min_key = sorted(sorting_keys)[0] + result_unit = sorting_keys[min_key] + + return result_unit + + # for whatever reason, a solution wasn't found + # return the original quantity + return quantity._units.copy() diff --git a/pint/facets/plain/quantity.py b/pint/facets/plain/quantity.py index 314cc3a30..70b5b57cc 100644 --- a/pint/facets/plain/quantity.py +++ b/pint/facets/plain/quantity.py @@ -1,42 +1,33 @@ """ - pint.facets.plain.quantity - ~~~~~~~~~~~~~~~~~~~~~~~~~ +pint.facets.plain.quantity +~~~~~~~~~~~~~~~~~~~~~~~~~ - :copyright: 2022 by Pint Authors, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. +:copyright: 2022 by Pint Authors, see AUTHORS for more details. +:license: BSD, see LICENSE for more details. """ from __future__ import annotations -import bisect import copy import datetime import locale -import math import numbers import operator -import warnings +from collections.abc import Callable, Iterable, Iterator, Sequence from typing import ( TYPE_CHECKING, Any, - Callable, - Dict, Generic, - Iterable, - Iterator, - List, - Optional, - Sequence, - Tuple, TypeVar, - Union, overload, ) -from ..._typing import S, UnitLike, _MagnitudeType +from ..._typing import Magnitude, QuantityOrUnitLike, Scalar, UnitLike from ...compat import ( HAS_NUMPY, + Self, _to_magnitude, + deprecated, eq, is_duck_array_type, is_upcast_type, @@ -48,38 +39,46 @@ PrettyIPython, SharedRegistryObject, UnitsContainer, - infer_base_unit, logger, to_units_container, ) +from . 
import qto from .definitions import UnitDefinition if TYPE_CHECKING: from ..context import Context - from .unit import Unit + from .unit import PlainUnit as Unit from .unit import UnitsContainer as UnitsContainerT if HAS_NUMPY: import numpy as np # noqa +try: + import uncertainties.unumpy as unp + from uncertainties import UFloat, ufloat -def reduce_dimensions(f): - def wrapped(self, *args, **kwargs): - result = f(self, *args, **kwargs) - try: - if result._REGISTRY.auto_reduce_dimensions: - return result.to_reduced_units() - else: - return result - except AttributeError: - return result + HAS_UNCERTAINTIES = True +except ImportError: + unp = np + ufloat = Ufloat = None + HAS_UNCERTAINTIES = False - return wrapped + +MagnitudeT = TypeVar("MagnitudeT", bound=Magnitude) +ScalarT = TypeVar("ScalarT", bound=Scalar) + +T = TypeVar("T", bound=Magnitude) def ireduce_dimensions(f): def wrapped(self, *args, **kwargs): result = f(self, *args, **kwargs) + try: + if result._REGISTRY.autoconvert_to_preferred: + result.ito_preferred() + except AttributeError: + pass + try: if result._REGISTRY.auto_reduce_dimensions: result.ito_reduced_units() @@ -116,14 +115,10 @@ def wrapper(func): return wrapper -# Workaround to bypass dynamically generated PlainQuantity with overload method -Magnitude = TypeVar("Magnitude") - - # TODO: remove all nonmultiplicative remnants -class PlainQuantity(PrettyIPython, SharedRegistryObject, Generic[_MagnitudeType]): +class PlainQuantity(Generic[MagnitudeT], PrettyIPython, SharedRegistryObject): """Implements a class to describe a physical quantity: the product of a numerical value and a unit of measurement. @@ -139,14 +134,14 @@ class PlainQuantity(PrettyIPython, SharedRegistryObject, Generic[_MagnitudeType] """ - #: Default formatting string. - default_format: str = "" - _magnitude: _MagnitudeType + _magnitude: MagnitudeT @property def ndim(self) -> int: if isinstance(self.magnitude, numbers.Number): return 0 + if str(type(self.magnitude)) == "NAType": + return 0 return self.magnitude.ndim @property @@ -157,11 +152,7 @@ def force_ndarray(self) -> bool: def force_ndarray_like(self) -> bool: return self._REGISTRY.force_ndarray_like - @property - def UnitsContainer(self) -> Callable[..., UnitsContainerT]: - return self._REGISTRY.UnitsContainer - - def __reduce__(self) -> tuple: + def __reduce__(self) -> tuple[type, Magnitude, UnitsContainer]: """Allow pickling quantities. Since UnitRegistries are not pickled, upon unpickling the new object is always attached to the application registry. """ @@ -169,30 +160,29 @@ def __reduce__(self) -> tuple: # Note: type(self) would be a mistake as subclasses built by # dinamically can't be pickled + # TODO: Check if this is still the case. return _unpickle_quantity, (PlainQuantity, self.magnitude, self._units) @overload def __new__( - cls, value: str, units: Optional[UnitLike] = None - ) -> PlainQuantity[Magnitude]: + cls, value: MagnitudeT, units: UnitLike | None = None + ) -> PlainQuantity[MagnitudeT]: ... @overload - def __new__( # type: ignore[misc] - cls, value: Sequence, units: Optional[UnitLike] = None - ) -> PlainQuantity[np.ndarray]: + def __new__(cls, value: str, units: UnitLike | None = None) -> PlainQuantity[Any]: ... @overload - def __new__( - cls, value: PlainQuantity[Magnitude], units: Optional[UnitLike] = None - ) -> PlainQuantity[Magnitude]: + def __new__( # type: ignore[misc] + cls, value: Sequence[ScalarT], units: UnitLike | None = None + ) -> PlainQuantity[Any]: ... 
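The `__new__` overloads above cover the usual construction forms; a quick sketch of what each accepts (default registry assumed, and NumPy installed for the sequence case):

```python
import pint

ureg = pint.UnitRegistry()
Q_ = ureg.Quantity

q1 = Q_(3.0, "meter")      # magnitude + units
q2 = Q_("3.0 meter")       # a single string is parsed
q3 = Q_([1.0, 2.0], "s")   # a sequence becomes an array-backed quantity
q4 = Q_(q1)                # copy-construct from an existing quantity
print(q1, q2, q3.magnitude, q4, sep="\n")
```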
@overload def __new__( - cls, value: Magnitude, units: Optional[UnitLike] = None - ) -> PlainQuantity[Magnitude]: + cls, value: PlainQuantity[Any], units: UnitLike | None = None + ) -> PlainQuantity[Any]: ... def __new__(cls, value, units=None): @@ -244,7 +234,7 @@ def __new__(cls, value, units=None): return inst - def __iter__(self: PlainQuantity[Iterable[S]]) -> Iterator[S]: + def __iter__(self: PlainQuantity[MagnitudeT]) -> Iterator[Any]: # Make sure that, if self.magnitude is not iterable, we raise TypeError as soon # as one calls iter(self) without waiting for the first element to be drawn from # the iterator @@ -256,46 +246,61 @@ def it_outer(): return it_outer() - def __copy__(self) -> PlainQuantity[_MagnitudeType]: + def __copy__(self) -> PlainQuantity[MagnitudeT]: ret = self.__class__(copy.copy(self._magnitude), self._units) return ret - def __deepcopy__(self, memo) -> PlainQuantity[_MagnitudeType]: + def __deepcopy__(self, memo) -> PlainQuantity[MagnitudeT]: ret = self.__class__( copy.deepcopy(self._magnitude, memo), copy.deepcopy(self._units, memo) ) return ret + @deprecated( + "This function will be removed in future versions of pint.\n" + "Use ureg.formatter.format_quantity_babel" + ) + def format_babel(self, spec: str = "", **kwspec: Any) -> str: + return self._REGISTRY.formatter.format_quantity_babel(self, spec, **kwspec) + + def __format__(self, spec: str) -> str: + return self._REGISTRY.formatter.format_quantity(self, spec) + def __str__(self) -> str: - return str(self.magnitude) + " " + str(self.units) + return self._REGISTRY.formatter.format_quantity(self) def __bytes__(self) -> bytes: return str(self).encode(locale.getpreferredencoding()) def __repr__(self) -> str: - if isinstance(self._magnitude, float): + if HAS_UNCERTAINTIES: + if isinstance(self._magnitude, UFloat): + return f"" + else: + return f"" + elif isinstance(self._magnitude, float): return f"" - else: - return f"" + + return f"" def __hash__(self) -> int: self_base = self.to_base_units() if self_base.dimensionless: return hash(self_base.magnitude) - else: - return hash((self_base.__class__, self_base.magnitude, self_base.units)) + + return hash((self_base.__class__, self_base.magnitude, self_base.units)) @property - def magnitude(self) -> _MagnitudeType: + def magnitude(self) -> MagnitudeT: """PlainQuantity's magnitude. Long form for `m`""" return self._magnitude @property - def m(self) -> _MagnitudeType: + def m(self) -> MagnitudeT: """PlainQuantity's magnitude. Short form for `magnitude`""" return self._magnitude - def m_as(self, units) -> _MagnitudeType: + def m_as(self, units) -> MagnitudeT: """PlainQuantity's magnitude expressed in particular units. Parameters @@ -310,12 +315,12 @@ def m_as(self, units) -> _MagnitudeType: return self.to(units).magnitude @property - def units(self) -> "Unit": + def units(self) -> Unit: """PlainQuantity's units. Long form for `u`""" return self._REGISTRY.Unit(self._units) @property - def u(self) -> "Unit": + def u(self) -> Unit: """PlainQuantity's units. 
Short form for `units`""" return self._REGISTRY.Unit(self._units) @@ -324,6 +329,10 @@ def unitless(self) -> bool: """ """ return not bool(self.to_root_units()._units) + def unit_items(self) -> Iterable[tuple[str, Scalar]]: + """A view of the unit items.""" + return self._units.unit_items() + @property def dimensionless(self) -> bool: """ """ @@ -331,7 +340,7 @@ def dimensionless(self) -> bool: return not bool(tmp.dimensionality) - _dimensionality: Optional[UnitsContainerT] = None + _dimensionality: UnitsContainerT | None = None @property def dimensionality(self) -> UnitsContainerT: @@ -352,8 +361,8 @@ def check(self, dimension: UnitLike) -> bool: @classmethod def from_list( - cls, quant_list: List[PlainQuantity], units=None - ) -> PlainQuantity[np.ndarray]: + cls, quant_list: list[PlainQuantity[MagnitudeT]], units=None + ) -> PlainQuantity[MagnitudeT]: """Transforms a list of Quantities into an numpy.array quantity. If no units are specified, the unit of the first element will be used. Same as from_sequence. @@ -376,8 +385,8 @@ def from_list( @classmethod def from_sequence( - cls, seq: Sequence[PlainQuantity], units=None - ) -> PlainQuantity[np.ndarray]: + cls, seq: Sequence[PlainQuantity[MagnitudeT]], units=None + ) -> PlainQuantity[MagnitudeT]: """Transforms a sequence of Quantities into an numpy.array quantity. If no units are specified, the unit of the first element will be used. @@ -415,7 +424,7 @@ def from_sequence( def from_tuple(cls, tup): return cls(tup[0], cls._REGISTRY.UnitsContainer(tup[1])) - def to_tuple(self) -> Tuple[_MagnitudeType, Tuple[Tuple[str]]]: + def to_tuple(self) -> tuple[MagnitudeT, tuple[tuple[str, ...]]]: return self.m, tuple(self._units.items()) def compatible_units(self, *contexts): @@ -426,7 +435,7 @@ def compatible_units(self, *contexts): return self._REGISTRY.get_compatible_units(self._units) def is_compatible_with( - self, other: Any, *contexts: Union[str, Context], **ctx_kwargs: Any + self, other: Any, *contexts: str | Context, **ctx_kwargs: Any ) -> bool: """check if the other object is compatible @@ -482,7 +491,9 @@ def _convert_magnitude(self, other, *contexts, **ctx_kwargs): inplace=is_duck_array_type(type(self._magnitude)), ) - def ito(self, other=None, *contexts, **ctx_kwargs) -> None: + def ito( + self, other: QuantityOrUnitLike | None = None, *contexts, **ctx_kwargs + ) -> None: """Inplace rescale to different units. Parameters @@ -494,6 +505,7 @@ def ito(self, other=None, *contexts, **ctx_kwargs) -> None: **ctx_kwargs : Values for the Context/s """ + other = to_units_container(other, self._REGISTRY) self._magnitude = self._convert_magnitude(other, *contexts, **ctx_kwargs) @@ -501,7 +513,9 @@ def ito(self, other=None, *contexts, **ctx_kwargs) -> None: return None - def to(self, other=None, *contexts, **ctx_kwargs) -> PlainQuantity[_MagnitudeType]: + def to( + self, other: QuantityOrUnitLike | None = None, *contexts, **ctx_kwargs + ) -> Self: """Return PlainQuantity rescaled to different units. 
Parameters @@ -533,7 +547,7 @@ def ito_root_units(self) -> None: return None - def to_root_units(self) -> PlainQuantity[_MagnitudeType]: + def to_root_units(self) -> PlainQuantity[MagnitudeT]: """Return PlainQuantity rescaled to root units.""" _, other = self._REGISTRY._get_root_units(self._units) @@ -552,7 +566,7 @@ def ito_base_units(self) -> None: return None - def to_base_units(self) -> PlainQuantity[_MagnitudeType]: + def to_base_units(self) -> PlainQuantity[MagnitudeT]: """Return PlainQuantity rescaled to plain units.""" _, other = self._REGISTRY._get_base_units(self._units) @@ -561,139 +575,14 @@ def to_base_units(self) -> PlainQuantity[_MagnitudeType]: return self.__class__(magnitude, other) - def _get_reduced_units(self, units): - # loop through individual units and compare to each other unit - # can we do better than a nested loop here? - for unit1, exp in units.items(): - # make sure it wasn't already reduced to zero exponent on prior pass - if unit1 not in units: - continue - for unit2 in units: - # get exponent after reduction - exp = units[unit1] - if unit1 != unit2: - power = self._REGISTRY._get_dimensionality_ratio(unit1, unit2) - if power: - units = units.add(unit2, exp / power).remove([unit1]) - break - return units - - def ito_reduced_units(self) -> None: - """Return PlainQuantity scaled in place to reduced units, i.e. one unit per - dimension. This will not reduce compound units (e.g., 'J/kg' will not - be reduced to m**2/s**2), nor can it make use of contexts at this time. - """ - - # shortcuts in case we're dimensionless or only a single unit - if self.dimensionless: - return self.ito({}) - if len(self._units) == 1: - return None - - units = self._units.copy() - new_units = self._get_reduced_units(units) - - return self.ito(new_units) - - def to_reduced_units(self) -> PlainQuantity[_MagnitudeType]: - """Return PlainQuantity scaled in place to reduced units, i.e. one unit per - dimension. This will not reduce compound units (intentionally), nor - can it make use of contexts at this time. - """ - - # shortcuts in case we're dimensionless or only a single unit - if self.dimensionless: - return self.to({}) - if len(self._units) == 1: - return self - - units = self._units.copy() - new_units = self._get_reduced_units(units) - - return self.to(new_units) - - def to_compact(self, unit=None) -> PlainQuantity[_MagnitudeType]: - """ "Return PlainQuantity rescaled to compact, human-readable units. - - To get output in terms of a different unit, use the unit parameter. - - - Examples - -------- - - >>> import pint - >>> ureg = pint.UnitRegistry() - >>> (200e-9*ureg.s).to_compact() - - >>> (1e-2*ureg('kg m/s^2')).to_compact('N') - - """ - - if not isinstance(self.magnitude, numbers.Number): - msg = ( - "to_compact applied to non numerical types " - "has an undefined behavior." 
- ) - w = RuntimeWarning(msg) - warnings.warn(w, stacklevel=2) - return self - - if ( - self.unitless - or self.magnitude == 0 - or math.isnan(self.magnitude) - or math.isinf(self.magnitude) - ): - return self - - SI_prefixes: Dict[int, str] = {} - for prefix in self._REGISTRY._prefixes.values(): - try: - scale = prefix.converter.scale - # Kludgy way to check if this is an SI prefix - log10_scale = int(math.log10(scale)) - if log10_scale == math.log10(scale): - SI_prefixes[log10_scale] = prefix.name - except Exception: - SI_prefixes[0] = "" - - SI_prefixes_list = sorted(SI_prefixes.items()) - SI_powers = [item[0] for item in SI_prefixes_list] - SI_bases = [item[1] for item in SI_prefixes_list] - - if unit is None: - unit = infer_base_unit(self, registry=self._REGISTRY) - else: - unit = infer_base_unit(self.__class__(1, unit), registry=self._REGISTRY) - - q_base = self.to(unit) - - magnitude = q_base.magnitude - - units = list(q_base._units.items()) - units_numerator = [a for a in units if a[1] > 0] - - if len(units_numerator) > 0: - unit_str, unit_power = units_numerator[0] - else: - unit_str, unit_power = units[0] - - if unit_power > 0: - power = math.floor(math.log10(abs(magnitude)) / float(unit_power) / 3) * 3 - else: - power = math.ceil(math.log10(abs(magnitude)) / float(unit_power) / 3) * 3 - - index = bisect.bisect_left(SI_powers, power) - - if index >= len(SI_bases): - index = -1 - - prefix_str = SI_bases[index] - - new_unit_str = prefix_str + unit_str - new_unit_container = q_base._units.rename(unit_str, new_unit_str) - - return self.to(new_unit_container) + # Functions not essential to a Quantity but it is + # convenient that they live in PlainQuantity. + # They are implemented elsewhere to keep Quantity class clean. + to_compact = qto.to_compact + to_preferred = qto.to_preferred + ito_preferred = qto.ito_preferred + to_reduced_units = qto.to_reduced_units + ito_reduced_units = qto.ito_reduced_units # Mathematical operations def __int__(self) -> int: @@ -932,6 +821,25 @@ def _add_sub(self, other, op): tu = other._units.rename(other_non_mul_unit, "delta_" + other_non_mul_unit) magnitude = op(self._convert_magnitude_not_inplace(tu), other._magnitude) units = other._units + elif ( + self._REGISTRY.logarithmic_math + and op == operator.add + and len(self_non_mul_units) == 1 + and len(other_non_mul_units) == 1 + and getattr( + self._get_unit_definition(self_non_mul_units[0]), + "is_logarithmic", + False, + ) + and getattr( + other._get_unit_definition(other_non_mul_units[0]), + "is_logarithmic", + False, + ) + ): + return (self.to_base_units() + other.to_base_units()).to( + self.units + ) # logarithmic addition: converts logarithmic unit to dimensionless and converts back to the unit of the self else: raise OffsetUnitCalculusError(self._units, other._units) @@ -942,7 +850,7 @@ def __iadd__(self, other: datetime.datetime) -> datetime.timedelta: # type: ign ... @overload - def __iadd__(self, other) -> PlainQuantity[_MagnitudeType]: + def __iadd__(self, other) -> PlainQuantity[MagnitudeT]: ... 
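A side note on the pattern used just above, where `to_compact`, `to_preferred` and friends are plain functions defined in `qto` and bound as methods in the class body. A minimal, self-contained illustration of the same idea (names invented for the example):

```python
def shout(obj) -> str:
    # A module-level function whose first parameter plays the role of `self`.
    return str(obj).upper()


class Thing:
    def __str__(self) -> str:
        return "thing"

    # Assigning the function in the class body turns it into an ordinary method,
    # mirroring `to_compact = qto.to_compact` above.
    shout = shout


print(Thing().shout())  # prints THING
```

This keeps the quantity class itself lean while the conversion helpers live in a separate, independently testable module.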
def __iadd__(self, other): @@ -950,22 +858,22 @@ def __iadd__(self, other): return self.to_timedelta() + other elif is_duck_array_type(type(self._magnitude)): return self._iadd_sub(other, operator.iadd) - else: - return self._add_sub(other, operator.add) + + return self._add_sub(other, operator.add) def __add__(self, other): if isinstance(other, datetime.datetime): return self.to_timedelta() + other - else: - return self._add_sub(other, operator.add) + + return self._add_sub(other, operator.add) __radd__ = __add__ def __isub__(self, other): if is_duck_array_type(type(self._magnitude)): return self._iadd_sub(other, operator.isub) - else: - return self._add_sub(other, operator.sub) + + return self._add_sub(other, operator.sub) def __sub__(self, other): return self._add_sub(other, operator.sub) @@ -973,8 +881,8 @@ def __sub__(self, other): def __rsub__(self, other): if isinstance(other, datetime.datetime): return other - self.to_timedelta() - else: - return -self._add_sub(other, operator.sub) + + return -self._add_sub(other, operator.sub) @check_implemented @ireduce_dimensions @@ -1004,14 +912,13 @@ def _imul_div(self, other, magnitude_op, units_op=None): no_offset_units_self = len(offset_units_self) if not self._check(other): - if not self._ok_for_muldiv(no_offset_units_self): raise OffsetUnitCalculusError(self._units, getattr(other, "units", "")) if len(offset_units_self) == 1: - if self._units[offset_units_self[0]] != 1 or magnitude_op not in [ + if self._units[offset_units_self[0]] != 1 or magnitude_op not in ( operator.mul, operator.imul, - ]: + ): raise OffsetUnitCalculusError( self._units, getattr(other, "units", "") ) @@ -1032,14 +939,14 @@ def _imul_div(self, other, magnitude_op, units_op=None): if not self._ok_for_muldiv(no_offset_units_self): raise OffsetUnitCalculusError(self._units, other._units) - elif no_offset_units_self == 1 and len(self._units) == 1: + elif no_offset_units_self == len(self._units) == 1: self.ito_root_units() no_offset_units_other = len(other._get_non_multiplicative_units()) if not other._ok_for_muldiv(no_offset_units_other): raise OffsetUnitCalculusError(self._units, other._units) - elif no_offset_units_other == 1 and len(other._units) == 1: + elif no_offset_units_other == len(other._units) == 1: other.ito_root_units() self._magnitude = magnitude_op(self._magnitude, other._magnitude) @@ -1074,14 +981,13 @@ def _mul_div(self, other, magnitude_op, units_op=None): no_offset_units_self = len(offset_units_self) if not self._check(other): - if not self._ok_for_muldiv(no_offset_units_self): raise OffsetUnitCalculusError(self._units, getattr(other, "units", "")) if len(offset_units_self) == 1: - if self._units[offset_units_self[0]] != 1 or magnitude_op not in [ + if self._units[offset_units_self[0]] != 1 or magnitude_op not in ( operator.mul, operator.imul, - ]: + ): raise OffsetUnitCalculusError( self._units, getattr(other, "units", "") ) @@ -1106,14 +1012,14 @@ def _mul_div(self, other, magnitude_op, units_op=None): if not self._ok_for_muldiv(no_offset_units_self): raise OffsetUnitCalculusError(self._units, other._units) - elif no_offset_units_self == 1 and len(self._units) == 1: + elif no_offset_units_self == len(self._units) == 1: new_self = self.to_root_units() no_offset_units_other = len(other._get_non_multiplicative_units()) if not other._ok_for_muldiv(no_offset_units_other): raise OffsetUnitCalculusError(self._units, other._units) - elif no_offset_units_other == 1 and len(other._units) == 1: + elif no_offset_units_other == len(other._units) == 1: other = 
other.to_root_units() magnitude = magnitude_op(new_self._magnitude, other._magnitude) @@ -1124,8 +1030,8 @@ def _mul_div(self, other, magnitude_op, units_op=None): def __imul__(self, other): if is_duck_array_type(type(self._magnitude)): return self._imul_div(other, operator.imul) - else: - return self._mul_div(other, operator.mul) + + return self._mul_div(other, operator.mul) def __mul__(self, other): return self._mul_div(other, operator.mul) @@ -1137,13 +1043,23 @@ def __matmul__(self, other): __rmatmul__ = __matmul__ + def _truedivide_cast_int(self, a, b): + t = self._REGISTRY.non_int_type + if isinstance(a, int): + a = t(a) + if isinstance(b, int): + b = t(b) + return operator.truediv(a, b) + def __itruediv__(self, other): if is_duck_array_type(type(self._magnitude)): return self._imul_div(other, operator.itruediv) - else: - return self._mul_div(other, operator.truediv) + + return self._mul_div(other, operator.truediv) def __truediv__(self, other): + if isinstance(self.m, int) or isinstance(getattr(other, "m", None), int): + return self._mul_div(other, self._truedivide_cast_int, operator.truediv) return self._mul_div(other, operator.truediv) def __rtruediv__(self, other): @@ -1159,7 +1075,7 @@ def __rtruediv__(self, other): no_offset_units_self = len(self._get_non_multiplicative_units()) if not self._ok_for_muldiv(no_offset_units_self): raise OffsetUnitCalculusError(self._units, "") - elif no_offset_units_self == 1 and len(self._units) == 1: + elif no_offset_units_self == len(self._units) == 1: self = self.to_root_units() return self.__class__(other_magnitude / self._magnitude, 1 / self._units) @@ -1310,7 +1226,7 @@ def __ipow__(self, other): return self @check_implemented - def __pow__(self, other) -> PlainQuantity[_MagnitudeType]: + def __pow__(self, other) -> PlainQuantity[MagnitudeT]: try: _to_magnitude(other, self.force_ndarray, self.force_ndarray_like) except PintTypeError: @@ -1375,7 +1291,7 @@ def __pow__(self, other) -> PlainQuantity[_MagnitudeType]: return self.__class__(magnitude, units) @check_implemented - def __rpow__(self, other) -> PlainQuantity[_MagnitudeType]: + def __rpow__(self, other) -> PlainQuantity[MagnitudeT]: try: _to_magnitude(other, self.force_ndarray, self.force_ndarray_like) except PintTypeError: @@ -1388,16 +1304,16 @@ def __rpow__(self, other) -> PlainQuantity[_MagnitudeType]: new_self = self.to_root_units() return other**new_self._magnitude - def __abs__(self) -> PlainQuantity[_MagnitudeType]: + def __abs__(self) -> PlainQuantity[MagnitudeT]: return self.__class__(abs(self._magnitude), self._units) - def __round__(self, ndigits: Optional[int] = 0) -> PlainQuantity[int]: - return self.__class__(round(self._magnitude, ndigits=ndigits), self._units) + def __round__(self, ndigits: int | None = None) -> PlainQuantity[int]: + return self.__class__(round(self._magnitude, ndigits), self._units) - def __pos__(self) -> PlainQuantity[_MagnitudeType]: + def __pos__(self) -> PlainQuantity[MagnitudeT]: return self.__class__(operator.pos(self._magnitude), self._units) - def __neg__(self) -> PlainQuantity[_MagnitudeType]: + def __neg__(self) -> PlainQuantity[MagnitudeT]: return self.__class__(operator.neg(self._magnitude), self._units) @check_implemented @@ -1417,6 +1333,9 @@ def bool_result(value): # We compare to the plain class of PlainQuantity because # each PlainQuantity class is unique. 
if not isinstance(other, PlainQuantity): + if other is None: + # A loop in pandas-dev/pandas/core/common.py(86)consensus_name_attr() can result in OTHER being None + return bool_result(False) if zero_or_nan(other, True): # Handle the special case in which we compare to zero or NaN # (or an array of zeros or NaNs) @@ -1484,9 +1403,7 @@ def compare(self, other, op): else: raise OffsetUnitCalculusError(self._units) else: - raise ValueError( - "Cannot compare PlainQuantity and {}".format(type(other)) - ) + raise ValueError(f"Cannot compare PlainQuantity and {type(other)}") # Registry equality check based on util.SharedRegistryObject if self._REGISTRY is not other._REGISTRY: @@ -1555,11 +1472,11 @@ def _is_multiplicative(self) -> bool: """Check if the PlainQuantity object has only multiplicative units.""" return True - def _get_non_multiplicative_units(self) -> List[str]: + def _get_non_multiplicative_units(self) -> list[str]: """Return a list of the of non-multiplicative units of the PlainQuantity object.""" return [] - def _get_delta_units(self) -> List[str]: + def _get_delta_units(self) -> list[str]: """Return list of delta units ot the PlainQuantity object.""" return [u for u in self._units if u.startswith("delta_")] @@ -1570,5 +1487,14 @@ def _has_compatible_delta(self, unit: str) -> bool: def _ok_for_muldiv(self, no_offset_units=None) -> bool: return True - def to_timedelta(self: PlainQuantity[float]) -> datetime.timedelta: + def to_timedelta(self: PlainQuantity[MagnitudeT]) -> datetime.timedelta: return datetime.timedelta(microseconds=self.to("microseconds").magnitude) + + # We put this last to avoid overriding UnitsContainer + # and I do not want to rename it. + # TODO: Maybe in the future we need to change it to a more meaningful + # non-colliding name. + + @property + def UnitsContainer(self) -> Callable[..., UnitsContainerT]: + return self._REGISTRY.UnitsContainer diff --git a/pint/facets/plain/registry.py b/pint/facets/plain/registry.py index ffa6fb43e..482992f31 100644 --- a/pint/facets/plain/registry.py +++ b/pint/facets/plain/registry.py @@ -4,6 +4,21 @@ :copyright: 2022 by Pint Authors, see AUTHORS for more details. :license: BSD, see LICENSE for more details. + + The registry contains the following important methods: + + - parse_unit_name: Parse a unit to identify prefix, unit name and suffix + by walking the list of prefix and suffix. + Result is cached: NO + - parse_units: Parse a units expression and returns a UnitContainer with + the canonical names. + The expression can only contain products, ratios and powers of units; + prefixed units and pluralized units. + Result is cached: YES + - parse_expression: Parse a mathematical expression including units and + return a quantity object. 
+ Result is cached: NO + """ from __future__ import annotations @@ -12,46 +27,49 @@ import functools import inspect import itertools -import locale import pathlib import re from collections import defaultdict +from collections.abc import Callable, Generator, Iterable, Iterator from decimal import Decimal from fractions import Fraction -from numbers import Number from token import NAME, NUMBER +from tokenize import TokenInfo from typing import ( TYPE_CHECKING, Any, - Callable, - Dict, - FrozenSet, - Iterable, - Iterator, - List, - Optional, - Set, - Tuple, - Type, + Generic, TypeVar, Union, ) if TYPE_CHECKING: + from ...compat import Locale from ..context import Context - from pint import Quantity, Unit -from ..._typing import QuantityOrUnitLike, UnitLike -from ..._vendor import appdirs -from ...compat import HAS_BABEL, babel_parse, tokenizer -from ...errors import DimensionalityError, RedefinitionError, UndefinedUnitError + # from ..._typing import Quantity, Unit + +import platformdirs + +from ... import pint_eval +from ..._typing import ( + Handler, + QuantityArgument, + QuantityOrUnitLike, + Scalar, + UnitLike, +) +from ...compat import Self, TypeAlias, deprecated +from ...errors import ( + DimensionalityError, + OffsetUnitCalculusError, + RedefinitionError, + UndefinedUnitError, +) from ...pint_eval import build_eval_tree -from ...util import ParserHelper -from ...util import UnitsContainer -from ...util import UnitsContainer as UnitsContainerT from ...util import ( + ParserHelper, _is_dim, - build_dependent_class, create_class_with_registry, getattr_maybe_raise, logger, @@ -59,34 +77,29 @@ string_preprocessor, to_units_container, ) +from ...util import UnitsContainer as UnitsContainer from .definitions import ( AliasDefinition, CommentDefinition, DefaultsDefinition, DerivedDimensionDefinition, DimensionDefinition, + NamedDefinition, PrefixDefinition, UnitDefinition, ) from .objects import PlainQuantity, PlainUnit -if TYPE_CHECKING: - - if HAS_BABEL: - import babel - - Locale = babel.Locale - else: - Locale = None - T = TypeVar("T") _BLOCK_RE = re.compile(r"[ (]") -@functools.lru_cache() -def pattern_to_regex(pattern): - if hasattr(pattern, "finditer"): +@functools.lru_cache +def pattern_to_regex(pattern: str | re.Pattern[str]) -> re.Pattern[str]: + # TODO: This has been changed during typing improvements. + # if hasattr(pattern, "finditer"): + if not isinstance(pattern, str): pattern = pattern.pattern # Replace "{unit_name}" match string with float regex with unit_name as group @@ -97,7 +110,7 @@ def pattern_to_regex(pattern): return re.compile(pattern) -NON_INT_TYPE = Type[Union[float, Decimal, Fraction]] +NON_INT_TYPE = type[Union[float, Decimal, Fraction]] PreprocessorType = Callable[[str], str] @@ -106,15 +119,23 @@ class RegistryCache: def __init__(self) -> None: #: Maps dimensionality (UnitsContainer) to Units (str) - self.dimensional_equivalents: Dict[UnitsContainer, Set[str]] = {} + self.dimensional_equivalents: dict[UnitsContainer, frozenset[str]] = {} + #: Maps dimensionality (UnitsContainer) to Dimensionality (UnitsContainer) - self.root_units = {} + # TODO: this description is not right. + self.root_units: dict[UnitsContainer, tuple[Scalar, UnitsContainer]] = {} + #: Maps dimensionality (UnitsContainer) to Units (UnitsContainer) - self.dimensionality: Dict[UnitsContainer, UnitsContainer] = {} + self.dimensionality: dict[UnitsContainer, UnitsContainer] = {} + #: Cache the unit name associated to user input. 
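The new module docstring above distinguishes the three parsing entry points and notes which results are cached (`parse_units` yes, `parse_unit_name` and `parse_expression` no). A quick, hedged illustration of the three calls; the reprs shown are indicative, not verbatim:

>>> import pint
>>> ureg = pint.UnitRegistry()
>>> ureg.parse_unit_name("millivolt")        # (prefix, unit name, suffix) triplets; not cached
(('milli', 'volt', ''),)
>>> ureg.parse_units("millivolt / second")   # Unit with canonical names; cached per input string
<Unit('millivolt / second')>
>>> ureg.parse_expression("3 millivolt")     # full expression -> Quantity; not cached
<Quantity(3, 'millivolt')>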
('mV' -> 'millivolt') - self.parse_unit: Dict[str, UnitsContainer] = {} + self.parse_unit: dict[str, UnitsContainer] = {} - def __eq__(self, other): + self.conversion_factor: dict[ + tuple[UnitsContainer, UnitsContainer], Scalar | DimensionalityError + ] = {} + + def __eq__(self, other: Any): if not isinstance(other, self.__class__): return False attrs = ( @@ -122,6 +143,7 @@ def __eq__(self, other): "root_units", "dimensionality", "parse_unit", + "conversion_factor", ) return all(getattr(self, attr) == getattr(other, attr) for attr in attrs) @@ -131,13 +153,18 @@ class RegistryMeta(type): instead of asking the developer to do it when subclassing. """ - def __call__(self, *args, **kwargs): + def __call__(self, *args: Any, **kwargs: Any): obj = super().__call__(*args, **kwargs) obj._after_init() return obj -class PlainRegistry(metaclass=RegistryMeta): +# Generic types used to mark types associated to Registries. +QuantityT = TypeVar("QuantityT", bound=PlainQuantity[Any]) +UnitT = TypeVar("UnitT", bound=PlainUnit) + + +class GenericPlainRegistry(Generic[QuantityT, UnitT], metaclass=RegistryMeta): """Base class for all registries. Capabilities: @@ -163,6 +190,8 @@ class PlainRegistry(metaclass=RegistryMeta): action to take in case a unit is redefined: 'warn', 'raise', 'ignore' auto_reduce_dimensions : If True, reduce dimensionality on appropriate operations. + autoconvert_to_preferred : + If True, converts preferred units on appropriate operations. preprocessors : list of callables which are iteratively ran on any input expression or unit string @@ -181,14 +210,10 @@ class PlainRegistry(metaclass=RegistryMeta): future release. """ - #: Babel.Locale instance or None - fmt_locale: Optional[Locale] = None + Quantity: type[QuantityT] + Unit: type[UnitT] _diskcache = None - - _quantity_class = PlainQuantity - _unit_class = PlainUnit - _def_parser = None def __init__( @@ -198,21 +223,22 @@ def __init__( force_ndarray_like: bool = False, on_redefinition: str = "warn", auto_reduce_dimensions: bool = False, - preprocessors: Optional[List[PreprocessorType]] = None, - fmt_locale: Optional[str] = None, + autoconvert_to_preferred: bool = False, + preprocessors: list[PreprocessorType] | None = None, + fmt_locale: str | None = None, non_int_type: NON_INT_TYPE = float, case_sensitive: bool = True, - cache_folder: Union[str, pathlib.Path, None] = None, - separate_format_defaults: Optional[bool] = None, + cache_folder: str | pathlib.Path | None = None, + separate_format_defaults: bool | None = None, + mpl_formatter: str = "{:P}", ): #: Map a definition class to a adder methods. - self._adders = dict() + self._adders: Handler = {} self._register_definition_adders() self._init_dynamic_classes() if cache_folder == ":auto:": - cache_folder = appdirs.user_cache_dir(appname="pint", appauthor=False) - cache_folder = pathlib.Path(cache_folder) + cache_folder = platformdirs.user_cache_path(appname="pint", appauthor=False) from ... 
import delegates # TODO: change thiss @@ -225,6 +251,7 @@ def __init__( delegates.ParserConfig(non_int_type), diskcache=self._diskcache ) + self.formatter = delegates.Formatter(self) self._filename = filename self.force_ndarray = force_ndarray self.force_ndarray_like = force_ndarray_like @@ -232,6 +259,9 @@ def __init__( # use a default preprocessor to support "%" self.preprocessors.insert(0, lambda string: string.replace("%", " percent ")) + # use a default preprocessor to support permille "‰" + self.preprocessors.insert(0, lambda string: string.replace("‰", " permille ")) + #: mode used to fill in the format defaults self.separate_format_defaults = separate_format_defaults @@ -241,8 +271,14 @@ def __init__( #: Determines if dimensionality should be reduced on appropriate operations. self.auto_reduce_dimensions = auto_reduce_dimensions + #: Determines if units will be converted to preffered on appropriate operations. + self.autoconvert_to_preferred = autoconvert_to_preferred + #: Default locale identifier string, used when calling format_babel without explicit locale. - self.set_fmt_locale(fmt_locale) + self.formatter.set_locale(fmt_locale) + + #: sets the formatter used when plotting with matplotlib + self.mpl_formatter = mpl_formatter #: Numerical type used for non integer values. self._non_int_type = non_int_type @@ -252,39 +288,37 @@ def __init__( #: Map between name (string) and value (string) of defaults stored in the #: definitions file. - self._defaults: Dict[str, str] = {} + self._defaults: dict[str, str] = {} #: Map dimension name (string) to its definition (DimensionDefinition). - self._dimensions: Dict[ - str, Union[DimensionDefinition, DerivedDimensionDefinition] + self._dimensions: dict[ + str, DimensionDefinition | DerivedDimensionDefinition ] = {} #: Map unit name (string) to its definition (UnitDefinition). #: Might contain prefixed units. - self._units: Dict[str, UnitDefinition] = {} + self._units: dict[str, UnitDefinition] = {} + + #: List base unit names + self._base_units: list[str] = [] #: Map unit name in lower case (string) to a set of unit names with the right #: case. #: Does not contain prefixed units. #: e.g: 'hz' - > set('Hz', ) - self._units_casei: Dict[str, Set[str]] = defaultdict(set) + self._units_casei: dict[str, set[str]] = defaultdict(set) #: Map prefix name (string) to its definition (PrefixDefinition). 
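Next to the existing `%` rule, the constructor now installs a default preprocessor that rewrites the permille sign before tokenizing. A hedged sketch of the effect, assuming the bundled definitions provide `percent` and `permille` units; outputs are indicative:

>>> import pint
>>> ureg = pint.UnitRegistry()
>>> ureg("2.5%")                    # '%' is preprocessed to ' percent '
<Quantity(2.5, 'percent')>
>>> ureg("3‰").to("percent")        # '‰' is preprocessed to ' permille '
<Quantity(0.3, 'percent')>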
- self._prefixes: Dict[str, PrefixDefinition] = {"": PrefixDefinition("", 1)} + self._prefixes: dict[str, PrefixDefinition] = {"": PrefixDefinition("", 1)} #: Map suffix name (string) to canonical , and unit alias to canonical unit name - self._suffixes: Dict[str, str] = {"": "", "s": ""} + self._suffixes: dict[str, str] = {"": "", "s": ""} #: Map contexts to RegistryCache self._cache = RegistryCache() self._initialized = False - def __init_subclass__(cls, **kwargs): - super().__init_subclass__() - cls.Unit = build_dependent_class(cls, "Unit", "_unit_class") - cls.Quantity = build_dependent_class(cls, "Quantity", "_quantity_class") - def _init_dynamic_classes(self) -> None: """Generate subclasses on the fly and attach them to self""" @@ -305,7 +339,16 @@ def _after_init(self) -> None: self._build_cache(loaded_files) self._initialized = True - def _register_adder(self, definition_class, adder_func): + def _register_adder( + self, + definition_class: type[T], + adder_func: Callable[ + [ + T, + ], + None, + ], + ) -> None: """Register a block definition.""" self._adders[definition_class] = adder_func @@ -318,24 +361,26 @@ def _register_definition_adders(self) -> None: self._register_adder(DimensionDefinition, self._add_dimension) self._register_adder(DerivedDimensionDefinition, self._add_derived_dimension) - def __deepcopy__(self, memo) -> "PlainRegistry": + def __deepcopy__(self: Self, memo) -> type[Self]: new = object.__new__(type(self)) new.__dict__ = copy.deepcopy(self.__dict__, memo) new._init_dynamic_classes() return new - def __getattr__(self, item): + def __getattr__(self, item: str) -> UnitT: getattr_maybe_raise(self, item) + + # self.Unit will call parse_units return self.Unit(item) - def __getitem__(self, item): - logger.warning( - "Calling the getitem method from a UnitRegistry is deprecated. " - "use `parse_expression` method or use the registry as a callable." - ) + @deprecated( + "Calling the getitem method from a UnitRegistry will be removed in future versions of pint.\n" + "use `parse_expression` method or use the registry as a callable." + ) + def __getitem__(self, item: str) -> UnitT: return self.parse_expression(item) - def __contains__(self, item) -> bool: + def __contains__(self, item: str) -> bool: """Support checking prefixed units with the `in` operator""" try: self.__getattr__(item) @@ -343,7 +388,7 @@ def __contains__(self, item) -> bool: except UndefinedUnitError: return False - def __dir__(self) -> List[str]: + def __dir__(self) -> list[str]: #: Calling dir(registry) gives all units, methods, and attributes. #: Also used for autocompletion in IPython. return list(self._units.keys()) + list(object.__dir__(self)) @@ -357,7 +402,27 @@ def __iter__(self) -> Iterator[str]: """ return iter(sorted(self._units.keys())) - def set_fmt_locale(self, loc: Optional[str]) -> None: + @property + @deprecated( + "This function will be removed in future versions of pint.\n" + "Use ureg.formatter.fmt_locale" + ) + def fmt_locale(self) -> Locale | None: + return self.formatter.locale + + @fmt_locale.setter + @deprecated( + "This function will be removed in future versions of pint.\n" + "Use ureg.formatter.set_locale" + ) + def fmt_locale(self, loc: str | None): + self.formatter.set_locale(loc) + + @deprecated( + "This function will be removed in future versions of pint.\n" + "Use ureg.formatter.set_locale" + ) + def set_fmt_locale(self, loc: str | None) -> None: """Change the locale used by default by `format_babel`. 
Parameters @@ -365,31 +430,28 @@ def set_fmt_locale(self, loc: Optional[str]) -> None: loc : str or None None` (do not translate), 'sys' (detect the system locale) or a locale id string. """ - if isinstance(loc, str): - if loc == "sys": - loc = locale.getdefaultlocale()[0] - # We call babel parse to fail here and not in the formatting operation - babel_parse(loc) - - self.fmt_locale = loc - - def UnitsContainer(self, *args, **kwargs) -> UnitsContainerT: - return UnitsContainer(*args, non_int_type=self.non_int_type, **kwargs) + self.formatter.set_locale(loc) @property + @deprecated( + "This function will be removed in future versions of pint.\n" + "Use ureg.formatter.default_format" + ) def default_format(self) -> str: """Default formatting string for quantities.""" - return self.Quantity.default_format + return self.formatter.default_format @default_format.setter - def default_format(self, value: str): - self.Unit.default_format = value - self.Quantity.default_format = value - self.Measurement.default_format = value + @deprecated( + "This function will be removed in future versions of pint.\n" + "Use ureg.formatter.default_format" + ) + def default_format(self, value: str) -> None: + self.formatter.default_format = value @property - def cache_folder(self) -> Optional[pathlib.Path]: + def cache_folder(self) -> pathlib.Path | None: if self._diskcache: return self._diskcache.cache_folder return None @@ -398,7 +460,7 @@ def cache_folder(self) -> Optional[pathlib.Path]: def non_int_type(self): return self._non_int_type - def define(self, definition): + def define(self, definition: str | type) -> None: """Add unit to the registry. Parameters @@ -421,7 +483,7 @@ def define(self, definition): # - then we define specific adder for each definition class. :-D ############ - def _helper_dispatch_adder(self, definition): + def _helper_dispatch_adder(self, definition: Any) -> None: """Helper function to add a single definition, choosing the appropiate method by class. """ @@ -436,7 +498,12 @@ def _helper_dispatch_adder(self, definition): adder_func(definition) - def _helper_adder(self, definition, target_dict, casei_target_dict): + def _helper_adder( + self, + definition: NamedDefinition, + target_dict: dict[str, Any], + casei_target_dict: dict[str, Any] | None, + ) -> None: """Helper function to store a definition in the internal dictionaries. It stores the definition under its name, symbol and aliases. """ @@ -444,6 +511,7 @@ def _helper_adder(self, definition, target_dict, casei_target_dict): definition.name, definition, target_dict, casei_target_dict ) + # TODO: Not sure why but using hasattr does not work here. if getattr(definition, "has_symbol", ""): self._helper_single_adder( definition.symbol, definition, target_dict, casei_target_dict @@ -455,7 +523,13 @@ def _helper_adder(self, definition, target_dict, casei_target_dict): self._helper_single_adder(alias, definition, target_dict, casei_target_dict) - def _helper_single_adder(self, key, value, target_dict, casei_target_dict): + def _helper_single_adder( + self, + key: str, + value: NamedDefinition, + target_dict: dict[str, Any], + casei_target_dict: dict[str, Any] | None, + ) -> None: """Helper function to store a definition in the internal dictionaries. It warns or raise error on redefinition. 
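Several of the dunder methods touched here define how the registry behaves as a namespace: attribute access builds a `Unit` (prefixed names included), `in` reports whether a name can be resolved, and item access is now deprecated in favour of calling the registry. A short illustration; reprs are indicative:

>>> import pint
>>> ureg = pint.UnitRegistry()
>>> ureg.millimeter                 # __getattr__ -> Unit
<Unit('millimeter')>
>>> "kPa" in ureg                   # __contains__ resolves prefixed units too
True
>>> "spam" in ureg
False
>>> ureg("3 kPa")                   # preferred over the deprecated ureg["3 kPa"]
<Quantity(3, 'kilopascal')>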
@@ -464,17 +538,17 @@ def _helper_single_adder(self, key, value, target_dict, casei_target_dict): if self._on_redefinition == "raise": raise RedefinitionError(key, type(value)) elif self._on_redefinition == "warn": - logger.warning("Redefining '%s' (%s)" % (key, type(value))) + logger.warning(f"Redefining '{key}' ({type(value)})") target_dict[key] = value if casei_target_dict is not None: casei_target_dict[key.lower()].add(key) - def _add_defaults(self, defaults_definition: DefaultsDefinition): + def _add_defaults(self, defaults_definition: DefaultsDefinition) -> None: for k, v in defaults_definition.items(): self._defaults[k] = v - def _add_alias(self, definition: AliasDefinition): + def _add_alias(self, definition: AliasDefinition) -> None: unit_dict = self._units unit = unit_dict[definition.name] while not isinstance(unit, UnitDefinition): @@ -482,27 +556,30 @@ def _add_alias(self, definition: AliasDefinition): for alias in definition.aliases: self._helper_single_adder(alias, unit, self._units, self._units_casei) - def _add_dimension(self, definition: DimensionDefinition): + def _add_dimension(self, definition: DimensionDefinition) -> None: self._helper_adder(definition, self._dimensions, None) - def _add_derived_dimension(self, definition: DerivedDimensionDefinition): + def _add_derived_dimension(self, definition: DerivedDimensionDefinition) -> None: for dim_name in definition.reference.keys(): if dim_name not in self._dimensions: self._add_dimension(DimensionDefinition(dim_name)) self._helper_adder(definition, self._dimensions, None) - def _add_prefix(self, definition: PrefixDefinition): + def _add_prefix(self, definition: PrefixDefinition) -> None: self._helper_adder(definition, self._prefixes, None) - def _add_unit(self, definition: UnitDefinition): + def _add_unit(self, definition: UnitDefinition) -> None: if definition.is_base: + self._base_units.append(definition.name) for dim_name in definition.reference.keys(): if dim_name not in self._dimensions: self._add_dimension(DimensionDefinition(dim_name)) self._helper_adder(definition, self._units, self._units_casei) - def load_definitions(self, file, is_resource: bool = False): + def load_definitions( + self, file: Iterable[str] | str | pathlib.Path, is_resource: bool = False + ): """Add units and prefixes defined in a definition text file. Parameters @@ -538,8 +615,8 @@ def _build_cache(self, loaded_files=None) -> None: self._cache = RegistryCache() - deps = { - name: definition.reference.keys() if definition.reference else set() + deps: dict[str, set[str]] = { + name: set(definition.reference.keys()) if definition.reference else set() for name, definition in self._units.items() } @@ -572,9 +649,7 @@ def _build_cache(self, loaded_files=None) -> None: logger.warning(f"Could not resolve {unit_name}: {exc!r}") return self._cache - def get_name( - self, name_or_alias: str, case_sensitive: Optional[bool] = None - ) -> str: + def get_name(self, name_or_alias: str, case_sensitive: bool | None = None) -> str: """Return the canonical name of a unit.""" if name_or_alias == "dimensionless": @@ -588,23 +663,26 @@ def get_name( candidates = self.parse_unit_name(name_or_alias, case_sensitive) if not candidates: raise UndefinedUnitError(name_or_alias) - elif len(candidates) == 1: - prefix, unit_name, _ = candidates[0] - else: + + prefix, unit_name, _ = candidates[0] + if len(candidates) > 1: logger.warning( - "Parsing {} yield multiple results. 
" - "Options are: {}".format(name_or_alias, candidates) + f"Parsing {name_or_alias} yield multiple results. Options are: {candidates!r}" ) - prefix, unit_name, _ = candidates[0] if prefix: + if not self._units[unit_name].is_multiplicative: + raise OffsetUnitCalculusError( + "Prefixing a unit requires multiplying the unit." + ) + name = prefix + unit_name symbol = self.get_symbol(name, case_sensitive) prefix_def = self._prefixes[prefix] self._units[name] = UnitDefinition( name, symbol, - (), + tuple(), prefix_def.converter, self.UnitsContainer({unit_name: 1}), ) @@ -612,28 +690,24 @@ def get_name( return unit_name - def get_symbol( - self, name_or_alias: str, case_sensitive: Optional[bool] = None - ) -> str: + def get_symbol(self, name_or_alias: str, case_sensitive: bool | None = None) -> str: """Return the preferred alias for a unit.""" candidates = self.parse_unit_name(name_or_alias, case_sensitive) if not candidates: raise UndefinedUnitError(name_or_alias) - elif len(candidates) == 1: - prefix, unit_name, _ = candidates[0] - else: + + prefix, unit_name, _ = candidates[0] + if len(candidates) > 1: logger.warning( - "Parsing {0} yield multiple results. " - "Options are: {1!r}".format(name_or_alias, candidates) + f"Parsing {name_or_alias} yield multiple results. Options are: {candidates!r}" ) - prefix, unit_name, _ = candidates[0] return self._prefixes[prefix].symbol + self._units[unit_name].symbol def _get_symbol(self, name: str) -> str: return self._units[name].symbol - def get_dimensionality(self, input_units) -> UnitsContainerT: + def get_dimensionality(self, input_units: UnitLike) -> UnitsContainer: """Convert unit or dict of units or dimensions to a dict of plain dimensions dimensions """ @@ -644,9 +718,7 @@ def get_dimensionality(self, input_units) -> UnitsContainerT: return self._get_dimensionality(input_units) - def _get_dimensionality( - self, input_units: Optional[UnitsContainerT] - ) -> UnitsContainerT: + def _get_dimensionality(self, input_units: UnitsContainer | None) -> UnitsContainer: """Convert a UnitsContainer to plain dimensions.""" if not input_units: return self.UnitsContainer() @@ -658,7 +730,7 @@ def _get_dimensionality( except KeyError: pass - accumulator = defaultdict(int) + accumulator: dict[str, int] = defaultdict(int) self._get_dimensionality_recurse(input_units, 1, accumulator) if "[]" in accumulator: @@ -670,21 +742,32 @@ def _get_dimensionality( return dims - def _get_dimensionality_recurse(self, ref, exp, accumulator): + def _get_dimensionality_recurse( + self, ref: UnitsContainer, exp: Scalar, accumulator: dict[str, int] + ) -> None: for key in ref: exp2 = exp * ref[key] if _is_dim(key): - reg = self._dimensions[key] - if reg.is_base: - accumulator[key] += exp2 - elif reg.reference is not None: + try: + reg = self._dimensions[key] + except KeyError: + raise ValueError( + f"{key} is not defined as dimension in the pint UnitRegistry" + ) + if isinstance(reg, DerivedDimensionDefinition): self._get_dimensionality_recurse(reg.reference, exp2, accumulator) + else: + # DimensionDefinition. + accumulator[key] += exp2 + else: reg = self._units[self.get_name(key)] if reg.reference is not None: self._get_dimensionality_recurse(reg.reference, exp2, accumulator) - def _get_dimensionality_ratio(self, unit1, unit2): + def _get_dimensionality_ratio( + self, unit1: UnitLike, unit2: UnitLike + ) -> Scalar | None: """Get the exponential ratio between two units, i.e. solve unit2 = unit1**x for x. 
Parameters @@ -718,7 +801,7 @@ def _get_dimensionality_ratio(self, unit1, unit2): def get_root_units( self, input_units: UnitLike, check_nonmult: bool = True - ) -> Tuple[Number, PlainUnit]: + ) -> tuple[Scalar, UnitT]: """Convert unit or dict of units to the root units. If any unit is non multiplicative and check_converter is True, @@ -745,7 +828,46 @@ def get_root_units( return f, self.Unit(units) - def _get_root_units(self, input_units, check_nonmult=True): + def _get_conversion_factor( + self, src: UnitsContainer, dst: UnitsContainer + ) -> Scalar | DimensionalityError: + """Get conversion factor in non-multiplicative units. + + Parameters + ---------- + src + Source units + dst + Target units + + Returns + ------- + Conversion factor or DimensionalityError + """ + cache = self._cache.conversion_factor + try: + return cache[(src, dst)] + except KeyError: + pass + + src_dim = self._get_dimensionality(src) + dst_dim = self._get_dimensionality(dst) + + # If the source and destination dimensionality are different, + # then the conversion cannot be performed. + if src_dim != dst_dim: + return DimensionalityError(src, dst, src_dim, dst_dim) + + # Here src and dst have only multiplicative units left. Thus we can + # convert with a factor. + factor, _ = self._get_root_units(src / dst) + + cache[(src, dst)] = factor + return factor + + def _get_root_units( + self, input_units: UnitsContainer, check_nonmult: bool = True + ) -> tuple[Scalar, UnitsContainer]: """Convert unit or dict of units to the root units. If any unit is non multiplicative and check_converter is True, @@ -775,12 +897,13 @@ def _get_root_units(self, input_units, check_nonmult=True): except KeyError: pass - accumulators = [1, defaultdict(int)] + accumulators: dict[str | None, int] = defaultdict(int) + accumulators[None] = 1 self._get_root_units_recurse(input_units, 1, accumulators) - factor = accumulators[0] + factor = accumulators[None] units = self.UnitsContainer( - {k: v for k, v in accumulators[1].items() if v != 0} + {k: v for k, v in accumulators.items() if k is not None and v != 0} ) # Check if any of the final units is non multiplicative and return None instead. @@ -791,7 +914,12 @@ def _get_root_units(self, input_units, check_nonmult=True): cache[input_units] = factor, units return factor, units - def get_base_units(self, input_units, check_nonmult=True, system=None): + def get_base_units( + self, + input_units: UnitsContainer | str, + check_nonmult: bool = True, + system=None, + ) -> tuple[Scalar, UnitT]: """Convert unit or dict of units to the plain units. If any unit is non multiplicative and check_converter is True, @@ -817,39 +945,48 @@ def get_base_units(self, input_units, check_nonmult=True, system=None): return self.get_root_units(input_units, check_nonmult) - def _get_root_units_recurse(self, ref, exp, accumulators): + # TODO: accumulators breaks typing list[int, dict[str, int]] + # So we have changed the behavior here + def _get_root_units_recurse( + self, ref: UnitsContainer, exp: Scalar, accumulators: dict[str | None, int] + ) -> None: + """ + + accumulators None keeps the scalar prefactor not associated with a specific unit. 
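The new `_get_conversion_factor` memoises the scalar factor between two unit containers, keyed by `(src, dst)`, so repeated conversions over the same pair skip the root-unit walk (for incompatible pairs the `DimensionalityError` instance itself is cached and re-raised). The public entry points it feeds look roughly like this; numbers and reprs are indicative:

>>> import pint
>>> ureg = pint.UnitRegistry()
>>> ureg.get_dimensionality("inch / minute")
<UnitsContainer({'[length]': 1, '[time]': -1})>
>>> ureg.get_base_units("inch")               # (factor, base unit)
(0.0254, <Unit('meter')>)
>>> ureg.convert(10, "inch", "millimeter")    # the same (src, dst) pair hits the cache next time
254.0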
+ + """ for key in ref: exp2 = exp * ref[key] key = self.get_name(key) reg = self._units[key] if reg.is_base: - accumulators[1][key] += exp2 + accumulators[key] += exp2 else: - accumulators[0] *= reg.converter.scale**exp2 + accumulators[None] *= reg.converter.scale**exp2 if reg.reference is not None: self._get_root_units_recurse(reg.reference, exp2, accumulators) - def get_compatible_units( - self, input_units, group_or_system=None - ) -> FrozenSet[Unit]: + def get_compatible_units(self, input_units: QuantityOrUnitLike) -> frozenset[UnitT]: """ """ input_units = to_units_container(input_units) - equiv = self._get_compatible_units(input_units, group_or_system) + equiv = self._get_compatible_units(input_units) return frozenset(self.Unit(eq) for eq in equiv) - def _get_compatible_units(self, input_units, group_or_system): + def _get_compatible_units( + self, input_units: UnitsContainer, *args, **kwargs + ) -> frozenset[str]: """ """ if not input_units: return frozenset() src_dim = self._get_dimensionality(input_units) - return self._cache.dimensional_equivalents[src_dim] + return self._cache.dimensional_equivalents.setdefault(src_dim, frozenset()) # TODO: remove context from here def is_compatible_with( - self, obj1: Any, obj2: Any, *contexts: Union[str, Context], **ctx_kwargs + self, obj1: Any, obj2: Any, *contexts: str | Context, **ctx_kwargs ) -> bool: """check if the other object is compatible @@ -912,7 +1049,14 @@ def convert( return self._convert(value, src, dst, inplace) - def _convert(self, value, src, dst, inplace=False, check_dimensionality=True): + def _convert( + self, + value: T, + src: UnitsContainer, + dst: UnitsContainer, + inplace: bool = False, + check_dimensionality: bool = True, + ) -> T: """Convert value from some source to destination units. Parameters @@ -935,19 +1079,10 @@ def _convert(self, value, src, dst, inplace=False, check_dimensionality=True): """ - if check_dimensionality: + factor = self._get_conversion_factor(src, dst) - src_dim = self._get_dimensionality(src) - dst_dim = self._get_dimensionality(dst) - - # If the source and destination dimensionality are different, - # then the conversion cannot be performed. - if src_dim != dst_dim: - raise DimensionalityError(src, dst, src_dim, dst_dim) - - # Here src and dst have only multiplicative units left. Thus we can - # convert with a factor. - factor, _ = self._get_root_units(src / dst) + if isinstance(factor, DimensionalityError): + raise factor # factor is type float and if our magnitude is type Decimal then # must first convert to Decimal before we can '*' the values @@ -964,8 +1099,8 @@ def _convert(self, value, src, dst, inplace=False, check_dimensionality=True): return value def parse_unit_name( - self, unit_name: str, case_sensitive: Optional[bool] = None - ) -> Tuple[Tuple[str, str, str], ...]: + self, unit_name: str, case_sensitive: bool | None = None + ) -> tuple[tuple[str, str, str], ...]: """Parse a unit to identify prefix, unit name and suffix by walking the list of prefix and suffix. In case of equivalent combinations (e.g. 
('kilo', 'gram', '') and @@ -984,17 +1119,19 @@ def parse_unit_name( tuple of tuples (str, str, str) all non-equivalent combinations of (prefix, unit name, suffix) """ - return self._dedup_candidates( - self._parse_unit_name(unit_name, case_sensitive=case_sensitive) - ) - def _parse_unit_name( - self, unit_name: str, case_sensitive: Optional[bool] = None - ) -> Iterator[Tuple[str, str, str]]: - """Helper of parse_unit_name.""" case_sensitive = ( self.case_sensitive if case_sensitive is None else case_sensitive ) + return self._dedup_candidates( + self._yield_unit_triplets(unit_name, case_sensitive) + ) + + def _yield_unit_triplets( + self, unit_name: str, case_sensitive: bool + ) -> Generator[tuple[str, str, str]]: + """Helper of parse_unit_name.""" + stw = unit_name.startswith edw = unit_name.endswith for suffix, prefix in itertools.product(self._suffixes, self._prefixes): @@ -1019,10 +1156,13 @@ def _parse_unit_name( self._suffixes[suffix], ) + # TODO: keep this for backward compatibility + _parse_unit_name = _yield_unit_triplets + @staticmethod def _dedup_candidates( - candidates: Iterable[Tuple[str, str, str]] - ) -> Tuple[Tuple[str, str, str], ...]: + candidates: Iterable[tuple[str, str, str]], + ) -> tuple[tuple[str, str, str], ...]: """Helper of parse_unit_name. Given an iterable of unit triplets (prefix, name, suffix), remove those with @@ -1043,9 +1183,9 @@ def _dedup_candidates( def parse_units( self, input_string: str, - as_delta: Optional[bool] = None, - case_sensitive: Optional[bool] = None, - ) -> Unit: + as_delta: bool | None = None, + case_sensitive: bool | None = None, + ) -> UnitT: """Parse a units expression and returns a UnitContainer with the canonical names. @@ -1066,17 +1206,31 @@ def parse_units( pint.Unit """ - for p in self.preprocessors: - input_string = p(input_string) - units = self._parse_units(input_string, as_delta, case_sensitive) - return self.Unit(units) - def _parse_units( + return self.Unit( + self.parse_units_as_container(input_string, as_delta, case_sensitive) + ) + + def parse_units_as_container( + self, + input_string: str, + as_delta: bool | None = None, + case_sensitive: bool | None = None, + ) -> UnitsContainer: + as_delta = ( + as_delta if as_delta is not None else True + ) # TODO This only exists in nonmultiplicative + case_sensitive = ( + case_sensitive if case_sensitive is not None else self.case_sensitive + ) + return self._parse_units_as_container(input_string, as_delta, case_sensitive) + + def _parse_units_as_container( self, input_string: str, as_delta: bool = True, - case_sensitive: Optional[bool] = None, - ) -> UnitsContainerT: + case_sensitive: bool = True, + ) -> UnitsContainer: """Parse a units expression and returns a UnitContainer with the canonical names. 
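`_yield_unit_triplets` (the renamed `_parse_unit_name`) walks every prefix/suffix pair from the registry tables against the ends of the input string and yields whatever survives as a candidate unit name. A stripped-down, hypothetical version of that walk, with made-up table contents, only to show the shape of the algorithm (the real helper also handles case-insensitive matching and canonical names):

import itertools

def candidate_triplets(unit_name, prefixes, suffixes, known_units):
    # Yield (prefix, base name, suffix) splits whose middle part is a known unit name.
    for suffix, prefix in itertools.product(suffixes, prefixes):
        if not (unit_name.startswith(prefix) and unit_name.endswith(suffix)):
            continue
        middle = unit_name[len(prefix):len(unit_name) - len(suffix) or None]
        if middle in known_units:
            yield prefix, middle, suffix

print(list(candidate_triplets(
    "millivolts",
    prefixes=("", "m", "milli"),
    suffixes=("", "s"),
    known_units={"volt", "millivolt", "second"},
)))
# -> [('', 'millivolt', 's'), ('milli', 'volt', 's')]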
""" @@ -1088,6 +1242,9 @@ def _parse_units( if as_delta and input_string in cache and input_string in self._units: return cache[input_string] + for p in self.preprocessors: + input_string = p(input_string) + if not input_string: return self.UnitsContainer() @@ -1116,21 +1273,37 @@ def _parse_units( return ret - def _eval_token(self, token, case_sensitive=None, use_decimal=False, **values): + def _eval_token( + self, + token: TokenInfo, + case_sensitive: bool | None = None, + **values: QuantityArgument, + ): + """Evaluate a single token using the following rules: - # TODO: remove this code when use_decimal is deprecated - if use_decimal: - raise DeprecationWarning( - "`use_decimal` is deprecated, use `non_int_type` keyword argument when instantiating the registry.\n" - ">>> from decimal import Decimal\n" - ">>> ureg = UnitRegistry(non_int_type=Decimal)" - ) + 1. numerical values as strings are replaced by their numeric counterparts + - integers are parsed as integers + - other numeric values are parses of non_int_type + 2. strings in (inf, infinity, nan, dimensionless) with their numerical value. + 3. strings in values.keys() are replaced by Quantity(values[key]) + 4. in other cases, the values are parsed as units and replaced by their canonical name. + Parameters + ---------- + token + Token to evaluate. + case_sensitive, optional + If true, a case sensitive matching of the unit name will be done in the registry. + If false, a case INsensitive matching of the unit name will be done in the registry. + (Default value = None, which uses registry setting) + **values + Other string that will be parsed using the Quantity constructor on their corresponding value. + """ token_type = token[0] token_text = token[1] if token_type == NAME: if token_text == "dimensionless": - return 1 * self.dimensionless + return self.Quantity(1) elif token_text.lower() in ("inf", "infinity"): return self.non_int_type("inf") elif token_text.lower() == "nan": @@ -1153,38 +1326,32 @@ def parse_pattern( self, input_string: str, pattern: str, - case_sensitive: Optional[bool] = None, - use_decimal: bool = False, + case_sensitive: bool | None = None, many: bool = False, - ) -> Union[List[str], str, None]: + ) -> list[str] | str | None: """Parse a string with a given regex pattern and returns result. Parameters ---------- - input_string : + input_string pattern_string: - The regex parse string - case_sensitive : - (Default value = None, which uses registry setting) - use_decimal : - (Default value = False) - many : + The regex parse string + case_sensitive, optional + If true, a case sensitive matching of the unit name will be done in the registry. + If false, a case INsensitive matching of the unit name will be done in the registry. 
+ (Default value = None, which uses registry setting) + many, optional Match many results (Default value = False) - - - Returns - ------- - """ if not input_string: return [] if many else None # Parse string - pattern = pattern_to_regex(pattern) - matched = re.finditer(pattern, input_string) + regex = pattern_to_regex(pattern) + matched = re.finditer(regex, input_string) # Extract result(s) results = [] @@ -1193,13 +1360,10 @@ def parse_pattern( match = match.groupdict() # Parse units - units = [] - for unit, value in match.items(): - # Construct measure by multiplying value by unit - units.append( - float(value) - * self.parse_expression(unit, case_sensitive, use_decimal) - ) + units = [ + float(value) * self.parse_expression(unit, case_sensitive) + for unit, value in match.items() + ] # Add to results results.append(units) @@ -1211,12 +1375,11 @@ def parse_pattern( return results def parse_expression( - self, + self: Self, input_string: str, - case_sensitive: Optional[bool] = None, - use_decimal: bool = False, - **values, - ) -> Quantity: + case_sensitive: bool | None = None, + **values: QuantityArgument, + ) -> QuantityT: """Parse a mathematical expression including units and return a quantity object. Numerical constants can be specified as keyword arguments and will take precedence @@ -1224,38 +1387,38 @@ def parse_expression( Parameters ---------- - input_string : - - case_sensitive : - (Default value = None, which uses registry setting) - use_decimal : - (Default value = False) - **values : - - - Returns - ------- - + input_string + + case_sensitive, optional + If true, a case sensitive matching of the unit name will be done in the registry. + If false, a case INsensitive matching of the unit name will be done in the registry. + (Default value = None, which uses registry setting) + **values + Other string that will be parsed using the Quantity constructor on their corresponding value. """ - - # TODO: remove this code when use_decimal is deprecated - if use_decimal: - raise DeprecationWarning( - "`use_decimal` is deprecated, use `non_int_type` keyword argument when instantiating the registry.\n" - ">>> from decimal import Decimal\n" - ">>> ureg = UnitRegistry(non_int_type=Decimal)" - ) - if not input_string: return self.Quantity(1) for p in self.preprocessors: input_string = p(input_string) input_string = string_preprocessor(input_string) - gen = tokenizer(input_string) + gen = pint_eval.tokenizer(input_string) - return build_eval_tree(gen).evaluate( - lambda x: self._eval_token(x, case_sensitive=case_sensitive, **values) - ) + def _define_op(s: str): + return self._eval_token(s, case_sensitive=case_sensitive, **values) + + return build_eval_tree(gen).evaluate(_define_op) + + # We put this last to avoid overriding UnitsContainer + # and I do not want to rename it. + # TODO: Maybe in the future we need to change it to a more meaningful + # non-colliding name. 
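The expanded `_eval_token` docstring spells out the resolution order: literal numbers, the special names `inf`/`nan`/`dimensionless`, names supplied through `**values`, and finally unit lookups. That order is what lets `parse_expression` accept named constants, as in this hedged example (repr indicative):

>>> import pint
>>> ureg = pint.UnitRegistry()
>>> ureg.parse_expression("speed * time", speed="3 m/s", time="2 s")
<Quantity(6, 'meter')>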
+ def UnitsContainer(self, *args: Any, **kwargs: Any) -> UnitsContainer: + return UnitsContainer(*args, non_int_type=self.non_int_type, **kwargs) __call__ = parse_expression + + +class PlainRegistry(GenericPlainRegistry[PlainQuantity[Any], PlainUnit]): + Quantity: TypeAlias = PlainQuantity[Any] + Unit: TypeAlias = PlainUnit diff --git a/pint/facets/plain/unit.py b/pint/facets/plain/unit.py index 5fb050ba6..0ee05abbc 100644 --- a/pint/facets/plain/unit.py +++ b/pint/facets/plain/unit.py @@ -12,24 +12,21 @@ import locale import operator from numbers import Number -from typing import TYPE_CHECKING, Any, Union +from typing import TYPE_CHECKING, Any from ..._typing import UnitLike -from ...compat import NUMERIC_TYPES +from ...compat import NUMERIC_TYPES, deprecated from ...errors import DimensionalityError from ...util import PrettyIPython, SharedRegistryObject, UnitsContainer from .definitions import UnitDefinition if TYPE_CHECKING: - from pint import Context + from ..context import Context class PlainUnit(PrettyIPython, SharedRegistryObject): """Implements a class to describe a unit supporting math operations.""" - #: Default formatting string. - default_format: str = "" - def __reduce__(self): # See notes in Quantity.__reduce__ from pint import _unpickle_unit @@ -46,8 +43,9 @@ def __init__(self, units: UnitLike) -> None: self._units = units._units else: raise TypeError( - "units must be of type str, Unit or " - "UnitsContainer; not {}.".format(type(units)) + "units must be of type str, Unit or " "UnitsContainer; not {}.".format( + type(units) + ) ) def __copy__(self) -> PlainUnit: @@ -58,14 +56,24 @@ def __deepcopy__(self, memo) -> PlainUnit: ret = self.__class__(copy.deepcopy(self._units, memo)) return ret + @deprecated( + "This function will be removed in future versions of pint.\n" + "Use ureg.formatter.format_unit_babel" + ) + def format_babel(self, spec: str = "", **kwspec: Any) -> str: + return self._REGISTRY.formatter.format_unit_babel(self, spec, **kwspec) + + def __format__(self, spec: str) -> str: + return self._REGISTRY.formatter.format_unit(self, spec) + def __str__(self) -> str: - return " ".join(k if v == 1 else f"{k} ** {v}" for k, v in self._units.items()) + return self._REGISTRY.formatter.format_unit(self) def __bytes__(self) -> bytes: return str(self).encode(locale.getpreferredencoding()) def __repr__(self) -> str: - return "".format(self._units) + return f"" @property def dimensionless(self) -> bool: @@ -96,7 +104,7 @@ def compatible_units(self, *contexts): return self._REGISTRY.get_compatible_units(self) def is_compatible_with( - self, other: Any, *contexts: Union[str, Context], **ctx_kwargs: Any + self, other: Any, *contexts: str | Context, **ctx_kwargs: Any ) -> bool: """check if the other object is compatible @@ -165,18 +173,18 @@ def __rtruediv__(self, other): return self._REGISTRY.Quantity(other, 1 / self._units) elif isinstance(other, UnitsContainer): return self.__class__(other / self._units) - else: - return NotImplemented + + return NotImplemented __div__ = __truediv__ __rdiv__ = __rtruediv__ - def __pow__(self, other) -> "PlainUnit": + def __pow__(self, other) -> PlainUnit: if isinstance(other, NUMERIC_TYPES): return self.__class__(self._units**other) else: - mess = "Cannot power PlainUnit by {}".format(type(other)) + mess = f"Cannot power PlainUnit by {type(other)}" raise TypeError(mess) def __hash__(self) -> int: @@ -207,8 +215,8 @@ def compare(self, other, op) -> bool: return self_q.compare(other, op) elif isinstance(other, (PlainUnit, UnitsContainer, dict)): 
return self_q.compare(self._REGISTRY.Quantity(1, other), op) - else: - return NotImplemented + + return NotImplemented __lt__ = lambda self, other: self.compare(other, op=operator.lt) __le__ = lambda self, other: self.compare(other, op=operator.le) diff --git a/pint/facets/system/__init__.py b/pint/facets/system/__init__.py index e95098bd9..b9cbc9593 100644 --- a/pint/facets/system/__init__.py +++ b/pint/facets/system/__init__.py @@ -12,6 +12,6 @@ from .definitions import SystemDefinition from .objects import System -from .registry import SystemRegistry +from .registry import GenericSystemRegistry, SystemRegistry -__all__ = ["SystemDefinition", "System", "SystemRegistry"] +__all__ = ["SystemDefinition", "System", "SystemRegistry", "GenericSystemRegistry"] diff --git a/pint/facets/system/definitions.py b/pint/facets/system/definitions.py index 824332443..f47a23fd8 100644 --- a/pint/facets/system/definitions.py +++ b/pint/facets/system/definitions.py @@ -8,10 +8,11 @@ from __future__ import annotations -import typing as ty +from collections.abc import Iterable from dataclasses import dataclass from ... import errors +from ...compat import Self @dataclass(frozen=True) @@ -23,7 +24,7 @@ class BaseUnitRule: new_unit_name: str #: name of the unit to be kicked out to make room for the new base uni #: If None, the current base unit with the same dimensionality will be used - old_unit_name: ty.Optional[str] = None + old_unit_name: str | None = None # Instead of defining __post_init__ here, # it will be added to the container class @@ -38,13 +39,16 @@ class SystemDefinition(errors.WithDefErr): #: name of the system name: str #: unit groups that will be included within the system - using_group_names: ty.Tuple[str, ...] + using_group_names: tuple[str, ...] #: rules to define new base unit within the system. - rules: ty.Tuple[BaseUnitRule, ...] + rules: tuple[BaseUnitRule, ...] @classmethod - def from_lines(cls, lines, non_int_type): + def from_lines( + cls: type[Self], lines: Iterable[str], non_int_type: type + ) -> Self | None: # TODO: this is to keep it backwards compatible + # TODO: check when is None returned. from ...delegates import ParserConfig, txt_defparser cfg = ParserConfig(non_int_type) @@ -55,7 +59,8 @@ def from_lines(cls, lines, non_int_type): return definition @property - def unit_replacements(self) -> ty.Tuple[ty.Tuple[str, str], ...]: + def unit_replacements(self) -> tuple[tuple[str, str | None], ...]: + # TODO: check if None can be dropped. return tuple((el.new_unit_name, el.old_unit_name) for el in self.rules) def __post_init__(self): diff --git a/pint/facets/system/objects.py b/pint/facets/system/objects.py index 829fb5c6d..751a66abf 100644 --- a/pint/facets/system/objects.py +++ b/pint/facets/system/objects.py @@ -9,6 +9,12 @@ from __future__ import annotations +import numbers +from collections.abc import Callable, Iterable +from numbers import Number +from typing import Any, Generic + +from ..._typing import UnitLike from ...babel_names import _babel_systems from ...compat import babel_parse from ...util import ( @@ -17,8 +23,20 @@ logger, to_units_container, ) +from .. import group +from ..plain import MagnitudeT from .definitions import SystemDefinition +GetRootUnits = Callable[[UnitLike, bool], tuple[Number, UnitLike]] + + +class SystemQuantity(Generic[MagnitudeT], group.GroupQuantity[MagnitudeT]): + pass + + +class SystemUnit(group.GroupUnit): + pass + class System(SharedRegistryObject): """A system is a Group plus a set of plain units. 
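With `__str__` and `__format__` now routed through the registry-level formatter, the usual unit format specs keep working; a brief, hedged illustration (rendered strings indicative):

>>> import pint
>>> ureg = pint.UnitRegistry()
>>> u = ureg.meter / ureg.second ** 2
>>> str(u)
'meter / second ** 2'
>>> f"{u:~P}"                       # pretty, abbreviated symbols
'm/s²'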
@@ -29,32 +47,28 @@ class System(SharedRegistryObject): The System belongs to one Registry. See SystemDefinition for the definition file syntax. - """ - def __init__(self, name): - """ - :param name: Name of the group - :type name: str - """ + Parameters + ---------- + name + Name of the group. + """ + def __init__(self, name: str): #: Name of the system #: :type: str self.name = name #: Maps root unit names to a dict indicating the new unit and its exponent. - #: :type: dict[str, dict[str, number]]] - self.base_units = {} + self.base_units: dict[str, dict[str, numbers.Number]] = {} #: Derived unit names. - #: :type: set(str) - self.derived_units = set() + self.derived_units: set[str] = set() #: Names of the _used_groups in used by this system. - #: :type: set(str) - self._used_groups = set() + self._used_groups: set[str] = set() - #: :type: frozenset | None - self._computed_members = None + self._computed_members: frozenset[str] | None = None # Add this system to the system dictionary self._REGISTRY._systems[self.name] = self @@ -62,7 +76,7 @@ def __init__(self, name): def __dir__(self): return list(self.members) - def __getattr__(self, item): + def __getattr__(self, item: str) -> Any: getattr_maybe_raise(self, item) u = getattr(self._REGISTRY, self.name + "_" + item, None) if u is not None: @@ -73,11 +87,11 @@ def __getattr__(self, item): def members(self): d = self._REGISTRY._groups if self._computed_members is None: - self._computed_members = set() + tmp: set[str] = set() for group_name in self._used_groups: try: - self._computed_members |= d[group_name].members + tmp |= d[group_name].members except KeyError: logger.warning( "Could not resolve {} in System {}".format( @@ -85,7 +99,7 @@ def members(self): ) ) - self._computed_members = frozenset(self._computed_members) + self._computed_members = frozenset(tmp) return self._computed_members @@ -93,19 +107,19 @@ def invalidate_members(self): """Invalidate computed members in this Group and all parent nodes.""" self._computed_members = None - def add_groups(self, *group_names): + def add_groups(self, *group_names: str) -> None: """Add groups to group.""" self._used_groups |= set(group_names) self.invalidate_members() - def remove_groups(self, *group_names): + def remove_groups(self, *group_names: str) -> None: """Remove groups from group.""" self._used_groups -= set(group_names) self.invalidate_members() - def format_babel(self, locale): + def format_babel(self, locale: str) -> str: """translate the name of the system.""" if locale and self.name in _babel_systems: name = _babel_systems[self.name] @@ -113,13 +127,30 @@ def format_babel(self, locale): return locale.measurement_systems[name] return self.name + # TODO: When 3.11 is minimal version, use Self + @classmethod - def from_lines(cls, lines, get_root_func, non_int_type=float): - system_definition = SystemDefinition.from_lines(lines, get_root_func) + def from_lines( + cls: type[System], + lines: Iterable[str], + get_root_func: GetRootUnits, + non_int_type: type = float, + ) -> System: + # TODO: we changed something here it used to be + # system_definition = SystemDefinition.from_lines(lines, get_root_func) + system_definition = SystemDefinition.from_lines(lines, non_int_type) + + if system_definition is None: + raise ValueError(f"Could not define System from from {lines}") + return cls.from_definition(system_definition, get_root_func) @classmethod - def from_definition(cls, system_definition: SystemDefinition, get_root_func=None): + def from_definition( + cls: type[System], + 
system_definition: SystemDefinition, + get_root_func: GetRootUnits | None = None, + ) -> System: if get_root_func is None: # TODO: kept for backwards compatibility get_root_func = cls._REGISTRY.get_root_units @@ -174,12 +205,12 @@ def from_definition(cls, system_definition: SystemDefinition, get_root_func=None class Lister: - def __init__(self, d): + def __init__(self, d: dict[str, Any]): self.d = d - def __dir__(self): + def __dir__(self) -> list[str]: return list(self.d.keys()) - def __getattr__(self, item): + def __getattr__(self, item: str) -> Any: getattr_maybe_raise(self, item) return self.d[item] diff --git a/pint/facets/system/registry.py b/pint/facets/system/registry.py index 2bab44bf3..e5235a4cb 100644 --- a/pint/facets/system/registry.py +++ b/pint/facets/system/registry.py @@ -9,26 +9,29 @@ from __future__ import annotations from numbers import Number -from typing import TYPE_CHECKING, Dict, FrozenSet, Tuple, Union +from typing import TYPE_CHECKING, Any, Generic from ... import errors +from ...compat import TypeAlias +from ..plain import QuantityT, UnitT if TYPE_CHECKING: - from pint import Quantity, Unit + from ..._typing import Quantity, Unit from ..._typing import UnitLike from ...util import UnitsContainer as UnitsContainerT from ...util import ( - build_dependent_class, create_class_with_registry, to_units_container, ) -from ..group import GroupRegistry +from ..group import GenericGroupRegistry +from . import objects from .definitions import SystemDefinition -from .objects import Lister, System -class SystemRegistry(GroupRegistry): +class GenericSystemRegistry( + Generic[QuantityT, UnitT], GenericGroupRegistry[QuantityT, UnitT] +): """Handle of Systems. Conversion between units with different dimensions according @@ -46,28 +49,23 @@ class SystemRegistry(GroupRegistry): # TODO: Change this to System: System to specify class # and use introspection to get system class as a way # to enjoy typing goodies - _system_class = System + System: type[objects.System] - def __init__(self, system=None, **kwargs): + def __init__(self, system: str | None = None, **kwargs): super().__init__(**kwargs) #: Map system name to system. - #: :type: dict[ str | System] - self._systems: Dict[str, System] = {} + self._systems: dict[str, objects.System] = {} #: Maps dimensionality (UnitsContainer) to Dimensionality (UnitsContainer) - self._base_units_cache = dict() + self._base_units_cache: dict[UnitsContainerT, UnitsContainerT] = {} - self._default_system = system - - def __init_subclass__(cls, **kwargs): - super().__init_subclass__() - cls.System = build_dependent_class(cls, "System", "_system_class") + self._default_system_name: str | None = system def _init_dynamic_classes(self) -> None: """Generate subclasses on the fly and attach them to self""" super()._init_dynamic_classes() - self.System = create_class_with_registry(self, self.System) + self.System = create_class_with_registry(self, objects.System) def _after_init(self) -> None: """Invoked at the end of ``__init__``. @@ -78,7 +76,7 @@ def _after_init(self) -> None: super()._after_init() #: System name to be used by default. 
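The renamed `_default_system_name` backs the public `default_system` property, which in turn steers `get_base_units` when no explicit system is passed. A hedged sketch; the factors shown are approximate:

>>> import pint
>>> ureg = pint.UnitRegistry(system="mks")
>>> ureg.get_base_units("inch")
(0.0254, <Unit('meter')>)
>>> ureg.default_system = "imperial"
>>> ureg.get_base_units("inch")
(0.02777..., <Unit('yard')>)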
- self._default_system = self._default_system or self._defaults.get( + self._default_system_name = self._default_system_name or self._defaults.get( "system", None ) @@ -86,8 +84,7 @@ def _register_definition_adders(self) -> None: super()._register_definition_adders() self._register_adder(SystemDefinition, self._add_system) - def _add_system(self, sd: SystemDefinition): - + def _add_system(self, sd: SystemDefinition) -> None: if sd.name in self._systems: raise ValueError(f"System {sd.name} already present in registry") @@ -101,29 +98,29 @@ def _add_system(self, sd: SystemDefinition): @property def sys(self): - return Lister(self._systems) + return objects.Lister(self._systems) @property - def default_system(self) -> System: - return self._default_system + def default_system(self) -> str | None: + return self._default_system_name @default_system.setter - def default_system(self, name): + def default_system(self, name: str) -> None: if name: if name not in self._systems: raise ValueError("Unknown system %s" % name) self._base_units_cache = {} - self._default_system = name + self._default_system_name = name - def get_system(self, name: str, create_if_needed: bool = True) -> System: + def get_system(self, name: str, create_if_needed: bool = True) -> objects.System: """Return a Group. Parameters ---------- name : str - Name of the group to be + Name of the group to be. create_if_needed : bool If True, create a group if not found. If False, raise an Exception. (Default value = True) @@ -144,10 +141,10 @@ def get_system(self, name: str, create_if_needed: bool = True) -> System: def get_base_units( self, - input_units: Union[UnitLike, Quantity], + input_units: UnitLike | Quantity, check_nonmult: bool = True, - system: Union[str, System, None] = None, - ) -> Tuple[Number, Unit]: + system: str | objects.System | None = None, + ) -> tuple[Number, Unit]: """Convert unit or dict of units to the plain units. If any unit is non multiplicative and check_converter is True, @@ -184,16 +181,15 @@ def _get_base_units( self, input_units: UnitsContainerT, check_nonmult: bool = True, - system: Union[str, System, None] = None, + system: str | objects.System | None = None, ): - if system is None: - system = self._default_system + system = self._default_system_name # The cache is only done for check_nonmult=True and the current system. 
if ( check_nonmult - and system == self._default_system + and system == self._default_system_name and input_units in self._base_units_cache ): return self._base_units_cache[input_units] @@ -226,17 +222,32 @@ def _get_base_units( return base_factor, destination_units - def _get_compatible_units(self, input_units, group_or_system) -> FrozenSet[Unit]: + def get_compatible_units( + self, input_units: UnitsContainerT, group_or_system: str | None = None + ) -> frozenset[Unit]: + """ """ + + group_or_system = group_or_system or self._default_system_name if group_or_system is None: - group_or_system = self._default_system + return super().get_compatible_units(input_units) + + input_units = to_units_container(input_units) + + equiv = self._get_compatible_units(input_units, group_or_system) + + return frozenset(self.Unit(eq) for eq in equiv) + def _get_compatible_units( + self, input_units: UnitsContainerT, group_or_system: str | None = None + ) -> frozenset[Unit]: if group_or_system and group_or_system in self._systems: members = self._systems[group_or_system].members # group_or_system has been handled by System - return frozenset(members & super()._get_compatible_units(input_units, None)) + return frozenset(members & super()._get_compatible_units(input_units)) try: + # This will be handled by groups return super()._get_compatible_units(input_units, group_or_system) except ValueError as ex: # It might be also a system @@ -245,3 +256,10 @@ def _get_compatible_units(self, input_units, group_or_system) -> FrozenSet[Unit] "Unknown Group o System with name '%s'" % group_or_system ) from ex raise ex + + +class SystemRegistry( + GenericSystemRegistry[objects.SystemQuantity[Any], objects.SystemUnit] +): + Quantity: TypeAlias = objects.SystemQuantity[Any] + Unit: TypeAlias = objects.SystemUnit diff --git a/pint/formatting.py b/pint/formatting.py index 554b3814f..b7a895e45 100644 --- a/pint/formatting.py +++ b/pint/formatting.py @@ -10,262 +10,58 @@ from __future__ import annotations -import re -import warnings -from typing import Callable, Dict - -from .babel_names import _babel_lengths, _babel_units -from .compat import babel_parse - -__JOIN_REG_EXP = re.compile(r"{\d*}") - - -def _join(fmt, iterable): - """Join an iterable with the format specified in fmt. - - The format can be specified in two ways: - - PEP3101 format with two replacement fields (eg. '{} * {}') - - The concatenating string (eg. ' * ') - - Parameters - ---------- - fmt : str - - iterable : - - - Returns - ------- - str - - """ - if not iterable: - return "" - if not __JOIN_REG_EXP.search(fmt): - return fmt.join(iterable) - miter = iter(iterable) - first = next(miter) - for val in miter: - ret = fmt.format(first, val) - first = ret - return first - - -_PRETTY_EXPONENTS = "⁰¹²³⁴⁵⁶⁷⁸⁹" - - -def _pretty_fmt_exponent(num): - """Format an number into a pretty printed exponent. - - Parameters - ---------- - num : int - - Returns - ------- - str - - """ - # unicode dot operator (U+22C5) looks like a superscript decimal - ret = f"{num:n}".replace("-", "⁻").replace(".", "\u22C5") - for n in range(10): - ret = ret.replace(str(n), _PRETTY_EXPONENTS[n]) - return ret - - -#: _FORMATS maps format specifications to the corresponding argument set to -#: formatter(). -_FORMATS: Dict[str, dict] = { - "P": { # Pretty format. - "as_ratio": True, - "single_denominator": False, - "product_fmt": "·", - "division_fmt": "/", - "power_fmt": "{}{}", - "parentheses_fmt": "({})", - "exp_call": _pretty_fmt_exponent, - }, - "L": { # Latex format. 
- "as_ratio": True, - "single_denominator": True, - "product_fmt": r" \cdot ", - "division_fmt": r"\frac[{}][{}]", - "power_fmt": "{}^[{}]", - "parentheses_fmt": r"\left({}\right)", - }, - "Lx": {"siopts": "", "pm_fmt": " +- "}, # Latex format with SIunitx. - "H": { # HTML format. - "as_ratio": True, - "single_denominator": True, - "product_fmt": r" ", - "division_fmt": r"{}/{}", - "power_fmt": r"{}{}", - "parentheses_fmt": r"({})", - }, - "": { # Default format. - "as_ratio": True, - "single_denominator": False, - "product_fmt": " * ", - "division_fmt": " / ", - "power_fmt": "{} ** {}", - "parentheses_fmt": r"({})", - }, - "C": { # Compact format. - "as_ratio": True, - "single_denominator": False, - "product_fmt": "*", # TODO: Should this just be ''? - "division_fmt": "/", - "power_fmt": "{}**{}", - "parentheses_fmt": r"({})", - }, -} - -#: _FORMATTERS maps format names to callables doing the formatting -_FORMATTERS: Dict[str, Callable] = {} - - -def register_unit_format(name): - """register a function as a new format for units - - The registered function must have a signature of: - - .. code:: python - - def new_format(unit, registry, **options): - pass - - Parameters - ---------- - name : str - The name of the new format (to be used in the format mini-language). A error is - raised if the new format would overwrite a existing format. - - Examples - -------- - .. code:: python - - @pint.register_unit_format("custom") - def format_custom(unit, registry, **options): - result = "" # do the formatting - return result - - - ureg = pint.UnitRegistry() - u = ureg.m / ureg.s ** 2 - f"{u:custom}" - """ - - def wrapper(func): - if name in _FORMATTERS: - raise ValueError(f"format {name!r} already exists") # or warn instead - _FORMATTERS[name] = func - - return wrapper - - -@register_unit_format("P") -def format_pretty(unit, registry, **options): - return formatter( - unit.items(), - as_ratio=True, - single_denominator=False, - product_fmt="·", - division_fmt="/", - power_fmt="{}{}", - parentheses_fmt="({})", - exp_call=_pretty_fmt_exponent, - **options, - ) - - -@register_unit_format("L") -def format_latex(unit, registry, **options): - preprocessed = { - r"\mathrm{{{}}}".format(u.replace("_", r"\_")): p for u, p in unit.items() - } - formatted = formatter( - preprocessed.items(), - as_ratio=True, - single_denominator=True, - product_fmt=r" \cdot ", - division_fmt=r"\frac[{}][{}]", - power_fmt="{}^[{}]", - parentheses_fmt=r"\left({}\right)", - **options, - ) - return formatted.replace("[", "{").replace("]", "}") - - -@register_unit_format("Lx") -def format_latex_siunitx(unit, registry, **options): - if registry is None: - raise ValueError( - "Can't format as siunitx without a registry." - " This is usually triggered when formatting a instance" - ' of the internal `UnitsContainer` with a spec of `"Lx"`' - " and might indicate a bug in `pint`." 
- ) - - formatted = siunitx_format_unit(unit, registry) - return rf"\si[]{{{formatted}}}" - - -@register_unit_format("H") -def format_html(unit, registry, **options): - return formatter( - unit.items(), - as_ratio=True, - single_denominator=True, - product_fmt=r" ", - division_fmt=r"{}/{}", - power_fmt=r"{}{}", - parentheses_fmt=r"({})", - **options, - ) - - -@register_unit_format("D") -def format_default(unit, registry, **options): - return formatter( - unit.items(), - as_ratio=True, - single_denominator=False, - product_fmt=" * ", - division_fmt=" / ", - power_fmt="{} ** {}", - parentheses_fmt=r"({})", - **options, - ) - - -@register_unit_format("C") -def format_compact(unit, registry, **options): - return formatter( - unit.items(), - as_ratio=True, - single_denominator=False, - product_fmt="*", # TODO: Should this just be ''? - division_fmt="/", - power_fmt="{}**{}", - parentheses_fmt=r"({})", - **options, - ) +from collections.abc import Iterable +from numbers import Number + +from .delegates.formatter._format_helpers import ( + _PRETTY_EXPONENTS, # noqa: F401 +) +from .delegates.formatter._format_helpers import ( + join_u as _join, # noqa: F401 +) +from .delegates.formatter._format_helpers import ( + pretty_fmt_exponent as _pretty_fmt_exponent, # noqa: F401 +) +from .delegates.formatter._spec_helpers import ( + _BASIC_TYPES, # noqa: F401 + FORMATTER, # noqa: F401 + REGISTERED_FORMATTERS, + extract_custom_flags, # noqa: F401 + remove_custom_flags, # noqa: F401 +) +from .delegates.formatter._spec_helpers import ( + parse_spec as _parse_spec, # noqa: F401 +) +from .delegates.formatter._spec_helpers import ( + split_format as split_format, # noqa: F401 +) + +# noqa +from .delegates.formatter._to_register import register_unit_format # noqa: F401 + +# Backwards compatiblity stuff +from .delegates.formatter.latex import ( + _EXP_PATTERN, # noqa: F401 + latex_escape, # noqa: F401 + matrix_to_latex, # noqa: F401 + ndarray_to_latex, # noqa: F401 + ndarray_to_latex_parts, # noqa: F401 + siunitx_format_unit, # noqa: F401 + vector_to_latex, # noqa: F401 +) def formatter( - items, - as_ratio=True, - single_denominator=False, - product_fmt=" * ", - division_fmt=" / ", - power_fmt="{} ** {}", - parentheses_fmt="({0})", - exp_call=lambda x: f"{x:n}", - locale=None, - babel_length="long", - babel_plural_form="one", - sort=True, -): + items: Iterable[tuple[str, Number]], + as_ratio: bool = True, + single_denominator: bool = False, + product_fmt: str = " * ", + division_fmt: str = " / ", + power_fmt: str = "{} ** {}", + parentheses_fmt: str = "({0})", + exp_call: FORMATTER = "{:n}".format, + sort: bool = True, +) -> str: """Format a list of (name, exponent) pairs. Parameters @@ -285,12 +81,6 @@ def formatter( the format used for exponentiation. (Default value = "{} ** {}") parentheses_fmt : str the format used for parenthesis. (Default value = "({0})") - locale : str - the locale object as defined in babel. (Default value = None) - babel_length : str - the length of the translated unit, as defined in babel cldr. (Default value = "long") - babel_plural_form : str - the plural form, calculated as defined in babel. 
(Default value = "one") exp_call : callable (Default value = lambda x: f"{x:n}") sort : bool, optional @@ -303,6 +93,13 @@ def formatter( """ + join_u = _join + + if sort is False: + items = tuple(items) + else: + items = sorted(items) + if not items: return "" @@ -313,34 +110,7 @@ def formatter( pos_terms, neg_terms = [], [] - if sort: - items = sorted(items) for key, value in items: - if locale and babel_length and babel_plural_form and key in _babel_units: - _key = _babel_units[key] - locale = babel_parse(locale) - unit_patterns = locale._data["unit_patterns"] - compound_unit_patterns = locale._data["compound_unit_patterns"] - plural = "one" if abs(value) <= 0 else babel_plural_form - if babel_length not in _babel_lengths: - other_lengths = [ - _babel_length - for _babel_length in reversed(_babel_lengths) - if babel_length != _babel_length - ] - else: - other_lengths = [] - for _babel_length in [babel_length] + other_lengths: - pat = unit_patterns.get(_key, {}).get(_babel_length, {}).get(plural) - if pat is not None: - # Don't remove this positional! This is the format used in Babel - key = pat.replace("{0}", "").strip() - break - division_fmt = compound_unit_patterns.get("per", {}).get( - babel_length, division_fmt - ) - power_fmt = "{}{}" - exp_call = _pretty_fmt_exponent if value == 1: pos_terms.append(key) elif value > 0: @@ -361,39 +131,18 @@ def formatter( return pos_ret if single_denominator: - neg_ret = _join(product_fmt, neg_terms) + neg_ret = join_u(product_fmt, neg_terms) if len(neg_terms) > 1: neg_ret = parentheses_fmt.format(neg_ret) else: - neg_ret = _join(division_fmt, neg_terms) - - return _join(division_fmt, [pos_ret, neg_ret]) - + neg_ret = join_u(division_fmt, neg_terms) -# Extract just the type from the specification mini-language: see -# http://docs.python.org/2/library/string.html#format-specification-mini-language -# We also add uS for uncertainties. 
-_BASIC_TYPES = frozenset("bcdeEfFgGnosxX%uS") + # TODO: first or last pos_ret should be pluralized - -def _parse_spec(spec): - result = "" - for ch in reversed(spec): - if ch == "~" or ch in _BASIC_TYPES: - continue - elif ch in list(_FORMATTERS.keys()) + ["~"]: - if result: - raise ValueError("expected ':' after format specifier") - else: - result = ch - elif ch.isalpha(): - raise ValueError("Unknown conversion specified " + ch) - else: - break - return result + return _join(division_fmt, [pos_ret, neg_ret]) -def format_unit(unit, spec, registry=None, **options): +def format_unit(unit, spec: str, registry=None, **options): # registry may be None to allow formatting `UnitsContainer` objects # in that case, the spec may not be "Lx" @@ -406,155 +155,15 @@ def format_unit(unit, spec, registry=None, **options): if not spec: spec = "D" - fmt = _FORMATTERS.get(spec) - if fmt is None: - raise ValueError(f"Unknown conversion specified: {spec}") - - return fmt(unit, registry=registry, **options) - - -def siunitx_format_unit(units, registry): - """Returns LaTeX code for the unit that can be put into an siunitx command.""" - - def _tothe(power): - if isinstance(power, int) or (isinstance(power, float) and power.is_integer()): - if power == 1: - return "" - elif power == 2: - return r"\squared" - elif power == 3: - return r"\cubed" - else: - return r"\tothe{{{:d}}}".format(int(power)) - else: - # limit float powers to 3 decimal places - return r"\tothe{{{:.3f}}}".format(power).rstrip("0") - - lpos = [] - lneg = [] - # loop through all units in the container - for unit, power in sorted(units.items()): - # remove unit prefix if it exists - # siunitx supports \prefix commands - - lpick = lpos if power >= 0 else lneg - prefix = None - # TODO: fix this to be fore efficient and detect also aliases. - for p in registry._prefixes.values(): - p = str(p.name) - if len(p) > 0 and unit.find(p) == 0: - prefix = p - unit = unit.replace(prefix, "", 1) - - if power < 0: - lpick.append(r"\per") - if prefix is not None: - lpick.append(r"\{}".format(prefix)) - lpick.append(r"\{}".format(unit)) - lpick.append(r"{}".format(_tothe(abs(power)))) - - return "".join(lpos) + "".join(lneg) - - -def extract_custom_flags(spec): - import re - - if not spec: - return "" - - # sort by length, with longer items first - known_flags = sorted(_FORMATTERS.keys(), key=len, reverse=True) - - flag_re = re.compile("(" + "|".join(known_flags + ["~"]) + ")") - custom_flags = flag_re.findall(spec) - - return "".join(custom_flags) - - -def remove_custom_flags(spec): - for flag in sorted(_FORMATTERS.keys(), key=len, reverse=True) + ["~"]: - if flag: - spec = spec.replace(flag, "") - return spec - - -def split_format(spec, default, separate_format_defaults=True): - mspec = remove_custom_flags(spec) - uspec = extract_custom_flags(spec) - - default_mspec = remove_custom_flags(default) - default_uspec = extract_custom_flags(default) - - if separate_format_defaults in (False, None): - # should we warn always or only if there was no explicit choice? - # Given that we want to eventually remove the flag again, I'd say yes? - if spec and separate_format_defaults is None: - if not uspec and default_uspec: - warnings.warn( - ( - "The given format spec does not contain a unit formatter." - " Falling back to the builtin defaults, but in the future" - " the unit formatter specified in the `default_format`" - " attribute will be used instead." 
- ), - DeprecationWarning, - ) - if not mspec and default_mspec: - warnings.warn( - ( - "The given format spec does not contain a magnitude formatter." - " Falling back to the builtin defaults, but in the future" - " the magnitude formatter specified in the `default_format`" - " attribute will be used instead." - ), - DeprecationWarning, - ) - elif not spec: - mspec, uspec = default_mspec, default_uspec - else: - mspec = mspec if mspec else default_mspec - uspec = uspec if uspec else default_uspec - - return mspec, uspec - - -def vector_to_latex(vec, fmtfun=lambda x: format(x, ".2f")): - return matrix_to_latex([vec], fmtfun) - - -def matrix_to_latex(matrix, fmtfun=lambda x: format(x, ".2f")): - ret = [] - - for row in matrix: - ret += [" & ".join(fmtfun(f) for f in row)] - - return r"\begin{pmatrix}%s\end{pmatrix}" % "\\\\ \n".join(ret) - - -def ndarray_to_latex_parts(ndarr, fmtfun=lambda x: format(x, ".2f"), dim=()): - if isinstance(fmtfun, str): - fmt = fmtfun - fmtfun = lambda x: format(x, fmt) - - if ndarr.ndim == 0: - _ndarr = ndarr.reshape(1) - return [vector_to_latex(_ndarr, fmtfun)] - if ndarr.ndim == 1: - return [vector_to_latex(ndarr, fmtfun)] - if ndarr.ndim == 2: - return [matrix_to_latex(ndarr, fmtfun)] + if registry is None: + _formatter = REGISTERED_FORMATTERS.get(spec, None) else: - ret = [] - if ndarr.ndim == 3: - header = ("arr[%s," % ",".join("%d" % d for d in dim)) + "%d,:,:]" - for elno, el in enumerate(ndarr): - ret += [header % elno + " = " + matrix_to_latex(el, fmtfun)] - else: - for elno, el in enumerate(ndarr): - ret += ndarray_to_latex_parts(el, fmtfun, dim + (elno,)) - - return ret + try: + _formatter = registry.formatter._formatters[spec] + except Exception: + _formatter = registry.formatter._formatters.get(spec, None) + if _formatter is None: + raise ValueError(f"Unknown conversion specified: {spec}") -def ndarray_to_latex(ndarr, fmtfun=lambda x: format(x, ".2f"), dim=()): - return "\n".join(ndarray_to_latex_parts(ndarr, fmtfun, dim)) + return _formatter.format_unit(unit) diff --git a/pint/matplotlib.py b/pint/matplotlib.py index 3785c7db9..2ca43fa33 100644 --- a/pint/matplotlib.py +++ b/pint/matplotlib.py @@ -21,7 +21,8 @@ class PintAxisInfo(matplotlib.units.AxisInfo): def __init__(self, units): """Set the default label to the pretty-print of the unit.""" - super().__init__(label="{:P}".format(units)) + formatter = units._REGISTRY.mpl_formatter + super().__init__(label=formatter.format(units)) class PintConverter(matplotlib.units.ConversionInterface): @@ -33,17 +34,20 @@ def __init__(self, registry): def convert(self, value, unit, axis): """Convert :`Quantity` instances for matplotlib to use.""" + # Short circuit for arrays + if hasattr(value, "units"): + return value.to(unit).magnitude if iterable(value): return [self._convert_value(v, unit, axis) for v in value] - else: - return self._convert_value(value, unit, axis) + + return self._convert_value(value, unit, axis) def _convert_value(self, value, unit, axis): """Handle converting using attached unit or falling back to axis units.""" if hasattr(value, "units"): return value.to(unit).magnitude - else: - return self._reg.Quantity(value, axis.get_units()).to(unit).magnitude + + return self._reg.Quantity(value, axis.get_units()).to(unit).magnitude @staticmethod def axisinfo(unit, axis): diff --git a/pint/pint-convert b/pint/pint-convert deleted file mode 100755 index 600016bd2..000000000 --- a/pint/pint-convert +++ /dev/null @@ -1,173 +0,0 @@ -#!/usr/bin/env python3 - -""" - pint-convert - ~~~~~~~~~~~~ - - 
:copyright: 2020 by Pint Authors, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" - -from __future__ import annotations - -import argparse -import re - -from pint import UnitRegistry - -parser = argparse.ArgumentParser(description="Unit converter.", usage=argparse.SUPPRESS) -parser.add_argument( - "-s", - "--system", - metavar="sys", - default="SI", - help="unit system to convert to (default: SI)", -) -parser.add_argument( - "-p", - "--prec", - metavar="n", - type=int, - default=12, - help="number of maximum significant figures (default: 12)", -) -parser.add_argument( - "-u", - "--prec-unc", - metavar="n", - type=int, - default=2, - help="number of maximum uncertainty digits (default: 2)", -) -parser.add_argument( - "-U", - "--no-unc", - dest="unc", - action="store_false", - help="ignore uncertainties in constants", -) -parser.add_argument( - "-C", - "--no-corr", - dest="corr", - action="store_false", - help="ignore correlations between constants", -) -parser.add_argument( - "fr", metavar="from", type=str, help="unit or quantity to convert from" -) -parser.add_argument("to", type=str, nargs="?", help="unit to convert to") -try: - args = parser.parse_args() -except SystemExit: - parser.print_help() - raise - -ureg = UnitRegistry() -ureg.auto_reduce_dimensions = True -ureg.autoconvert_offset_to_baseunit = True -ureg.enable_contexts("Gau", "ESU", "sp", "energy", "boltzmann") -ureg.default_system = args.system - -if args.unc: - import uncertainties - - # Measured constants subject to correlation - # R_i: Rydberg constant - # g_e: Electron g factor - # m_u: Atomic mass constant - # m_e: Electron mass - # m_p: Proton mass - # m_n: Neutron mass - R_i = (ureg._units["R_inf"].converter.scale, 0.0000000000021e7) - g_e = (ureg._units["g_e"].converter.scale, 0.00000000000035) - m_u = (ureg._units["m_u"].converter.scale, 0.00000000050e-27) - m_e = (ureg._units["m_e"].converter.scale, 0.00000000028e-30) - m_p = (ureg._units["m_p"].converter.scale, 0.00000000051e-27) - m_n = (ureg._units["m_n"].converter.scale, 0.00000000095e-27) - if args.corr: - # Correlation matrix between measured constants (to be completed below) - # R_i g_e m_u m_e m_p m_n - corr = [ - [1.0, -0.00206, 0.00369, 0.00436, 0.00194, 0.00233], # R_i - [-0.00206, 1.0, 0.99029, 0.99490, 0.97560, 0.52445], # g_e - [0.00369, 0.99029, 1.0, 0.99536, 0.98516, 0.52959], # m_u - [0.00436, 0.99490, 0.99536, 1.0, 0.98058, 0.52714], # m_e - [0.00194, 0.97560, 0.98516, 0.98058, 1.0, 0.51521], # m_p - [0.00233, 0.52445, 0.52959, 0.52714, 0.51521, 1.0], - ] # m_n - (R_i, g_e, m_u, m_e, m_p, m_n) = uncertainties.correlated_values_norm( - [R_i, g_e, m_u, m_e, m_p, m_n], corr - ) - else: - R_i = uncertainties.ufloat(*R_i) - g_e = uncertainties.ufloat(*g_e) - m_u = uncertainties.ufloat(*m_u) - m_e = uncertainties.ufloat(*m_e) - m_p = uncertainties.ufloat(*m_p) - m_n = uncertainties.ufloat(*m_n) - ureg._units["R_inf"].converter.scale = R_i - ureg._units["g_e"].converter.scale = g_e - ureg._units["m_u"].converter.scale = m_u - ureg._units["m_e"].converter.scale = m_e - ureg._units["m_p"].converter.scale = m_p - ureg._units["m_n"].converter.scale = m_n - - # Measured constants with zero correlation - ureg._units["gravitational_constant"].converter.scale = uncertainties.ufloat( - ureg._units["gravitational_constant"].converter.scale, 0.00015e-11 - ) - ureg._units["d_220"].converter.scale = uncertainties.ufloat( - ureg._units["d_220"].converter.scale, 0.000000032e-10 - ) - ureg._units["K_alpha_Cu_d_220"].converter.scale = 
uncertainties.ufloat( - ureg._units["K_alpha_Cu_d_220"].converter.scale, 0.00000022 - ) - ureg._units["K_alpha_Mo_d_220"].converter.scale = uncertainties.ufloat( - ureg._units["K_alpha_Mo_d_220"].converter.scale, 0.00000019 - ) - ureg._units["K_alpha_W_d_220"].converter.scale = uncertainties.ufloat( - ureg._units["K_alpha_W_d_220"].converter.scale, 0.000000098 - ) - - ureg._root_units_cache = dict() - ureg._build_cache() - - -def convert(u_from, u_to=None, unc=None, factor=None): - q = ureg.Quantity(u_from) - fmt = ".{}g".format(args.prec) - if unc: - q = q.plus_minus(unc) - if u_to: - nq = q.to(u_to) - else: - nq = q.to_base_units() - if factor: - q *= ureg.Quantity(factor) - nq *= ureg.Quantity(factor).to_base_units() - prec_unc = use_unc(nq.magnitude, fmt, args.prec_unc) - if prec_unc > 0: - fmt = ".{}uS".format(prec_unc) - else: - try: - nq = nq.magnitude.n * nq.units - except Exception: - pass - fmt = "{:" + fmt + "} {:~P}" - print(("{:} = " + fmt).format(q, nq.magnitude, nq.units)) - - -def use_unc(num, fmt, prec_unc): - unc = 0 - try: - if isinstance(num, uncertainties.UFloat): - full = ("{:" + fmt + "}").format(num) - unc = re.search(r"\+/-[0.]*([\d.]*)", full).group(1) - unc = len(unc.replace(".", "")) - except Exception: - pass - return max(0, min(prec_unc, unc)) - - -convert(args.fr, args.to) diff --git a/pint/pint_convert.py b/pint/pint_convert.py new file mode 100644 index 000000000..49200727c --- /dev/null +++ b/pint/pint_convert.py @@ -0,0 +1,220 @@ +#!/usr/bin/env python3 +# type: ignore +""" +pint-convert +~~~~~~~~~~~~ + +:copyright: 2020 by Pint Authors, see AUTHORS for more details. +:license: BSD, see LICENSE for more details. +""" + +from __future__ import annotations + +import argparse +import contextlib +import re +from typing import Any + +from pint import UnitRegistry +from pint.compat import HAS_UNCERTAINTIES, ufloat + + +def _set(ureg: UnitRegistry, key: str, value: Any): + obj = ureg._units[key].converter + object.__setattr__(obj, "scale", value) + + +def _define_constants(ureg: UnitRegistry): + # Measured constants subject to correlation + # R_i: Rydberg constant + # g_e: Electron g factor + # m_u: Atomic mass constant + # m_e: Electron mass + # m_p: Proton mass + # m_n: Neutron mass + R_i = (ureg._units["R_inf"].converter.scale, 0.0000000000021e7) + g_e = (ureg._units["g_e"].converter.scale, 0.00000000000035) + m_u = (ureg._units["m_u"].converter.scale, 0.00000000050e-27) + m_e = (ureg._units["m_e"].converter.scale, 0.00000000028e-30) + m_p = (ureg._units["m_p"].converter.scale, 0.00000000051e-27) + m_n = (ureg._units["m_n"].converter.scale, 0.00000000095e-27) + if args.corr: + # Correlation matrix between measured constants (to be completed below) + # R_i g_e m_u m_e m_p m_n + corr = [ + [1.0, -0.00206, 0.00369, 0.00436, 0.00194, 0.00233], # R_i + [-0.00206, 1.0, 0.99029, 0.99490, 0.97560, 0.52445], # g_e + [0.00369, 0.99029, 1.0, 0.99536, 0.98516, 0.52959], # m_u + [0.00436, 0.99490, 0.99536, 1.0, 0.98058, 0.52714], # m_e + [0.00194, 0.97560, 0.98516, 0.98058, 1.0, 0.51521], # m_p + [0.00233, 0.52445, 0.52959, 0.52714, 0.51521, 1.0], + ] # m_n + try: + import uncertainties + + (R_i, g_e, m_u, m_e, m_p, m_n) = uncertainties.correlated_values_norm( + [R_i, g_e, m_u, m_e, m_p, m_n], corr + ) + except AttributeError: + raise Exception( + "Correlation cannot be calculated!\n Please install numpy package" + ) + else: + R_i = ufloat(*R_i) + g_e = ufloat(*g_e) + m_u = ufloat(*m_u) + m_e = ufloat(*m_e) + m_p = ufloat(*m_p) + m_n = ufloat(*m_n) + + _set(ureg, "R_inf", 
R_i) + _set(ureg, "g_e", g_e) + _set(ureg, "m_u", m_u) + _set(ureg, "m_e", m_e) + _set(ureg, "m_p", m_p) + _set(ureg, "m_n", m_n) + + # Measured constants with zero correlation + _set( + ureg, + "gravitational_constant", + ufloat(ureg._units["gravitational_constant"].converter.scale, 0.00015e-11), + ) + + _set( + ureg, + "d_220", + ufloat(ureg._units["d_220"].converter.scale, 0.000000032e-10), + ) + + _set( + ureg, + "K_alpha_Cu_d_220", + ufloat(ureg._units["K_alpha_Cu_d_220"].converter.scale, 0.00000022), + ) + + _set( + ureg, + "K_alpha_Mo_d_220", + ufloat(ureg._units["K_alpha_Mo_d_220"].converter.scale, 0.00000019), + ) + + _set( + ureg, + "K_alpha_W_d_220", + ufloat(ureg._units["K_alpha_W_d_220"].converter.scale, 0.000000098), + ) + + ureg._root_units_cache = {} + ureg._build_cache() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Unit converter.", usage=argparse.SUPPRESS + ) + parser.add_argument( + "-s", + "--system", + metavar="sys", + default="SI", + help="unit system to convert to (default: SI)", + ) + parser.add_argument( + "-p", + "--prec", + metavar="n", + type=int, + default=12, + help="number of maximum significant figures (default: 12)", + ) + parser.add_argument( + "-u", + "--prec-unc", + metavar="n", + type=int, + default=2, + help="number of maximum uncertainty digits (default: 2)", + ) + parser.add_argument( + "-U", + "--with-unc", + dest="unc", + action="store_true", + help="consider uncertainties in constants", + ) + parser.add_argument( + "-C", + "--no-corr", + dest="corr", + action="store_false", + help="ignore correlations between constants", + ) + parser.add_argument( + "fr", metavar="from", type=str, help="unit or quantity to convert from" + ) + parser.add_argument("to", type=str, nargs="?", help="unit to convert to") + try: + args = parser.parse_args() + except SystemExit: + parser.print_help() + raise + + ureg = UnitRegistry() + ureg.auto_reduce_dimensions = True + ureg.autoconvert_offset_to_baseunit = True + ureg.enable_contexts("Gau", "ESU", "sp", "energy", "boltzmann") + ureg.default_system = args.system + + u_from = args.fr + u_to = args.to + unc = None + factor = None + prec_unc = 0 + q = ureg.Quantity(u_from) + fmt = f".{args.prec}g" + + if unc: + q = q.plus_minus(unc) + + if u_to: + nq = q.to(u_to) + else: + nq = q.to_base_units() + + if factor: + q *= ureg.Quantity(factor) + nq *= ureg.Quantity(factor).to_base_units() + + if args.unc: + if not HAS_UNCERTAINTIES: + raise Exception( + "Failed to import uncertainties library!\n Please install uncertainties package" + ) + + _define_constants(ureg) + + num = nq.magnitude + fmt = fmt + prec_unc = args.prec_unc + + with contextlib.suppress(Exception): + if isinstance(num, type(ufloat(1, 0))): + full = ("{:" + fmt + "}").format(num) + unc = re.search(r"\+/-[0.]*([\d.]*)", full).group(1) + unc = len(unc.replace(".", "")) + else: + unc = 0 + + prec_unc = max(0, min(prec_unc, unc)) + else: + prec_unc = 0 + + if prec_unc > 0: + fmt = f".{prec_unc}uS" + else: + with contextlib.suppress(Exception): + nq = nq.magnitude.n * nq.units + + fmt = "{:" + fmt + "} {:~P}" + print(("{:} = " + fmt).format(q, nq.magnitude, nq.units)) diff --git a/pint/pint_eval.py b/pint/pint_eval.py index 2054260b4..8c5f30e31 100644 --- a/pint/pint_eval.py +++ b/pint/pint_eval.py @@ -12,25 +12,27 @@ import operator import token as tokenlib import tokenize +from collections.abc import Iterable +from io import BytesIO +from tokenize import TokenInfo +from typing import Any, Callable, Generator, Generic, Iterator, 
TypeVar +from .compat import HAS_UNCERTAINTIES, ufloat from .errors import DefinitionSyntaxError -# For controlling order of operations -_OP_PRIORITY = { - "**": 3, - "^": 3, - "unary": 2, - "*": 1, - "": 1, # operator for implicit ops - "//": 1, - "/": 1, - "%": 1, - "+": 0, - "-": 0, -} +S = TypeVar("S") + +if HAS_UNCERTAINTIES: + _ufloat = ufloat # type: ignore +else: + + def _ufloat(*args: Any, **kwargs: Any): + raise TypeError( + "Please install the uncertainties package to be able to parse quantities with uncertainty." + ) -def _power(left, right): +def _power(left: Any, right: Any) -> Any: from . import Quantity from .compat import is_duck_array @@ -45,7 +47,18 @@ def _power(left, right): return operator.pow(left, right) -_BINARY_OPERATOR_MAP = { +UnaryOpT = Callable[ + [ + Any, + ], + Any, +] +BinaryOpT = Callable[[Any, Any], Any] + +_UNARY_OPERATOR_MAP: dict[str, UnaryOpT] = {"+": lambda x: x, "-": lambda x: x * -1} + +_BINARY_OPERATOR_MAP: dict[str, BinaryOpT] = { + "+/-": _ufloat, "**": _power, "*": operator.mul, "": operator.mul, # operator for implicit ops @@ -56,7 +69,255 @@ def _power(left, right): "//": operator.floordiv, } -_UNARY_OPERATOR_MAP = {"+": lambda x: x, "-": lambda x: x * -1} +# For controlling order of operations +_OP_PRIORITY = { + "+/-": 4, + "**": 3, + "^": 3, + "unary": 2, + "*": 1, + "": 1, # operator for implicit ops + "//": 1, + "/": 1, + "%": 1, + "+": 0, + "-": 0, +} + + +class IteratorLookAhead(Generic[S]): + """An iterator with lookahead buffer. + + Adapted: https://stackoverflow.com/a/1517965/1291237 + """ + + def __init__(self, iter: Iterator[S]): + self.iter = iter + self.buffer: list[S] = [] + + def __iter__(self): + return self + + def __next__(self) -> S: + if self.buffer: + return self.buffer.pop(0) + else: + return self.iter.__next__() + + def lookahead(self, n: int) -> S: + """Return an item n entries ahead in the iteration.""" + while n >= len(self.buffer): + try: + self.buffer.append(self.iter.__next__()) + except StopIteration: + raise ValueError("Cannot look ahead, out of range") + return self.buffer[n] + + +def plain_tokenizer(input_string: str) -> Generator[TokenInfo, None, None]: + """Standard python tokenizer""" + for tokinfo in tokenize.tokenize(BytesIO(input_string.encode("utf-8")).readline): + if tokinfo.type != tokenlib.ENCODING: + yield tokinfo + + +def uncertainty_tokenizer(input_string: str) -> Generator[TokenInfo, None, None]: + """Tokenizer capable of parsing uncertainties as v+/-u and v±u""" + + def _number_or_nan(token: TokenInfo) -> bool: + if token.type == tokenlib.NUMBER or ( + token.type == tokenlib.NAME and token.string == "nan" + ): + return True + return False + + def _get_possible_e( + toklist: IteratorLookAhead[TokenInfo], e_index: int + ) -> TokenInfo | None: + possible_e_token = toklist.lookahead(e_index) + if ( + possible_e_token.string[0] == "e" + and len(possible_e_token.string) > 1 + and possible_e_token.string[1].isdigit() + ): + end = possible_e_token.end + possible_e = tokenize.TokenInfo( + type=tokenlib.STRING, + string=possible_e_token.string, + start=possible_e_token.start, + end=end, + line=possible_e_token.line, + ) + elif ( + possible_e_token.string[0] in ["e", "E"] + and toklist.lookahead(e_index + 1).string in ["+", "-"] + and toklist.lookahead(e_index + 2).type == tokenlib.NUMBER + ): + # Special case: Python allows a leading zero for exponents (i.e., 042) but not for numbers + if ( + toklist.lookahead(e_index + 2).string == "0" + and toklist.lookahead(e_index + 3).type == tokenlib.NUMBER + ): + 
exp_number = toklist.lookahead(e_index + 3).string + end = toklist.lookahead(e_index + 3).end + else: + exp_number = toklist.lookahead(e_index + 2).string + end = toklist.lookahead(e_index + 2).end + possible_e = tokenize.TokenInfo( + type=tokenlib.STRING, + string=f"e{toklist.lookahead(e_index+1).string}{exp_number}", + start=possible_e_token.start, + end=end, + line=possible_e_token.line, + ) + else: + possible_e = None + return possible_e + + def _apply_e_notation(mantissa: TokenInfo, exponent: TokenInfo) -> TokenInfo: + if mantissa.string == "nan": + return mantissa + if float(mantissa.string) == 0.0: + return mantissa + return tokenize.TokenInfo( + type=tokenlib.NUMBER, + string=f"{mantissa.string}{exponent.string}", + start=mantissa.start, + end=exponent.end, + line=exponent.line, + ) + + def _finalize_e( + nominal_value: TokenInfo, + std_dev: TokenInfo, + toklist: IteratorLookAhead[TokenInfo], + possible_e: TokenInfo, + ) -> tuple[TokenInfo, TokenInfo]: + nominal_value = _apply_e_notation(nominal_value, possible_e) + std_dev = _apply_e_notation(std_dev, possible_e) + next(toklist) # consume 'e' and positive exponent value + if possible_e.string[1] in ["+", "-"]: + next(toklist) # consume "+" or "-" in exponent + exp_number = next(toklist) # consume exponent value + if ( + exp_number.string == "0" + and toklist.lookahead(0).type == tokenlib.NUMBER + ): + exp_number = next(toklist) + assert exp_number.end == end + # We've already applied the number, we're just consuming all the tokens + return nominal_value, std_dev + + # when tokenize encounters whitespace followed by an unknown character, + # (such as ±) it proceeds to mark every character of the whitespace as ERRORTOKEN, + # in addition to marking the unknown character as ERRORTOKEN. Rather than + # wading through all that vomit, just eliminate the problem + # in the input by rewriting ± as +/-. 
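+ # Illustration, not part of the patch: once the substitution below is applied,
+ # the lookahead loop collapses the three tokens '+', '/', '-' into a single
+ # '+/-' OP token, so an input such as "1.23 ± 0.05 m" tokenizes to roughly
+ # NUMBER('1.23'), OP('+/-'), NUMBER('0.05'), NAME('m'); '+/-' maps to ufloat
+ # via _BINARY_OPERATOR_MAP and, having the highest binary priority, is meant
+ # to parse as (1.23 +/- 0.05) meter. A quick sanity check:
+ #
+ #     from pint.pint_eval import uncertainty_tokenizer
+ #     print([t.string for t in uncertainty_tokenizer("1.23 ± 0.05 m")])
+ #     # expected to include: '1.23', '+/-', '0.05', 'm'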
+ input_string = input_string.replace("±", "+/-") + toklist = IteratorLookAhead(plain_tokenizer(input_string)) + for tokinfo in toklist: + assert tokinfo is not None + line = tokinfo.line + start = tokinfo.start + if ( + tokinfo.string == "+" + and toklist.lookahead(0).string == "/" + and toklist.lookahead(1).string == "-" + ): + plus_minus_op = tokenize.TokenInfo( + type=tokenlib.OP, + string="+/-", + start=start, + end=toklist.lookahead(1).end, + line=line, + ) + for _ in range(-1, 1): + next(toklist) + yield plus_minus_op + elif ( + tokinfo.string == "(" + and ((seen_minus := 1 if toklist.lookahead(0).string == "-" else 0) or True) + and _number_or_nan(toklist.lookahead(seen_minus)) + and toklist.lookahead(seen_minus + 1).string == "+" + and toklist.lookahead(seen_minus + 2).string == "/" + and toklist.lookahead(seen_minus + 3).string == "-" + and _number_or_nan(toklist.lookahead(seen_minus + 4)) + and toklist.lookahead(seen_minus + 5).string == ")" + ): + # ( NUM_OR_NAN +/- NUM_OR_NAN ) POSSIBLE_E_NOTATION + possible_e = _get_possible_e(toklist, seen_minus + 6) + if possible_e: + end = possible_e.end + else: + end = toklist.lookahead(seen_minus + 5).end + if seen_minus: + minus_op = next(toklist) + yield minus_op + nominal_value = next(toklist) + tokinfo = next(toklist) # consume '+' + next(toklist) # consume '/' + plus_minus_op = tokenize.TokenInfo( + type=tokenlib.OP, + string="+/-", + start=tokinfo.start, + end=next(toklist).end, # consume '-' + line=line, + ) + std_dev = next(toklist) + next(toklist) # consume final ')' + if possible_e: + nominal_value, std_dev = _finalize_e( + nominal_value, std_dev, toklist, possible_e + ) + yield nominal_value + yield plus_minus_op + yield std_dev + elif ( + tokinfo.type == tokenlib.NUMBER + and toklist.lookahead(0).string == "(" + and toklist.lookahead(1).type == tokenlib.NUMBER + and toklist.lookahead(2).string == ")" + ): + # NUM_OR_NAN ( NUM_OR_NAN ) POSSIBLE_E_NOTATION + possible_e = _get_possible_e(toklist, 3) + if possible_e: + end = possible_e.end + else: + end = toklist.lookahead(2).end + nominal_value = tokinfo + tokinfo = next(toklist) # consume '(' + plus_minus_op = tokenize.TokenInfo( + type=tokenlib.OP, + string="+/-", + start=tokinfo.start, + end=tokinfo.end, # this is funky because there's no "+/-" in nominal(std_dev) notation + line=line, + ) + std_dev = next(toklist) + if "." not in std_dev.string: + std_dev = tokenize.TokenInfo( + type=std_dev.type, + string="0." 
+ std_dev.string, + start=std_dev.start, + end=std_dev.end, + line=line, + ) + next(toklist) # consume final ')' + if possible_e: + nominal_value, std_dev = _finalize_e( + nominal_value, std_dev, toklist, possible_e + ) + yield nominal_value + yield plus_minus_op + yield std_dev + else: + yield tokinfo + + +if HAS_UNCERTAINTIES: + tokenizer = uncertainty_tokenizer +else: + tokenizer = plain_tokenizer class EvalTreeNode: @@ -68,25 +329,38 @@ class EvalTreeNode: left --> single value """ - def __init__(self, left, operator=None, right=None): + def __init__( + self, + left: EvalTreeNode | TokenInfo, + operator: TokenInfo | None = None, + right: EvalTreeNode | None = None, + ): self.left = left self.operator = operator self.right = right - def to_string(self): + def to_string(self) -> str: # For debugging purposes if self.right: + assert isinstance(self.left, EvalTreeNode), "self.left not EvalTreeNode (1)" comps = [self.left.to_string()] if self.operator: - comps.append(self.operator[1]) + comps.append(self.operator.string) comps.append(self.right.to_string()) elif self.operator: - comps = [self.operator[1], self.left.to_string()] + assert isinstance(self.left, EvalTreeNode), "self.left not EvalTreeNode (2)" + comps = [self.operator.string, self.left.to_string()] else: - return self.left[1] + assert isinstance(self.left, TokenInfo), "self.left not TokenInfo (1)" + return self.left.string return "(%s)" % " ".join(comps) - def evaluate(self, define_op, bin_op=None, un_op=None): + def evaluate( + self, + define_op: UnaryOpT, + bin_op: dict[str, BinaryOpT] | None = None, + un_op: dict[str, UnaryOpT] | None = None, + ): """Evaluate node. Parameters @@ -107,33 +381,35 @@ def evaluate(self, define_op, bin_op=None, un_op=None): un_op = un_op or _UNARY_OPERATOR_MAP if self.right: + assert isinstance(self.left, EvalTreeNode), "self.left not EvalTreeNode (3)" # binary or implicit operator - op_text = self.operator[1] if self.operator else "" + op_text = self.operator.string if self.operator else "" if op_text not in bin_op: - raise DefinitionSyntaxError('missing binary operator "%s"' % op_text) - left = self.left.evaluate(define_op, bin_op, un_op) - return bin_op[op_text](left, self.right.evaluate(define_op, bin_op, un_op)) + raise DefinitionSyntaxError(f"missing binary operator '{op_text}'") + + return bin_op[op_text]( + self.left.evaluate(define_op, bin_op, un_op), + self.right.evaluate(define_op, bin_op, un_op), + ) elif self.operator: + assert isinstance(self.left, EvalTreeNode), "self.left not EvalTreeNode (4)" # unary operator - op_text = self.operator[1] + op_text = self.operator.string if op_text not in un_op: - raise DefinitionSyntaxError('missing unary operator "%s"' % op_text) + raise DefinitionSyntaxError(f"missing unary operator '{op_text}'") return un_op[op_text](self.left.evaluate(define_op, bin_op, un_op)) - else: - # single value - return define_op(self.left) - -from typing import Iterable + # single value + return define_op(self.left) -def build_eval_tree( - tokens: Iterable[tokenize.TokenInfo], - op_priority=None, - index=0, - depth=0, - prev_op=None, -) -> tuple[EvalTreeNode | None, int] | EvalTreeNode: +def _build_eval_tree( + tokens: list[TokenInfo], + op_priority: dict[str, int], + index: int = 0, + depth: int = 0, + prev_op: str = "", +) -> tuple[EvalTreeNode, int]: """Build an evaluation tree from a set of tokens. 
Params: @@ -153,14 +429,12 @@ def build_eval_tree( 5) Combine left side, operator, and right side into a new left side 6) Go back to step #2 - """ - - if op_priority is None: - op_priority = _OP_PRIORITY + Raises + ------ + DefinitionSyntaxError + If there is a syntax error. - if depth == 0 and prev_op is None: - # ensure tokens is list so we can access by index - tokens = list(tokens) + """ result = None @@ -171,19 +445,21 @@ def build_eval_tree( if token_type == tokenlib.OP: if token_text == ")": - if prev_op is None: + if prev_op == "": raise DefinitionSyntaxError( - "unopened parentheses in tokens: %s" % current_token + f"unopened parentheses in tokens: {current_token}" ) elif prev_op == "(": # close parenthetical group + assert result is not None return result, index else: # parenthetical group ending, but we need to close sub-operations within group + assert result is not None return result, index - 1 elif token_text == "(": # gather parenthetical group - right, index = build_eval_tree( + right, index = _build_eval_tree( tokens, op_priority, index + 1, 0, token_text ) if not tokens[index][1] == ")": @@ -204,11 +480,11 @@ def build_eval_tree( # (2 * 3 / 4) --> ((2 * 3) / 4) if op_priority[token_text] <= op_priority.get( prev_op, -1 - ) and token_text not in ["**", "^"]: + ) and token_text not in ("**", "^"): # previous operator is higher priority, so end previous binary op return result, index - 1 # get right side of binary op - right, index = build_eval_tree( + right, index = _build_eval_tree( tokens, op_priority, index + 1, depth + 1, token_text ) result = EvalTreeNode( @@ -216,18 +492,18 @@ def build_eval_tree( ) else: # unary operator - right, index = build_eval_tree( + right, index = _build_eval_tree( tokens, op_priority, index + 1, depth + 1, "unary" ) result = EvalTreeNode(left=right, operator=current_token) - elif token_type == tokenlib.NUMBER or token_type == tokenlib.NAME: + elif token_type in (tokenlib.NUMBER, tokenlib.NAME): if result: # tokens with an implicit operation i.e. "1 kg" if op_priority[""] <= op_priority.get(prev_op, -1): # previous operator is higher priority than implicit, so end # previous binary op return result, index - 1 - right, index = build_eval_tree( + right, index = _build_eval_tree( tokens, op_priority, index, depth + 1, "" ) result = EvalTreeNode(left=result, right=right) @@ -240,13 +516,57 @@ def build_eval_tree( raise DefinitionSyntaxError("unclosed parentheses in tokens") if depth > 0 or prev_op: # have to close recursion + assert result is not None return result, index else: # recursion all closed, so just return the final result - return result + assert result is not None + return result, -1 if index + 1 >= len(tokens): # should hit ENDMARKER before this ever happens raise DefinitionSyntaxError("unexpected end to tokens") index += 1 + + +def build_eval_tree( + tokens: Iterable[TokenInfo], + op_priority: dict[str, int] | None = None, +) -> EvalTreeNode: + """Build an evaluation tree from a set of tokens. + + Params: + Index, depth, and prev_op used recursively, so don't touch. + Tokens is an iterable of tokens from an expression to be evaluated. + + Transform the tokens from an expression into a recursive parse tree, following order + of operations. Operations can include binary ops (3 + 4), implicit ops (3 kg), or + unary ops (-1). 
+ + General Strategy: + 1) Get left side of operator + 2) If no tokens left, return final result + 3) Get operator + 4) Use recursion to create tree starting at token on right side of operator (start at step #1) + 4.1) If recursive call encounters an operator with lower or equal priority to step #2, exit recursion + 5) Combine left side, operator, and right side into a new left side + 6) Go back to step #2 + + Raises + ------ + DefinitionSyntaxError + If there is a syntax error. + + """ + + if op_priority is None: + op_priority = _OP_PRIORITY + + if not isinstance(tokens, list): + # ensure tokens is list so we can access by index + tokens = list(tokens) + + result, _ = _build_eval_tree(tokens, op_priority, 0, 0) + + return result diff --git a/pint/registry.py b/pint/registry.py index a5aa9b3b0..7a94c9014 100644 --- a/pint/registry.py +++ b/pint/registry.py @@ -14,57 +14,96 @@ from __future__ import annotations -from . import registry_helpers -from .facets import ( - ContextRegistry, - DaskRegistry, - FormattingRegistry, - MeasurementRegistry, - NonMultiplicativeRegistry, - NumpyRegistry, - SystemRegistry, -) +from typing import Generic + +from . import facets, registry_helpers +from .compat import TypeAlias from .util import logger, pi_theorem +# To build the Quantity and Unit classes +# we follow the UnitRegistry bases +# but + + +class Quantity( + facets.SystemRegistry.Quantity, + facets.ContextRegistry.Quantity, + facets.DaskRegistry.Quantity, + facets.NumpyRegistry.Quantity, + facets.MeasurementRegistry.Quantity, + facets.NonMultiplicativeRegistry.Quantity, + facets.PlainRegistry.Quantity, +): + pass + -class UnitRegistry( - SystemRegistry, - ContextRegistry, - DaskRegistry, - NumpyRegistry, - MeasurementRegistry, - FormattingRegistry, - NonMultiplicativeRegistry, +class Unit( + facets.SystemRegistry.Unit, + facets.ContextRegistry.Unit, + facets.DaskRegistry.Unit, + facets.NumpyRegistry.Unit, + facets.MeasurementRegistry.Unit, + facets.NonMultiplicativeRegistry.Unit, + facets.PlainRegistry.Unit, ): + pass + + +class GenericUnitRegistry( + Generic[facets.QuantityT, facets.UnitT], + facets.GenericSystemRegistry[facets.QuantityT, facets.UnitT], + facets.GenericContextRegistry[facets.QuantityT, facets.UnitT], + facets.GenericDaskRegistry[facets.QuantityT, facets.UnitT], + facets.GenericNumpyRegistry[facets.QuantityT, facets.UnitT], + facets.GenericMeasurementRegistry[facets.QuantityT, facets.UnitT], + facets.GenericNonMultiplicativeRegistry[facets.QuantityT, facets.UnitT], + facets.GenericPlainRegistry[facets.QuantityT, facets.UnitT], +): + pass + + +class UnitRegistry(GenericUnitRegistry[Quantity, Unit]): """The unit registry stores the definitions and relationships between units. Parameters ---------- filename : path of the units definition file to load or line-iterable object. - Empty to load the default definition file. + Empty string to load the default definition file. (default) None to leave the UnitRegistry empty. force_ndarray : bool convert any input, scalar or not to a numpy.ndarray. + (Default: False) force_ndarray_like : bool convert all inputs other than duck arrays to a numpy.ndarray. + (Default: False) default_as_delta : In the context of a multiplication of units, interpret non-multiplicative units as their *delta* counterparts. + (Default: False) autoconvert_offset_to_baseunit : If True converts offset units in quantities are converted to their plain units in multiplicative - context. If False no conversion happens. + context. If False no conversion happens. 
(Default: False) + logarithmic_math : bool + If True, logarithmic units are + added as logarithmic additions. on_redefinition : str action to take in case a unit is redefined. - 'warn', 'raise', 'ignore' + 'warn', 'raise', 'ignore' (Default: 'raise') auto_reduce_dimensions : If True, reduce dimensionality on appropriate operations. + (Default: False) + autoconvert_to_preferred : + If True, converts preferred units on appropriate operations. + (Default: False) preprocessors : list of callables which are iteratively ran on any input expression - or unit string + or unit string or None for no preprocessor. + (Default=None) fmt_locale : - locale identifier string, used in `format_babel`. Default to None + locale identifier string, used in `format_babel` or None. + (Default=None) case_sensitive : bool, optional Control default case sensitivity of unit parsing. (Default: True) cache_folder : str or pathlib.Path or None, optional @@ -72,6 +111,9 @@ class UnitRegistry( If None, the cache is disabled. (default) """ + Quantity: TypeAlias = Quantity + Unit: TypeAlias = Unit + def __init__( self, filename="", @@ -79,16 +121,17 @@ def __init__( force_ndarray_like: bool = False, default_as_delta: bool = True, autoconvert_offset_to_baseunit: bool = False, + logarithmic_math: bool = False, on_redefinition: str = "warn", system=None, auto_reduce_dimensions=False, + autoconvert_to_preferred=False, preprocessors=None, fmt_locale=None, non_int_type=float, case_sensitive: bool = True, cache_folder=None, ): - super().__init__( filename=filename, force_ndarray=force_ndarray, @@ -96,8 +139,10 @@ def __init__( on_redefinition=on_redefinition, default_as_delta=default_as_delta, autoconvert_offset_to_baseunit=autoconvert_offset_to_baseunit, + logarithmic_math=logarithmic_math, system=system, auto_reduce_dimensions=auto_reduce_dimensions, + autoconvert_to_preferred=autoconvert_to_preferred, preprocessors=preprocessors, fmt_locale=fmt_locale, non_int_type=non_int_type, @@ -140,7 +185,7 @@ def setup_matplotlib(self, enable: bool = True) -> None: check = registry_helpers.check -class LazyRegistry: +class LazyRegistry(Generic[facets.QuantityT, facets.UnitT]): def __init__(self, args=None, kwargs=None): self.__dict__["params"] = args or (), kwargs or {} diff --git a/pint/registry_helpers.py b/pint/registry_helpers.py index 8517ff348..f2961cc74 100644 --- a/pint/registry_helpers.py +++ b/pint/registry_helpers.py @@ -11,17 +11,17 @@ from __future__ import annotations import functools -from inspect import signature +from collections.abc import Callable, Iterable +from inspect import Parameter, signature from itertools import zip_longest -from typing import TYPE_CHECKING, Callable, Iterable, TypeVar, Union +from typing import TYPE_CHECKING, Any, TypeVar from ._typing import F from .errors import DimensionalityError from .util import UnitsContainer, to_units_container if TYPE_CHECKING: - from pint import Quantity, Unit - + from ._typing import Quantity, Unit from .registry import UnitRegistry T = TypeVar("T") @@ -72,7 +72,6 @@ def _to_units_container(a, registry=None): def _parse_wrap_args(args, registry=None): - # Arguments which contain definitions # (i.e. names that appear alone and for the first time) defs_args = set() @@ -120,8 +119,13 @@ def _parse_wrap_args(args, registry=None): "Not all variable referenced in %s are defined using !" 
% args[ndx] ) - def _converter(ureg, values, strict): - new_values = list(value for value in values) + def _converter(ureg, sig, values, kw, strict): + len_initial_values = len(values) + + # pack kwargs + for i, param_name in enumerate(sig.parameters): + if i >= len_initial_values: + values.append(kw[param_name]) values_by_name = {} @@ -129,13 +133,13 @@ def _converter(ureg, values, strict): for ndx in defs_args_ndx: value = values[ndx] values_by_name[args_as_uc[ndx][0]] = value - new_values[ndx] = getattr(value, "_magnitude", value) + values[ndx] = getattr(value, "_magnitude", value) # second pass: calculate derived values based on named values for ndx in dependent_args_ndx: value = values[ndx] assert _replace_units(args_as_uc[ndx][0], values_by_name) is not None - new_values[ndx] = ureg._convert( + values[ndx] = ureg._convert( getattr(value, "_magnitude", value), getattr(value, "_units", UnitsContainer({})), _replace_units(args_as_uc[ndx][0], values_by_name), @@ -143,9 +147,8 @@ def _converter(ureg, values, strict): # third pass: convert other arguments for ndx in unit_args_ndx: - if isinstance(values[ndx], ureg.Quantity): - new_values[ndx] = ureg._convert( + values[ndx] = ureg._convert( values[ndx]._magnitude, values[ndx]._units, args_as_uc[ndx][0] ) else: @@ -153,7 +156,7 @@ def _converter(ureg, values, strict): if isinstance(values[ndx], str): # if the value is a string, we try to parse it tmp_value = ureg.parse_expression(values[ndx]) - new_values[ndx] = ureg._convert( + values[ndx] = ureg._convert( tmp_value._magnitude, tmp_value._units, args_as_uc[ndx][0] ) else: @@ -161,37 +164,43 @@ def _converter(ureg, values, strict): "A wrapped function using strict=True requires " "quantity or a string for all arguments with not None units. " "(error found for {}, {})".format( - args_as_uc[ndx][0], new_values[ndx] + args_as_uc[ndx][0], values[ndx] ) ) - return new_values, values_by_name + # unpack kwargs + for i, param_name in enumerate(sig.parameters): + if i >= len_initial_values: + kw[param_name] = values[i] + + return values[:len_initial_values], kw, values_by_name return _converter -def _apply_defaults(func, args, kwargs): +def _apply_defaults(sig, args, kwargs): """Apply default keyword arguments. Named keywords may have been left blank. This function applies the default values so that every argument is defined. """ - sig = signature(func) - bound_arguments = sig.bind(*args, **kwargs) - for param in sig.parameters.values(): - if param.name not in bound_arguments.arguments: - bound_arguments.arguments[param.name] = param.default - args = [bound_arguments.arguments[key] for key in sig.parameters.keys()] - return args, {} + for i, param in enumerate(sig.parameters.values()): + if ( + i >= len(args) + and param.default != Parameter.empty + and param.name not in kwargs + ): + kwargs[param.name] = param.default + return list(args), kwargs def wraps( - ureg: "UnitRegistry", - ret: Union[str, "Unit", Iterable[Union[str, "Unit", None]], None], - args: Union[str, "Unit", Iterable[Union[str, "Unit", None]], None], + ureg: UnitRegistry, + ret: str | Unit | Iterable[str | Unit | None] | None, + args: str | Unit | Iterable[str | Unit | None] | None, strict: bool = True, -) -> Callable[[Callable[..., T]], Callable[..., Quantity[T]]]: +) -> Callable[[Callable[..., Any]], Callable[..., Quantity]]: """Wraps a function to become pint-aware. 
Use it when a function requires a numerical value but in some specific @@ -255,9 +264,9 @@ def wraps( ) ret = _to_units_container(ret, ureg) - def decorator(func: Callable[..., T]) -> Callable[..., Quantity[T]]: - - count_params = len(signature(func).parameters) + def decorator(func: Callable[..., Any]) -> Callable[..., Quantity]: + sig = signature(func) + count_params = len(sig.parameters) if len(args) != count_params: raise TypeError( "%s takes %i parameters, but %i units were passed" @@ -272,15 +281,16 @@ def decorator(func: Callable[..., T]) -> Callable[..., Quantity[T]]: ) @functools.wraps(func, assigned=assigned, updated=updated) - def wrapper(*values, **kw) -> Quantity[T]: - - values, kw = _apply_defaults(func, values, kw) + def wrapper(*values, **kw) -> Quantity: + values, kw = _apply_defaults(sig, values, kw) # In principle, the values are used as is # When then extract the magnitudes when needed. - new_values, values_by_name = converter(ureg, values, strict) + new_values, new_kw, values_by_name = converter( + ureg, sig, values, kw, strict + ) - result = func(*new_values, **kw) + result = func(*new_values, **new_kw) if is_ret_container: out_units = ( @@ -305,7 +315,7 @@ def wrapper(*values, **kw) -> Quantity[T]: def check( - ureg: "UnitRegistry", *args: Union[str, UnitsContainer, "Unit", None] + ureg: UnitRegistry, *args: str | UnitsContainer | Unit | None ) -> Callable[[F], F]: """Decorator to for quantity type checking for function inputs. @@ -339,8 +349,8 @@ def check( ] def decorator(func): - - count_params = len(signature(func).parameters) + sig = signature(func) + count_params = len(sig.parameters) if len(dimensions) != count_params: raise TypeError( "%s takes %i parameters, but %i dimensions were passed" @@ -356,10 +366,13 @@ def decorator(func): @functools.wraps(func, assigned=assigned, updated=updated) def wrapper(*args, **kwargs): - list_args, empty = _apply_defaults(func, args, kwargs) + list_args, kw = _apply_defaults(sig, args, kwargs) - for dim, value in zip(dimensions, list_args): + for i, param_name in enumerate(sig.parameters): + if i >= len(args): + list_args.append(kw[param_name]) + for dim, value in zip(dimensions, list_args): if dim is None: continue diff --git a/pint/testing.py b/pint/testing.py index 1c458f517..b1da02935 100644 --- a/pint/testing.py +++ b/pint/testing.py @@ -1,3 +1,13 @@ +""" + pint.testing + ~~~~~~~~~~~~ + + Functions for testing whether pint quantities are equal. + + :copyright: 2016 by Pint Authors, see AUTHORS for more details.. + :license: BSD, see LICENSE for more details. +""" + from __future__ import annotations import math @@ -34,20 +44,45 @@ def _get_comparable_magnitudes(first, second, msg): return m1, m2 -def assert_equal(first, second, msg=None): +def assert_equal(first, second, msg: str | None = None) -> None: + """ + Assert that two quantities are equal + + Parameters + ---------- + first + First quantity to compare + + second + Second quantity to compare + + msg + If supplied, message to show if the two quantities aren't equal. + + Raises + ------ + AssertionError + The two quantities are not equal. + """ if msg is None: - msg = "Comparing %r and %r. " % (first, second) + msg = f"Comparing {first!r} and {second!r}. 
" m1, m2 = _get_comparable_magnitudes(first, second, msg) - msg += " (Converted to %r and %r): Magnitudes are not equal" % (m1, m2) + msg += f" (Converted to {m1!r} and {m2!r}): Magnitudes are not equal" if isinstance(m1, ndarray) or isinstance(m2, ndarray): np.testing.assert_array_equal(m1, m2, err_msg=msg) elif not isinstance(m1, Number): - warnings.warn(RuntimeWarning) + warnings.warn( + f"In assert_equal, m1 is not a number {first} ({m1}) vs. {second} ({m2}) ", + UserWarning, + ) return elif not isinstance(m2, Number): - warnings.warn(RuntimeWarning) + warnings.warn( + f"In assert_equal, m2 is not a number {first} ({m1}) vs. {second} ({m2}) ", + UserWarning, + ) return elif math.isnan(m1): assert math.isnan(m2), msg @@ -57,26 +92,61 @@ def assert_equal(first, second, msg=None): assert m1 == m2, msg -def assert_allclose(first, second, rtol=1e-07, atol=0, msg=None): +def assert_allclose( + first, second, rtol: float = 1e-07, atol: float = 0, msg: str | None = None +) -> None: + """ + Assert that two quantities are all close + + Unlike numpy, this uses a symmetric check of closeness. + + Parameters + ---------- + first + First quantity to compare + + second + Second quantity to compare + + rtol + Relative tolerance to use when checking for closeness. + + atol + Absolute tolerance to use when checking for closeness. + + msg + If supplied, message to show if the two quantities aren't equal. + + Raises + ------ + AssertionError + The two quantities are not close to within the supplied tolerance. + """ if msg is None: try: - msg = "Comparing %r and %r. " % (first, second) - except TypeError: + msg = f"Comparing {first!r} and {second!r}. " + except (TypeError, ValueError): try: - msg = "Comparing %s and %s. " % (first, second) + msg = f"Comparing {first} and {second}. " except Exception: msg = "Comparing" m1, m2 = _get_comparable_magnitudes(first, second, msg) - msg += " (Converted to %r and %r)" % (m1, m2) + msg += f" (Converted to {m1!r} and {m2!r})" if isinstance(m1, ndarray) or isinstance(m2, ndarray): np.testing.assert_allclose(m1, m2, rtol=rtol, atol=atol, err_msg=msg) elif not isinstance(m1, Number): - warnings.warn(RuntimeWarning) + warnings.warn( + f"In assert_equal, m1 is not a number {first} ({m1}) vs. {second} ({m2}) ", + UserWarning, + ) return elif not isinstance(m2, Number): - warnings.warn(RuntimeWarning) + warnings.warn( + f"In assert_equal, m1 is not a number {first} ({m1}) vs. 
{second} ({m2}) ", + UserWarning, + ) return elif math.isnan(m1): assert math.isnan(m2), msg diff --git a/pint/testsuite/__init__.py b/pint/testsuite/__init__.py index 8c0cd0947..baafc5016 100644 --- a/pint/testsuite/__init__.py +++ b/pint/testsuite/__init__.py @@ -1,9 +1,12 @@ +from __future__ import annotations + +import contextlib import doctest import math import os +import pathlib import unittest import warnings -from contextlib import contextmanager from pint import UnitRegistry from pint.testsuite.helpers import PintOutputChecker @@ -25,7 +28,7 @@ def teardown_class(cls): cls.U_ = None -@contextmanager +@contextlib.contextmanager def assert_no_warnings(): with warnings.catch_warnings(): warnings.simplefilter("error") @@ -40,13 +43,12 @@ def testsuite(): # TESTING THE DOCUMENTATION requires pyyaml, serialize, numpy and uncertainties if HAS_NUMPY and HAS_UNCERTAINTIES: - try: + with contextlib.suppress(ImportError): import serialize # noqa: F401 import yaml # noqa: F401 add_docs(suite) - except ImportError: - pass + return suite @@ -98,7 +100,7 @@ def add_docs(suite): """ docpath = os.path.join(os.path.dirname(__file__), "..", "..", "docs") docpath = os.path.abspath(docpath) - if os.path.exists(docpath): + if pathlib.Path(docpath).exists(): checker = PintOutputChecker() for name in (name for name in os.listdir(docpath) if name.endswith(".rst")): file = os.path.join(docpath, name) diff --git a/pint/testsuite/baseline/test_basic_plot.png b/pint/testsuite/baseline/test_basic_plot.png index 63be609b9..b0c4d189b 100644 Binary files a/pint/testsuite/baseline/test_basic_plot.png and b/pint/testsuite/baseline/test_basic_plot.png differ diff --git a/pint/testsuite/baseline/test_plot_with_non_default_format.png b/pint/testsuite/baseline/test_plot_with_non_default_format.png new file mode 100644 index 000000000..1cb5b1898 Binary files /dev/null and b/pint/testsuite/baseline/test_plot_with_non_default_format.png differ diff --git a/pint/testsuite/baseline/test_plot_with_set_units.png b/pint/testsuite/baseline/test_plot_with_set_units.png index 5fd3ce0d1..a59924ce8 100644 Binary files a/pint/testsuite/baseline/test_plot_with_set_units.png and b/pint/testsuite/baseline/test_plot_with_set_units.png differ diff --git a/benchmarks/benchmarks/__init__.py b/pint/testsuite/benchmarks/__init__.py similarity index 100% rename from benchmarks/benchmarks/__init__.py rename to pint/testsuite/benchmarks/__init__.py diff --git a/pint/_vendor/__init__.py b/pint/testsuite/benchmarks/conftest.py similarity index 100% rename from pint/_vendor/__init__.py rename to pint/testsuite/benchmarks/conftest.py diff --git a/pint/testsuite/benchmarks/test_00_common.py b/pint/testsuite/benchmarks/test_00_common.py new file mode 100644 index 000000000..43ee3fee3 --- /dev/null +++ b/pint/testsuite/benchmarks/test_00_common.py @@ -0,0 +1,18 @@ +from __future__ import annotations + +import subprocess +import sys + + +def test_import(benchmark): + # on py37+ the "-X importtime" usage gives us a more precise + # measurement of the import time we actually care about, + # without the subprocess or interpreter overhead + + cmd = [sys.executable, "-X", "importtime", "-c", "import pint"] + p = subprocess.run(cmd, stderr=subprocess.PIPE) + + line = p.stderr.splitlines()[-1] + field = line.split(b"|")[-2].strip() + total = int(field) # microseconds + return total diff --git a/pint/testsuite/benchmarks/test_01_eval.py b/pint/testsuite/benchmarks/test_01_eval.py new file mode 100644 index 000000000..70f5d85a8 --- /dev/null +++ 
b/pint/testsuite/benchmarks/test_01_eval.py @@ -0,0 +1,23 @@ +from __future__ import annotations + +import pytest + +from pint.pint_eval import plain_tokenizer, uncertainty_tokenizer + +VALUES = [ + "1", + "1 + 2 + 5", + "10 m", + "10 metros + 5 segundos", + "10 metros * (5 segundos)", +] + + +def _tok(tok, value): + return tuple(tok(value)) + + +@pytest.mark.parametrize("tokenizer", (plain_tokenizer, uncertainty_tokenizer)) +@pytest.mark.parametrize("value", VALUES) +def test_pint_eval(benchmark, tokenizer, value): + benchmark(_tok, tokenizer, value) diff --git a/pint/testsuite/benchmarks/test_01_registry_creation.py b/pint/testsuite/benchmarks/test_01_registry_creation.py new file mode 100644 index 000000000..9013f2554 --- /dev/null +++ b/pint/testsuite/benchmarks/test_01_registry_creation.py @@ -0,0 +1,24 @@ +from __future__ import annotations + +import pint + + +def test_create_empty_registry(benchmark): + benchmark(pint.UnitRegistry, None) + + +def test_create_tiny_registry(benchmark, tiny_definition_file): + benchmark(pint.UnitRegistry, tiny_definition_file) + + +def test_create_default_registry(benchmark): + benchmark( + pint.UnitRegistry, + cache_folder=None, + ) + + +def test_create_default_registry_use_cache(benchmark, tmppath_factory): + folder = tmppath_factory / "cache01" + pint.UnitRegistry(cache_folder=tmppath_factory / "cache01") + benchmark(pint.UnitRegistry, cache_folder=folder) diff --git a/pint/testsuite/benchmarks/test_05_registry_creation.py b/pint/testsuite/benchmarks/test_05_registry_creation.py new file mode 100644 index 000000000..9013f2554 --- /dev/null +++ b/pint/testsuite/benchmarks/test_05_registry_creation.py @@ -0,0 +1,24 @@ +from __future__ import annotations + +import pint + + +def test_create_empty_registry(benchmark): + benchmark(pint.UnitRegistry, None) + + +def test_create_tiny_registry(benchmark, tiny_definition_file): + benchmark(pint.UnitRegistry, tiny_definition_file) + + +def test_create_default_registry(benchmark): + benchmark( + pint.UnitRegistry, + cache_folder=None, + ) + + +def test_create_default_registry_use_cache(benchmark, tmppath_factory): + folder = tmppath_factory / "cache01" + pint.UnitRegistry(cache_folder=tmppath_factory / "cache01") + benchmark(pint.UnitRegistry, cache_folder=folder) diff --git a/pint/testsuite/benchmarks/test_10_registry.py b/pint/testsuite/benchmarks/test_10_registry.py new file mode 100644 index 000000000..3a1d42da5 --- /dev/null +++ b/pint/testsuite/benchmarks/test_10_registry.py @@ -0,0 +1,200 @@ +from __future__ import annotations + +import pathlib +from collections.abc import Callable +from operator import getitem +from typing import Any, TypeVar + +import pytest + +import pint + +from ...compat import TypeAlias + +UNITS = ("meter", "kilometer", "second", "minute", "angstrom", "millisecond", "ms") + +OTHER_UNITS = ("meter", "angstrom", "kilometer/second", "angstrom/minute") + +ALL_VALUES = ("int", "float", "complex") + + +T = TypeVar("T") + +SetupType: TypeAlias = tuple[pint.UnitRegistry, dict[str, Any]] + + +def no_benchmark(fun: Callable[..., T], *args: Any, **kwargs: Any) -> T: + return fun(*args, **kwargs) + + +@pytest.fixture +def setup(registry_tiny: pint.UnitRegistry) -> SetupType: + data: dict[str, Any] = {} + data["int"] = 1 + data["float"] = 1.0 + data["complex"] = complex(1, 2) + + return registry_tiny, data + + +@pytest.fixture +def my_setup(setup: SetupType) -> SetupType: + ureg, data = setup + for unit in UNITS + OTHER_UNITS: + data["uc_%s" % unit] = pint.util.to_units_container(unit, ureg) + 
return ureg, data + + +def test_build_cache(setup: SetupType, benchmark): + ureg, _ = setup + benchmark(ureg._build_cache) + + +@pytest.mark.parametrize("key", UNITS) +@pytest.mark.parametrize("pre_run", (True, False)) +def test_getattr(benchmark, setup: SetupType, key: str, pre_run: bool): + ureg, _ = setup + if pre_run: + no_benchmark(getattr, ureg, key) + benchmark(getattr, ureg, key) + + +@pytest.mark.parametrize("key", UNITS) +@pytest.mark.parametrize("pre_run", (True, False)) +def test_getitem(benchmark, setup: SetupType, key: str, pre_run: bool): + ureg, _ = setup + if pre_run: + no_benchmark(getitem, ureg, key) + benchmark(getitem, ureg, key) + + +@pytest.mark.parametrize("key", UNITS) +@pytest.mark.parametrize("pre_run", (True, False)) +def test_parse_unit_name(benchmark, setup: SetupType, key: str, pre_run: bool): + ureg, _ = setup + if pre_run: + no_benchmark(ureg.parse_unit_name, key) + benchmark(ureg.parse_unit_name, key) + + +@pytest.mark.parametrize("key", UNITS) +@pytest.mark.parametrize("pre_run", (True, False)) +def test_parse_units(benchmark, setup: SetupType, key: str, pre_run: bool): + ureg, _ = setup + if pre_run: + no_benchmark(ureg.parse_units, key) + benchmark(ureg.parse_units, key) + + +@pytest.mark.parametrize("key", UNITS) +@pytest.mark.parametrize("pre_run", (True, False)) +def test_parse_expression(benchmark, setup: SetupType, key: str, pre_run: bool): + ureg, _ = setup + if pre_run: + no_benchmark(ureg.parse_expression, "1.0 " + key) + benchmark(ureg.parse_expression, "1.0 " + key) + + +@pytest.mark.parametrize("unit", OTHER_UNITS) +@pytest.mark.parametrize("pre_run", (True, False)) +def test_base_units(benchmark, setup: SetupType, unit: str, pre_run: bool): + ureg, _ = setup + if pre_run: + no_benchmark(ureg.get_base_units, unit) + benchmark(ureg.get_base_units, unit) + + +@pytest.mark.parametrize("unit", OTHER_UNITS) +@pytest.mark.parametrize("pre_run", (True, False)) +def test_to_units_container_registry( + benchmark, setup: SetupType, unit: str, pre_run: bool +): + ureg, _ = setup + if pre_run: + no_benchmark(pint.util.to_units_container, unit, ureg) + benchmark(pint.util.to_units_container, unit, ureg) + + +@pytest.mark.parametrize("unit", OTHER_UNITS) +@pytest.mark.parametrize("pre_run", (True, False)) +def test_to_units_container_detached( + benchmark, setup: SetupType, unit: str, pre_run: bool +): + ureg, _ = setup + if pre_run: + no_benchmark(pint.util.to_units_container, unit, ureg) + benchmark(pint.util.to_units_container, unit, ureg) + + +@pytest.mark.parametrize( + "key", (("uc_meter", "uc_kilometer"), ("uc_kilometer/second", "uc_angstrom/minute")) +) +@pytest.mark.parametrize("pre_run", (True, False)) +def test_convert_from_uc(benchmark, my_setup: SetupType, key: str, pre_run: bool): + src, dst = key + ureg, data = my_setup + if pre_run: + no_benchmark(ureg._convert, 1.0, data[src], data[dst]) + benchmark(ureg._convert, 1.0, data[src], data[dst]) + + +def test_parse_math_expression(benchmark, my_setup): + ureg, _ = my_setup + benchmark(ureg.parse_expression, "3 + 5 * 2 + value", value=10) + + +# This code is duplicated with other benchmarks but simplify comparison + + +@pytest.fixture +def cache_folder(tmppath_factory: pathlib.Path): + folder = tmppath_factory / "cache" + folder.mkdir(parents=True, exist_ok=True) + return folder + + +@pytest.mark.parametrize("use_cache_folder", (None, True)) +def test_load_definitions_stage_1(benchmark, cache_folder, use_cache_folder): + """empty registry creation""" + + if use_cache_folder is True: + 
use_cache_folder = cache_folder + else: + use_cache_folder = None + benchmark(pint.UnitRegistry, None, cache_folder=use_cache_folder) + + +@pytest.mark.skip( + "Test failing ValueError: Group USCSLengthInternational already present in registry" +) +@pytest.mark.parametrize("use_cache_folder", (None, True)) +def test_load_definitions_stage_2(benchmark, cache_folder, use_cache_folder): + """empty registry creation + parsing default files + definition object loading""" + + if use_cache_folder is True: + use_cache_folder = cache_folder + else: + use_cache_folder = None + + from pint import errors + + defpath = pathlib.Path(errors.__file__).parent / "default_en.txt" + empty_registry = pint.UnitRegistry(None, cache_folder=use_cache_folder) + benchmark(empty_registry.load_definitions, defpath, True) + + +@pytest.mark.parametrize("use_cache_folder", (None, True)) +def test_load_definitions_stage_3(benchmark, cache_folder, use_cache_folder): + """empty registry creation + parsing default files + definition object loading + cache building""" + + if use_cache_folder is True: + use_cache_folder = cache_folder + else: + use_cache_folder = None + + from pint import errors + + defpath = pathlib.Path(errors.__file__).parent / "default_en.txt" + empty_registry = pint.UnitRegistry(None, cache_folder=use_cache_folder) + loaded_files = empty_registry.load_definitions(defpath, True) + benchmark(empty_registry._build_cache, loaded_files) diff --git a/pint/testsuite/benchmarks/test_20_quantity.py b/pint/testsuite/benchmarks/test_20_quantity.py new file mode 100644 index 000000000..815e3c09c --- /dev/null +++ b/pint/testsuite/benchmarks/test_20_quantity.py @@ -0,0 +1,92 @@ +from __future__ import annotations + +import itertools as it +import operator +from typing import Any + +import pytest + +import pint + +UNITS = ("meter", "kilometer", "second", "minute", "angstrom") +ALL_VALUES = ("int", "float", "complex") +ALL_VALUES_Q = tuple( + f"{a}_{b}" for a, b in it.product(ALL_VALUES, ("meter", "kilometer")) +) + +OP1 = (operator.neg, operator.truth) +OP2_CMP = (operator.eq,) # operator.lt) +OP2_MATH = (operator.add, operator.sub, operator.mul, operator.truediv) + + +@pytest.fixture +def setup(registry_tiny) -> tuple[pint.UnitRegistry, dict[str, Any]]: + data = {} + data["int"] = 1 + data["float"] = 1.0 + data["complex"] = complex(1, 2) + + ureg = registry_tiny + + for key in ALL_VALUES: + data[key + "_meter"] = data[key] * ureg.meter + data[key + "_kilometer"] = data[key] * ureg.kilometer + + return ureg, data + + +@pytest.mark.parametrize("key", ALL_VALUES) +def test_build_by_mul(benchmark, setup, key): + ureg, data = setup + benchmark(operator.mul, data[key], ureg.meter) + + +@pytest.mark.parametrize("key", ALL_VALUES_Q) +@pytest.mark.parametrize("op", OP1) +def test_op1(benchmark, setup, key, op): + _, data = setup + benchmark(op, data[key]) + + +@pytest.mark.parametrize("keys", tuple(it.product(ALL_VALUES_Q, ALL_VALUES_Q))) +@pytest.mark.parametrize("op", OP2_MATH + OP2_CMP) +def test_op2(benchmark, setup, keys, op): + _, data = setup + key1, key2 = keys + benchmark(op, data[key1], data[key2]) + + +@pytest.mark.parametrize("key", ALL_VALUES_Q) +def test_wrapper(benchmark, setup, key): + ureg, data = setup + value, unit = key.split("_") + + @ureg.wraps(None, (unit,)) + def f(a): + pass + + benchmark(f, data[key]) + + +@pytest.mark.parametrize("key", ALL_VALUES_Q) +def test_wrapper_nonstrict(benchmark, setup, key): + ureg, data = setup + value, unit = key.split("_") + + @ureg.wraps(None, (unit,), strict=False) + 
def f(a): + pass + + benchmark(f, data[value]) + + +@pytest.mark.parametrize("key", ALL_VALUES_Q) +def test_wrapper_ret(benchmark, setup, key): + ureg, data = setup + value, unit = key.split("_") + + @ureg.wraps(unit, (unit,)) + def f(a): + return a + + benchmark(f, data[key]) diff --git a/pint/testsuite/benchmarks/test_30_numpy.py b/pint/testsuite/benchmarks/test_30_numpy.py new file mode 100644 index 000000000..fe99b1e35 --- /dev/null +++ b/pint/testsuite/benchmarks/test_30_numpy.py @@ -0,0 +1,119 @@ +from __future__ import annotations + +import itertools as it +import operator +from collections.abc import Generator +from typing import Any + +import pytest + +import pint +from pint.compat import np + +from ..helpers import requires_numpy + +SMALL_VEC_LEN = 3 +MID_VEC_LEN = 1_000 +LARGE_VEC_LEN = 1_000_000 + +LENGTHS = ("short", "mid") +ALL_VALUES = tuple( + f"{a}_{b}" for a, b in it.product(LENGTHS, ("list", "tuple", "array")) +) +ALL_ARRAYS = ("short_array", "mid_array") +UNITS = ("meter", "kilometer") +ALL_ARRAYS_Q = tuple(f"{a}_{b}" for a, b in it.product(ALL_ARRAYS, UNITS)) + +OP1 = (operator.neg,) # operator.truth, +OP2_CMP = (operator.eq, operator.lt) +OP2_MATH = (operator.add, operator.sub, operator.mul, operator.truediv) + +if np is None: + NUMPY_OP1_MATH = NUMPY_OP2_CMP = NUMPY_OP2_MATH = () +else: + NUMPY_OP1_MATH = (np.sqrt, np.square) + NUMPY_OP2_CMP = (np.equal, np.less) + NUMPY_OP2_MATH = (np.add, np.subtract, np.multiply, np.true_divide) + + +def float_range(n: int) -> Generator[float]: + return (float(x) for x in range(1, n + 1)) + + +@pytest.fixture +def setup(registry_tiny) -> tuple[pint.UnitRegistry, dict[str, Any]]: + data = {} + short = list(float_range(3)) + mid = list(float_range(1_000)) + + data["short_list"] = short + data["short_tuple"] = tuple(short) + data["short_array"] = np.asarray(short) + data["mid_list"] = mid + data["mid_tuple"] = tuple(mid) + data["mid_array"] = np.asarray(mid) + + ureg = registry_tiny + + for key in ALL_ARRAYS: + data[key + "_meter"] = data[key] * ureg.meter + data[key + "_kilometer"] = data[key] * ureg.kilometer + + return ureg, data + + +@requires_numpy +def test_finding_meter_getattr(benchmark, setup): + ureg, _ = setup + benchmark(getattr, ureg, "meter") + + +@requires_numpy +def test_finding_meter_getitem(benchmark, setup): + ureg, _ = setup + benchmark(operator.getitem, ureg, "meter") + + +@requires_numpy +@pytest.mark.parametrize( + "unit", ["meter", "angstrom", "meter/second", "angstrom/minute"] +) +def test_base_units(benchmark, setup, unit): + ureg, _ = setup + benchmark(ureg.get_base_units, unit) + + +@requires_numpy +@pytest.mark.parametrize("key", ALL_ARRAYS) +def test_build_by_mul(benchmark, setup, key): + ureg, data = setup + benchmark(operator.mul, data[key], ureg.meter) + + +@requires_numpy +@pytest.mark.parametrize("key", ALL_ARRAYS_Q) +@pytest.mark.parametrize("op", OP1 + NUMPY_OP1_MATH) +def test_op1(benchmark, setup, key, op): + _, data = setup + benchmark(op, data[key]) + + +@requires_numpy +@pytest.mark.parametrize( + "keys", + ( + ("short_array_meter", "short_array_meter"), + ("short_array_meter", "short_array_kilometer"), + ("short_array_kilometer", "short_array_meter"), + ("short_array_kilometer", "short_array_kilometer"), + ("mid_array_meter", "mid_array_meter"), + ("mid_array_meter", "mid_array_kilometer"), + ("mid_array_kilometer", "mid_array_meter"), + ("mid_array_kilometer", "mid_array_kilometer"), + ), +) +@pytest.mark.parametrize("op", OP2_MATH + OP2_CMP + NUMPY_OP2_MATH + NUMPY_OP2_CMP) +def 
test_op2(benchmark, setup, keys, op): + _, data = setup + key1, key2 = keys + benchmark(op, data[key1], data[key2]) diff --git a/pint/testsuite/conftest.py b/pint/testsuite/conftest.py index 6492cad85..631de1400 100644 --- a/pint/testsuite/conftest.py +++ b/pint/testsuite/conftest.py @@ -1,29 +1,20 @@ # pytest fixtures +from __future__ import annotations -import io +import pathlib import pytest import pint - -@pytest.fixture -def registry_empty(): - return pint.UnitRegistry(None) - - -@pytest.fixture -def registry_tiny(): - return pint.UnitRegistry( - io.StringIO( - """ +_TINY = """ yocto- = 1e-24 = y- zepto- = 1e-21 = z- atto- = 1e-18 = a- femto- = 1e-15 = f- pico- = 1e-12 = p- nano- = 1e-9 = n- -micro- = 1e-6 = µ- = μ- = u- +micro- = 1e-6 = µ- = μ- = u- = mu- = mc- milli- = 1e-3 = m- centi- = 1e-2 = c- deci- = 1e-1 = d- @@ -44,8 +35,32 @@ def registry_tiny(): angstrom = 1e-10 * meter = Å = ångström = Å minute = 60 * second = min """ - ) - ) + + +@pytest.fixture(scope="session") +def tmppath_factory(tmpdir_factory) -> pathlib.Path: + tmp = tmpdir_factory.mktemp("pint") + return pathlib.Path(tmp) + + +@pytest.fixture(scope="session") +def tiny_definition_file(tmppath_factory: pathlib.Path) -> pathlib.Path: + folder = tmppath_factory / "definitions" + folder.mkdir(exist_ok=True, parents=True) + path = folder / "tiny.txt" + if not path.exists(): + path.write_text(_TINY, encoding="utf-8") + return path + + +@pytest.fixture +def registry_empty(): + return pint.UnitRegistry(None) + + +@pytest.fixture +def registry_tiny(tiny_definition_file: pathlib.Path): + return pint.UnitRegistry(tiny_definition_file) @pytest.fixture @@ -65,6 +80,12 @@ def module_registry(): return pint.UnitRegistry() +@pytest.fixture(scope="module") +def log_module_registry(): + """Only use for those test that do not modify the registry.""" + return pint.UnitRegistry(logarithmic_math=True, autoconvert_offset_to_baseunit=True) + + @pytest.fixture(scope="session") def sess_registry(): """Only use for those test that do not modify the registry.""" diff --git a/pint/testsuite/helpers.py b/pint/testsuite/helpers.py index 0348a4549..d317e0755 100644 --- a/pint/testsuite/helpers.py +++ b/pint/testsuite/helpers.py @@ -1,3 +1,6 @@ +from __future__ import annotations + +import contextlib import doctest import pickle import re @@ -10,6 +13,7 @@ from ..compat import ( HAS_BABEL, + HAS_MIP, HAS_NUMPY, HAS_NUMPY_ARRAY_FUNCTION, HAS_UNCERTAINTIES, @@ -34,20 +38,22 @@ _unit_re = re.compile(r"") +def internal(ureg): + return ureg + + class PintOutputChecker(doctest.OutputChecker): def check_output(self, want, got, optionflags): check = super().check_output(want, got, optionflags) if check: return check - try: + with contextlib.suppress(Exception): if eval(want) == eval(got): return True - except Exception: - pass for regex in (_q_re, _sq_re): - try: + with contextlib.suppress(Exception): parsed_got = regex.match(got.replace(r"\\", "")).groupdict() parsed_want = regex.match(want.replace(r"\\", "")).groupdict() @@ -61,12 +67,10 @@ def check_output(self, want, got, optionflags): return False return True - except Exception: - pass cnt = 0 for regex in (_unit_re,): - try: + with contextlib.suppress(Exception): parsed_got, tmp = regex.subn("\1", got) cnt += tmp parsed_want, temp = regex.subn("\1", want) @@ -75,9 +79,6 @@ def check_output(self, want, got, optionflags): if parsed_got == parsed_want: return True - except Exception: - pass - if cnt: # If there was any replacement, we try again the previous methods. 
return self.check_output(parsed_want, parsed_got, optionflags) @@ -127,9 +128,26 @@ def requires_numpy_at_least(version): ) -requires_babel = pytest.mark.skipif( - not HAS_BABEL, reason="Requires Babel with units support" -) +def requires_babel(tested_locales=[]): + if not HAS_BABEL: + return pytest.mark.skip("Requires Babel with units support") + + import locale + + default_locale = locale.getlocale(locale.LC_NUMERIC) + locales_unavailable = False + try: + for loc in tested_locales: + locale.setlocale(locale.LC_NUMERIC, loc) + except locale.Error: + locales_unavailable = True + locale.setlocale(locale.LC_NUMERIC, default_locale) + + return pytest.mark.skipif( + locales_unavailable, reason="Tested locales not available." + ) + + requires_not_babel = pytest.mark.skipif( HAS_BABEL, reason="Requires Babel not to be installed" ) @@ -139,6 +157,7 @@ def requires_numpy_at_least(version): requires_not_uncertainties = pytest.mark.skipif( HAS_UNCERTAINTIES, reason="Requires Uncertainties not to be installed." ) +requires_mip = pytest.mark.skipif(not HAS_MIP, reason="Requires MIP") # Parametrization diff --git a/pint/testsuite/test_application_registry.py b/pint/testsuite/test_application_registry.py index a9bc84ee1..477e9f650 100644 --- a/pint/testsuite/test_application_registry.py +++ b/pint/testsuite/test_application_registry.py @@ -1,5 +1,7 @@ """Tests for global UnitRegistry, Unit, and Quantity """ +from __future__ import annotations + import pickle import pytest diff --git a/pint/testsuite/test_babel.py b/pint/testsuite/test_babel.py index 5c32879b9..ee8e4bb42 100644 --- a/pint/testsuite/test_babel.py +++ b/pint/testsuite/test_babel.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import os import pytest @@ -11,59 +13,66 @@ def test_no_babel(func_registry): ureg = func_registry distance = 24.0 * ureg.meter with pytest.raises(Exception): - distance.format_babel(locale="fr_FR", length="long") + ureg.formatter.format_unit_babel(distance, locale="fr_FR", length="long") -@helpers.requires_babel() +@helpers.requires_babel(["fr_FR", "ro_RO"]) def test_format(func_registry): ureg = func_registry dirname = os.path.dirname(__file__) ureg.load_definitions(os.path.join(dirname, "../xtranslated.txt")) - distance = 24.0 * ureg.meter - assert distance.format_babel(locale="fr_FR", length="long") == "24.0 mètres" - time = 8.0 * ureg.second - assert time.format_babel(locale="fr_FR", length="long") == "8.0 secondes" - assert time.format_babel(locale="ro", length="short") == "8.0 s" + distance = 24.1 * ureg.meter + assert distance.format_babel(locale="fr_FR", length="long") == "24,1 mètres" + time = 8.1 * ureg.second + assert time.format_babel(locale="fr_FR", length="long") == "8,1 secondes" + assert time.format_babel(locale="ro_RO", length="short") == "8,1 s" acceleration = distance / time**2 assert ( - acceleration.format_babel(locale="fr_FR", length="long") - == "0.375 mètre par seconde²" + acceleration.format_babel(spec=".3nP", locale="fr_FR", length="long") + == "0,367 mètre par seconde²" ) mks = ureg.get_system("mks") assert mks.format_babel(locale="fr_FR") == "métrique" -@helpers.requires_babel() +@helpers.requires_babel(["fr_FR", "ro_RO"]) def test_registry_locale(): ureg = UnitRegistry(fmt_locale="fr_FR") dirname = os.path.dirname(__file__) ureg.load_definitions(os.path.join(dirname, "../xtranslated.txt")) - distance = 24.0 * ureg.meter - assert distance.format_babel(length="long") == "24.0 mètres" - time = 8.0 * ureg.second - assert time.format_babel(length="long") == "8.0 secondes" - assert 
time.format_babel(locale="ro", length="short") == "8.0 s" + distance = 24.1 * ureg.meter + assert distance.format_babel(length="long") == "24,1 mètres" + time = 8.1 * ureg.second + assert time.format_babel(length="long") == "8,1 secondes" + assert time.format_babel(locale="ro_RO", length="short") == "8,1 s" acceleration = distance / time**2 - assert acceleration.format_babel(length="long") == "0.375 mètre par seconde²" + assert ( + acceleration.format_babel(spec=".3nC", length="long") + == "0,367 mètre/seconde**2" + ) + assert ( + acceleration.format_babel(spec=".3nP", length="long") + == "0,367 mètre par seconde²" + ) mks = ureg.get_system("mks") assert mks.format_babel(locale="fr_FR") == "métrique" -@helpers.requires_babel() +@helpers.requires_babel(["fr_FR"]) def test_unit_format_babel(): ureg = UnitRegistry(fmt_locale="fr_FR") volume = ureg.Unit("ml") assert volume.format_babel() == "millilitre" - ureg.default_format = "~" + ureg.formatter.default_format = "~" assert volume.format_babel() == "ml" dimensionless_unit = ureg.Unit("") assert dimensionless_unit.format_babel() == "" - ureg.fmt_locale = None + ureg.set_fmt_locale(None) with pytest.raises(ValueError): volume.format_babel() @@ -76,24 +85,24 @@ def test_no_registry_locale(func_registry): distance.format_babel() -@helpers.requires_babel() +@helpers.requires_babel(["fr_FR"]) def test_str(func_registry): ureg = func_registry - d = 24.0 * ureg.meter + d = 24.1 * ureg.meter - s = "24.0 meter" + s = "24.1 meter" assert str(d) == s assert "%s" % d == s - assert "{}".format(d) == s + assert f"{d}" == s ureg.set_fmt_locale("fr_FR") - s = "24.0 mètres" + s = "24,1 mètres" assert str(d) == s assert "%s" % d == s - assert "{}".format(d) == s + assert f"{d}" == s ureg.set_fmt_locale(None) - s = "24.0 meter" + s = "24.1 meter" assert str(d) == s assert "%s" % d == s - assert "{}".format(d) == s + assert f"{d}" == s diff --git a/pint/testsuite/test_compat.py b/pint/testsuite/test_compat.py index 5f3ba5d00..70a6e8e75 100644 --- a/pint/testsuite/test_compat.py +++ b/pint/testsuite/test_compat.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import math from datetime import datetime, timedelta diff --git a/pint/testsuite/test_compat_downcast.py b/pint/testsuite/test_compat_downcast.py index 8293580c3..2fccbacab 100644 --- a/pint/testsuite/test_compat_downcast.py +++ b/pint/testsuite/test_compat_downcast.py @@ -1,3 +1,7 @@ +from __future__ import annotations + +import operator + import pytest from pint import UnitRegistry @@ -37,7 +41,7 @@ def q_base(local_registry): # Define identity function for use in tests -def identity(ureg, x): +def id_matrix(ureg, x): return x @@ -62,17 +66,17 @@ def array(request): @pytest.mark.parametrize( "op, magnitude_op, unit_op", [ - pytest.param(identity, identity, identity, id="identity"), + pytest.param(id_matrix, id_matrix, id_matrix, id="identity"), pytest.param( lambda ureg, x: x + 1 * ureg.m, lambda ureg, x: x + 1, - identity, + id_matrix, id="addition", ), pytest.param( lambda ureg, x: x - 20 * ureg.cm, lambda ureg, x: x - 0.2, - identity, + id_matrix, id="subtraction", ), pytest.param( @@ -83,7 +87,7 @@ def array(request): ), pytest.param( lambda ureg, x: x / (1 * ureg.s), - identity, + id_matrix, lambda ureg, u: u / ureg.s, id="division", ), @@ -93,23 +97,22 @@ def array(request): WR(lambda u: u**2), id="square", ), - pytest.param(WR(lambda x: x.T), WR(lambda x: x.T), identity, id="transpose"), - pytest.param(WR(np.mean), WR(np.mean), identity, id="mean ufunc"), - pytest.param(WR(np.sum), 
WR(np.sum), identity, id="sum ufunc"), + pytest.param(WR(lambda x: x.T), WR(lambda x: x.T), id_matrix, id="transpose"), + pytest.param(WR(np.mean), WR(np.mean), id_matrix, id="mean ufunc"), + pytest.param(WR(np.sum), WR(np.sum), id_matrix, id="sum ufunc"), pytest.param(WR(np.sqrt), WR(np.sqrt), WR(lambda u: u**0.5), id="sqrt ufunc"), pytest.param( WR(lambda x: np.reshape(x, (25,))), WR(lambda x: np.reshape(x, (25,))), - identity, + id_matrix, id="reshape function", ), - pytest.param(WR(np.amax), WR(np.amax), identity, id="amax function"), + pytest.param(WR(np.amax), WR(np.amax), id_matrix, id="amax function"), ], ) def test_univariate_op_consistency( local_registry, q_base, op, magnitude_op, unit_op, array ): - q = local_registry.Quantity(array, "meter") res = op(local_registry, q) assert np.all( @@ -122,15 +125,12 @@ def test_univariate_op_consistency( @pytest.mark.parametrize( "op, unit", [ - pytest.param( - lambda x, y: x * y, lambda ureg: ureg("kg m"), id="multiplication" - ), - pytest.param(lambda x, y: x / y, lambda ureg: ureg("m / kg"), id="division"), + pytest.param(operator.mul, lambda ureg: ureg("kg m"), id="multiplication"), + pytest.param(operator.truediv, lambda ureg: ureg("m / kg"), id="division"), pytest.param(np.multiply, lambda ureg: ureg("kg m"), id="multiply ufunc"), ], ) def test_bivariate_op_consistency(local_registry, q_base, op, unit, array): - # This is to avoid having a ureg built at the module level. unit = unit(local_registry) @@ -145,11 +145,15 @@ def test_bivariate_op_consistency(local_registry, q_base, op, unit, array): "op", [ pytest.param( - WR2(lambda a, u: a * u), + WR2(operator.mul), id="array-first", marks=pytest.mark.xfail(reason="upstream issue numpy/numpy#15200"), ), - pytest.param(WR2(lambda a, u: u * a), id="unit-first"), + pytest.param( + WR2(operator.mul), + id="unit-first", + marks=pytest.mark.xfail(reason="upstream issue numpy/numpy#15200"), + ), ], ) @pytest.mark.parametrize( diff --git a/pint/testsuite/test_compat_upcast.py b/pint/testsuite/test_compat_upcast.py index ad267c1d6..76ec69cbf 100644 --- a/pint/testsuite/test_compat_upcast.py +++ b/pint/testsuite/test_compat_upcast.py @@ -1,3 +1,7 @@ +from __future__ import annotations + +import operator + import pytest # Conditionally import NumPy and any upcast type libraries @@ -49,9 +53,9 @@ def test_quantification(module_registry, ds): @pytest.mark.parametrize( "op", [ - lambda x, y: x + y, + operator.add, lambda x, y: x - (-y), - lambda x, y: x * y, + operator.mul, lambda x, y: x / (y**-1), ], ) @@ -126,9 +130,7 @@ def test_array_function_deferral(da, module_registry): upper = 3 * module_registry.m args = (da, lower, upper) assert ( - lower.__array_function__( - np.clip, tuple(set(type(arg) for arg in args)), args, {} - ) + lower.__array_function__(np.clip, tuple({type(arg) for arg in args}), args, {}) is NotImplemented ) diff --git a/pint/testsuite/test_contexts.py b/pint/testsuite/test_contexts.py index ea6eadcea..073a5a69e 100644 --- a/pint/testsuite/test_contexts.py +++ b/pint/testsuite/test_contexts.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import itertools import logging import math @@ -16,8 +18,10 @@ from pint.testsuite import helpers from pint.util import UnitsContainer +from .helpers import internal + -def add_ctxs(ureg): +def add_ctxs(ureg: UnitRegistry): a, b = UnitsContainer({"[length]": 1}), UnitsContainer({"[time]": -1}) d = Context("lc") d.add_transformation(a, b, lambda ureg, x: ureg.speed_of_light / x) @@ -33,7 +37,7 @@ def add_ctxs(ureg): ureg.add_context(d) 
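The hunks below, and several later test modules, replace direct access to private registry attributes such as ureg._active_ctx with internal(ureg)._active_ctx, using the identity helper that this patch adds to pint/testsuite/helpers.py. A minimal illustrative sketch of that pattern (not a hunk of this patch; it assumes the internal helper added above):

# internal() only marks deliberate access to non-public registry state;
# it returns the registry unchanged.
from pint import UnitRegistry
from pint.testsuite.helpers import internal

ureg = UnitRegistry(None)               # empty registry, as in the test fixtures
assert internal(ureg) is ureg           # identity function: same object back
assert not internal(ureg)._active_ctx   # no contexts are active by default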
-def add_arg_ctxs(ureg): +def add_arg_ctxs(ureg: UnitRegistry): a, b = UnitsContainer({"[length]": 1}), UnitsContainer({"[time]": -1}) d = Context("lc") d.add_transformation(a, b, lambda ureg, x, n: ureg.speed_of_light / x / n) @@ -49,7 +53,7 @@ def add_arg_ctxs(ureg): ureg.add_context(d) -def add_argdef_ctxs(ureg): +def add_argdef_ctxs(ureg: UnitRegistry): a, b = UnitsContainer({"[length]": 1}), UnitsContainer({"[time]": -1}) d = Context("lc", defaults=dict(n=1)) assert d.defaults == dict(n=1) @@ -67,7 +71,7 @@ def add_argdef_ctxs(ureg): ureg.add_context(d) -def add_sharedargdef_ctxs(ureg): +def add_sharedargdef_ctxs(ureg: UnitRegistry): a, b = UnitsContainer({"[length]": 1}), UnitsContainer({"[time]": -1}) d = Context("lc", defaults=dict(n=1)) assert d.defaults == dict(n=1) @@ -90,37 +94,37 @@ def test_known_context(self, func_registry): ureg = func_registry add_ctxs(ureg) with ureg.context("lc"): - assert ureg._active_ctx - assert ureg._active_ctx.graph + assert internal(ureg)._active_ctx + assert internal(ureg)._active_ctx.graph - assert not ureg._active_ctx - assert not ureg._active_ctx.graph + assert not internal(ureg)._active_ctx + assert not internal(ureg)._active_ctx.graph with ureg.context("lc", n=1): - assert ureg._active_ctx - assert ureg._active_ctx.graph + assert internal(ureg)._active_ctx + assert internal(ureg)._active_ctx.graph - assert not ureg._active_ctx - assert not ureg._active_ctx.graph + assert not internal(ureg)._active_ctx + assert not internal(ureg)._active_ctx.graph def test_known_context_enable(self, func_registry): ureg = func_registry add_ctxs(ureg) ureg.enable_contexts("lc") - assert ureg._active_ctx - assert ureg._active_ctx.graph + assert internal(ureg)._active_ctx + assert internal(ureg)._active_ctx.graph ureg.disable_contexts(1) - assert not ureg._active_ctx - assert not ureg._active_ctx.graph + assert not internal(ureg)._active_ctx + assert not internal(ureg)._active_ctx.graph ureg.enable_contexts("lc", n=1) - assert ureg._active_ctx - assert ureg._active_ctx.graph + assert internal(ureg)._active_ctx + assert internal(ureg)._active_ctx.graph ureg.disable_contexts(1) - assert not ureg._active_ctx - assert not ureg._active_ctx.graph + assert not internal(ureg)._active_ctx + assert not internal(ureg)._active_ctx.graph def test_graph(self, func_registry): ureg = func_registry @@ -139,27 +143,27 @@ def test_graph(self, func_registry): g.update({l: {t, c}, t: {l}, c: {l}}) with ureg.context("lc"): - assert ureg._active_ctx.graph == g_sp + assert internal(ureg)._active_ctx.graph == g_sp with ureg.context("lc", n=1): - assert ureg._active_ctx.graph == g_sp + assert internal(ureg)._active_ctx.graph == g_sp with ureg.context("ab"): - assert ureg._active_ctx.graph == g_ab + assert internal(ureg)._active_ctx.graph == g_ab with ureg.context("lc"): with ureg.context("ab"): - assert ureg._active_ctx.graph == g + assert internal(ureg)._active_ctx.graph == g with ureg.context("ab"): with ureg.context("lc"): - assert ureg._active_ctx.graph == g + assert internal(ureg)._active_ctx.graph == g with ureg.context("lc", "ab"): - assert ureg._active_ctx.graph == g + assert internal(ureg)._active_ctx.graph == g with ureg.context("ab", "lc"): - assert ureg._active_ctx.graph == g + assert internal(ureg)._active_ctx.graph == g def test_graph_enable(self, func_registry): ureg = func_registry @@ -178,33 +182,33 @@ def test_graph_enable(self, func_registry): g.update({l: {t, c}, t: {l}, c: {l}}) ureg.enable_contexts("lc") - assert ureg._active_ctx.graph == g_sp + assert 
internal(ureg)._active_ctx.graph == g_sp ureg.disable_contexts(1) ureg.enable_contexts("lc", n=1) - assert ureg._active_ctx.graph == g_sp + assert internal(ureg)._active_ctx.graph == g_sp ureg.disable_contexts(1) ureg.enable_contexts("ab") - assert ureg._active_ctx.graph == g_ab + assert internal(ureg)._active_ctx.graph == g_ab ureg.disable_contexts(1) ureg.enable_contexts("lc") ureg.enable_contexts("ab") - assert ureg._active_ctx.graph == g + assert internal(ureg)._active_ctx.graph == g ureg.disable_contexts(2) ureg.enable_contexts("ab") ureg.enable_contexts("lc") - assert ureg._active_ctx.graph == g + assert internal(ureg)._active_ctx.graph == g ureg.disable_contexts(2) ureg.enable_contexts("lc", "ab") - assert ureg._active_ctx.graph == g + assert internal(ureg)._active_ctx.graph == g ureg.disable_contexts(2) ureg.enable_contexts("ab", "lc") - assert ureg._active_ctx.graph == g + assert internal(ureg)._active_ctx.graph == g ureg.disable_contexts(2) def test_known_nested_context(self, func_registry): @@ -212,22 +216,22 @@ def test_known_nested_context(self, func_registry): add_ctxs(ureg) with ureg.context("lc"): - x = dict(ureg._active_ctx) - y = dict(ureg._active_ctx.graph) - assert ureg._active_ctx - assert ureg._active_ctx.graph + x = dict(internal(ureg)._active_ctx) + y = dict(internal(ureg)._active_ctx.graph) + assert internal(ureg)._active_ctx + assert internal(ureg)._active_ctx.graph with ureg.context("ab"): - assert ureg._active_ctx - assert ureg._active_ctx.graph - assert x != ureg._active_ctx - assert y != ureg._active_ctx.graph + assert internal(ureg)._active_ctx + assert internal(ureg)._active_ctx.graph + assert x != internal(ureg)._active_ctx + assert y != internal(ureg)._active_ctx.graph - assert x == ureg._active_ctx - assert y == ureg._active_ctx.graph + assert x == internal(ureg)._active_ctx + assert y == internal(ureg)._active_ctx.graph - assert not ureg._active_ctx - assert not ureg._active_ctx.graph + assert not internal(ureg)._active_ctx + assert not internal(ureg)._active_ctx.graph def test_unknown_context(self, func_registry): ureg = func_registry @@ -235,25 +239,25 @@ def test_unknown_context(self, func_registry): with pytest.raises(KeyError): with ureg.context("la"): pass - assert not ureg._active_ctx - assert not ureg._active_ctx.graph + assert not internal(ureg)._active_ctx + assert not internal(ureg)._active_ctx.graph def test_unknown_nested_context(self, func_registry): ureg = func_registry add_ctxs(ureg) with ureg.context("lc"): - x = dict(ureg._active_ctx) - y = dict(ureg._active_ctx.graph) + x = dict(internal(ureg)._active_ctx) + y = dict(internal(ureg)._active_ctx.graph) with pytest.raises(KeyError): with ureg.context("la"): pass - assert x == ureg._active_ctx - assert y == ureg._active_ctx.graph + assert x == internal(ureg)._active_ctx + assert y == internal(ureg)._active_ctx.graph - assert not ureg._active_ctx - assert not ureg._active_ctx.graph + assert not internal(ureg)._active_ctx + assert not internal(ureg)._active_ctx.graph def test_one_context(self, func_registry): ureg = func_registry @@ -323,7 +327,6 @@ def test_nested_context(self, func_registry): q.to("Hz") def test_context_with_arg(self, func_registry): - ureg = func_registry add_arg_ctxs(ureg) @@ -352,7 +355,6 @@ def test_context_with_arg(self, func_registry): q.to("Hz") def test_enable_context_with_arg(self, func_registry): - ureg = func_registry add_arg_ctxs(ureg) @@ -386,7 +388,6 @@ def test_enable_context_with_arg(self, func_registry): ureg.disable_contexts(1) def 
test_context_with_arg_def(self, func_registry): - ureg = func_registry add_argdef_ctxs(ureg) @@ -427,7 +428,6 @@ def test_context_with_arg_def(self, func_registry): q.to("Hz") def test_context_with_sharedarg_def(self, func_registry): - ureg = func_registry add_sharedargdef_ctxs(ureg) @@ -499,25 +499,24 @@ def test_anonymous_context(self, func_registry): helpers.assert_quantity_equal(x.to("s"), ureg("1 s")) def _test_ctx(self, ctx, ureg): - q = 500 * ureg.meter s = (ureg.speed_of_light / q).to("Hz") - nctx = len(ureg._contexts) + nctx = len(internal(ureg)._contexts) - assert ctx.name not in ureg._contexts + assert ctx.name not in internal(ureg)._contexts ureg.add_context(ctx) - assert ctx.name in ureg._contexts - assert len(ureg._contexts) == nctx + 1 + len(ctx.aliases) + assert ctx.name in internal(ureg)._contexts + assert len(internal(ureg)._contexts) == nctx + 1 + len(ctx.aliases) with ureg.context(ctx.name): assert q.to("Hz") == s assert s.to("meter") == q ureg.remove_context(ctx.name) - assert ctx.name not in ureg._contexts - assert len(ureg._contexts) == nctx + assert ctx.name not in internal(ureg)._contexts + assert len(internal(ureg)._contexts) == nctx @pytest.mark.parametrize( "badrow", @@ -563,7 +562,6 @@ def test_parse_invalid(self, badrow): ], ) def test_parse_simple(self, func_registry, source, name, aliases, defaults): - a = Context.__keytransform__( UnitsContainer({"[time]": -1}), UnitsContainer({"[length]": 1}) ) @@ -579,7 +577,6 @@ def test_parse_simple(self, func_registry, source, name, aliases, defaults): self._test_ctx(c, func_registry) def test_parse_auto_inverse(self, func_registry): - a = Context.__keytransform__( UnitsContainer({"[time]": -1.0}), UnitsContainer({"[length]": 1.0}) ) @@ -638,7 +635,6 @@ def test_parse_parameterized(self, func_registry): Context.from_lines(s) def test_warnings(self, caplog, func_registry): - ureg = func_registry with caplog.at_level(logging.DEBUG, "pint"): @@ -669,11 +665,11 @@ def test_defined(self, class_registry): b = Context.__keytransform__( UnitsContainer({"[length]": 1.0}), UnitsContainer({"[time]": -1.0}) ) - assert a in ureg._contexts["sp"].funcs - assert b in ureg._contexts["sp"].funcs + assert a in internal(ureg)._contexts["sp"].funcs + assert b in internal(ureg)._contexts["sp"].funcs with ureg.context("sp"): - assert a in ureg._active_ctx - assert b in ureg._active_ctx + assert a in internal(ureg)._active_ctx + assert b in internal(ureg)._active_ctx def test_spectroscopy(self, class_registry): ureg = class_registry @@ -689,9 +685,9 @@ def test_spectroscopy(self, class_registry): da, db = Context.__keytransform__( a.dimensionality, b.dimensionality ) - p = find_shortest_path(ureg._active_ctx.graph, da, db) + p = find_shortest_path(internal(ureg)._active_ctx.graph, da, db) assert p - msg = "{} <-> {}".format(a, b) + msg = f"{a} <-> {b}" # assertAlmostEqualRelError converts second to first helpers.assert_quantity_almost_equal(b, a, rtol=0.01, msg=msg) @@ -711,9 +707,9 @@ def test_textile(self, class_registry): a = qty_direct.to_base_units() b = qty_indirect.to_base_units() da, db = Context.__keytransform__(a.dimensionality, b.dimensionality) - p = find_shortest_path(ureg._active_ctx.graph, da, db) + p = find_shortest_path(internal(ureg)._active_ctx.graph, da, db) assert p - msg = "{} <-> {}".format(a, b) + msg = f"{a} <-> {b}" helpers.assert_quantity_almost_equal(b, a, rtol=0.01, msg=msg) # Check RKM <-> cN/tex conversion diff --git a/pint/testsuite/test_converters.py b/pint/testsuite/test_converters.py index 
62ffdb7ec..40346c700 100644 --- a/pint/testsuite/test_converters.py +++ b/pint/testsuite/test_converters.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import itertools from pint.compat import np @@ -69,7 +71,7 @@ def test_converter_inplace(self): @helpers.requires_numpy def test_log_converter_inplace(self): - arb_value = 3.14 + arb_value = 3.13 c = LogarithmicConverter(scale=1, logbase=10, logfactor=1) from_to = lambda value, inplace: c.from_reference( diff --git a/pint/testsuite/test_dask.py b/pint/testsuite/test_dask.py index 69c80fe0d..e52640ff4 100644 --- a/pint/testsuite/test_dask.py +++ b/pint/testsuite/test_dask.py @@ -1,5 +1,7 @@ +from __future__ import annotations + import importlib -import os +import pathlib import pytest @@ -135,8 +137,8 @@ def test_visualize(local_registry, dask_array): assert res is None # These commands only work on Unix and Windows - assert os.path.exists("mydask.png") - os.remove("mydask.png") + assert pathlib.Path("mydask.png").exists() + pathlib.Path("mydask.png").unlink() def test_compute_persist_equivalent(local_registry, dask_array, numpy_array): @@ -149,6 +151,8 @@ def test_compute_persist_equivalent(local_registry, dask_array, numpy_array): assert np.all(res_compute == res_persist) assert res_compute.units == res_persist.units == units_ + assert type(res_compute) == local_registry.Quantity + assert type(res_persist) == local_registry.Quantity @pytest.mark.parametrize("method", ["compute", "persist", "visualize"]) diff --git a/pint/testsuite/test_definitions.py b/pint/testsuite/test_definitions.py index fbf7450e3..56a107689 100644 --- a/pint/testsuite/test_definitions.py +++ b/pint/testsuite/test_definitions.py @@ -1,3 +1,7 @@ +from __future__ import annotations + +import math + import pytest from pint.definitions import Definition @@ -24,7 +28,6 @@ def test_invalid(self): Definition.from_string("[x] = [time] * meter") def test_prefix_definition(self): - with pytest.raises(ValueError): Definition.from_string("m- = 1e-3 k") @@ -82,7 +85,7 @@ def test_unit_definition(self): assert x.reference == UnitsContainer(kelvin=1) x = Definition.from_string( - "turn = 6.28 * radian = _ = revolution = = cycle = _" + f"turn = {math.tau} * radian = _ = revolution = = cycle = _" ) assert isinstance(x, UnitDefinition) assert x.name == "turn" @@ -90,7 +93,7 @@ def test_unit_definition(self): assert x.symbol == "turn" assert not x.is_base assert isinstance(x.converter, ScaleConverter) - assert x.converter.scale == 6.28 + assert x.converter.scale == math.tau assert x.reference == UnitsContainer(radian=1) with pytest.raises(ValueError): @@ -99,7 +102,6 @@ def test_unit_definition(self): ) def test_log_unit_definition(self): - x = Definition.from_string( "decibelmilliwatt = 1e-3 watt; logbase: 10; logfactor: 10 = dBm" ) @@ -138,7 +140,7 @@ def test_log_unit_definition(self): assert x.converter.logfactor == 1 assert x.reference == UnitsContainer() - eulersnumber = 2.71828182845904523536028747135266249775724709369995 + eulersnumber = math.e x = Definition.from_string( "neper = 1 ; logbase: %1.50f; logfactor: 0.5 = Np" % eulersnumber ) diff --git a/pint/testsuite/test_diskcache.py b/pint/testsuite/test_diskcache.py index 399f9f765..16f3460c6 100644 --- a/pint/testsuite/test_diskcache.py +++ b/pint/testsuite/test_diskcache.py @@ -1,23 +1,28 @@ +from __future__ import annotations + import decimal import pickle import time +import flexparser as fp import pytest import pint -from pint._vendor import flexparser as fp from pint.facets.plain import UnitDefinition FS_SLEEP 
= 0.010 +from .helpers import internal + + @pytest.fixture def float_cache_filename(tmp_path): ureg = pint.UnitRegistry(cache_folder=tmp_path / "cache_with_float") - assert ureg._diskcache - assert ureg._diskcache.cache_folder + assert internal(ureg)._diskcache + assert internal(ureg)._diskcache.cache_folder - return tuple(ureg._diskcache.cache_folder.glob("*.pickle")) + return tuple(internal(ureg)._diskcache.cache_folder.glob("*.pickle")) def test_must_be_three_files(float_cache_filename): @@ -30,7 +35,7 @@ def test_must_be_three_files(float_cache_filename): def test_no_cache(): ureg = pint.UnitRegistry(cache_folder=None) - assert ureg._diskcache is None + assert internal(ureg)._diskcache is None assert ureg.cache_folder is None @@ -38,11 +43,11 @@ def test_decimal(tmp_path, float_cache_filename): ureg = pint.UnitRegistry( cache_folder=tmp_path / "cache_with_decimal", non_int_type=decimal.Decimal ) - assert ureg._diskcache - assert ureg._diskcache.cache_folder == tmp_path / "cache_with_decimal" + assert internal(ureg)._diskcache + assert internal(ureg)._diskcache.cache_folder == tmp_path / "cache_with_decimal" assert ureg.cache_folder == tmp_path / "cache_with_decimal" - files = tuple(ureg._diskcache.cache_folder.glob("*.pickle")) + files = tuple(internal(ureg)._diskcache.cache_folder.glob("*.pickle")) assert len(files) == 3 # check that the filenames with decimal are different to the ones with float @@ -66,9 +71,11 @@ def test_auto(float_cache_filename): float_filenames = tuple(p.name for p in float_cache_filename) ureg = pint.UnitRegistry(cache_folder=":auto:") - assert ureg._diskcache - assert ureg._diskcache.cache_folder - auto_files = tuple(p.name for p in ureg._diskcache.cache_folder.glob("*.pickle")) + assert internal(ureg)._diskcache + assert internal(ureg)._diskcache.cache_folder + auto_files = tuple( + p.name for p in internal(ureg)._diskcache.cache_folder.glob("*.pickle") + ) for file in float_filenames: assert file in auto_files @@ -82,7 +89,7 @@ def test_change_file(tmp_path): # (this will create two cache files, one for the file another for RegistryCache) ureg = pint.UnitRegistry(dfile, cache_folder=tmp_path) assert ureg.x == 1234 - files = tuple(ureg._diskcache.cache_folder.glob("*.pickle")) + files = tuple(internal(ureg)._diskcache.cache_folder.glob("*.pickle")) assert len(files) == 2 # Modify the definition file @@ -93,5 +100,5 @@ def test_change_file(tmp_path): # Verify that the definiton file was loaded (the cache was invalidated). 
ureg = pint.UnitRegistry(dfile, cache_folder=tmp_path) assert ureg.x == 1235 - files = tuple(ureg._diskcache.cache_folder.glob("*.pickle")) + files = tuple(internal(ureg)._diskcache.cache_folder.glob("*.pickle")) assert len(files) == 4 diff --git a/pint/testsuite/test_errors.py b/pint/testsuite/test_errors.py index 6a42eec6f..e0c4ec3f4 100644 --- a/pint/testsuite/test_errors.py +++ b/pint/testsuite/test_errors.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pickle import pytest @@ -116,7 +118,7 @@ def test_pickle_definition_syntax_error(self, subtests): q2 = ureg.Quantity("1 bar") for protocol in range(pickle.HIGHEST_PROTOCOL + 1): - for ex in [ + for ex in ( DefinitionSyntaxError("foo"), RedefinitionError("foo", "bar"), UndefinedUnitError("meter"), @@ -125,7 +127,7 @@ def test_pickle_definition_syntax_error(self, subtests): Quantity("1 kg")._units, Quantity("1 s")._units ), OffsetUnitCalculusError(q1._units, q2._units), - ]: + ): with subtests.test(protocol=protocol, etype=type(ex)): pik = pickle.dumps(ureg.Quantity("1 foo"), protocol) with pytest.raises(UndefinedUnitError): @@ -142,3 +144,13 @@ def test_pickle_definition_syntax_error(self, subtests): with pytest.raises(PintError): raise ex + + def test_dimensionality_error_message(self): + ureg = UnitRegistry(system="SI") + with pytest.raises(ValueError) as error: + ureg.get_dimensionality("[bilbo]") + + assert ( + str(error.value) + == "[bilbo] is not defined as dimension in the pint UnitRegistry" + ) diff --git a/pint/testsuite/test_formatter.py b/pint/testsuite/test_formatter.py index 9e362fc68..d8b5722bc 100644 --- a/pint/testsuite/test_formatter.py +++ b/pint/testsuite/test_formatter.py @@ -1,40 +1,49 @@ +from __future__ import annotations + import pytest from pint import formatting as fmt +from pint.delegates.formatter._format_helpers import formatter, join_u class TestFormatter: def test_join(self): - for empty in (tuple(), []): - assert fmt._join("s", empty) == "" - assert fmt._join("*", "1 2 3".split()) == "1*2*3" - assert fmt._join("{0}*{1}", "1 2 3".split()) == "1*2*3" + for empty in ((), []): + assert join_u("s", empty) == "" + assert join_u("*", "1 2 3".split()) == "1*2*3" + assert join_u("{0}*{1}", "1 2 3".split()) == "1*2*3" def test_formatter(self): - assert fmt.formatter(dict().items()) == "" - assert fmt.formatter(dict(meter=1).items()) == "meter" - assert fmt.formatter(dict(meter=-1).items()) == "1 / meter" - assert fmt.formatter(dict(meter=-1).items(), as_ratio=False) == "meter ** -1" + assert formatter({}.items(), ()) == "" + assert formatter(dict(meter=1).items(), ()) == "meter" + assert formatter((), dict(meter=-1).items()) == "1 / meter" + assert formatter((), dict(meter=-1).items(), as_ratio=False) == "meter ** -1" assert ( - fmt.formatter(dict(meter=-1, second=-1).items(), as_ratio=False) + formatter((), dict(meter=-1, second=-1).items(), as_ratio=False) == "meter ** -1 * second ** -1" ) - assert fmt.formatter(dict(meter=-1, second=-1).items()) == "1 / meter / second" assert ( - fmt.formatter(dict(meter=-1, second=-1).items(), single_denominator=True) + formatter( + (), + dict(meter=-1, second=-1).items(), + ) + == "1 / meter / second" + ) + assert ( + formatter((), dict(meter=-1, second=-1).items(), single_denominator=True) == "1 / (meter * second)" ) assert ( - fmt.formatter(dict(meter=-1, second=-2).items()) + formatter((), dict(meter=-1, second=-2).items()) == "1 / meter / second ** 2" ) assert ( - fmt.formatter(dict(meter=-1, second=-2).items(), single_denominator=True) + formatter((), 
dict(meter=-1, second=-2).items(), single_denominator=True) == "1 / (meter * second ** 2)" ) - def test_parse_spec(self): + def testparse_spec(self): assert fmt._parse_spec("") == "" assert fmt._parse_spec("") == "" with pytest.raises(ValueError): diff --git a/pint/testsuite/test_formatting.py b/pint/testsuite/test_formatting.py index 48e770b3b..d8f10715b 100644 --- a/pint/testsuite/test_formatting.py +++ b/pint/testsuite/test_formatting.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pytest import pint.formatting as fmt @@ -57,6 +59,8 @@ def test_split_format(format, default, flag, expected): def test_register_unit_format(func_registry): @fmt.register_unit_format("custom") def format_custom(unit, registry, **options): + # Ensure the registry is correct.. + registry.Unit(unit) return "" quantity = 1.0 * func_registry.meter diff --git a/pint/testsuite/test_infer_base_unit.py b/pint/testsuite/test_infer_base_unit.py index f2605c68c..f5d710b7d 100644 --- a/pint/testsuite/test_infer_base_unit.py +++ b/pint/testsuite/test_infer_base_unit.py @@ -1,109 +1,135 @@ +from __future__ import annotations + from decimal import Decimal from fractions import Fraction import pytest -from pint import Quantity as Q from pint import UnitRegistry from pint.testsuite import helpers from pint.util import infer_base_unit -class TestInferBaseUnit: - def test_infer_base_unit(self): - from pint.util import infer_base_unit - - test_units = Q(1, "meter**2").units - registry = Q(1, "meter**2")._REGISTRY - - assert infer_base_unit(Q(1, "millimeter * nanometer")) == test_units - - assert infer_base_unit("millimeter * nanometer", registry) == test_units - - assert ( - infer_base_unit(Q(1, "millimeter * nanometer").units, registry) - == test_units - ) - - with pytest.raises(ValueError, match=r"No registry provided."): - infer_base_unit("millimeter") - - def test_infer_base_unit_decimal(self): - from pint.util import infer_base_unit - - ureg = UnitRegistry(non_int_type=Decimal) - QD = ureg.Quantity - - ibu_d = infer_base_unit(QD(Decimal("1"), "millimeter * nanometer")) +def test_infer_base_unit(sess_registry): + test_units = sess_registry.Quantity(1, "meter**2").units + registry = sess_registry - assert ibu_d == QD(Decimal("1"), "meter**2").units + assert ( + infer_base_unit(sess_registry.Quantity(1, "millimeter * nanometer")) + == test_units + ) - assert all(isinstance(v, Decimal) for v in ibu_d.values()) + assert infer_base_unit("millimeter * nanometer", registry) == test_units - def test_infer_base_unit_fraction(self): - from pint.util import infer_base_unit - - ureg = UnitRegistry(non_int_type=Fraction) - QD = ureg.Quantity - - ibu_d = infer_base_unit(QD(Fraction("1"), "millimeter * nanometer")) - - assert ibu_d == QD(Fraction("1"), "meter**2").units - - assert all(isinstance(v, Fraction) for v in ibu_d.values()) - - def test_units_adding_to_zero(self): - assert infer_base_unit(Q(1, "m * mm / m / um * s")) == Q(1, "s").units - - def test_to_compact(self): - r = Q(1000000000, "m") * Q(1, "mm") / Q(1, "s") / Q(1, "ms") - compact_r = r.to_compact() - expected = Q(1000.0, "kilometer**2 / second**2") - helpers.assert_quantity_almost_equal(compact_r, expected) - - r = (Q(1, "m") * Q(1, "mm") / Q(1, "m") / Q(2, "um") * Q(2, "s")).to_compact() - helpers.assert_quantity_almost_equal(r, Q(1000, "s")) - - def test_to_compact_decimal(self): - ureg = UnitRegistry(non_int_type=Decimal) - Q = ureg.Quantity - r = ( - Q(Decimal("1000000000.0"), "m") - * Q(Decimal("1"), "mm") - / Q(Decimal("1"), "s") - / Q(Decimal("1"), 
"ms") - ) - compact_r = r.to_compact() - expected = Q(Decimal("1000.0"), "kilometer**2 / second**2") - assert compact_r == expected - - r = ( - Q(Decimal(1), "m") * Q(1, "mm") / Q(1, "m**2") / Q(2, "um") * Q(2, "s") - ).to_compact() - assert r == Q(1000, "s/m") - - def test_to_compact_fraction(self): - ureg = UnitRegistry(non_int_type=Fraction) - Q = ureg.Quantity - r = ( - Q(Fraction("10000000000/10"), "m") - * Q(Fraction("1"), "mm") - / Q(Fraction("1"), "s") - / Q(Fraction("1"), "ms") + assert ( + infer_base_unit( + sess_registry.Quantity(1, "millimeter * nanometer").units, registry ) - compact_r = r.to_compact() - expected = Q(Fraction("1000.0"), "kilometer**2 / second**2") - assert compact_r == expected - - r = ( - Q(Fraction(1), "m") * Q(1, "mm") / Q(1, "m**2") / Q(2, "um") * Q(2, "s") - ).to_compact() - assert r == Q(1000, "s/m") - - def test_volts(self): - from pint.util import infer_base_unit - - r = Q(1, "V") * Q(1, "mV") / Q(1, "kV") - b = infer_base_unit(r) - assert b == Q(1, "V").units - helpers.assert_quantity_almost_equal(r, Q(1, "uV")) + == test_units + ) + + with pytest.raises(ValueError, match=r"No registry provided."): + infer_base_unit("millimeter") + + +def test_infer_base_unit_decimal(sess_registry): + ureg = UnitRegistry(non_int_type=Decimal) + QD = ureg.Quantity + + ibu_d = infer_base_unit(QD(Decimal(1), "millimeter * nanometer")) + + assert ibu_d == QD(Decimal(1), "meter**2").units + + assert all(isinstance(v, Decimal) for v in ibu_d.values()) + + +def test_infer_base_unit_fraction(sess_registry): + ureg = UnitRegistry(non_int_type=Fraction) + QD = ureg.Quantity + + ibu_d = infer_base_unit(QD(Fraction("1"), "millimeter * nanometer")) + + assert ibu_d == QD(Fraction("1"), "meter**2").units + + assert all(isinstance(v, Fraction) for v in ibu_d.values()) + + +def test_units_adding_to_zero(sess_registry): + assert ( + infer_base_unit(sess_registry.Quantity(1, "m * mm / m / um * s")) + == sess_registry.Quantity(1, "s").units + ) + + +def test_to_compact(sess_registry): + r = ( + sess_registry.Quantity(1000000000, "m") + * sess_registry.Quantity(1, "mm") + / sess_registry.Quantity(1, "s") + / sess_registry.Quantity(1, "ms") + ) + compact_r = r.to_compact() + expected = sess_registry.Quantity(1000.0, "kilometer**2 / second**2") + helpers.assert_quantity_almost_equal(compact_r, expected) + + r = ( + sess_registry.Quantity(1, "m") + * sess_registry.Quantity(1, "mm") + / sess_registry.Quantity(1, "m") + / sess_registry.Quantity(2, "um") + * sess_registry.Quantity(2, "s") + ).to_compact() + helpers.assert_quantity_almost_equal(r, sess_registry.Quantity(1000, "s")) + + +def test_to_compact_decimal(sess_registry): + ureg = UnitRegistry(non_int_type=Decimal) + Q = ureg.Quantity + r = ( + Q(Decimal("1000000000.0"), "m") + * Q(Decimal(1), "mm") + / Q(Decimal(1), "s") + / Q(Decimal(1), "ms") + ) + compact_r = r.to_compact() + expected = Q(Decimal("1000.0"), "kilometer**2 / second**2") + assert compact_r == expected + + r = ( + Q(Decimal(1), "m") * Q(1, "mm") / Q(1, "m**2") / Q(2, "um") * Q(2, "s") + ).to_compact() + assert r == Q(1000, "s/m") + + +def test_to_compact_fraction(sess_registry): + ureg = UnitRegistry(non_int_type=Fraction) + Q = ureg.Quantity + r = ( + Q(Fraction("10000000000/10"), "m") + * Q(Fraction("1"), "mm") + / Q(Fraction("1"), "s") + / Q(Fraction("1"), "ms") + ) + compact_r = r.to_compact() + expected = Q(Fraction("1000.0"), "kilometer**2 / second**2") + assert compact_r == expected + + r = ( + sess_registry.Quantity(Fraction(1), "m") + * 
sess_registry.Quantity(1, "mm") + / sess_registry.Quantity(1, "m**2") + / sess_registry.Quantity(2, "um") + * sess_registry.Quantity(2, "s") + ).to_compact() + assert r == Q(1000, "s/m") + + +def test_volts(sess_registry): + r = ( + sess_registry.Quantity(1, "V") + * sess_registry.Quantity(1, "mV") + / sess_registry.Quantity(1, "kV") + ) + b = infer_base_unit(r) + assert b == sess_registry.Quantity(1, "V").units + helpers.assert_quantity_almost_equal(r, sess_registry.Quantity(1, "uV")) diff --git a/pint/testsuite/test_issues.py b/pint/testsuite/test_issues.py index cf7e39c79..010074dde 100644 --- a/pint/testsuite/test_issues.py +++ b/pint/testsuite/test_issues.py @@ -1,20 +1,30 @@ +from __future__ import annotations + import copy +import decimal import math import pprint import pytest -from pint import Context, DimensionalityError, UnitRegistry, get_application_registry +from pint import ( + Context, + DimensionalityError, + UnitRegistry, + get_application_registry, +) from pint.compat import np +from pint.delegates.formatter._compound_unit_helpers import sort_by_dimensionality from pint.facets.plain.unit import UnitsContainer from pint.testing import assert_equal from pint.testsuite import QuantityTestCase, helpers from pint.util import ParserHelper +from .helpers import internal + # TODO: do not subclass from QuantityTestCase class TestIssues(QuantityTestCase): - kwargs = dict(autoconvert_offset_to_baseunit=False) @pytest.mark.xfail @@ -60,29 +70,6 @@ def test_issue37(self, module_registry): np.testing.assert_array_equal(qq.magnitude, x * m) assert qq.units == module_registry.meter.units - @pytest.mark.xfail - @helpers.requires_numpy - def test_issue39(self, module_registry): - x = np.matrix([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) - q = module_registry.meter * x - assert isinstance(q, module_registry.Quantity) - np.testing.assert_array_equal(q.magnitude, x) - assert q.units == module_registry.meter.units - q = x * module_registry.meter - assert isinstance(q, module_registry.Quantity) - np.testing.assert_array_equal(q.magnitude, x) - assert q.units == module_registry.meter.units - - m = np.matrix(2 * np.ones(3, 3)) - qq = q * m - assert isinstance(qq, module_registry.Quantity) - np.testing.assert_array_equal(qq.magnitude, x * m) - assert qq.units == module_registry.meter.units - qq = m * q - assert isinstance(qq, module_registry.Quantity) - np.testing.assert_array_equal(qq.magnitude, x * m) - assert qq.units == module_registry.meter.units - @helpers.requires_numpy def test_issue44(self, module_registry): x = 4.0 * module_registry.dimensionless @@ -249,7 +236,6 @@ def test_issue77(self, module_registry): assert dis.value == acc.value * tim.value**2 / 2 def test_issue85(self, module_registry): - T = 4.0 * module_registry.kelvin m = 1.0 * module_registry.amu va = 2.0 * module_registry.k * T / m @@ -262,7 +248,6 @@ def test_issue85(self, module_registry): helpers.assert_quantity_almost_equal(va.to_base_units(), vb.to_base_units()) def test_issue86(self, module_registry): - module_registry.autoconvert_offset_to_baseunit = True def parts(q): @@ -334,7 +319,6 @@ def test_issue93(self, module_registry): helpers.assert_quantity_almost_equal(z, 5.1 * module_registry.meter) def test_issue104(self, module_registry): - x = [ module_registry("1 meter"), module_registry("1 meter"), @@ -361,7 +345,6 @@ def summer(values): helpers.assert_quantity_almost_equal(y[0], module_registry.Quantity(1, "meter")) def test_issue105(self, module_registry): - func = module_registry.parse_unit_name val = list(func("meter")) 
assert list(func("METER")) == [] @@ -394,7 +377,7 @@ def test_angstrom_creation(self, module_registry): module_registry.Quantity(2, "Å") def test_alternative_angstrom_definition(self, module_registry): - module_registry.Quantity(2, "\u212B") + module_registry.Quantity(2, "\u212b") def test_micro_creation_U03bc(self, module_registry): module_registry.Quantity(2, "μm") @@ -402,6 +385,15 @@ def test_micro_creation_U03bc(self, module_registry): def test_micro_creation_U00b5(self, module_registry): module_registry.Quantity(2, "µm") + def test_micro_creation_mu(self, module_registry): + module_registry.Quantity(2, "mug") + + def test_micro_creation_mc(self, module_registry): + module_registry.Quantity(2, "mcg") + + def test_liter_creation_U2113(self, module_registry): + module_registry.Quantity(2, "ℓ") + @helpers.requires_numpy def test_issue171_real_imag(self, module_registry): qr = [1.0, 2.0, 3.0, 4.0] * module_registry.meter @@ -449,10 +441,10 @@ def test_issue339(self, module_registry): def test_issue354_356_370(self, module_registry): assert ( - "{:~}".format(1 * module_registry.second / module_registry.millisecond) + f"{1 * module_registry.second / module_registry.millisecond:~}" == "1.0 s / ms" ) - assert "{:~}".format(1 * module_registry.count) == "1 count" + assert f"{1 * module_registry.count:~}" == "1 count" assert "{:~}".format(1 * module_registry("MiB")) == "1 MiB" def test_issue468(self, module_registry): @@ -473,7 +465,6 @@ def test_issue482(self, module_registry): @helpers.requires_numpy def test_issue483(self, module_registry): - a = np.asarray([1, 2, 3]) q = [1, 2, 3] * module_registry.dimensionless p = (q**q).m @@ -732,7 +723,7 @@ def test_issue1058(self, module_registry): def test_issue1062_issue1097(self): # Must not be used by any other tests ureg = UnitRegistry() - assert "nanometer" not in ureg._units + assert "nanometer" not in internal(ureg)._units for i in range(5): ctx = Context.from_lines(["@context _", "cal = 4 J"]) with ureg.context("sp", ctx): @@ -879,13 +870,114 @@ def test_issue1277(self, module_registry): assert c.to("percent").m == 50 # assert c.to("%").m == 50 # TODO: fails. 
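Illustrative sketch (not part of the patch): the test_issue1963 added just below treats permille, ppm and percent as plain dimensionless scale factors. A minimal standalone illustration, assuming a stock UnitRegistry in which "permille"/"‰" and "ppm" are defined, as the test itself asserts:

    import math
    from pint import UnitRegistry

    ureg = UnitRegistry()
    a = ureg.Quantity("10 permille")  # the "‰" symbol parses to the same unit
    b = ureg.Quantity("100 ppm")
    # 10 permille is the plain number 0.01, and 1e2 * 100 ppm is the same value
    assert math.isclose(a.to("dimensionless").m, 0.01)
    assert math.isclose((1e2 * b).to("dimensionless").m, a.to("dimensionless").m)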
+ def test_issue1963(self, module_registry): + ureg = module_registry + assert ureg("‰") == ureg("permille") + assert ureg("‰") == ureg.permille + + a = ureg.Quantity("10 ‰") + b = ureg.Quantity("100 ppm") + c = ureg.Quantity("0.5") + + assert f"{a}" == "10 permille" + assert f"{a:~}" == "10 ‰" + + assert_equal(a, 0.01) + assert_equal(1e2 * b, a) + assert_equal(c, 50 * a) + + assert_equal((1 * ureg.milligram) / (1 * ureg.gram), 1 * ureg.permille) + + @pytest.mark.xfail @helpers.requires_uncertainties() def test_issue_1300(self): + # TODO: this is no longer necessary after moving to the formatter module_registry = UnitRegistry() module_registry.default_format = "~P" m = module_registry.Measurement(1, 0.1, "meter") assert m.default_format == "~P" + @helpers.requires_numpy() + def test_issue1674(self, module_registry): + Q_ = module_registry.Quantity + arr_of_q = np.array([Q_(2, "m"), Q_(4, "m")], dtype="object") + q_arr = Q_(np.array([1, 2]), "m") + + helpers.assert_quantity_equal( + arr_of_q * q_arr, np.array([Q_(2, "m^2"), Q_(8, "m^2")], dtype="object") + ) + helpers.assert_quantity_equal( + arr_of_q / q_arr, np.array([Q_(2, ""), Q_(2, "")], dtype="object") + ) + + arr_of_q = np.array([Q_(2, "m"), Q_(4, "s")], dtype="object") + q_arr = Q_(np.array([1, 2]), "m") + + helpers.assert_quantity_equal( + arr_of_q * q_arr, np.array([Q_(2, "m^2"), Q_(8, "m s")], dtype="object") + ) + + @helpers.requires_babel(["es_ES"]) + def test_issue_1400(self, sess_registry): + q1 = 3.1 * sess_registry.W + q2 = 3.1 * sess_registry.W / sess_registry.cm + assert q1.format_babel("~", locale="es_ES") == "3,1 W" + assert q1.format_babel("", locale="es_ES") == "3,1 vatios" + assert q2.format_babel("~", locale="es_ES") == "3,1 W/cm" + assert q2.format_babel("", locale="es_ES") == "3,1 vatios por centímetro" + + @helpers.requires_numpy() + @helpers.requires_uncertainties() + def test_issue1611(self, module_registry): + from numpy.testing import assert_almost_equal + from uncertainties import ufloat + + from pint import pint_eval + + pint_eval.tokenizer = pint_eval.uncertainty_tokenizer + + u1 = ufloat(1.2, 0.34) + u2 = ufloat(5.6, 0.78) + q1_u = module_registry.Quantity(u2 - u1, "m") + q1_str = str(q1_u) + q1_str = f"{q1_u:.4uS}" + q1_m = q1_u.magnitude + q2_u = module_registry.Quantity(q1_str) + # Not equal because the uncertainties are differently random!
+ assert q1_u != q2_u + q2_m = q2_u.magnitude + + assert_almost_equal(q2_m.nominal_value, q1_m.nominal_value, decimal=9) + assert_almost_equal(q2_m.std_dev, q1_m.std_dev, decimal=4) + + q3_str = "12.34(5678)e-066 m" + q3_u = module_registry.Quantity(q3_str) + q3_m = q3_u.magnitude + assert q3_m < 1 + + @helpers.requires_uncertainties + def test_issue1614(self, module_registry): + from uncertainties import UFloat, ufloat + + q = module_registry.Quantity(1.0, "m") + assert isinstance(q, module_registry.Quantity) + m = module_registry.Measurement(2.0, 0.3, "m") + assert isinstance(m, module_registry.Measurement) + + u1 = ufloat(1.2, 3.4) + u2 = ufloat(5.6, 7.8) + q1_u = module_registry.Quantity(u1, "m") + m1 = module_registry.Measurement(q1_u) + assert m1.value.magnitude == u1.nominal_value + assert m1.error.magnitude == u1.std_dev + m2 = module_registry.Measurement(5.6, 7.8) # dimensionless + q2_u = module_registry.Quantity(m2) + assert isinstance(q2_u.magnitude, UFloat) + assert q2_u.magnitude.nominal_value == m2.value + assert q2_u.magnitude.nominal_value == u2.nominal_value + assert q2_u.magnitude.std_dev == m2.error + assert q2_u.magnitude.std_dev == u2.std_dev + if np is not None: @@ -1040,6 +1132,25 @@ def test_backcompat_speed_velocity(func_registry): assert get("[speed]") == UnitsContainer({"[length]": 1, "[time]": -1}) +def test_issue1433(func_registry): + assert func_registry.Quantity("1 micron") == func_registry.Quantity("1 micrometer") + + +def test_issue1527(): + ureg = UnitRegistry(non_int_type=decimal.Decimal) + x = ureg.parse_expression("2 microliter milligram/liter") + assert x.magnitude.as_tuple()[1] == (2,) + assert x.to_compact().as_tuple()[1] == (2,) + assert x.to_base_units().as_tuple()[1] == (2,) + assert x.to("ng").as_tuple()[1] == (2,) + + +def test_issue1621(): + ureg = UnitRegistry(non_int_type=decimal.Decimal) + digits = ureg.Quantity("5.0 mV/m").to_base_units().magnitude.as_tuple()[1] + assert digits == (5, 0) + + def test_issue1631(): import pint @@ -1059,3 +1170,158 @@ class MyRegistry(pint.UnitRegistry): q = 2 * ureg.meter assert isinstance(q, ureg.Quantity) assert isinstance(q, pint.Quantity) + + +def test_issue1725(registry_empty): + registry_empty.define("dollar = [currency]") + assert registry_empty.get_compatible_units("dollar") == set() + + +def test_issue1505(): + ur = UnitRegistry(non_int_type=decimal.Decimal) + + assert isinstance(ur.Quantity("1m/s").magnitude, decimal.Decimal) + assert not isinstance( + ur.Quantity("m/s").magnitude, float + ) # unexpected success (magnitude should not be a float) + assert isinstance( + ur.Quantity("m/s").magnitude, decimal.Decimal + ) # unexpected fail (magnitude should be a decimal) + + +def test_issue_1845(): + ur = UnitRegistry(auto_reduce_dimensions=True, non_int_type=decimal.Decimal) + # before issue 1845 these inputs would have resulted in a TypeError + assert ur("km / h * m").units == ur.Quantity("meter ** 2 / hour") + assert ur("kW / min * W").units == ur.Quantity("watts ** 2 / minute") + + +@pytest.mark.parametrize( + "units,spec,expected", + [ + # (dict(hour=1, watt=1), "P~", "W·h"), + (dict(ampere=1, volt=1), "P~", "V·A"), + # (dict(meter=1, newton=1), "P~", "N·m"), + ], +) +def test_issues_1841(func_registry, units, spec, expected): + ur = func_registry + ur.formatter.default_sort_func = sort_by_dimensionality + ur.formatter.default_format = spec + value = ur.Unit(UnitsContainer(**units)) + assert f"{value}" == expected + + +@pytest.mark.xfail +def test_issues_1841_xfail(): + from pint import formatting 
as fmt + from pint.delegates.formatter._compound_unit_helpers import sort_by_dimensionality + + # sets compact display mode by default + ur = UnitRegistry() + ur.formatter.default_format = "~P" + ur.formatter.default_sort_func = sort_by_dimensionality + + q = ur.Quantity("2*pi radian * hour") + + # Note that `radian` (and `bit` and `count`) are treated as dimensionless. + # And note that dimensionless quantities are stripped by this process, + # leading to erroneous output. Suggestions? + assert ( + fmt.format_unit(q.u._units, spec="", registry=ur, sort_dims=True) + == "radian * hour" + ) + assert ( + fmt.format_unit(q.u._units, spec="", registry=ur, sort_dims=False) + == "hour * radian" + ) + + # this prints "2*pi hour * radian", not "2*pi radian * hour" unless sort_dims is True + # print(q) + + +def test_issue1949(registry_empty): + ureg = UnitRegistry() + ureg.define( + "in_Hg_gauge = 3386389 * gram / metre / second ** 2; offset:101325000 = inHg_g = in_Hg_g = inHg_gauge" + ) + q = ureg.Quantity("1 atm").to("inHg_gauge") + assert q.units == ureg.in_Hg_gauge + assert_equal(q.magnitude, 0.0) + + +@pytest.mark.parametrize( + "given,expected", + [ + ( + "8.989e9 newton * meter^2 / coulomb^2", + r"\SI[]{8.989E+9}{\meter\squared\newton\per\coulomb\squared}", + ), + ("5 * meter / second", r"\SI[]{5}{\meter\per\second}"), + ("2.2 * meter^4", r"\SI[]{2.2}{\meter\tothe{4}}"), + ("2.2 * meter^-4", r"\SI[]{2.2}{\per\meter\tothe{4}}"), + ], +) +def test_issue1772(given, expected): + ureg = UnitRegistry(non_int_type=decimal.Decimal) + assert f"{ureg(given):Lx}" == expected + + +def test_issue2017(): + ureg = UnitRegistry() + + from pint import formatting as fmt + + @fmt.register_unit_format("test") + def _test_format(unit, registry, **options): + proc = {u.replace("µ", "u"): e for u, e in unit.items()} + return fmt.formatter( + proc.items(), + as_ratio=True, + single_denominator=False, + product_fmt="*", + division_fmt="/", + power_fmt="{}{}", + parentheses_fmt="({})", + **options, + ) + + base_unit = ureg.microsecond + assert f"{base_unit:~test}" == "us" + assert f"{base_unit:test}" == "microsecond" + + +def test_issue2007(): + ureg = UnitRegistry() + q = ureg.Quantity(1, "") + assert f"{q:P}" == "1 dimensionless" + assert f"{q:C}" == "1 dimensionless" + assert f"{q:D}" == "1 dimensionless" + assert f"{q:H}" == "1 dimensionless" + + assert f"{q:L}" == "1\\ \\mathrm{dimensionless}" + # L returned '1\\ dimensionless' in pint 0.23 + + assert f"{q:Lx}" == "\\SI[]{1}{}" + assert f"{q:~P}" == "1" + assert f"{q:~C}" == "1" + assert f"{q:~D}" == "1" + assert f"{q:~H}" == "1" + + +@helpers.requires_uncertainties() +@helpers.requires_numpy() +def test_issue2044(): + from numpy.testing import assert_almost_equal + from uncertainties import ufloat + + ureg = UnitRegistry() + # First make sure this doesn't fail completely (A Measurement) + q = ureg.Quantity(10_000, "m").plus_minus(0.01).to_compact() + assert_almost_equal(q.m.n, 10.0) + assert q.u == "kilometer" + + # Similarly, for a Ufloat with units + q = (ufloat(10_000, 0.01) * ureg.m).to_compact() + assert_almost_equal(q.m.n, 10.0) + assert q.u == "kilometer" diff --git a/pint/testsuite/test_log_units.py b/pint/testsuite/test_log_units.py index f9dfe77d3..f8728febd 100644 --- a/pint/testsuite/test_log_units.py +++ b/pint/testsuite/test_log_units.py @@ -1,9 +1,18 @@ +from __future__ import annotations + import logging import math +import operator as op import pytest -from pint import OffsetUnitCalculusError, Unit, UnitRegistry +from pint import ( + 
DimensionalityError, + LogarithmicUnitCalculusError, + OffsetUnitCalculusError, + Unit, + UnitRegistry, +) from pint.facets.plain.unit import UnitsContainer from pint.testsuite import QuantityTestCase, helpers @@ -16,7 +25,6 @@ def module_registry_auto_offset(): # TODO: do not subclass from QuantityTestCase class TestLogarithmicQuantity(QuantityTestCase): def test_log_quantity_creation(self, caplog): - # Following Quantity Creation Pattern for args in ( (4.2, "dBm"), @@ -49,6 +57,27 @@ def test_log_quantity_creation(self, caplog): assert len(caplog.records) == 1 + def test_delta_log_quantity_creation(self, log_module_registry): + # Following Quantity Creation Pattern for "delta_" units: + # tests the quantity creation of a delta decibel unit: delta_decibelmilliwatt. + for args in ( + (4.2, "delta_dBm"), + (4.2, UnitsContainer(delta_decibelmilliwatt=1)), + (4.2, log_module_registry.delta_dBm), + ): + x = log_module_registry.Quantity(*args) + assert x.magnitude == 4.2 + assert x.units == UnitsContainer(delta_decibelmilliwatt=1) + # tests the quantity creation of a delta decibel unit: delta_decibel. + for args in ( + (4.2, "delta_dB"), + (4.2, UnitsContainer(delta_decibel=1)), + (4.2, log_module_registry.delta_dB), + ): + x = log_module_registry.Quantity(*args) + assert x.magnitude == 4.2 + assert x.units == UnitsContainer(delta_decibel=1) + def test_log_convert(self): # # 1 dB = 1/10 * bel # helpers.assert_quantity_almost_equal(self.Q_(1.0, "dB").to("dimensionless"), self.Q_(1, "bell") / 10) @@ -57,13 +86,24 @@ def test_log_convert(self): # ## Test dB to dB units octave - decade # 1 decade = log2(10) octave helpers.assert_quantity_almost_equal( - self.Q_(1.0, "decade"), self.Q_(math.log(10, 2), "octave") + self.Q_(1.0, "decade"), self.Q_(math.log2(10), "octave") ) # ## Test dB to dB units dBm - dBu # 0 dBm = 1mW = 1e3 uW = 30 dBu helpers.assert_quantity_almost_equal( self.Q_(0.0, "dBm"), self.Q_(29.999999999999996, "dBu"), atol=1e-7 ) + # ## Test dB to dB units dBm - dBW + # 0 dBW = 1W = 1e3 mW = 30 dBm + helpers.assert_quantity_almost_equal( + self.Q_(0.0, "dBW"), self.Q_(29.999999999999996, "dBm"), atol=1e-7 + ) def test_mix_regular_log_units(self): # Test regular-logarithmic mixed definition, such as dB/km or dB/cm @@ -83,6 +123,10 @@ def test_mix_regular_log_units(self): log_unit_names = [ + "decibelwatt", + "dBW", "decibelmilliwatt", "dBm", "decibelmicrowatt", @@ -131,9 +175,50 @@ def test_quantity_by_multiplication(module_registry_auto_offset, unit_name, mag) assert q.units == unit +log_delta_unit_names = ["delta_" + name for name in log_unit_names if name != "decade"] + + +@pytest.mark.parametrize("unit_name", log_delta_unit_names) +def test_deltaunit_by_attribute(log_module_registry, unit_name): + """Can the delta logarithmic units be accessed by attribute lookups?""" + unit = getattr(log_module_registry, unit_name) + assert isinstance(unit, Unit) + + +@pytest.mark.parametrize("unit_name", log_delta_unit_names) +def test_deltaunit_parsing(log_module_registry, unit_name): + """Can the delta logarithmic units be understood by the parser?""" + unit = getattr(log_module_registry, unit_name) + assert isinstance(unit, Unit) + + +@pytest.mark.parametrize("mag", [1.0, 4.2]) +@pytest.mark.parametrize("unit_name", log_delta_unit_names) +def 
test_delta_quantity_by_constructor(log_module_registry, unit_name, mag): + """Can Quantity() objects be constructed using delta logarithmic units?""" + q = log_module_registry.Quantity(mag, unit_name) + assert q.magnitude == pytest.approx(mag) + assert q.units == getattr(log_module_registry, unit_name) + + +@pytest.mark.parametrize("mag", [1.0, 4.2]) +@pytest.mark.parametrize("unit_name", log_delta_unit_names) +def test_delta_quantity_by_multiplication(log_module_registry, unit_name, mag): + """Test that delta logarithmic units can be defined with multiplication + + Requires setting `autoconvert_offset_to_baseunit` to True + """ + unit = getattr(log_module_registry, unit_name) + q = mag * unit + assert q.magnitude == pytest.approx(mag) + assert q.units == unit + + @pytest.mark.parametrize( "unit1,unit2", [ + ("decibelwatt", "dBW"), ("decibelmilliwatt", "dBm"), ("decibelmicrowatt", "dBu"), ("decibel", "dB"), @@ -271,3 +356,572 @@ def test_frequency_octave_addition(module_registry_auto_offset, freq1, octaves, new_freq = freq1 + shift assert new_freq.units == freq1.units assert new_freq.magnitude == pytest.approx(freq2) + + +def test_db_db_addition(log_module_registry): + """Test that a dB value can be added to a dB value and the answer is correct.""" + # adding two dB units: 10 * log10(10**0.5 + 10**1) ≈ 11.193 + power = (5 * log_module_registry.dB) + (10 * log_module_registry.dB) + assert power.magnitude == pytest.approx(11.19331048066) + assert power.units == log_module_registry.dB + + # Adding two absolute dB units + power = (5 * log_module_registry.dBW) + (10 * log_module_registry.dBW) + assert power.magnitude == pytest.approx(11.19331048066) + assert power.units == log_module_registry.dBW + + +class TestLogarithmicUnitMath(QuantityTestCase): + @classmethod + def setup_class(cls): + cls.kwargs["autoconvert_offset_to_baseunit"] = True + cls.kwargs["logarithmic_math"] = True + super().setup_class() + + @classmethod + def teardown_class(cls): + cls.kwargs["autoconvert_offset_to_baseunit"] = False + cls.kwargs["logarithmic_math"] = False + super().teardown_class() + + additions = [ + # --- input tuple --| -- expected result --| -- expected result (conversion to base units) -- + pytest.param( + ((2, "dB"), (1, "decibel")), + (4.5390189104386724, "decibel"), + (4.5390189104386724, "decibel"), + id="dB+dB", + ), + pytest.param( + ((2, "dBW"), (1, "decibelwatt")), + (4.5390189104386724, "decibelwatt"), + (4.5390189104386724, "decibelwatt"), + id="dBW+dBW", + ), + pytest.param( + ((2, "delta_dBW"), (1, "delta_decibelwatt")), + (3, "delta_decibelwatt"), + (3, "delta_decibelwatt"), + id="delta_dBW+delta_dBW", + ), + pytest.param( + ((100, "dimensionless"), (2, "decibel")), "error", "error", id="'' + dB" + ), + pytest.param( + ((2, "decibel"), (100, "dimensionless")), "error", "error", id="dB + ''" + ), # ensures symmetry + pytest.param( + ((100, "dimensionless"), (2, "dBW")), "error", "error", id="'' + dBW" + ), + pytest.param( + ((2, "dBW"), (100, "dimensionless")), "error", "error", id="dBW + ''" + ), + pytest.param(((100, "watt"), (2, "dBW")), "error", "error", id="W + dBW"), + pytest.param(((2, "dBW"), (100, "watt")), "error", "error", id="dBW + W"), + pytest.param( + ((2, "dBW"), (1, "decibel")), "error", "error", id="dBW+dB" + ), # dimensionality error + pytest.param( + ((2, "dB"), (1, "delta_decibel")), + (3, "decibel"), + (3, "decibel"), + id="dB+delta_dB", + ), + pytest.param( + ((2, "delta_dB"), (1, "decibel")), + (3, "decibel"), + (3, "decibel"), + id="delta_dB+dB", + ), + pytest.param( + ((2, "dBW"), (1, 
"delta_decibelwatt")), + (3, "decibelwatt"), + (3, "decibelwatt"), + id="dBW+delta_dBW", + ), + pytest.param( + ((2, "delta_dBW"), (10, "dimensionless")), + "error", + "error", + id="delta_dBW + ''", + ), + ] + + @pytest.mark.parametrize( + ("input_tuple", "expected", "expected_base_units"), additions + ) + def test_addition(self, input_tuple, expected, expected_base_units): + + qin1, qin2 = input_tuple + q1, q2 = self.Q_(*qin1), self.Q_(*qin2) + # update input tuple with new values to have correct values on failure + input_tuple = q1, q2 + + self.ureg.autoconvert_offset_to_baseunit = False + if expected == "error": + with pytest.raises( + ( + LogarithmicUnitCalculusError, + OffsetUnitCalculusError, + DimensionalityError, + ) + ): + op.add(q1, q2) + else: + expected = self.Q_(*expected) + assert op.add(q1, q2).units == expected.units + helpers.assert_quantity_almost_equal(op.add(q1, q2), expected, atol=0.01) + + self.ureg.autoconvert_offset_to_baseunit = True + if expected_base_units == "error": + with pytest.raises( + ( + LogarithmicUnitCalculusError, + OffsetUnitCalculusError, + DimensionalityError, + ) + ): + op.add(q1, q2) + else: + expected_base_units = self.Q_(*expected_base_units) + assert op.add(q1, q2).units == expected_base_units.units + helpers.assert_quantity_almost_equal( + op.add(q1, q2), expected_base_units, atol=0.01 + ) + + subtractions = [ + # --- input tuple -------------------- | -- expected result -- | -- expected result (conversion to base units) -- + pytest.param( + ((2, "dB"), (1, "decibel")), + (1, "delta_decibel"), + (1, "delta_decibel"), + id="dB-dB", + ), + pytest.param( + ((2, "dBW"), (1, "decibelwatt")), + (1, "delta_decibelwatt"), + (1, "delta_decibelwatt"), + id="dBW-dBW", + ), + pytest.param( + ((2, "delta_dBW"), (1, "delta_decibelwatt")), + (1, "delta_decibelwatt"), + (1, "delta_decibelwatt"), + id="delta_dBW-delta_dBW", + ), + pytest.param( + ((2, "dimensionless"), (10, "decibel")), + (-8, "dimensionless"), + (-8, "dimensionless"), + id="'' - dB", + ), + pytest.param( + ((10, "decibel"), (2, "dimensionless")), + (6.9897000433601875, "delta_decibel"), + (6.9897000433601875, "delta_decibel"), + id="dB - ''", + ), # no symmetry + pytest.param( + ((2, "dimensionless"), (10, "dBW")), "error", "error", id="'' - dBW" + ), + pytest.param( + ((10, "dBW"), (2, "dimensionless")), "error", "error", id="dBW - ''" + ), + pytest.param( + ((15, "watt"), (10, "dBW")), (5, "watt"), (5, "watt"), id="W - dBW" + ), + pytest.param( + ((10, "dBW"), (8, "watt")), + (0.9691001300805642, "delta_decibelwatt"), + (0.9691001300805642, "delta_decibelwatt"), + id="dBW - W", + ), + pytest.param( + ((2, "dBW"), (1, "decibel")), "error", "error", id="dBW-dB" + ), # dimensionality error + pytest.param( + ((2, "dB"), (1, "delta_decibel")), + (1, "decibel"), + (1, "decibel"), + id="dB-delta_dB", + ), + pytest.param( + ((2, "delta_dB"), (1, "decibel")), + (1, "decibel"), + (1, "decibel"), + id="delta_dB-dB", + ), + pytest.param( + ((4, "dBW"), (1, "delta_decibelwatt")), + (3, "decibelwatt"), + (3, "decibelwatt"), + id="dBW-delta_dBW", + ), + pytest.param( + ((10, "delta_dBW"), (2, "dimensionless")), + "error", + "error", + id="delta_dBW - ''", + ), + pytest.param( + ((10, "dimensionless"), (2, "delta_dBW")), + "error", + "error", + id="'' - delta_dBW", + ), + pytest.param( + ((15, "watt"), (10, "delta_dBW")), + (5, "watt"), + (5, "watt"), + id="W - delta_dBW", + ), + pytest.param( + ((10, "delta_dBW"), (8, "watt")), + (2, "watt"), + (2, "watt"), + id="delta_dBW - W", + ), + ] + + 
@pytest.mark.parametrize( + ("input_tuple", "expected", "expected_base_units"), subtractions + ) + def test_subtraction(self, input_tuple, expected, expected_base_units): + + qin1, qin2 = input_tuple + q1, q2 = self.Q_(*qin1), self.Q_(*qin2) + input_tuple = q1, q2 + + self.ureg.autoconvert_offset_to_baseunit = False + if expected == "error": + with pytest.raises( + ( + LogarithmicUnitCalculusError, + OffsetUnitCalculusError, + DimensionalityError, + ) + ): + op.sub(q1, q2) + else: + expected = self.Q_(*expected) + assert op.sub(q1, q2).units == expected.units + helpers.assert_quantity_almost_equal(op.sub(q1, q2), expected, atol=0.01) + + self.ureg.autoconvert_offset_to_baseunit = True + if expected_base_units == "error": + with pytest.raises( + ( + LogarithmicUnitCalculusError, + OffsetUnitCalculusError, + DimensionalityError, + ) + ): + op.sub(q1, q2) + else: + expected_base_units = self.Q_(*expected_base_units) + assert op.sub(q1, q2).units == expected_base_units.units + helpers.assert_quantity_almost_equal( + op.sub(q1, q2), expected_base_units, atol=0.01 + ) + + multiplications = [ + # --- input tuple --| -- expected result --| -- expected result (conversion to base units) -- + pytest.param( + ((2, "dB"), (1, "decibel")), "error", (2, "dimensionless"), id="dB*dB" + ), + pytest.param( + ((0.2, "dBm"), (0.1, "decibelmilliwatt")), + "error", + (1.07, "gram ** 2 * meter ** 4 / second ** 6"), + id="dBm*dBm", + ), + pytest.param( + ((0.2, "dB"), (0.1, "decibelmilliwatt")), + "error", + (1.07, "gram * meter ** 2 / second ** 3"), + id="dB*dBm", + ), + pytest.param( + ((2, "delta_dBW"), (1, "delta_decibelwatt")), + (2, "delta_decibelwatt ** 2"), + (2, "delta_decibelwatt ** 2"), + id="delta_dBW*delta_dBW", + ), + pytest.param( + ((2, "dimensionless"), (10, "decibel")), + "error", + (20, "dimensionless"), + id="'' * dB", + ), + pytest.param( + ((10, "decibel"), (2, "dimensionless")), + "error", + (20, "dimensionless"), + id="dB * ''", + ), + pytest.param( + ((2, "dimensionless"), (10, "dBW")), + "error", + (20 * 10**3, "gram * meter ** 2 / second ** 3"), + id="'' * dBW", + ), + pytest.param( + ((10, "dBW"), (2, "dimensionless")), + "error", + (20 * 10**3, "gram * meter ** 2 / second ** 3"), + id="dBW * ''", + ), + pytest.param( + ((15, "watt"), (10, "dBW")), + "error", + (150 * 10**3, "watt * gram * meter ** 2 / second ** 3"), + id="W*dBW", + ), + pytest.param( + ((10, "dBW"), (8, "watt")), + "error", + (80 * 10**3, "watt * gram * meter ** 2 / second ** 3"), + id="dBW*W", + ), + pytest.param( + ((2, "dBW"), (1, "decibel")), + "error", + (1.99526 * 10**3, "gram * meter ** 2 / second ** 3"), + id="dBW*dB", + ), + pytest.param( + ((2, "dB"), (1, "delta_decibel")), + "error", + (1.584, "delta_decibel"), + id="dB*delta_dB", + ), + pytest.param( + ((1, "delta_dB"), (2, "decibel")), + "error", + (1.584, "delta_decibel"), + id="delta_dB*dB", + ), + pytest.param( + ((4, "dBW"), (1, "delta_decibelwatt")), + "error", + (2511.88, "delta_decibelwatt * gram * meter ** 2 / second ** 3"), + id="dBW*delta_dBW", + ), + pytest.param( + ((10, "delta_dBW"), (2, "dimensionless")), + (20, "delta_dBW"), + (20, "delta_dBW"), + id="delta_dBW * ''", + ), + pytest.param( + ((2, "dimensionless"), (10, "delta_dBW")), + (20, "delta_dBW"), + (20, "delta_dBW"), + id="''*delta_dBW", + ), + pytest.param( + ((15, "watt"), (10, "delta_dBW")), + (150, "delta_dBW*watt"), + (150, "delta_dBW*watt"), + id="W*delta_dBW", + ), + pytest.param( + ((10, "delta_dBW"), (8, "watt")), + (80, "delta_dBW*watt"), + (80, "delta_dBW*watt"), + 
id="delta_dBW*W", + ), + ] + + @pytest.mark.parametrize( + ("input_tuple", "expected", "expected_base_units"), multiplications + ) + def test_multiplication(self, input_tuple, expected, expected_base_units): + + qin1, qin2 = input_tuple + q1, q2 = self.Q_(*qin1), self.Q_(*qin2) + input_tuple = q1, q2 + + self.ureg.autoconvert_offset_to_baseunit = False + if expected == "error": + with pytest.raises( + ( + LogarithmicUnitCalculusError, + OffsetUnitCalculusError, + DimensionalityError, + ) + ): + op.mul(q1, q2) + else: + expected = self.Q_(*expected) + assert op.mul(q1, q2).units == expected.units + helpers.assert_quantity_almost_equal(op.mul(q1, q2), expected, atol=0.01) + + self.ureg.autoconvert_offset_to_baseunit = True + if expected_base_units == "error": + with pytest.raises( + ( + LogarithmicUnitCalculusError, + OffsetUnitCalculusError, + DimensionalityError, + ) + ): + op.mul(q1, q2) + else: + expected_base_units = self.Q_(*expected_base_units) + assert op.mul(q1, q2).units == expected_base_units.units + helpers.assert_quantity_almost_equal( + op.mul(q1, q2), expected_base_units, atol=0.01 + ) + + divisions = [ + # --- input tuple --| -- expected result --| -- expected result (conversion to base units) -- + pytest.param( + ((4, "dB"), (2, "decibel")), "error", (1.5849, "dimensionless"), id="dB/dB" + ), + pytest.param( + ((4, "dBm"), (2, "decibelmilliwatt")), + "error", + (1.5849, "dimensionless"), + id="dBm/dBm", + ), + pytest.param( + ((4, "delta_dBW"), (2, "delta_decibelwatt")), + (2, "dimensionless"), + (2, "dimensionless"), + id="delta_dBW/delta_dBW", + ), + pytest.param( + ((20, "dimensionless"), (10, "decibel")), + "error", + (2, "dimensionless"), + id="'' / dB", + ), + pytest.param( + ((10, "decibel"), (2, "dimensionless")), + "error", + (5, "dimensionless"), + id="dB / ''", + ), + pytest.param( + ((2, "dimensionless"), (10, "dBW")), + "error", + (0.2 * 10**-3, "second ** 3 / gram / meter ** 2"), + id="'' / dBW", + ), + pytest.param( + ((10, "dBW"), (2, "dimensionless")), + "error", + (5 * 10**3, "gram * meter ** 2 / second ** 3"), + id="dBW / ''", + ), + pytest.param( + ((15, "watt"), (10, "dBW")), + "error", + (1.5 * 10**-3, "watt * second ** 3 / gram / meter ** 2"), + id="W/dBW", + ), + pytest.param( + ((10, "dBW"), (2, "watt")), + "error", + (5 * 10**3, "gram * meter ** 2 / second ** 3 / watt"), + id="dBW/W", + ), + pytest.param( + ((2, "dBW"), (1, "decibel")), + "error", + (1.25892 * 10**3, "gram * meter ** 2 / second ** 3"), + id="dBW/dB", + ), + pytest.param( + ((10, "dB"), (2, "decibelmilliwatt")), + "error", + (6.3095, "second ** 3 / gram / meter ** 2"), + id="dB/dBm", + ), + pytest.param( + ((10, "dB"), (2, "delta_decibel")), + "error", + (5, "1 / delta_decibel"), + id="dB/delta_dB", + ), + pytest.param( + ((20, "delta_dB"), (10, "decibel")), + "error", + (2, "delta_decibel"), + id="delta_dB/dB", + ), + pytest.param( + ((10, "dBW"), (2, "delta_decibelwatt")), + "error", + (5 * 10**3, "gram * meter ** 2 / second ** 3 / delta_decibelwatt"), + id="dBW/delta_dBW", + ), + pytest.param( + ((10, "delta_dBW"), (2, "dimensionless")), + (5, "delta_dBW"), + (5, "delta_dBW"), + id="delta_dBW / ''", + ), + pytest.param( + ((2, "dimensionless"), (10, "delta_dBW")), + (0.2, "1 / delta_dBW"), + (0.2, "1 / delta_dBW"), + id="''/delta_dBW", + ), + pytest.param( + ((10, "watt"), (5, "delta_dBW")), + (2, "watt/delta_dBW"), + (2, "watt/delta_dBW"), + id="W/delta_dBW", + ), + pytest.param( + ((10, "delta_dBW"), (5, "watt")), + (2, "delta_dBW/watt"), + (2, "delta_dBW/watt"), + 
id="delta_dBW/W", + ), + ] + + @pytest.mark.parametrize( + ("input_tuple", "expected", "expected_base_units"), divisions + ) + def test_true_division(self, input_tuple, expected, expected_base_units): + + qin1, qin2 = input_tuple + q1, q2 = self.Q_(*qin1), self.Q_(*qin2) + input_tuple = q1, q2 + + self.ureg.autoconvert_offset_to_baseunit = False + if expected == "error": + with pytest.raises( + ( + LogarithmicUnitCalculusError, + OffsetUnitCalculusError, + DimensionalityError, + ) + ): + op.truediv(q1, q2) + else: + expected = self.Q_(*expected) + assert op.truediv(q1, q2).units == expected.units + helpers.assert_quantity_almost_equal( + op.truediv(q1, q2), expected, atol=0.01 + ) + + self.ureg.autoconvert_offset_to_baseunit = True + if expected_base_units == "error": + with pytest.raises( + ( + LogarithmicUnitCalculusError, + OffsetUnitCalculusError, + DimensionalityError, + ) + ): + op.truediv(q1, q2) + else: + expected_base_units = self.Q_(*expected_base_units) + assert op.truediv(q1, q2).units == expected_base_units.units + helpers.assert_quantity_almost_equal( + op.truediv(q1, q2), expected_base_units, atol=0.01 + ) diff --git a/pint/testsuite/test_matplotlib.py b/pint/testsuite/test_matplotlib.py index 25f317286..5327b5b0b 100644 --- a/pint/testsuite/test_matplotlib.py +++ b/pint/testsuite/test_matplotlib.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pytest from pint import UnitRegistry @@ -46,3 +48,21 @@ def test_plot_with_set_units(local_registry): ax.axvline(120 * local_registry.minutes, color="tab:green") return fig + + +@pytest.mark.mpl_image_compare(tolerance=0, remove_text=True) +def test_plot_with_non_default_format(local_registry): + local_registry.mpl_formatter = "{:~P}" + + y = np.linspace(0, 30) * local_registry.miles + x = np.linspace(0, 5) * local_registry.hours + + fig, ax = plt.subplots() + ax.yaxis.set_units(local_registry.inches) + ax.xaxis.set_units(local_registry.seconds) + + ax.plot(x, y, "tab:blue") + ax.axhline(26400 * local_registry.feet, color="tab:red") + ax.axvline(120 * local_registry.minutes, color="tab:green") + + return fig diff --git a/pint/testsuite/test_measurement.py b/pint/testsuite/test_measurement.py index 926b4d6a6..a66f72dc1 100644 --- a/pint/testsuite/test_measurement.py +++ b/pint/testsuite/test_measurement.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pytest from pint import DimensionalityError @@ -18,7 +20,8 @@ def test_instantiate(self): class TestMeasurement(QuantityTestCase): def test_simple(self): M_ = self.ureg.Measurement - M_(4.0, 0.1, "s") + m = M_(4.0, 0.1, "s * s") + assert repr(m) == "" def test_build(self): M_ = self.ureg.Measurement @@ -38,131 +41,142 @@ def test_build(self): assert m.error == u assert m.rel == m.error / abs(m.value) - def test_format(self, subtests): - v, u = self.Q_(4.0, "s ** 2"), self.Q_(0.1, "s ** 2") - m = self.ureg.Measurement(v, u) - - for spec, result in ( - ("{}", "(4.00 +/- 0.10) second ** 2"), - ("{!r}", ""), - ("{:P}", "(4.00 ± 0.10) second²"), - ("{:L}", r"\left(4.00 \pm 0.10\right)\ \mathrm{second}^{2}"), - ("{:H}", "(4.00 ± 0.10) second2"), - ("{:C}", "(4.00+/-0.10) second**2"), - ("{:Lx}", r"\SI{4.00 +- 0.10}{\second\squared}"), - ("{:.1f}", "(4.0 +/- 0.1) second ** 2"), - ("{:.1fP}", "(4.0 ± 0.1) second²"), - ("{:.1fL}", r"\left(4.0 \pm 0.1\right)\ \mathrm{second}^{2}"), - ("{:.1fH}", "(4.0 ± 0.1) second2"), - ("{:.1fC}", "(4.0+/-0.1) second**2"), - ("{:.1fLx}", r"\SI{4.0 +- 0.1}{\second\squared}"), - ): - with subtests.test(spec): - assert spec.format(m) == 
result - - def test_format_paru(self, subtests): - v, u = self.Q_(0.20, "s ** 2"), self.Q_(0.01, "s ** 2") - m = self.ureg.Measurement(v, u) - - for spec, result in ( - ("{:uS}", "0.200(10) second ** 2"), - ("{:.3uS}", "0.2000(100) second ** 2"), - ("{:.3uSP}", "0.2000(100) second²"), - ("{:.3uSL}", r"0.2000\left(100\right)\ \mathrm{second}^{2}"), - ("{:.3uSH}", "0.2000(100) second2"), - ("{:.3uSC}", "0.2000(100) second**2"), - ): - with subtests.test(spec): - assert spec.format(m) == result - - def test_format_u(self, subtests): - v, u = self.Q_(0.20, "s ** 2"), self.Q_(0.01, "s ** 2") - m = self.ureg.Measurement(v, u) - - for spec, result in ( - ("{:.3u}", "(0.2000 +/- 0.0100) second ** 2"), - ("{:.3uP}", "(0.2000 ± 0.0100) second²"), - ("{:.3uL}", r"\left(0.2000 \pm 0.0100\right)\ \mathrm{second}^{2}"), - ("{:.3uH}", "(0.2000 ± 0.0100) second2"), - ("{:.3uC}", "(0.2000+/-0.0100) second**2"), + @pytest.mark.parametrize( + "spec, expected", + [ + ("", "(4.00 +/- 0.10) second ** 2"), + ("P", "(4.00 ± 0.10) second²"), + ("L", r"\left(4.00 \pm 0.10\right)\ \mathrm{second}^{2}"), + ("H", "(4.00 ± 0.10) second2"), + ("C", "(4.00+/-0.10) second**2"), + ("Lx", r"\SI{4.00 +- 0.10}{\second\squared}"), + (".1f", "(4.0 +/- 0.1) second ** 2"), + (".1fP", "(4.0 ± 0.1) second²"), + (".1fL", r"\left(4.0 \pm 0.1\right)\ \mathrm{second}^{2}"), + (".1fH", "(4.0 ± 0.1) second2"), + (".1fC", "(4.0+/-0.1) second**2"), + (".1fLx", r"\SI{4.0 +- 0.1}{\second\squared}"), + ], + ) + def test_format(self, func_registry, spec, expected): + Q_ = func_registry.Quantity + v, u = Q_(4.0, "s ** 2"), Q_(0.1, "s ** 2") + m = func_registry.Measurement(v, u) + assert format(m, spec) == expected + + @pytest.mark.parametrize( + "spec, expected", + [ + ("uS", "0.200(10) second ** 2"), + (".3uS", "0.2000(100) second ** 2"), + (".3uSP", "0.2000(100) second²"), + (".3uSL", r"0.2000\left(100\right)\ \mathrm{second}^{2}"), + (".3uSH", "0.2000(100) second2"), + (".3uSC", "0.2000(100) second**2"), + ], + ) + def test_format_paru(self, func_registry, spec, expected): + Q_ = func_registry.Quantity + v, u = Q_(0.20, "s ** 2"), Q_(0.01, "s ** 2") + m = func_registry.Measurement(v, u) + assert format(m, spec) == expected + + @pytest.mark.parametrize( + "spec, expected", + [ + (".3u", "(0.2000 +/- 0.0100) second ** 2"), + (".3uP", "(0.2000 ± 0.0100) second²"), + (".3uL", r"\left(0.2000 \pm 0.0100\right)\ \mathrm{second}^{2}"), + (".3uH", "(0.2000 ± 0.0100) second2"), + (".3uC", "(0.2000+/-0.0100) second**2"), ( - "{:.3uLx}", + ".3uLx", r"\SI{0.2000 +- 0.0100}{\second\squared}", ), - ("{:.1uLx}", r"\SI{0.20 +- 0.01}{\second\squared}"), - ): - with subtests.test(spec): - assert spec.format(m) == result - - def test_format_percu(self, subtests): - self.test_format_perce(subtests) - v, u = self.Q_(0.20, "s ** 2"), self.Q_(0.01, "s ** 2") - m = self.ureg.Measurement(v, u) - - for spec, result in ( - ("{:.1u%}", "(20 +/- 1)% second ** 2"), - ("{:.1u%P}", "(20 ± 1)% second²"), - ("{:.1u%L}", r"\left(20 \pm 1\right) \%\ \mathrm{second}^{2}"), - ("{:.1u%H}", "(20 ± 1)% second2"), - ("{:.1u%C}", "(20+/-1)% second**2"), - ): - with subtests.test(spec): - assert spec.format(m) == result - - def test_format_perce(self, subtests): - v, u = self.Q_(0.20, "s ** 2"), self.Q_(0.01, "s ** 2") - m = self.ureg.Measurement(v, u) - for spec, result in ( - ("{:.1ue}", "(2.0 +/- 0.1)e-01 second ** 2"), - ("{:.1ueP}", "(2.0 ± 0.1)×10⁻¹ second²"), + (".1uLx", r"\SI{0.20 +- 0.01}{\second\squared}"), + ], + ) + def test_format_u(self, func_registry, spec, expected): + 
Q_ = func_registry.Quantity + v, u = Q_(0.20, "s ** 2"), Q_(0.01, "s ** 2") + m = func_registry.Measurement(v, u) + assert format(m, spec) == expected + + @pytest.mark.parametrize( + "spec, expected", + [ + (".1u%", "(20 +/- 1)% second ** 2"), + (".1u%P", "(20 ± 1)% second²"), + (".1u%L", r"\left(20 \pm 1\right) \%\ \mathrm{second}^{2}"), + (".1u%H", "(20 ± 1)% second2"), + (".1u%C", "(20+/-1)% second**2"), + ], + ) + def test_format_percu(self, func_registry, spec, expected): + Q_ = func_registry.Quantity + v, u = Q_(0.20, "s ** 2"), Q_(0.01, "s ** 2") + m = func_registry.Measurement(v, u) + assert format(m, spec) == expected + + @pytest.mark.parametrize( + "spec, expected", + [ + (".1ue", "(2.0 +/- 0.1)e-01 second ** 2"), + (".1ueP", "(2.0 ± 0.1)×10⁻¹ second²"), ( - "{:.1ueL}", + ".1ueL", r"\left(2.0 \pm 0.1\right) \times 10^{-1}\ \mathrm{second}^{2}", ), - ("{:.1ueH}", "(2.0 ± 0.1)×10-1 second2"), - ("{:.1ueC}", "(2.0+/-0.1)e-01 second**2"), - ): - with subtests.test(spec): - assert spec.format(m) == result - - def test_format_exponential_pos(self, subtests): + (".1ueH", "(2.0 ± 0.1)×10-1 second2"), + (".1ueC", "(2.0+/-0.1)e-01 second**2"), + ], + ) + def test_format_perce(self, func_registry, spec, expected): + Q_ = func_registry.Quantity + v, u = Q_(0.20, "s ** 2"), Q_(0.01, "s ** 2") + m = func_registry.Measurement(v, u) + assert format(m, spec) == expected + + @pytest.mark.parametrize( + "spec, expected", + [ + ("", "(4.00 +/- 0.10)e+20 second ** 2"), + # ("!r", ""), + ("P", "(4.00 ± 0.10)×10²⁰ second²"), + ("L", r"\left(4.00 \pm 0.10\right) \times 10^{20}\ \mathrm{second}^{2}"), + ("H", "(4.00 ± 0.10)×1020 second2"), + ("C", "(4.00+/-0.10)e+20 second**2"), + ("Lx", r"\SI{4.00 +- 0.10 e+20}{\second\squared}"), + ], + ) + def test_format_exponential_pos(self, func_registry, spec, expected): # Quantities in exponential format come with their own parenthesis, don't wrap # them twice - m = self.ureg.Quantity(4e20, "s^2").plus_minus(1e19) - for spec, result in ( - ("{}", "(4.00 +/- 0.10)e+20 second ** 2"), - ("{!r}", ""), - ("{:P}", "(4.00 ± 0.10)×10²⁰ second²"), - ("{:L}", r"\left(4.00 \pm 0.10\right) \times 10^{20}\ \mathrm{second}^{2}"), - ("{:H}", "(4.00 ± 0.10)×1020 second2"), - ("{:C}", "(4.00+/-0.10)e+20 second**2"), - ("{:Lx}", r"\SI{4.00 +- 0.10 e+20}{\second\squared}"), - ): - with subtests.test(spec): - assert spec.format(m) == result - - def test_format_exponential_neg(self, subtests): - m = self.ureg.Quantity(4e-20, "s^2").plus_minus(1e-21) - for spec, result in ( - ("{}", "(4.00 +/- 0.10)e-20 second ** 2"), - ("{!r}", ""), - ("{:P}", "(4.00 ± 0.10)×10⁻²⁰ second²"), + m = func_registry.Quantity(4e20, "s^2").plus_minus(1e19) + assert format(m, spec) == expected + + @pytest.mark.parametrize( + "spec, expected", + [ + ("", "(4.00 +/- 0.10)e-20 second ** 2"), + # ("!r", ""), + ("P", "(4.00 ± 0.10)×10⁻²⁰ second²"), ( - "{:L}", + "L", r"\left(4.00 \pm 0.10\right) \times 10^{-20}\ \mathrm{second}^{2}", ), - ("{:H}", "(4.00 ± 0.10)×10-20 second2"), - ("{:C}", "(4.00+/-0.10)e-20 second**2"), - ("{:Lx}", r"\SI{4.00 +- 0.10 e-20}{\second\squared}"), - ): - with subtests.test(spec): - assert spec.format(m) == result - - def test_format_default(self, subtests): - v, u = self.Q_(4.0, "s ** 2"), self.Q_(0.1, "s ** 2") - m = self.ureg.Measurement(v, u) - - for spec, result in ( + ("H", "(4.00 ± 0.10)×10-20 second2"), + ("C", "(4.00+/-0.10)e-20 second**2"), + ("Lx", r"\SI{4.00 +- 0.10 e-20}{\second\squared}"), + ], + ) + def test_format_exponential_neg(self, func_registry, spec, expected): + 
m = func_registry.Quantity(4e-20, "s^2").plus_minus(1e-21) + assert format(m, spec) == expected + + @pytest.mark.parametrize( + "spec, expected", + [ ("", "(4.00 +/- 0.10) second ** 2"), ("P", "(4.00 ± 0.10) second²"), ("L", r"\left(4.00 \pm 0.10\right)\ \mathrm{second}^{2}"), @@ -175,10 +189,16 @@ def test_format_default(self, subtests): (".1fH", "(4.0 ± 0.1) second2"), (".1fC", "(4.0+/-0.1) second**2"), (".1fLx", r"\SI{4.0 +- 0.1}{\second\squared}"), - ): - with subtests.test(spec): - self.ureg.default_format = spec - assert "{}".format(m) == result + ], + ) + def test_format_default(self, func_registry, spec, expected): + v, u = ( + func_registry.Quantity(4.0, "s ** 2"), + func_registry.Quantity(0.1, "s ** 2"), + ) + m = func_registry.Measurement(v, u) + func_registry.default_format = spec + assert f"{m}" == expected def test_raise_build(self): v, u = self.Q_(1.0, "s"), self.Q_(0.1, "s") @@ -193,7 +213,6 @@ def test_raise_build(self): v.plus_minus(u, relative=True) def test_propagate_linear(self): - v1, u1 = self.Q_(8.0, "s"), self.Q_(0.7, "s") v2, u2 = self.Q_(5.0, "s"), self.Q_(0.6, "s") v2, u3 = self.Q_(-5.0, "s"), self.Q_(0.6, "s") @@ -241,7 +260,6 @@ def test_propagate_linear(self): assert r.value.units == ml.value.units def test_propagate_product(self): - v1, u1 = self.Q_(8.0, "s"), self.Q_(0.7, "s") v2, u2 = self.Q_(5.0, "s"), self.Q_(0.6, "s") v2, u3 = self.Q_(-5.0, "s"), self.Q_(0.6, "s") @@ -272,3 +290,11 @@ def test_measurement_comparison(self): y = self.Q_(5.0, "meter").plus_minus(0.1) assert x <= y assert not (x >= y) + + def test_tokenization(self): + from pint import pint_eval + + pint_eval.tokenizer = pint_eval.uncertainty_tokenizer + for p in pint_eval.tokenizer("8 + / - 4"): + str(p) + assert True diff --git a/pint/testsuite/test_non_int.py b/pint/testsuite/test_non_int.py index f61662234..ccf0dd6ff 100644 --- a/pint/testsuite/test_non_int.py +++ b/pint/testsuite/test_non_int.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import copy import math import operator as op @@ -17,7 +19,6 @@ class NonIntTypeTestCase(QuantityTestCase): def assert_quantity_almost_equal( self, first, second, rtol="1e-07", atol="0", msg=None ): - if isinstance(first, self.Q_): assert isinstance(first.m, (self.kwargs["non_int_type"], int)) else: @@ -42,7 +43,6 @@ def QP_(self, value, units): class _TestBasic(NonIntTypeTestCase): def test_quantity_creation(self, caplog): - value = self.kwargs["non_int_type"]("4.2") for args in ( @@ -733,7 +733,6 @@ def _test_numeric(self, unit, ifunc): # self._test_quantity_ifloordiv(unit, ifunc) def test_quantity_abs_round(self): - value = self.kwargs["non_int_type"]("4.2") x = self.Q_(-value, "meter") y = self.Q_(value, "meter") @@ -743,10 +742,10 @@ def test_quantity_abs_round(self): zy = self.Q_(fun(y.magnitude), "meter") rx = fun(x) ry = fun(y) - assert rx == zx, "while testing {0}".format(fun) - assert ry == zy, "while testing {0}".format(fun) - assert rx is not zx, "while testing {0}".format(fun) - assert ry is not zy, "while testing {0}".format(fun) + assert rx == zx, f"while testing {fun}" + assert ry == zy, f"while testing {fun}" + assert rx is not zx, f"while testing {fun}" + assert ry is not zy, f"while testing {fun}" def test_quantity_float_complex(self): x = self.QP_("-4.2", None) @@ -1096,7 +1095,7 @@ def test_division_with_scalar(self, input_tuple, expected_output): else: in1, in2 = self.kwargs["non_int_type"](in1), self.QP_(*in2) input_tuple = in1, in2 # update input_tuple for better tracebacks - expected_copy = expected_output[:] + 
expected_copy = expected_output.copy() for i, mode in enumerate([False, True]): self.ureg.autoconvert_offset_to_baseunit = mode if expected_copy[i] == "error": @@ -1133,14 +1132,14 @@ def test_division_with_scalar(self, input_tuple, expected_output): def test_exponentiation(self, input_tuple, expected_output): self.ureg.default_as_delta = False in1, in2 = input_tuple - if type(in1) is tuple and type(in2) is tuple: + if type(in1) is type(in2) is tuple: in1, in2 = self.QP_(*in1), self.QP_(*in2) - elif not type(in1) is tuple and type(in2) is tuple: + elif type(in1) is not tuple and type(in2) is tuple: in1, in2 = self.kwargs["non_int_type"](in1), self.QP_(*in2) else: in1, in2 = self.QP_(*in1), self.kwargs["non_int_type"](in2) input_tuple = in1, in2 - expected_copy = expected_output[:] + expected_copy = expected_output.copy() for i, mode in enumerate([False, True]): self.ureg.autoconvert_offset_to_baseunit = mode if expected_copy[i] == "error": @@ -1156,48 +1155,39 @@ def test_exponentiation(self, input_tuple, expected_output): class TestNonIntTypeQuantityFloat(_TestBasic): - kwargs = dict(non_int_type=float) SUPPORTS_NAN = True class TestNonIntTypeQuantityBasicMathFloat(_TestQuantityBasicMath): - kwargs = dict(non_int_type=float) class TestNonIntTypeOffsetUnitMathFloat(_TestOffsetUnitMath): - kwargs = dict(non_int_type=float) class TestNonIntTypeQuantityDecimal(_TestBasic): - kwargs = dict(non_int_type=Decimal) SUPPORTS_NAN = True class TestNonIntTypeQuantityBasicMathDecimal(_TestQuantityBasicMath): - kwargs = dict(non_int_type=Decimal) class TestNonIntTypeOffsetUnitMathDecimal(_TestOffsetUnitMath): - kwargs = dict(non_int_type=Decimal) class TestNonIntTypeQuantityFraction(_TestBasic): - kwargs = dict(non_int_type=Fraction) SUPPORTS_NAN = False class TestNonIntTypeQuantityBasicMathFraction(_TestQuantityBasicMath): - kwargs = dict(non_int_type=Fraction) class TestNonIntTypeOffsetUnitMathFraction(_TestOffsetUnitMath): - kwargs = dict(non_int_type=Fraction) diff --git a/pint/testsuite/test_numpy.py b/pint/testsuite/test_numpy.py index 83448ce0f..722dcc4f5 100644 --- a/pint/testsuite/test_numpy.py +++ b/pint/testsuite/test_numpy.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import copy import operator as op import pickle @@ -222,7 +224,6 @@ def test_concat_stack(self, subtests): def test_block_column_stack(self, subtests): for func in (np.block, np.column_stack): with subtests.test(func=func): - helpers.assert_quantity_equal( func([self.q[:, 0], self.q[:, 1]]), self.Q_(func([self.q[:, 0].m, self.q[:, 1].m]), self.ureg.m), @@ -287,6 +288,11 @@ def test_broadcast_arrays(self): result = np.broadcast_arrays(x, y, subok=True) helpers.assert_quantity_equal(result, expected) + def test_roll(self): + helpers.assert_quantity_equal( + np.roll(self.q, 1), [[4, 1], [2, 3]] * self.ureg.m + ) + class TestNumpyMathematicalFunctions(TestNumpyMethods): # https://www.numpy.org/devdocs/reference/routines.math.html @@ -304,7 +310,7 @@ def test_unwrap(self): @helpers.requires_array_function_protocol() def test_fix(self): - helpers.assert_quantity_equal(np.fix(3.14 * self.ureg.m), 3.0 * self.ureg.m) + helpers.assert_quantity_equal(np.fix(3.13 * self.ureg.m), 3.0 * self.ureg.m) helpers.assert_quantity_equal(np.fix(3.0 * self.ureg.m), 3.0 * self.ureg.m) helpers.assert_quantity_equal( np.fix([2.1, 2.9, -2.1, -2.9] * self.ureg.m), @@ -331,9 +337,7 @@ def test_prod_numpy_func(self): helpers.assert_quantity_equal( np.prod(self.q, axis=axis), [3, 8] * self.ureg.m**2 ) - helpers.assert_quantity_equal( - 
np.prod(self.q, where=where), 12 * self.ureg.m**3 - ) + helpers.assert_quantity_equal(np.prod(self.q, where=where), 12 * self.ureg.m**3) with pytest.raises(DimensionalityError): np.prod(self.q, axis=axis, where=where) @@ -381,12 +385,7 @@ def test_cumprod(self): def test_cumprod_numpy_func(self): with pytest.raises(DimensionalityError): np.cumprod(self.q) - with pytest.raises(DimensionalityError): - np.cumproduct(self.q) helpers.assert_quantity_equal(np.cumprod(self.q / self.ureg.m), [1, 2, 6, 24]) - helpers.assert_quantity_equal( - np.cumproduct(self.q / self.ureg.m), [1, 2, 6, 24] - ) helpers.assert_quantity_equal( np.cumprod(self.q / self.ureg.m, axis=1), [[1, 2], [3, 12]] ) @@ -439,13 +438,25 @@ def test_cross(self): np.cross(a, b), [[-15, -2, 39]] * self.ureg.kPa * self.ureg.m**2 ) + # NP2: Remove this when we only support np>=2.0 @helpers.requires_array_function_protocol() + @helpers.requires_numpy_previous_than("2.0") def test_trapz(self): helpers.assert_quantity_equal( np.trapz([1.0, 2.0, 3.0, 4.0] * self.ureg.J, dx=1 * self.ureg.m), 7.5 * self.ureg.J * self.ureg.m, ) + @helpers.requires_array_function_protocol() + # NP2: Remove this when we only support np>=2.0 + # trapezoid added in numpy 2.0 + @helpers.requires_numpy_at_least("2.0") + def test_trapezoid(self): + helpers.assert_quantity_equal( + np.trapezoid([1.0, 2.0, 3.0, 4.0] * self.ureg.J, dx=1 * self.ureg.m), + 7.5 * self.ureg.J * self.ureg.m, + ) + @helpers.requires_array_function_protocol() def test_dot(self): helpers.assert_quantity_equal( @@ -506,7 +517,7 @@ def test_power(self): arr = np.array(range(3), dtype=float) q = self.Q_(arr, "meter") - for op_ in [op.pow, op.ipow, np.power]: + for op_ in (op.pow, op.ipow, np.power): q_cp = copy.copy(q) with pytest.raises(DimensionalityError): op_(2.0, q_cp) @@ -759,9 +770,12 @@ def test_minimum(self): np.minimum(self.q, self.Q_([0, 5], "m")), self.Q_([[0, 2], [0, 4]], "m") ) + # NP2: Can remove Q_(arr).ptp test when we only support numpy>=2 def test_ptp(self): - assert self.q.ptp() == 3 * self.ureg.m + if not np.lib.NumpyVersion(np.__version__) >= "2.0.0b1": + assert self.q.ptp() == 3 * self.ureg.m + # NP2: Keep this test for numpy>=2, it's only arr.ptp() that is deprecated @helpers.requires_array_function_protocol() def test_ptp_numpy_func(self): helpers.assert_quantity_equal(np.ptp(self.q, axis=0), [2, 2] * self.ureg.m) @@ -806,7 +820,7 @@ def test_round_numpy_func(self): np.around(1.0275 * self.ureg.m, decimals=2), 1.03 * self.ureg.m ) helpers.assert_quantity_equal( - np.round_(1.0275 * self.ureg.m, decimals=2), 1.03 * self.ureg.m + np.round(1.0275 * self.ureg.m, decimals=2), 1.03 * self.ureg.m ) def test_trace(self): @@ -928,7 +942,7 @@ def test_setitem(self): q[:] = 1 * self.ureg.m helpers.assert_quantity_equal(q, [[1, 1], [1, 1]] * self.ureg.m) - # check and see that dimensionless num bers work correctly + # check and see that dimensionless numbers work correctly q = [0, 1, 2, 3] * self.ureg.dimensionless q[0] = 1 helpers.assert_quantity_equal(q, np.asarray([1, 1, 2, 3])) @@ -949,6 +963,22 @@ def test_setitem(self): assert not w assert q.mask[0] + def test_setitem_mixed_masked(self): + masked = np.ma.array( + [ + 1, + 2, + ], + mask=[True, False], + ) + q = self.Q_(np.ones(shape=(2,)), "m") + with pytest.raises(DimensionalityError): + q[:] = masked + + masked_q = self.Q_(masked, "mm") + q[:] = masked_q + helpers.assert_quantity_equal(q, [1.0, 0.002] * self.ureg.m) + def test_iterator(self): for q, v in zip(self.q.flatten(), [1, 2, 3, 4]): assert q == v * self.ureg.m @@ 
-1001,6 +1031,11 @@ def test_shape(self): u.shape = 4, 3 assert u.magnitude.shape == (4, 3) + def test_dtype(self): + u = self.Q_(np.arange(12, dtype="uint32")) + + assert u.dtype == "uint32" + @helpers.requires_array_function_protocol() def test_shape_numpy_func(self): assert np.shape(self.q) == (2, 2) @@ -1049,6 +1084,10 @@ def test_isclose_numpy_func(self): self.assertNDArrayEqual( np.isclose(self.q, q2), np.array([[False, True], [True, False]]) ) + self.assertNDArrayEqual( + np.isclose(self.q, q2, atol=1e-5 * self.ureg.mm, rtol=1e-7), + np.array([[False, True], [True, False]]), + ) self.assertNDArrayEqual( np.isclose(self.q, q2, atol=1e-5, rtol=1e-7), np.array([[False, True], [True, False]]), @@ -1189,8 +1228,10 @@ def test_copyto(self): helpers.assert_quantity_equal(q, self.Q_([[2, 2], [6, 4]], "m")) np.copyto(q, 0, where=[[False, False], [True, False]]) helpers.assert_quantity_equal(q, self.Q_([[2, 2], [0, 4]], "m")) - np.copyto(a, q) - self.assertNDArrayEqual(a, np.array([[2, 2], [0, 4]])) + with pytest.warns(UnitStrippedWarning): + # as a is not quantity, the unit is stripped. + np.copyto(a, q) + self.assertNDArrayEqual(a, np.array([[2, 2], [0, 4]])) @helpers.requires_array_function_protocol() def test_tile(self): @@ -1222,6 +1263,24 @@ def test_insert(self): np.array([[1, 0, 2], [3, 0, 4]]) * self.ureg.m, ) + @helpers.requires_array_function_protocol() + def test_delete(self): + q = self.Q_(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), "m") + helpers.assert_quantity_equal( + np.delete(q, 1, axis=0), + np.array([[1, 2, 3, 4], [9, 10, 11, 12]]) * self.ureg.m, + ) + + helpers.assert_quantity_equal( + np.delete(q, np.s_[::2], 1), + np.array([[2, 4], [6, 8], [10, 12]]) * self.ureg.m, + ) + + helpers.assert_quantity_equal( + np.delete(q, [1, 3, 5], None), + np.array([1, 3, 5, 7, 8, 9, 10, 11, 12]) * self.ureg.m, + ) + def test_ndarray_downcast(self): with pytest.warns(UnitStrippedWarning): np.asarray(self.q) @@ -1352,9 +1411,34 @@ def pad_with(vector, pad_width, iaxis, kwargs): @helpers.requires_array_function_protocol() def test_allclose(self): assert np.allclose([1e10, 1e-8] * self.ureg.m, [1.00001e10, 1e-9] * self.ureg.m) + assert np.allclose( + [1e10, 1e-8] * self.ureg.m, [1.00001e13, 1e-6] * self.ureg.mm + ) assert not np.allclose( [1e10, 1e-8] * self.ureg.m, [1.00001e10, 1e-9] * self.ureg.mm ) + assert np.allclose( + [1e10, 1e-8] * self.ureg.m, + [1.00001e10, 1e-9] * self.ureg.m, + atol=1e-8 * self.ureg.m, + ) + + assert not np.allclose([1.0, np.nan] * self.ureg.m, [1.0, np.nan] * self.ureg.m) + + assert np.allclose( + [1.0, np.nan] * self.ureg.m, [1.0, np.nan] * self.ureg.m, equal_nan=True + ) + + assert np.allclose( + [1e10, 1e-8] * self.ureg.m, [1.00001e10, 1e-9] * self.ureg.m, atol=1e-8 + ) + + with pytest.raises(DimensionalityError): + assert np.allclose( + [1e10, 1e-8] * self.ureg.m, + [1.00001e10, 1e-9] * self.ureg.m, + atol=1e-8 * self.ureg.s, + ) @helpers.requires_array_function_protocol() def test_intersect1d(self): @@ -1363,6 +1447,12 @@ def test_intersect1d(self): [1, 3] * self.ureg.m, ) + @helpers.requires_array_function_protocol() + def test_linalg_norm(self): + q = np.array([[3, 5, 8], [4, 12, 15]]) * self.ureg.m + expected = [5, 13, 17] * self.ureg.m + helpers.assert_quantity_equal(np.linalg.norm(q, axis=0), expected) + @pytest.mark.skip class TestBitTwiddlingUfuncs(TestUFuncs): diff --git a/pint/testsuite/test_numpy_func.py b/pint/testsuite/test_numpy_func.py index 49caa3224..28fa0d121 100644 --- a/pint/testsuite/test_numpy_func.py +++ 
b/pint/testsuite/test_numpy_func.py @@ -1,3 +1,6 @@ +from __future__ import annotations + +from contextlib import ExitStack from unittest.mock import patch import pytest @@ -191,3 +194,99 @@ def test_numpy_wrap(self): numpy_wrap("invalid", np.ones, [], {}, []) # TODO (#905 follow-up): test that NotImplemented is returned when upcast types # present + + @helpers.requires_numpy_previous_than("2.0") + def test_trapz(self): + with ExitStack() as stack: + stack.callback( + setattr, + self.ureg, + "autoconvert_offset_to_baseunit", + self.ureg.autoconvert_offset_to_baseunit, + ) + self.ureg.autoconvert_offset_to_baseunit = True + t = self.Q_(np.array([0.0, 4.0, 8.0]), "degC") + z = self.Q_(np.array([0.0, 2.0, 4.0]), "m") + helpers.assert_quantity_equal( + np.trapz(t, x=z), self.Q_(1108.6, "kelvin meter") + ) + + @helpers.requires_numpy_at_least("2.0") + def test_trapezoid(self): + with ExitStack() as stack: + stack.callback( + setattr, + self.ureg, + "autoconvert_offset_to_baseunit", + self.ureg.autoconvert_offset_to_baseunit, + ) + self.ureg.autoconvert_offset_to_baseunit = True + t = self.Q_(np.array([0.0, 4.0, 8.0]), "degC") + z = self.Q_(np.array([0.0, 2.0, 4.0]), "m") + helpers.assert_quantity_equal( + np.trapezoid(t, x=z), self.Q_(1108.6, "kelvin meter") + ) + + @helpers.requires_numpy_previous_than("2.0") + def test_trapz_no_autoconvert(self): + t = self.Q_(np.array([0.0, 4.0, 8.0]), "degC") + z = self.Q_(np.array([0.0, 2.0, 4.0]), "m") + with pytest.raises(OffsetUnitCalculusError): + np.trapz(t, x=z) + + @helpers.requires_numpy_at_least("2.0") + def test_trapezoid_no_autoconvert(self): + t = self.Q_(np.array([0.0, 4.0, 8.0]), "degC") + z = self.Q_(np.array([0.0, 2.0, 4.0]), "m") + with pytest.raises(OffsetUnitCalculusError): + np.trapezoid(t, x=z) + + def test_correlate(self): + a = self.Q_(np.array([1, 2, 3]), "m") + v = self.Q_(np.array([0, 1, 0.5]), "s") + res = np.correlate(a, v, "full") + ref = np.array([0.5, 2.0, 3.5, 3.0, 0.0]) + assert np.array_equal(res.magnitude, ref) + assert res.units == "meter * second" + + def test_dot(self): + with ExitStack() as stack: + stack.callback( + setattr, + self.ureg, + "autoconvert_offset_to_baseunit", + self.ureg.autoconvert_offset_to_baseunit, + ) + self.ureg.autoconvert_offset_to_baseunit = True + t = self.Q_(np.array([0.0, 5.0, 10.0]), "degC") + z = self.Q_(np.array([1.0, 2.0, 3.0]), "m") + helpers.assert_quantity_almost_equal( + np.dot(t, z), self.Q_(1678.9, "kelvin meter") + ) + + def test_dot_no_autoconvert(self): + t = self.Q_(np.array([0.0, 5.0, 10.0]), "degC") + z = self.Q_(np.array([1.0, 2.0, 3.0]), "m") + with pytest.raises(OffsetUnitCalculusError): + np.dot(t, z) + + def test_cross(self): + with ExitStack() as stack: + stack.callback( + setattr, + self.ureg, + "autoconvert_offset_to_baseunit", + self.ureg.autoconvert_offset_to_baseunit, + ) + self.ureg.autoconvert_offset_to_baseunit = True + t = self.Q_(np.array([0.0, 5.0, 10.0]), "degC") + z = self.Q_(np.array([1.0, 2.0, 3.0]), "m") + helpers.assert_quantity_almost_equal( + np.cross(t, z), self.Q_([268.15, -536.3, 268.15], "kelvin meter") + ) + + def test_cross_no_autoconvert(self): + t = self.Q_(np.array([0.0, 5.0, 10.0]), "degC") + z = self.Q_(np.array([1.0, 2.0, 3.0]), "m") + with pytest.raises(OffsetUnitCalculusError): + np.cross(t, z) diff --git a/pint/testsuite/test_pint_eval.py b/pint/testsuite/test_pint_eval.py index bed81057d..09433d133 100644 --- a/pint/testsuite/test_pint_eval.py +++ b/pint/testsuite/test_pint_eval.py @@ -1,74 +1,173 @@ +from __future__ import annotations 
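Illustrative sketch (not part of the patch): the rewritten test_pint_eval.py below parametrizes one expression table over both plain_tokenizer and uncertainty_tokenizer and compares the parenthesized form of the parse tree, mirroring the new _pre helper:

    from pint.pint_eval import build_eval_tree, plain_tokenizer

    # "2 * 3 + 4" honours operator precedence, matching the expectation table below
    tree = build_eval_tree(plain_tokenizer("2 * 3 + 4"))
    assert tree.to_string() == "((2 * 3) + 4)"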
+ import pytest -from pint.compat import tokenizer -from pint.pint_eval import build_eval_tree +from pint.pint_eval import build_eval_tree, plain_tokenizer, uncertainty_tokenizer +from pint.util import string_preprocessor + +TOKENIZERS = (plain_tokenizer, uncertainty_tokenizer) + +def _pre(tokenizer, input_text: str, preprocess: bool = False) -> str: + if preprocess: + input_text = string_preprocessor(input_text) + return build_eval_tree(tokenizer(input_text)).to_string() + + +@pytest.mark.parametrize("tokenizer", TOKENIZERS) +@pytest.mark.parametrize( + ("input_text", "parsed"), + ( + ("3", "3"), + ("1 + 2", "(1 + 2)"), + ("1 - 2", "(1 - 2)"), + ("2 * 3 + 4", "((2 * 3) + 4)"), # order of operations + ("2 * (3 + 4)", "(2 * (3 + 4))"), # parentheses + ( + "1 + 2 * 3 ** (4 + 3 / 5)", + "(1 + (2 * (3 ** (4 + (3 / 5)))))", + ), # more order of operations + ( + "1 * ((3 + 4) * 5)", + "(1 * ((3 + 4) * 5))", + ), # nested parentheses at beginning + ("1 * (5 * (3 + 4))", "(1 * (5 * (3 + 4)))"), # nested parentheses at end + ( + "1 * (5 * (3 + 4) / 6)", + "(1 * ((5 * (3 + 4)) / 6))", + ), # nested parentheses in middle + ("-1", "(- 1)"), # unary + ("3 * -1", "(3 * (- 1))"), # unary + ("3 * --1", "(3 * (- (- 1)))"), # double unary + ("3 * -(2 + 4)", "(3 * (- (2 + 4)))"), # parenthetical unary + ("3 * -((2 + 4))", "(3 * (- (2 + 4)))"), # parenthetical unary + # implicit op + ("3 4", "(3 4)"), + # implicit op, then parentheses + ("3 (2 + 4)", "(3 (2 + 4))"), + # parentheses, then implicit + ("(3 ** 4 ) 5", "((3 ** 4) 5)"), + # implicit op, then exponentiation + ("3 4 ** 5", "(3 (4 ** 5))"), + # implicit op, then addition + ("3 4 + 5", "((3 4) + 5)"), + # power followed by implicit + ("3 ** 4 5", "((3 ** 4) 5)"), + # implicit with parentheses + ("3 (4 ** 5)", "(3 (4 ** 5))"), + # exponent with e + ("3e-1", "3e-1"), + # multiple units with exponents + ("kg ** 1 * s ** 2", "((kg ** 1) * (s ** 2))"), + # multiple units with neg exponents + ("kg ** -1 * s ** -2", "((kg ** (- 1)) * (s ** (- 2)))"), + # multiple units with neg exponents + ("kg^-1 * s^-2", "((kg ^ (- 1)) * (s ^ (- 2)))"), + # multiple units with neg exponents, implicit op + ("kg^-1 s^-2", "((kg ^ (- 1)) (s ^ (- 2)))"), + # nested power + ("2 ^ 3 ^ 2", "(2 ^ (3 ^ 2))"), + # nested power + ("gram * second / meter ** 2", "((gram * second) / (meter ** 2))"), + # nested power + ("gram / meter ** 2 / second", "((gram / (meter ** 2)) / second)"), + # units should behave like numbers, so we don't need a bunch of extra tests for them + # implicit op, then addition + ("3 kg + 5", "((3 kg) + 5)"), + ("(5 % 2) m", "((5 % 2) m)"), # mod operator + ("(5 // 2) m", "((5 // 2) m)"), # floordiv operator + ), +) +def test_build_eval_tree(tokenizer, input_text: str, parsed: str): + assert _pre(tokenizer, input_text) == parsed -class TestPintEval: - def _test_one(self, input_text, parsed): - assert build_eval_tree(tokenizer(input_text)).to_string() == parsed - @pytest.mark.parametrize( - ("input_text", "parsed"), +@pytest.mark.parametrize("tokenizer", TOKENIZERS) +@pytest.mark.parametrize( + ("input_text", "parsed"), + ( + ("3", "3"), + ("1 + 2", "(1 + 2)"), + ("1 - 2", "(1 - 2)"), + ("2 * 3 + 4", "((2 * 3) + 4)"), # order of operations + ("2 * (3 + 4)", "(2 * (3 + 4))"), # parentheses + ( + "1 + 2 * 3 ** (4 + 3 / 5)", + "(1 + (2 * (3 ** (4 + (3 / 5)))))", + ), # more order of operations ( - ("3", "3"), - ("1 + 2", "(1 + 2)"), - ("2 * 3 + 4", "((2 * 3) + 4)"), # order of operations - ("2 * (3 + 4)", "(2 * (3 + 4))"), # parentheses - ( - "1 + 2 * 3 ** (4 + 3 / 
5)", - "(1 + (2 * (3 ** (4 + (3 / 5)))))", - ), # more order of operations - ( - "1 * ((3 + 4) * 5)", - "(1 * ((3 + 4) * 5))", - ), # nested parentheses at beginning - ("1 * (5 * (3 + 4))", "(1 * (5 * (3 + 4)))"), # nested parentheses at end - ( - "1 * (5 * (3 + 4) / 6)", - "(1 * ((5 * (3 + 4)) / 6))", - ), # nested parentheses in middle - ("-1", "(- 1)"), # unary - ("3 * -1", "(3 * (- 1))"), # unary - ("3 * --1", "(3 * (- (- 1)))"), # double unary - ("3 * -(2 + 4)", "(3 * (- (2 + 4)))"), # parenthetical unary - ("3 * -((2 + 4))", "(3 * (- (2 + 4)))"), # parenthetical unary - # implicit op - ("3 4", "(3 4)"), - # implicit op, then parentheses - ("3 (2 + 4)", "(3 (2 + 4))"), - # parentheses, then implicit - ("(3 ** 4 ) 5", "((3 ** 4) 5)"), - # implicit op, then exponentiation - ("3 4 ** 5", "(3 (4 ** 5))"), - # implicit op, then addition - ("3 4 + 5", "((3 4) + 5)"), - # power followed by implicit - ("3 ** 4 5", "((3 ** 4) 5)"), - # implicit with parentheses - ("3 (4 ** 5)", "(3 (4 ** 5))"), - # exponent with e - ("3e-1", "3e-1"), - # multiple units with exponents - ("kg ** 1 * s ** 2", "((kg ** 1) * (s ** 2))"), - # multiple units with neg exponents - ("kg ** -1 * s ** -2", "((kg ** (- 1)) * (s ** (- 2)))"), - # multiple units with neg exponents - ("kg^-1 * s^-2", "((kg ^ (- 1)) * (s ^ (- 2)))"), - # multiple units with neg exponents, implicit op - ("kg^-1 s^-2", "((kg ^ (- 1)) (s ^ (- 2)))"), - # nested power - ("2 ^ 3 ^ 2", "(2 ^ (3 ^ 2))"), - # nested power - ("gram * second / meter ** 2", "((gram * second) / (meter ** 2))"), - # nested power - ("gram / meter ** 2 / second", "((gram / (meter ** 2)) / second)"), - # units should behave like numbers, so we don't need a bunch of extra tests for them - # implicit op, then addition - ("3 kg + 5", "((3 kg) + 5)"), - ("(5 % 2) m", "((5 % 2) m)"), # mod operator - ("(5 // 2) m", "((5 // 2) m)"), # floordiv operator - ), - ) - def test_build_eval_tree(self, input_text, parsed): - self._test_one(input_text, parsed) + "1 * ((3 + 4) * 5)", + "(1 * ((3 + 4) * 5))", + ), # nested parentheses at beginning + ("1 * (5 * (3 + 4))", "(1 * (5 * (3 + 4)))"), # nested parentheses at end + ( + "1 * (5 * (3 + 4) / 6)", + "(1 * ((5 * (3 + 4)) / 6))", + ), # nested parentheses in middle + ("-1", "(- 1)"), # unary + ("3 * -1", "(3 * (- 1))"), # unary + ("3 * --1", "(3 * (- (- 1)))"), # double unary + ("3 * -(2 + 4)", "(3 * (- (2 + 4)))"), # parenthetical unary + ("3 * -((2 + 4))", "(3 * (- (2 + 4)))"), # parenthetical unary + # implicit op + ("3 4", "(3 * 4)"), + # implicit op, then parentheses + ("3 (2 + 4)", "(3 * (2 + 4))"), + # parentheses, then implicit + ("(3 ** 4 ) 5", "((3 ** 4) * 5)"), + # implicit op, then exponentiation + ("3 4 ** 5", "(3 * (4 ** 5))"), + # implicit op, then addition + ("3 4 + 5", "((3 * 4) + 5)"), + # power followed by implicit + ("3 ** 4 5", "((3 ** 4) * 5)"), + # implicit with parentheses + ("3 (4 ** 5)", "(3 * (4 ** 5))"), + # exponent with e + ("3e-1", "3e-1"), + # multiple units with exponents + ("kg ** 1 * s ** 2", "((kg ** 1) * (s ** 2))"), + # multiple units with neg exponents + ("kg ** -1 * s ** -2", "((kg ** (- 1)) * (s ** (- 2)))"), + # multiple units with neg exponents + ("kg^-1 * s^-2", "((kg ** (- 1)) * (s ** (- 2)))"), + # multiple units with neg exponents, implicit op + ("kg^-1 s^-2", "((kg ** (- 1)) * (s ** (- 2)))"), + # nested power + ("2 ^ 3 ^ 2", "(2 ** (3 ** 2))"), + # nested power + ("gram * second / meter ** 2", "((gram * second) / (meter ** 2))"), + # nested power + ("gram / meter ** 2 / second", "((gram / 
(meter ** 2)) / second)"), + # units should behave like numbers, so we don't need a bunch of extra tests for them + # implicit op, then addition + ("3 kg + 5", "((3 * kg) + 5)"), + ("(5 % 2) m", "((5 % 2) * m)"), # mod operator + ("(5 // 2) m", "((5 // 2) * m)"), # floordiv operator + ), +) +def test_preprocessed_eval_tree(tokenizer, input_text: str, parsed: str): + assert _pre(tokenizer, input_text, True) == parsed + + +@pytest.mark.parametrize( + ("input_text", "parsed"), + ( + ("( 8.0 + / - 4.0 ) e6 m", "((8.0e6 +/- 4.0e6) m)"), + ("( 8.0 ± 4.0 ) e6 m", "((8.0e6 +/- 4.0e6) m)"), + ("( 8.0 + / - 4.0 ) e-6 m", "((8.0e-6 +/- 4.0e-6) m)"), + ("( nan + / - 0 ) e6 m", "((nan +/- 0) m)"), + ("( nan ± 4.0 ) m", "((nan +/- 4.0) m)"), + ("8.0 + / - 4.0 m", "((8.0 +/- 4.0) m)"), + ("8.0 ± 4.0 m", "((8.0 +/- 4.0) m)"), + ("8.0(4)m", "((8.0 +/- 0.4) m)"), + ("8.0(.4)m", "((8.0 +/- .4) m)"), + # ("8.0(-4)m", None), # TODO: this should raise an exception + ), +) +def test_uncertainty(input_text: str, parsed: str): + if parsed is None: + with pytest.raises(): + assert _pre(uncertainty_tokenizer, input_text) + else: + assert _pre(uncertainty_tokenizer, input_text) == parsed diff --git a/pint/testsuite/test_pitheorem.py b/pint/testsuite/test_pitheorem.py index a49588225..665d5798e 100644 --- a/pint/testsuite/test_pitheorem.py +++ b/pint/testsuite/test_pitheorem.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import itertools import logging @@ -8,7 +10,6 @@ # TODO: do not subclass from QuantityTestCase class TestPiTheorem(QuantityTestCase): def test_simple(self, caplog): - # simple movement with caplog.at_level(logging.DEBUG): assert pi_theorem({"V": "m/s", "T": "s", "L": "m"}) == [ diff --git a/pint/testsuite/test_quantity.py b/pint/testsuite/test_quantity.py index 6da4f34b4..1b5020d84 100644 --- a/pint/testsuite/test_quantity.py +++ b/pint/testsuite/test_quantity.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import copy import datetime import logging @@ -12,11 +14,11 @@ from pint import ( DimensionalityError, OffsetUnitCalculusError, - Quantity, UnitRegistry, get_application_registry, ) from pint.compat import np +from pint.errors import UndefinedBehavior from pint.facets.plain.unit import UnitsContainer from pint.testsuite import QuantityTestCase, assert_no_warnings, helpers @@ -29,7 +31,6 @@ def __init__(self, q): # TODO: do not subclass from QuantityTestCase class TestQuantity(QuantityTestCase): - kwargs = dict(autoconvert_offset_to_baseunit=False) def test_quantity_creation(self, caplog): @@ -59,6 +60,11 @@ def test_quantity_creation(self, caplog): assert 4.2 * self.ureg.meter == self.Q_(4.2, 2 * self.ureg.meter) assert len(caplog.records) == 1 + def test_round(self): + x = self.Q_(1.1, "kg") + assert isinstance(round(x).magnitude, int) + assert isinstance(round(x, 0).magnitude, float) + def test_quantity_with_quantity(self): x = self.Q_(4.2, "m") assert self.Q_(x, "m").magnitude == 4.2 @@ -80,8 +86,11 @@ def test_quantity_comparison(self): j = self.Q_(5, "meter*meter") # Include a comparison to the application registry - k = 5 * get_application_registry().meter - m = Quantity(5, "meter") # Include a comparison to a directly created Quantity + 5 * get_application_registry().meter + # Include a comparison to a directly created Quantity + from pint import Quantity + + Quantity(5, "meter") # identity for single object assert x == x @@ -100,11 +109,12 @@ def test_quantity_comparison(self): assert x != z assert x < z + # TODO: Reinstate this in the near future. 
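# Editor's note: the assertions commented out below compared quantities built
# from different registries. For context, a minimal illustrative sketch of the
# behaviour they exercised (pint refuses to order quantities that belong to
# unrelated registries):
import pint

ureg_a = pint.UnitRegistry()
ureg_b = pint.UnitRegistry()
try:
    ureg_a.Quantity(5, "meter") > ureg_b.Quantity(5, "meter")
except ValueError as exc:
    # comparing across unrelated registries is rejected
    print(exc)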
# Compare with items to the separate application registry - assert k >= m # These should both be from application registry - if z._REGISTRY != m._REGISTRY: - with pytest.raises(ValueError): - z > m # One from local registry, one from application registry + # assert k >= m # These should both be from application registry + # if z._REGISTRY._subregistry != m._REGISTRY._subregistry: + # with pytest.raises(ValueError): + # z > m # One from local registry, one from application registry assert z != j @@ -170,7 +180,7 @@ def test_quantity_format(self, subtests): ("{:Lx}", r"\SI[]{4.12345678}{\kilo\gram\meter\squared\per\second}"), ): with subtests.test(spec): - assert spec.format(x) == result + assert spec.format(x) == result, spec # Check the special case that prevents e.g. '3 1 / second' x = self.Q_(3, UnitsContainer(second=-1)) @@ -260,15 +270,16 @@ def test_default_formatting(self, subtests): ("C~", "4.12345678 kg*m**2/s"), ): with subtests.test(spec): - ureg.default_format = spec + ureg.formatter.default_format = spec assert f"{x}" == result def test_formatting_override_default_units(self): ureg = UnitRegistry() - ureg.default_format = "~" + ureg.formatter.default_format = "~" x = ureg.Quantity(4, "m ** 2") assert f"{x:dP}" == "4 meter²" + ureg.separate_format_defaults = None with pytest.warns(DeprecationWarning): assert f"{x:d}" == "4 meter ** 2" @@ -278,10 +289,11 @@ def test_formatting_override_default_units(self): def test_formatting_override_default_magnitude(self): ureg = UnitRegistry() - ureg.default_format = ".2f" + ureg.formatter.default_format = ".2f" x = ureg.Quantity(4, "m ** 2") assert f"{x:dP}" == "4 meter²" + ureg.separate_format_defaults = None with pytest.warns(DeprecationWarning): assert f"{x:D}" == "4 meter ** 2" @@ -297,7 +309,7 @@ def test_exponent_formatting(self): assert f"{x:~Lx}" == r"\SI[]{1e+20}{\meter}" assert f"{x:~P}" == r"1×10²⁰ m" - x /= 1e40 + x = ureg.Quantity(1e-20, "meter") assert f"{x:~H}" == r"1×10-20 m" assert f"{x:~L}" == r"1\times 10^{-20}\ \mathrm{m}" assert f"{x:~Lx}" == r"\SI[]{1e-20}{\meter}" @@ -327,7 +339,7 @@ def pretty(cls, data): ) x._repr_pretty_(Pretty, False) assert "".join(alltext) == "3.5 kilogram·meter²/second" - ureg.default_format = "~" + ureg.formatter.default_format = "~" assert x._repr_html_() == "3.5 kg m2/s" assert ( x._repr_latex_() == r"$3.5\ \frac{\mathrm{kg} \cdot " @@ -370,9 +382,77 @@ def test_convert(self): round(abs(self.Q_("2 second").to("millisecond").magnitude - 2000), 7) == 0 ) + @helpers.requires_mip + def test_to_preferred(self): + ureg = self.ureg + Q_ = self.Q_ + + ureg.define("pound_force_per_square_foot = 47.8803 pascals = psf") + ureg.define("pound_mass = 0.45359237 kg = lbm") + + preferred_units = [ + ureg.ft, # distance L + ureg.slug, # mass M + ureg.s, # duration T + ureg.rankine, # temperature Θ + ureg.lbf, # force L M T^-2 + ureg.psf, # pressure M L^−1 T^−2 + ureg.lbm * ureg.ft**-3, # density M L^-3 + ureg.W, # power L^2 M T^-3 + ] + + temp = (Q_("1 lbf") * Q_("1 m/s")).to_preferred(preferred_units) + assert temp.units == ureg.W + + temp = (Q_(" 1 lbf*m")).to_preferred(preferred_units) + # would prefer this to be repeatable, but mip doesn't guarantee that currently + assert temp.units in (ureg.W * ureg.s, ureg.ft * ureg.lbf) + + temp = Q_("1 kg").to_preferred(preferred_units) + assert temp.units == ureg.slug + + result = Q_("1 slug/m**3").to_preferred(preferred_units) + assert result.units == ureg.lbm * ureg.ft**-3 + + result = Q_("1 amp").to_preferred(preferred_units) + assert result.units == ureg.amp + + 
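# Editor's note: ``to_preferred`` picks, from a list of preferred units, the
# combination whose dimensionality matches the quantity; it needs the optional
# ``mip`` solver, hence the ``requires_mip`` marker. A minimal usage sketch,
# mirroring the force-times-speed case at the top of this test:
import pint

ureg = pint.UnitRegistry()
preferred = [ureg.m, ureg.kg, ureg.s, ureg.N, ureg.Pa, ureg.W]
q = ureg.Quantity(1.0, "N") * ureg.Quantity(1.0, "m/s")
print(q.to_preferred(preferred))  # expected to come out in watt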
result = Q_("1 volt").to_preferred(preferred_units) + assert result.units == ureg.volts + + @helpers.requires_mip + def test_to_preferred_registry(self): + ureg = self.ureg + Q_ = self.Q_ + ureg.default_preferred_units = [ + ureg.m, # distance L + ureg.kg, # mass M + ureg.s, # duration T + ureg.N, # force L M T^-2 + ureg.Pa, # pressure M L^−1 T^−2 + ureg.W, # power L^2 M T^-3 + ] + pressure = (Q_(1, "N") * Q_("1 m**-2")).to_preferred() + assert pressure.units == ureg.Pa + + @helpers.requires_mip + def test_autoconvert_to_preferred(self): + ureg = self.ureg + Q_ = self.Q_ + ureg.autoconvert_to_preferred = True + ureg.default_preferred_units = [ + ureg.m, # distance L + ureg.kg, # mass M + ureg.s, # duration T + ureg.N, # force L M T^-2 + ureg.Pa, # pressure M L^−1 T^−2 + ureg.W, # power L^2 M T^-3 + ] + pressure = Q_(1, "N") * Q_("1 m**-2") + assert pressure.units == ureg.Pa + @helpers.requires_numpy def test_convert_numpy(self): - # Conversions with single units take a different codepath than # Conversions with more than one unit. src_dst1 = UnitsContainer(meter=1), UnitsContainer(inch=1) @@ -620,7 +700,13 @@ def test_no_ndarray_coercion_without_numpy(self): with pytest.raises(ValueError): self.Q_(1, "m").__array__() - @patch("pint.compat.upcast_types", [FakeWrapper]) + @patch( + "pint.compat.upcast_type_names", ("pint.testsuite.test_quantity.FakeWrapper",) + ) + @patch( + "pint.compat.upcast_type_map", + {"pint.testsuite.test_quantity.FakeWrapper": FakeWrapper}, + ) def test_upcast_type_rejection_on_creation(self): with pytest.raises(TypeError): self.Q_(FakeWrapper(42), "m") @@ -755,8 +841,8 @@ def test_limits_magnitudes(self): def test_nonnumeric_magnitudes(self): ureg = self.ureg x = "some string" * ureg.m - with pytest.warns(RuntimeWarning): - self.compare_quantity_compact(x, x) + with pytest.warns(UndefinedBehavior): + x.to_compact() def test_very_large_to_compact(self): # This should not raise an IndexError @@ -1000,7 +1086,6 @@ def test_nparray(self): self._test_numeric(np.ones((1, 3)), self._test_inplace) def test_quantity_abs_round(self): - x = self.Q_(-4.2, "meter") y = self.Q_(4.2, "meter") @@ -1009,10 +1094,10 @@ def test_quantity_abs_round(self): zy = self.Q_(fun(y.magnitude), "meter") rx = fun(x) ry = fun(y) - assert rx == zx, "while testing {0}".format(fun) - assert ry == zy, "while testing {0}".format(fun) - assert rx is not zx, "while testing {0}".format(fun) - assert ry is not zy, "while testing {0}".format(fun) + assert rx == zx, f"while testing {fun}" + assert ry == zy, f"while testing {fun}" + assert rx is not zx, f"while testing {fun}" + assert ry is not zy, f"while testing {fun}" def test_quantity_float_complex(self): x = self.Q_(-4.2, None) @@ -1152,14 +1237,14 @@ def test_dimensionality(self): def test_inclusion(self): dim = self.Q_(42, "meter").dimensionality assert "[length]" in dim - assert not ("[time]" in dim) + assert "[time]" not in dim dim = (self.Q_(42, "meter") / self.Q_(11, "second")).dimensionality assert "[length]" in dim assert "[time]" in dim dim = self.Q_(20.785, "J/(mol)").dimensionality for dimension in ("[length]", "[mass]", "[substance]", "[time]"): assert dimension in dim - assert not ("[angle]" in dim) + assert "[angle]" not in dim class TestQuantityWithDefaultRegistry(TestQuantity): @@ -1227,6 +1312,8 @@ def setup_class(cls): (((100, "delta_degF"), (10, "degR")), (110, "degR")), (((100, "delta_degF"), (10, "delta_degC")), (118, "delta_degF")), (((100, "delta_degF"), (10, "delta_degF")), (110, "delta_degF")), + pytest.param(((100, 
"delta_degC"), (10, "Δ°C")), (110, "delta_degC"), id="Δ°C"), + pytest.param(((100, "Δ°F"), (10, "Δ°C")), (118, "delta_degF"), id="Δ°F"), ] @pytest.mark.parametrize(("input_tuple", "expected"), additions) @@ -1306,6 +1393,8 @@ def test_inplace_addition(self, input_tuple, expected): (((100, "delta_degF"), (10, "degR")), (90, "degR")), (((100, "delta_degF"), (10, "delta_degC")), (82, "delta_degF")), (((100, "delta_degF"), (10, "delta_degF")), (90, "delta_degF")), + pytest.param(((100, "delta_degC"), (10, "Δ°C")), (90, "delta_degC"), id="Δ°C"), + pytest.param(((100, "Δ°F"), (10, "Δ°C")), (82, "delta_degF"), id="Δ°F"), ] @pytest.mark.parametrize(("input_tuple", "expected"), subtractions) @@ -1385,6 +1474,12 @@ def test_inplace_subtraction(self, input_tuple, expected): (((100, "delta_degF"), (10, "degR")), (1000, "delta_degF*degR")), (((100, "delta_degF"), (10, "delta_degC")), (1000, "delta_degF*delta_degC")), (((100, "delta_degF"), (10, "delta_degF")), (1000, "delta_degF**2")), + pytest.param( + ((100, "delta_degC"), (10, "Δ°C")), (1000, "delta_degC**2"), id="Δ°C**2" + ), + pytest.param( + ((100, "Δ°F"), (10, "Δ°C")), (1000, "delta_degF*delta_degC"), id="Δ°F*Δ°C" + ), ] @pytest.mark.parametrize(("input_tuple", "expected"), multiplications) @@ -1463,6 +1558,10 @@ def test_inplace_multiplication(self, input_tuple, expected): (((100, "delta_degF"), (10, "degR")), (10, "delta_degF/degR")), (((100, "delta_degF"), (10, "delta_degC")), (10, "delta_degF/delta_degC")), (((100, "delta_degF"), (10, "delta_degF")), (10, "")), + pytest.param(((100, "delta_degC"), (10, "Δ°C")), (10, ""), id="Δ°C/Δ°C"), + pytest.param( + ((100, "Δ°F"), (10, "Δ°C")), (10, "delta_degF/delta_degC"), id="Δ°F/Δ°C" + ), ] @pytest.mark.parametrize(("input_tuple", "expected"), divisions) @@ -1620,7 +1719,7 @@ def test_division_with_scalar(self, input_tuple, expected): else: in1, in2 = in1, self.Q_(*in2) input_tuple = in1, in2 # update input_tuple for better tracebacks - expected_copy = expected[:] + expected_copy = expected.copy() for i, mode in enumerate([False, True]): self.ureg.autoconvert_offset_to_baseunit = mode if expected_copy[i] == "error": @@ -1654,14 +1753,14 @@ def test_division_with_scalar(self, input_tuple, expected): def test_exponentiation(self, input_tuple, expected): self.ureg.default_as_delta = False in1, in2 = input_tuple - if type(in1) is tuple and type(in2) is tuple: + if type(in1) is type(in2) is tuple: in1, in2 = self.Q_(*in1), self.Q_(*in2) - elif not type(in1) is tuple and type(in2) is tuple: + elif type(in1) is not tuple and type(in2) is tuple: in2 = self.Q_(*in2) else: in1 = self.Q_(*in1) input_tuple = in1, in2 - expected_copy = expected[:] + expected_copy = expected.copy() for i, mode in enumerate([False, True]): self.ureg.autoconvert_offset_to_baseunit = mode if expected_copy[i] == "error": @@ -1692,18 +1791,18 @@ def test_exponentiation_force_ndarray(self): def test_inplace_exponentiation(self, input_tuple, expected): self.ureg.default_as_delta = False in1, in2 = input_tuple - if type(in1) is tuple and type(in2) is tuple: + if type(in1) is type(in2) is tuple: (q1v, q1u), (q2v, q2u) = in1, in2 in1 = self.Q_(*(np.array([q1v] * 2, dtype=float), q1u)) in2 = self.Q_(q2v, q2u) - elif not type(in1) is tuple and type(in2) is tuple: + elif type(in1) is not tuple and type(in2) is tuple: in2 = self.Q_(*in2) else: in1 = self.Q_(*in1) input_tuple = in1, in2 - expected_copy = expected[:] + expected_copy = expected.copy() for i, mode in enumerate([False, True]): self.ureg.autoconvert_offset_to_baseunit = mode in1_cp = 
copy.copy(in1) @@ -1835,7 +1934,10 @@ def test_equal_zero_nan_NP(self): self.Q_([0, 1, 2], "J") == np.array([0, 0, np.nan]), np.asarray([True, False, False]), ) - assert not (self.Q_(np.arange(4), "J") == np.zeros(3)) + + # This raise an exception on NumPy 1.25 as dimensions + # are different + # assert not (self.Q_(np.arange(4), "J") == np.zeros(3)) def test_offset_equal_zero(self): ureg = self.ureg @@ -1931,3 +2033,11 @@ def test_offset_autoconvert_gt_zero(self): assert q2 > 0 with pytest.raises(DimensionalityError): q1.__gt__(ureg.Quantity(0, "")) + + def test_types(self): + quantity = self.Q_(1.0, "m") + assert isinstance(quantity, self.Q_) + assert isinstance(quantity.units, self.ureg.Unit) + assert isinstance(quantity.m, float) + + assert isinstance(self.ureg.m, self.ureg.Unit) diff --git a/pint/testsuite/test_systems.py b/pint/testsuite/test_systems.py index 5b3f1ce2e..9e78a3d1e 100644 --- a/pint/testsuite/test_systems.py +++ b/pint/testsuite/test_systems.py @@ -1,8 +1,12 @@ +from __future__ import annotations + import pytest from pint import UnitRegistry from pint.testsuite import QuantityTestCase +from .helpers import internal + class TestGroup: def _build_empty_reg_root(self): @@ -13,7 +17,7 @@ def _build_empty_reg_root(self): def test_units_programmatically(self): ureg, root = self._build_empty_reg_root() - d = ureg._groups + d = internal(ureg)._groups assert root._used_groups == set() assert root._used_by == set() @@ -38,7 +42,7 @@ def test_cyclic(self): def test_groups_programmatically(self): ureg, root = self._build_empty_reg_root() - d = ureg._groups + d = internal(ureg)._groups g2 = ureg.Group("g2") assert d.keys() == {"root", "g2"} @@ -53,7 +57,7 @@ def test_simple(self): lines = ["@group mygroup", "meter = 3", "second = 2"] ureg, root = self._build_empty_reg_root() - d = ureg._groups + d = internal(ureg)._groups grp = ureg.Group.from_lines(lines, lambda x: None) @@ -221,7 +225,7 @@ def test_get_base_units(self): lines = ["@system %s using test-imperial" % sysname, "inch"] s = ureg.System.from_lines(lines, ureg.get_base_units) - ureg._systems[s.name] = s + internal(ureg)._systems[s.name] = s # base_factor, destination_units c = ureg.get_base_units("inch", system=sysname) @@ -243,7 +247,7 @@ def test_get_base_units_different_exponent(self): lines = ["@system %s using test-imperial" % sysname, "pint:meter"] s = ureg.System.from_lines(lines, ureg.get_base_units) - ureg._systems[s.name] = s + internal(ureg)._systems[s.name] = s # base_factor, destination_units c = ureg.get_base_units("inch", system=sysname) @@ -272,7 +276,7 @@ def test_get_base_units_relation(self): lines = ["@system %s using test-imperial" % sysname, "mph:meter"] s = ureg.System.from_lines(lines, ureg.get_base_units) - ureg._systems[s.name] = s + internal(ureg)._systems[s.name] = s # base_factor, destination_units c = ureg.get_base_units("inch", system=sysname) assert round(abs(c[0] - 0.056), 2) == 0 diff --git a/pint/testsuite/test_testing.py b/pint/testsuite/test_testing.py index 3116dd8aa..dfb8b0602 100644 --- a/pint/testsuite/test_testing.py +++ b/pint/testsuite/test_testing.py @@ -1,12 +1,19 @@ -import pytest +from __future__ import annotations + +from typing import Any -from pint import Quantity +import pytest from .. 
import testing np = pytest.importorskip("numpy") +class QuantityToBe(tuple[Any]): + def from_many(*args): + return QuantityToBe(args) + + @pytest.mark.parametrize( ["first", "second", "error", "message"], ( @@ -14,7 +21,7 @@ np.array([0, 1]), np.array([0, 1]), False, "", id="ndarray-None-None-equal" ), pytest.param( - Quantity(1, "m"), + QuantityToBe.from_many(1, "m"), 1, True, "The first is not dimensionless", @@ -22,73 +29,81 @@ ), pytest.param( 1, - Quantity(1, "m"), + QuantityToBe.from_many(1, "m"), True, "The second is not dimensionless", id="mixed2-int-not equal-equal", ), pytest.param( - Quantity(1, "m"), Quantity(1, "m"), False, "", id="Quantity-int-equal-equal" + QuantityToBe.from_many(1, "m"), + QuantityToBe.from_many(1, "m"), + False, + "", + id="QuantityToBe.from_many-int-equal-equal", ), pytest.param( - Quantity(1, "m"), - Quantity(1, "s"), + QuantityToBe.from_many(1, "m"), + QuantityToBe.from_many(1, "s"), True, "Units are not equal", - id="Quantity-int-equal-not equal", + id="QuantityToBe.from_many-int-equal-not equal", ), pytest.param( - Quantity(1, "m"), - Quantity(2, "m"), + QuantityToBe.from_many(1, "m"), + QuantityToBe.from_many(2, "m"), True, "Magnitudes are not equal", - id="Quantity-int-not equal-equal", + id="QuantityToBe.from_many-int-not equal-equal", ), pytest.param( - Quantity(1, "m"), - Quantity(2, "s"), + QuantityToBe.from_many(1, "m"), + QuantityToBe.from_many(2, "s"), True, "Units are not equal", - id="Quantity-int-not equal-not equal", + id="QuantityToBe.from_many-int-not equal-not equal", ), pytest.param( - Quantity(1, "m"), - Quantity(float("nan"), "m"), + QuantityToBe.from_many(1, "m"), + QuantityToBe.from_many(float("nan"), "m"), True, "Magnitudes are not equal", - id="Quantity-float-not equal-equal", + id="QuantityToBe.from_many-float-not equal-equal", ), pytest.param( - Quantity([1, 2], "m"), - Quantity([1, 2], "m"), + QuantityToBe.from_many([1, 2], "m"), + QuantityToBe.from_many([1, 2], "m"), False, "", - id="Quantity-ndarray-equal-equal", + id="QuantityToBe.from_many-ndarray-equal-equal", ), pytest.param( - Quantity([1, 2], "m"), - Quantity([1, 2], "s"), + QuantityToBe.from_many([1, 2], "m"), + QuantityToBe.from_many([1, 2], "s"), True, "Units are not equal", - id="Quantity-ndarray-equal-not equal", + id="QuantityToBe.from_many-ndarray-equal-not equal", ), pytest.param( - Quantity([1, 2], "m"), - Quantity([2, 2], "m"), + QuantityToBe.from_many([1, 2], "m"), + QuantityToBe.from_many([2, 2], "m"), True, "Magnitudes are not equal", - id="Quantity-ndarray-not equal-equal", + id="QuantityToBe.from_many-ndarray-not equal-equal", ), pytest.param( - Quantity([1, 2], "m"), - Quantity([2, 2], "s"), + QuantityToBe.from_many([1, 2], "m"), + QuantityToBe.from_many([2, 2], "s"), True, "Units are not equal", - id="Quantity-ndarray-not equal-not equal", + id="QuantityToBe.from_many-ndarray-not equal-not equal", ), ), ) -def test_assert_equal(first, second, error, message): +def test_assert_equal(sess_registry, first, second, error, message): + if isinstance(first, QuantityToBe): + first = sess_registry.Quantity(*first) + if isinstance(second, QuantityToBe): + second = sess_registry.Quantity(*second) if error: with pytest.raises(AssertionError, match=message): testing.assert_equal(first, second) diff --git a/pint/testsuite/test_umath.py b/pint/testsuite/test_umath.py index 6f32ab5b0..a555a7664 100644 --- a/pint/testsuite/test_umath.py +++ b/pint/testsuite/test_umath.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pytest from pint import 
DimensionalityError, UnitRegistry @@ -79,7 +81,7 @@ def _test1( if results is None: results = [None] * len(ok_with) for x1, res in zip(ok_with, results): - err_msg = "At {} with {}".format(func.__name__, x1) + err_msg = f"At {func.__name__} with {x1}" if output_units == "same": ou = x1.units elif isinstance(output_units, (int, float)): @@ -163,7 +165,7 @@ def _test1_2o( if results is None: results = [None] * len(ok_with) for x1, res in zip(ok_with, results): - err_msg = "At {} with {}".format(func.__name__, x1) + err_msg = f"At {func.__name__} with {x1}" qms = func(x1) if res is None: res = func(x1.magnitude) @@ -223,7 +225,7 @@ def _test2( """ for x2 in ok_with: - err_msg = "At {} with {} and {}".format(func.__name__, x1, x2) + err_msg = f"At {func.__name__} with {x1} and {x2}" if output_units == "same": ou = x1.units elif output_units == "prod": diff --git a/pint/testsuite/test_unit.py b/pint/testsuite/test_unit.py index 96db871c2..03fabe833 100644 --- a/pint/testsuite/test_unit.py +++ b/pint/testsuite/test_unit.py @@ -1,7 +1,10 @@ +from __future__ import annotations + import copy import functools import logging import math +import operator import re from contextlib import nullcontext as does_not_raise @@ -13,6 +16,8 @@ from pint.testsuite import QuantityTestCase, assert_no_warnings, helpers from pint.util import ParserHelper, UnitsContainer +from .helpers import internal + # TODO: do not subclass from QuantityTestCase class TestUnit(QuantityTestCase): @@ -54,6 +59,33 @@ def test_unit_formatting(self, subtests): with subtests.test(spec): assert spec.format(x) == result + def test_latex_escaping(self, subtests): + ureg = UnitRegistry() + ureg.define(r"percent = 1e-2 = %") + x = ureg.Unit(UnitsContainer(percent=1)) + for spec, result in { + "L": r"\mathrm{percent}", + "L~": r"\mathrm{\%}", + "Lx": r"\si[]{\percent}", + "Lx~": r"\si[]{\%}", + }.items(): + with subtests.test(spec): + ureg.formatter.default_format = spec + assert f"{x}" == result, f"Failed for {spec}, got {x} expected {result}" + # no '#' here as it's a comment char when define()ing new units + ureg.define(r"weirdunit = 1 = \~_^&%$_{}") + x = ureg.Unit(UnitsContainer(weirdunit=1)) + for spec, result in { + "L": r"\mathrm{weirdunit}", + "L~": r"\mathrm{\textbackslash \textasciitilde \_\textasciicircum \&\%\$\_\{\}}", + "Lx": r"\si[]{\weirdunit}", + # TODO: Currently makes \si[]{\\~_^&%$_{}} (broken). What do we even want this to be? 
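            # Editor's note (illustrative, not additional test data): the escaping
            # behaviour checked here can be reproduced interactively, e.g. after
            #   ureg.define(r"percent = 1e-2 = %")
            # the short symbol is LaTeX-escaped while the long name is left alone:
            #   f"{ureg.Unit('percent'):L}"  -> r"\mathrm{percent}"
            #   f"{ureg.Unit('percent'):~L}" -> r"\mathrm{\%}"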
+ # "Lx~": r"\si[]{\textbackslash \textasciitilde \_\textasciicircum \&\%\$\_\{\}}", + }.items(): + with subtests.test(spec): + ureg.formatter.default_format = spec + assert f"{x}" == result, f"Failed for {spec}, {result}" + def test_unit_default_formatting(self, subtests): ureg = UnitRegistry() x = ureg.Unit(UnitsContainer(meter=2, kilogram=1, second=-1)) @@ -72,12 +104,13 @@ def test_unit_default_formatting(self, subtests): ("C~", "kg*m**2/s"), ): with subtests.test(spec): - ureg.default_format = spec + ureg.formatter.default_format = spec assert f"{x}" == result, f"Failed for {spec}, {result}" + @pytest.mark.xfail(reason="Still not clear how default formatting will work.") def test_unit_formatting_defaults_warning(self): ureg = UnitRegistry() - ureg.default_format = "~P" + ureg.formatter.default_format = "~P" x = ureg.Unit("m / s ** 2") with pytest.warns(DeprecationWarning): @@ -103,21 +136,22 @@ def test_unit_formatting_snake_case(self, subtests): ("C~", "oil_bbl"), ): with subtests.test(spec): - ureg.default_format = spec + ureg.formatter.default_format = spec assert f"{x}" == result, f"Failed for {spec}, {result}" def test_unit_formatting_custom(self, monkeypatch): - from pint import formatting, register_unit_format - - monkeypatch.setattr(formatting, "_FORMATTERS", formatting._FORMATTERS.copy()) + from pint import register_unit_format + from pint.delegates.formatter._spec_helpers import REGISTERED_FORMATTERS @register_unit_format("new") - def format_new(unit, **options): + def format_new(unit, *args, **options): return "new format" ureg = UnitRegistry() - assert "{:new}".format(ureg.m) == "new format" + assert f"{ureg.m:new}" == "new format" + + del REGISTERED_FORMATTERS["new"] def test_ipython(self): alltext = [] @@ -127,6 +161,13 @@ class Pretty: def text(text): alltext.append(text) + @classmethod + def pretty(cls, data): + try: + data._repr_pretty_(cls, False) + except AttributeError: + alltext.append(str(data)) + ureg = UnitRegistry() x = ureg.Unit(UnitsContainer(meter=2, kilogram=1, second=-1)) assert x._repr_html_() == "kilogram meter2/second" @@ -136,7 +177,7 @@ def text(text): ) x._repr_pretty_(Pretty, False) assert "".join(alltext) == "kilogram·meter²/second" - ureg.default_format = "~" + ureg.formatter.default_format = "~" assert x._repr_html_() == "kg m2/s" assert ( x._repr_latex_() == r"$\frac{\mathrm{kg} \cdot \mathrm{m}^{2}}{\mathrm{s}}$" @@ -166,7 +207,7 @@ def test_unit_rdiv(self): ("unit", "power_ratio", "expectation", "expected_unit"), [ ("m", 2, does_not_raise(), "m**2"), - ("m", dict(), pytest.raises(TypeError), None), + ("m", {}, pytest.raises(TypeError), None), ], ) def test_unit_pow(self, unit, power_ratio, expectation, expected_unit): @@ -212,7 +253,6 @@ def test_unit_eqs(self): assert not (self.U_("byte") != self.U_("byte")) def test_unit_cmp(self): - x = self.U_("m") assert x < self.U_("km") assert x > self.U_("mm") @@ -222,17 +262,14 @@ def test_unit_cmp(self): assert y < 1e6 def test_dimensionality(self): - x = self.U_("m") assert x.dimensionality == UnitsContainer({"[length]": 1}) def test_dimensionless(self): - assert self.U_("m/mm").dimensionless assert not self.U_("m").dimensionless def test_unit_casting(self): - assert int(self.U_("m/mm")) == 1000 assert float(self.U_("mm/m")) == 1e-3 assert complex(self.U_("mm/mm")) == 1 + 0j @@ -260,7 +297,7 @@ def test_base(self): with pytest.raises(errors.RedefinitionError): ureg.define("meter = [length]") with pytest.raises(TypeError): - ureg.define(list()) + ureg.define([]) ureg.define("degC = kelvin; offset: 
273.15") def test_define(self): @@ -269,11 +306,11 @@ def test_define(self): assert len(dir(ureg)) > 0 def test_load(self): - import pkg_resources + from importlib.resources import files from .. import compat - data = pkg_resources.resource_filename(compat.__name__, "default_en.txt") + data = files(compat.__package__).joinpath("default_en.txt") ureg1 = UnitRegistry() ureg2 = UnitRegistry(data) assert dir(ureg1) == dir(ureg2) @@ -285,11 +322,11 @@ def test_default_format(self): q = ureg.meter s1 = f"{q}" s2 = f"{q:~}" - ureg.default_format = "~" + ureg.formatter.default_format = "~" s3 = f"{q}" assert s2 == s3 assert s1 != s3 - assert ureg.default_format == "~" + assert ureg.formatter.default_format == "~" def test_iterate(self): ureg = UnitRegistry() @@ -370,6 +407,16 @@ def test_parse_pretty(self): 1, UnitsContainer(meter=37, second=-4.321) ) + def test_parse_pretty_degrees(self): + for exp in ("1Δ°C", "1 Δ°C", "ΔdegC", "delta_°C"): + assert self.ureg.parse_expression(exp) == self.Q_( + 1, UnitsContainer(delta_degree_Celsius=1) + ) + assert self.ureg.parse_expression("") + assert self.ureg.parse_expression("mol °K") == self.Q_( + 1, UnitsContainer(mol=1, kelvin=1) + ) + def test_parse_factor(self): assert self.ureg.parse_expression("42*meter") == self.Q_( 42, UnitsContainer(meter=1.0) @@ -533,8 +580,7 @@ def func(x): assert f3(3.0 * ureg.centimeter) == 0.03 * ureg.centimeter assert f3(3.0 * ureg.meter) == 3.0 * ureg.centimeter - def gfunc(x, y): - return x + y + gfunc = operator.add g0 = ureg.wraps(None, [None, None])(gfunc) assert g0(3, 1) == 4 @@ -560,12 +606,27 @@ def hfunc(x, y): h3 = ureg.wraps((None,), (None, None))(hfunc) assert h3(3, 1) == (3, 1) - def test_wrap_referencing(self): + def kfunc(a, /, b, c=5, *, d=6): + return a, b, c, d + + k1 = ureg.wraps((None,), (None, None, None, None))(kfunc) + assert k1(1, 2, 3, d=4) == (1, 2, 3, 4) + assert k1(1, 2, c=3, d=4) == (1, 2, 3, 4) + assert k1(1, b=2, c=3, d=4) == (1, 2, 3, 4) + assert k1(1, d=4, b=2, c=3) == (1, 2, 3, 4) + assert k1(1, 2, c=3) == (1, 2, 3, 6) + assert k1(1, 2, d=4) == (1, 2, 5, 4) + assert k1(1, 2) == (1, 2, 5, 6) + k2 = ureg.wraps((None,), ("meter", "centimeter", "meter", "centimeter"))(kfunc) + assert k2( + 1 * ureg.meter, 2 * ureg.centimeter, 3 * ureg.meter, d=4 * ureg.centimeter + ) == (1, 2, 3, 4) + + def test_wrap_referencing(self): ureg = self.ureg - def gfunc(x, y): - return x + y + gfunc = operator.add def gfunc2(x, y): return x**2 + y @@ -610,6 +671,7 @@ def func(x): assert f0(3.0 * ureg.centimeter) == 0.03 * ureg.meter with pytest.raises(DimensionalityError): f0(3.0 * ureg.kilogram) + assert f0(x=3.0 * ureg.centimeter) == 0.03 * ureg.meter f0b = ureg.check(ureg.meter)(func) with pytest.raises(DimensionalityError): @@ -618,8 +680,7 @@ def func(x): with pytest.raises(DimensionalityError): f0b(3.0 * ureg.kilogram) - def gfunc(x, y): - return x / y + gfunc = operator.truediv g0 = ureg.check(None, None)(gfunc) assert g0(6, 2) == 3 @@ -647,13 +708,13 @@ def test_to_ref_vs_to(self): q = 8.0 * self.ureg.inch t = 8.0 * self.ureg.degF dt = 8.0 * self.ureg.delta_degF - assert q.to("yard").magnitude == self.ureg._units[ + assert q.to("yard").magnitude == internal(self.ureg)._units[ "inch" ].converter.to_reference(8.0) - assert t.to("kelvin").magnitude == self.ureg._units[ + assert t.to("kelvin").magnitude == internal(self.ureg)._units[ "degF" ].converter.to_reference(8.0) - assert dt.to("kelvin").magnitude == self.ureg._units[ + assert dt.to("kelvin").magnitude == internal(self.ureg)._units[ "delta_degF" 
].converter.to_reference(8.0) @@ -772,7 +833,6 @@ def test_case_sensitivity(self): class TestCaseInsensitiveRegistry(QuantityTestCase): - kwargs = dict(case_sensitive=False) def test_case_sensitivity(self): @@ -814,7 +874,6 @@ def test_many(self): self._test(self.ureg.kelvin) def test_context_sp(self): - gd = self.ureg.get_dimensionality # length, frequency, energy @@ -853,13 +912,6 @@ def test_get_compatible_units(self): class TestRegistryWithDefaultRegistry(TestRegistry): - @classmethod - def setup_class(cls): - from pint import _DEFAULT_REGISTRY - - cls.ureg = _DEFAULT_REGISTRY - cls.Q_ = cls.ureg.Quantity - def test_lazy(self): x = LazyRegistry() x.test = "test" @@ -868,8 +920,10 @@ def test_lazy(self): y("meter") assert isinstance(y, UnitRegistry) - def test_redefinition(self): - d = self.ureg.define + def test_redefinition(self, func_registry): + ureg = UnitRegistry(on_redefinition="raise") + d = ureg.define + assert "meter" in internal(self.ureg)._units with pytest.raises(RedefinitionError): d("meter = [time]") with pytest.raises(RedefinitionError): @@ -880,7 +934,7 @@ def test_redefinition(self): d("[velocity] = [length]") # aliases - assert "inch" in self.ureg._units + assert "inch" in internal(self.ureg)._units with pytest.raises(RedefinitionError): d("bla = 3.2 meter = inch") with pytest.raises(RedefinitionError): @@ -890,6 +944,8 @@ def test_redefinition(self): # TODO: remove QuantityTestCase class TestConvertWithOffset(QuantityTestCase): + kwargs = dict(autoconvert_offset_to_baseunit=False) + # The dicts in convert_with_offset are used to create a UnitsContainer. # We create UnitsContainer to avoid any auto-conversion of units. convert_with_offset = [ @@ -936,6 +992,8 @@ class TestConvertWithOffset(QuantityTestCase): (({"degC": 2}, {"kelvin": 2}), "error"), (({"degC": 1, "degF": 1}, {"kelvin": 2}), "error"), (({"degC": 1, "kelvin": 1}, {"kelvin": 2}), "error"), + (({"delta_degC": 1}, {"degF": 1}), "error"), + (({"delta_degC": 1}, {"degC": 1}), "error"), ] @pytest.mark.parametrize(("input_tuple", "expected"), convert_with_offset) @@ -980,7 +1038,7 @@ def test_alias(self): assert ureg.Unit(a) == ureg.Unit("canonical") # Test that aliases defined multiple times are not duplicated - assert ureg._units["canonical"].aliases == ( + assert internal(ureg)._units["canonical"].aliases == ( "alias1", "alias2", ) @@ -988,3 +1046,8 @@ def test_alias(self): # Define against unknown name with pytest.raises(KeyError): ureg.define("@alias notexist = something") + + def test_prefix_offset_units(self): + ureg = UnitRegistry() + with pytest.raises(errors.OffsetUnitCalculusError): + ureg.parse_units("kilodegree_Celsius") diff --git a/pint/testsuite/test_util.py b/pint/testsuite/test_util.py index d2eebe59a..3eb49a471 100644 --- a/pint/testsuite/test_util.py +++ b/pint/testsuite/test_util.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import collections import copy import math @@ -5,6 +7,7 @@ import pytest +from pint import pint_eval from pint.util import ( ParserHelper, UnitsContainer, @@ -15,7 +18,6 @@ sized, string_preprocessor, to_units_container, - tokenizer, transpose, ) @@ -120,13 +122,13 @@ def test_invalid(self): UnitsContainer({"1": "2"}) d = UnitsContainer() with pytest.raises(TypeError): - d.__mul__(list()) + d.__mul__([]) with pytest.raises(TypeError): - d.__pow__(list()) + d.__pow__([]) with pytest.raises(TypeError): - d.__truediv__(list()) + d.__truediv__([]) with pytest.raises(TypeError): - d.__rtruediv__(list()) + d.__rtruediv__([]) class TestToUnitsContainer: @@ -193,9 
+195,9 @@ def test_calculate(self): assert "seconds" / z() == ParserHelper(0.5, seconds=1, meter=-2) assert dict(seconds=1) / z() == ParserHelper(0.5, seconds=1, meter=-2) - def _test_eval_token(self, expected, expression, use_decimal=False): - token = next(tokenizer(expression)) - actual = ParserHelper.eval_token(token, use_decimal=use_decimal) + def _test_eval_token(self, expected, expression): + token = next(pint_eval.tokenizer(expression)) + actual = ParserHelper.eval_token(token) assert expected == actual assert type(expected) == type(actual) @@ -302,10 +304,24 @@ def test_shortest_path(self): p = find_shortest_path(g, 2, 1) assert p == [2, 1] + def test_shortest_path_densely_connected_2146(self): + import itertools + g = collections.defaultdict(set) + for i, j in itertools.combinations(range(42), 2): + g[i].add(j) + g[j].add(i) + p = find_shortest_path(g, 0, 39) + assert p == [0, 39] + p = find_shortest_path(g, 0, 41) + assert p == [0, 41] + p = find_shortest_path(g, 17, 2) + assert p == [17, 2] + p = find_shortest_path(g, 12, 12) + assert p == [12] + class TestMatrix: def test_matrix_to_string(self): - assert ( matrix_to_string([[1, 2], [3, 4]], row_headers=None, col_headers=None) == "1\t2\n" @@ -346,23 +362,20 @@ def test_matrix_to_string(self): ) def test_transpose(self): - assert transpose([[1, 2], [3, 4]]) == [[1, 3], [2, 4]] class TestOtherUtils: def test_iterable(self): - # Test with list, string, generator, and scalar assert iterable([0, 1, 2, 3]) assert iterable("test") - assert iterable((i for i in range(5))) + assert iterable(i for i in range(5)) assert not iterable(0) def test_sized(self): - # Test with list, string, generator, and scalar assert sized([0, 1, 2, 3]) assert sized("test") - assert not sized((i for i in range(5))) + assert not sized(i for i in range(5)) assert not sized(0) diff --git a/pint/toktest.py b/pint/toktest.py new file mode 100644 index 000000000..e0026a21d --- /dev/null +++ b/pint/toktest.py @@ -0,0 +1,32 @@ +from __future__ import annotations + +import tokenize + +from pint.pint_eval import _plain_tokenizer, uncertainty_tokenizer + +tokenizer = _plain_tokenizer + +input_lines = [ + "( 8.0 + / - 4.0 ) e6 m", + "( 8.0 ± 4.0 ) e6 m", + "( 8.0 + / - 4.0 ) e-6 m", + "( nan + / - 0 ) e6 m", + "( nan ± 4.0 ) m", + "8.0 + / - 4.0 m", + "8.0 ± 4.0 m", + "8.0(4)m", + "8.0(.4)m", + "8.0(-4)m", # error! + "pint == wonderfulness ^ N + - + / - * ± m J s", +] + +for line in input_lines: + result = [] + g = list(uncertainty_tokenizer(line)) # tokenize the string + for toknum, tokval, _, _, _ in g: + result.append((toknum, tokval)) + + print("====") + print(f"input line: {line}") + print(result) + print(tokenize.untokenize(result)) diff --git a/pint/util.py b/pint/util.py index 3d0017521..796bc20da 100644 --- a/pint/util.py +++ b/pint/util.py @@ -10,55 +10,86 @@ from __future__ import annotations -import functools -import inspect import logging import math import operator import re -from collections.abc import Mapping +import tokenize +import types +from collections import deque +from collections.abc import Callable, Generator, Hashable, Iterable, Iterator, Mapping from fractions import Fraction from functools import lru_cache, partial from logging import NullHandler from numbers import Number from token import NAME, NUMBER -from typing import TYPE_CHECKING, ClassVar, Optional, Type, Union - -from .compat import NUMERIC_TYPES, tokenizer +from typing import ( + TYPE_CHECKING, + Any, + ClassVar, + TypeAlias, + TypeVar, +) + +from . 
import pint_eval +from ._typing import Scalar +from .compat import NUMERIC_TYPES, Self from .errors import DefinitionSyntaxError -from .formatting import format_unit from .pint_eval import build_eval_tree if TYPE_CHECKING: - from pint import Quantity, UnitRegistry + from ._typing import QuantityOrUnitLike + from .registry import UnitRegistry - from ._typing import UnitLike logger = logging.getLogger(__name__) logger.addHandler(NullHandler()) +T = TypeVar("T") +TH = TypeVar("TH", bound=Hashable) +TT = TypeVar("TT", bound=type) + +ItMatrix: TypeAlias = Iterable[Iterable[Scalar]] +Matrix: TypeAlias = list[list[Scalar]] + + +def _noop(x: T) -> T: + return x + def matrix_to_string( - matrix, row_headers=None, col_headers=None, fmtfun=lambda x: str(int(x)) -): - """Takes a 2D matrix (as nested list) and returns a string. + matrix: ItMatrix, + row_headers: Iterable[str] | None = None, + col_headers: Iterable[str] | None = None, + fmtfun: Callable[ + [ + Scalar, + ], + str, + ] = "{:0.0f}".format, +) -> str: + """Return a string representation of a matrix. Parameters ---------- - matrix : - - row_headers : - (Default value = None) - col_headers : - (Default value = None) - fmtfun : - (Default value = lambda x: str(int(x))) + matrix + A matrix given as an iterable of an iterable of numbers. + row_headers + An iterable of strings to serve as row headers. + (default = None, meaning no row headers are printed.) + col_headers + An iterable of strings to serve as column headers. + (default = None, meaning no col headers are printed.) + fmtfun + A callable to convert a number into string. + (default = `"{:0.0f}".format`) Returns ------- - + str + String representation of the matrix. """ - ret = [] + ret: list[str] = [] if col_headers: ret.append(("\t" if row_headers else "") + "\t".join(col_headers)) if row_headers: @@ -72,99 +103,129 @@ def matrix_to_string( return "\n".join(ret) -def transpose(matrix): - """Takes a 2D matrix (as nested list) and returns the transposed version. +def transpose(matrix: ItMatrix) -> Matrix: + """Return the transposed version of a matrix. Parameters ---------- - matrix : - + matrix + A matrix given as an iterable of an iterable of numbers. Returns ------- - + Matrix + The transposed version of the matrix. """ return [list(val) for val in zip(*matrix)] -def column_echelon_form(matrix, ntype=Fraction, transpose_result=False): - """Calculates the column echelon form using Gaussian elimination. +def matrix_apply( + matrix: ItMatrix, + func: Callable[ + [ + Scalar, + ], + Scalar, + ], +) -> Matrix: + """Apply a function to individual elements within a matrix. Parameters ---------- - matrix : - a 2D matrix as nested list. - ntype : - the numerical type to use in the calculation. (Default value = Fraction) - transpose_result : - indicates if the returned matrix should be transposed. (Default value = False) + matrix + A matrix given as an iterable of an iterable of numbers. + func + A callable that converts a number to another. Returns ------- - type - column echelon form, transformed identity matrix, swapped rows - + A new matrix in which each element has been replaced by new one. """ - lead = 0 + return [[func(x) for x in row] for row in matrix] - M = transpose(matrix) - _transpose = transpose if transpose_result else lambda x: x +def column_echelon_form( + matrix: ItMatrix, ntype: type = Fraction, transpose_result: bool = False +) -> tuple[Matrix, Matrix, list[int]]: + """Calculate the column echelon form using Gaussian elimination. 
- rows, cols = len(M), len(M[0]) + Parameters + ---------- + matrix + A 2D matrix as nested list. + ntype + The numerical type to use in the calculation. + (default = Fraction) + transpose_result + Indicates if the returned matrix should be transposed. + (default = False) - new_M = [] - for row in M: - r = [] - for x in row: - if isinstance(x, float): - x = ntype.from_float(x) - else: - x = ntype(x) - r.append(x) - new_M.append(r) - M = new_M + Returns + ------- + ech_matrix + Column echelon form. + id_matrix + Transformed identity matrix. + swapped + Swapped rows. + """ + + _transpose: Callable[ + [ + ItMatrix, + ], + Matrix, + ] = transpose if transpose_result else _noop + ech_matrix = matrix_apply( + transpose(matrix), + lambda x: ntype.from_float(x) if isinstance(x, float) else ntype(x), # type: ignore + ) + + rows, cols = len(ech_matrix), len(ech_matrix[0]) # M = [[ntype(x) for x in row] for row in M] - I = [ # noqa: E741 + id_matrix: list[list[Scalar]] = [ # noqa: E741 [ntype(1) if n == nc else ntype(0) for nc in range(rows)] for n in range(rows) ] - swapped = [] + swapped: list[int] = [] + lead = 0 for r in range(rows): if lead >= cols: - return _transpose(M), _transpose(I), swapped - i = r - while M[i][lead] == 0: - i += 1 - if i != rows: + return _transpose(ech_matrix), _transpose(id_matrix), swapped + s = r + while ech_matrix[s][lead] == 0: # type: ignore + s += 1 + if s != rows: continue - i = r + s = r lead += 1 if cols == lead: - return _transpose(M), _transpose(I), swapped + return _transpose(ech_matrix), _transpose(id_matrix), swapped - M[i], M[r] = M[r], M[i] - I[i], I[r] = I[r], I[i] + ech_matrix[s], ech_matrix[r] = ech_matrix[r], ech_matrix[s] + id_matrix[s], id_matrix[r] = id_matrix[r], id_matrix[s] - swapped.append(i) - lv = M[r][lead] - M[r] = [mrx / lv for mrx in M[r]] - I[r] = [mrx / lv for mrx in I[r]] + swapped.append(s) + lv = ech_matrix[r][lead] + ech_matrix[r] = [mrx / lv for mrx in ech_matrix[r]] + id_matrix[r] = [mrx / lv for mrx in id_matrix[r]] - for i in range(rows): - if i == r: + for s in range(rows): + if s == r: continue - lv = M[i][lead] - M[i] = [iv - lv * rv for rv, iv in zip(M[r], M[i])] - I[i] = [iv - lv * rv for rv, iv in zip(I[r], I[i])] + lv = ech_matrix[s][lead] + ech_matrix[s] = [ + iv - lv * rv for rv, iv in zip(ech_matrix[r], ech_matrix[s]) + ] + id_matrix[s] = [iv - lv * rv for rv, iv in zip(id_matrix[r], id_matrix[s])] lead += 1 - return _transpose(M), _transpose(I), swapped + return _transpose(ech_matrix), _transpose(id_matrix), swapped -def pi_theorem(quantities, registry=None): +def pi_theorem(quantities: dict[str, Any], registry: UnitRegistry | None = None): """Builds dimensionless quantities using the Buckingham π theorem Parameters @@ -172,7 +233,7 @@ def pi_theorem(quantities, registry=None): quantities : dict mapping between variable name and units registry : - (Default value = None) + (default value = None) Returns ------- @@ -186,7 +247,7 @@ def pi_theorem(quantities, registry=None): dimensions = set() if registry is None: - getdim = lambda x: x + getdim = _noop non_int_type = float else: getdim = registry.get_dimensionality @@ -214,33 +275,35 @@ def pi_theorem(quantities, registry=None): dimensions = list(dimensions) # Calculate dimensionless quantities - M = [ + matrix = [ [dimensionality[dimension] for name, dimensionality in quant] for dimension in dimensions ] - M, identity, pivot = column_echelon_form(M, transpose_result=False) + ech_matrix, id_matrix, pivot = column_echelon_form(matrix, transpose_result=False) # Collect 
results # Make all numbers integers and minimize the number of negative exponents. # Remove zeros results = [] - for rowm, rowi in zip(M, identity): + for rowm, rowi in zip(ech_matrix, id_matrix): if any(el != 0 for el in rowm): continue max_den = max(f.denominator for f in rowi) neg = -1 if sum(f < 0 for f in rowi) > sum(f > 0 for f in rowi) else 1 results.append( - dict( - (q[0], neg * f.numerator * max_den / f.denominator) + { + q[0]: neg * f.numerator * max_den / f.denominator for q, f in zip(quant, rowi) if f.numerator != 0 - ) + } ) return results -def solve_dependencies(dependencies): +def solve_dependencies( + dependencies: dict[TH, set[TH]], +) -> Generator[set[TH]]: """Solve a dependency graph. Parameters @@ -249,12 +312,16 @@ def solve_dependencies(dependencies): dependency dictionary. For each key, the value is an iterable indicating its dependencies. - Returns - ------- - type + Yields + ------ + set iterator of sets, each containing keys of independents tasks dependent only of the previous tasks in the list. + Raises + ------ + ValueError + if a cyclic dependency is found. """ while dependencies: # values not in keys (items without dep) @@ -273,23 +340,68 @@ def solve_dependencies(dependencies): yield t -def find_shortest_path(graph, start, end, path=None): - path = (path or []) + [start] +def find_shortest_path( + graph: dict[TH, set[TH]], start: TH, end: TH +): + """Find shortest path between two nodes within a graph. + + Parameters + ---------- + graph + A graph given as a mapping of nodes + to a set of all connected nodes to it. + start + Starting node. + end + End node. + + Returns + ------- + list[TH] + The shortest path between two nodes. + """ + path = [start] if start == end: return path - if start not in graph: - return None - shortest = None - for node in graph[start]: - if node not in path: - newpath = find_shortest_path(graph, node, end, path) - if newpath: - if not shortest or len(newpath) < len(shortest): - shortest = newpath - return shortest + + fifo = deque() + fifo.append((start, path)) + visited = set() + while fifo: + node, path = fifo.popleft() + visited.add(node) + for adjascent_node in graph[node] - visited: + if adjascent_node == end: + return path + [adjascent_node] + else: + fifo.append((adjascent_node, path + [adjascent_node])) + + return None -def find_connected_nodes(graph, start, visited=None): +def find_connected_nodes( + graph: dict[TH, set[TH]], start: TH, visited: set[TH] | None = None +) -> set[TH] | None: + """Find all nodes connected to a start node within a graph. + + Parameters + ---------- + graph + A graph given as a mapping of nodes + to a set of all connected nodes to it. + start + Starting node. + visited + Mutable set to collect visited nodes. + (default = None, empty set) + + Returns + ------- + set[TH] + The shortest path between two nodes. + """ + + # TODO: raise ValueError when start not in graph if start not in graph: return None @@ -303,41 +415,45 @@ def find_connected_nodes(graph, start, visited=None): return visited -class udict(dict): +class udict(dict[str, Scalar]): """Custom dict implementing __missing__.""" - def __missing__(self, key): + def __missing__(self, key: str): return 0 - def copy(self): + def copy(self: Self) -> Self: return udict(self) -class UnitsContainer(Mapping): +class UnitsContainer(Mapping[str, Scalar]): """The UnitsContainer stores the product of units and their respective exponent and implements the corresponding operations. UnitsContainer is a read-only mapping. 
All operations (even in place ones) + return new instances. Parameters ---------- - - Returns - ------- - type - - + non_int_type + Numerical type used for non integer values. """ __slots__ = ("_d", "_hash", "_one", "_non_int_type") - def __init__(self, *args, **kwargs) -> None: + _d: udict + _hash: int | None + _one: Scalar + _non_int_type: type + + def __init__( + self, *args: Any, non_int_type: type | None = None, **kwargs: Any + ) -> None: if args and isinstance(args[0], UnitsContainer): default_non_int_type = args[0]._non_int_type else: default_non_int_type = float - self._non_int_type = kwargs.pop("non_int_type", default_non_int_type) + self._non_int_type = non_int_type or default_non_int_type if self._non_int_type is float: self._one = 1 @@ -348,18 +464,34 @@ def __init__(self, *args, **kwargs) -> None: self._d = d for key, value in d.items(): if not isinstance(key, str): - raise TypeError("key must be a str, not {}".format(type(key))) + raise TypeError(f"key must be a str, not {type(key)}") if not isinstance(value, Number): - raise TypeError("value must be a number, not {}".format(type(value))) + raise TypeError(f"value must be a number, not {type(value)}") if not isinstance(value, int) and not isinstance(value, self._non_int_type): d[key] = self._non_int_type(value) self._hash = None - def copy(self): + def copy(self: Self) -> Self: + """Create a copy of this UnitsContainer.""" return self.__copy__() - def add(self, key, value): - newval = self._d[key] + value + def add(self: Self, key: str, value: Number) -> Self: + """Create a new UnitsContainer adding value to + the value existing for a given key. + + Parameters + ---------- + key + unit to which the value will be added. + value + value to be added. + + Returns + ------- + UnitsContainer + A copy of this container. + """ + newval = self._d[key] + self._normalize_nonfloat_value(value) new = self.copy() if newval: new._d[key] = newval @@ -368,17 +500,18 @@ def add(self, key, value): new._hash = None return new - def remove(self, keys): - """Create a new UnitsContainer purged from given keys. + def remove(self: Self, keys: Iterable[str]) -> Self: + """Create a new UnitsContainer purged from given entries. Parameters ---------- - keys : - + keys + Iterable of keys (units) to remove. Returns ------- - + UnitsContainer + A copy of this container. """ new = self.copy() for k in keys: @@ -386,51 +519,55 @@ def remove(self, keys): new._hash = None return new - def rename(self, oldkey, newkey): + def rename(self: Self, oldkey: str, newkey: str) -> Self: """Create a new UnitsContainer in which an entry has been renamed. Parameters ---------- - oldkey : - - newkey : - + oldkey + Existing key (unit). + newkey + New key (unit). Returns ------- - + UnitsContainer + A copy of this container. 
""" new = self.copy() new._d[newkey] = new._d.pop(oldkey) new._hash = None return new - def __iter__(self): + def unit_items(self) -> Iterable[tuple[str, Scalar]]: + return self._d.items() + + def __iter__(self) -> Iterator[str]: return iter(self._d) def __len__(self) -> int: return len(self._d) - def __getitem__(self, key): + def __getitem__(self, key: str) -> Scalar: return self._d[key] - def __contains__(self, key): + def __contains__(self, key: str) -> bool: return key in self._d - def __hash__(self): + def __hash__(self) -> int: if self._hash is None: self._hash = hash(frozenset(self._d.items())) return self._hash # Only needed by pickle protocol 0 and 1 (used by pytables) - def __getstate__(self): + def __getstate__(self) -> tuple[udict, Scalar, type]: return self._d, self._one, self._non_int_type - def __setstate__(self, state): + def __setstate__(self, state: tuple[udict, Scalar, type]): self._d, self._one, self._non_int_type = state self._hash = None - def __eq__(self, other) -> bool: + def __eq__(self, other: Any) -> bool: if isinstance(other, UnitsContainer): # UnitsContainer.__hash__(self) is not the same as hash(self); see # ParserHelper.__hash__ and __eq__. @@ -456,14 +593,20 @@ def __str__(self) -> str: def __repr__(self) -> str: tmp = "{%s}" % ", ".join( - ["'{}': {}".format(key, value) for key, value in sorted(self._d.items())] + [f"'{key}': {value}" for key, value in sorted(self._d.items())] ) - return "".format(tmp) + return f"" def __format__(self, spec: str) -> str: + # TODO: provisional + from .formatting import format_unit + return format_unit(self, spec) def format_babel(self, spec: str, registry=None, **kwspec) -> str: + # TODO: provisional + from .formatting import format_unit + return format_unit(self, spec, registry=registry, **kwspec) def __copy__(self): @@ -475,7 +618,7 @@ def __copy__(self): out._one = self._one return out - def __mul__(self, other): + def __mul__(self, other: Any): if not isinstance(other, self.__class__): err = "Cannot multiply UnitsContainer by {}" raise TypeError(err.format(type(other))) @@ -491,7 +634,7 @@ def __mul__(self, other): __rmul__ = __mul__ - def __pow__(self, other): + def __pow__(self, other: Any): if not isinstance(other, NUMERIC_TYPES): err = "Cannot power UnitsContainer by {}" raise TypeError(err.format(type(other))) @@ -502,84 +645,88 @@ def __pow__(self, other): new._hash = None return new - def __truediv__(self, other): + def __truediv__(self, other: Any): if not isinstance(other, self.__class__): err = "Cannot divide UnitsContainer by {}" raise TypeError(err.format(type(other))) new = self.copy() for key, value in other.items(): - new._d[key] -= value + new._d[key] -= self._normalize_nonfloat_value(value) if new._d[key] == 0: del new._d[key] new._hash = None return new - def __rtruediv__(self, other): + def __rtruediv__(self, other: Any): if not isinstance(other, self.__class__) and other != 1: err = "Cannot divide {} by UnitsContainer" raise TypeError(err.format(type(other))) return self**-1 + def _normalize_nonfloat_value(self, value: Scalar) -> Scalar: + if not isinstance(value, int) and not isinstance(value, self._non_int_type): + return self._non_int_type(value) # type: ignore[no-any-return] + return value + class ParserHelper(UnitsContainer): """The ParserHelper stores in place the product of variables and their respective exponent and implements the corresponding operations. + It also provides a scaling factor. 
+ + For example: + `3 * m ** 2` becomes ParserHelper(3, m=2) + + Briefly, it is a UnitsContainer with a scaling factor. ParserHelper is a read-only mapping. All operations (even in place ones) + return new instances. + + WARNING: The hash value used does not take into account the scale + attribute, so be careful if you use it as a dict key: two unequal + objects can have the same hash. Parameters ---------- - - Returns - ------- - type - WARNING : The hash value used does not take into account the scale - attribute so be careful if you use it as a dict key and then two unequal - object can have the same hash. - + scale + Scaling factor. + (default = 1) + **kwargs + Used to populate the dict of units and exponents. """ __slots__ = ("scale",) - def __init__(self, scale=1, *args, **kwargs): + scale: Scalar + + def __init__(self, scale: Scalar = 1, *args, **kwargs): super().__init__(*args, **kwargs) self.scale = scale @classmethod - def from_word(cls, input_word, non_int_type=float): + def from_word(cls, input_word: str, non_int_type: type = float) -> ParserHelper: """Creates a ParserHelper object with a single variable with exponent one. - Equivalent to: ParserHelper({'word': 1}) + Equivalent to: ParserHelper(1, {input_word: 1}) Parameters ---------- - input_word : - - - Returns - ------- + input_word + non_int_type + Numerical type used for non integer values. """ if non_int_type is float: return cls(1, [(input_word, 1)], non_int_type=non_int_type) else: - ONE = non_int_type("1.0") + ONE = non_int_type("1") return cls(ONE, [(input_word, ONE)], non_int_type=non_int_type) @classmethod - def eval_token(cls, token, use_decimal=False, non_int_type=float): - - # TODO: remove this code when use_decimal is deprecated - if use_decimal: - raise DeprecationWarning( - "`use_decimal` is deprecated, use `non_int_type` keyword argument when instantiating the registry.\n" - ">>> from decimal import Decimal\n" - ">>> ureg = UnitRegistry(non_int_type=Decimal)" - ) - + def eval_token(cls, token: tokenize.TokenInfo, non_int_type: type = float): token_type = token.type token_text = token.string if token_type == NUMBER: @@ -596,18 +743,16 @@ def eval_token(cls, token, use_decimal=False, non_int_type=float): raise Exception("unknown token type") @classmethod - @lru_cache() - def from_string(cls, input_string, non_int_type=float): + @lru_cache + def from_string(cls, input_string: str, non_int_type: type = float) -> ParserHelper: """Parse linear expression mathematical units and return a quantity object. Parameters ---------- - input_string : - - - Returns - ------- + input_string + non_int_type + Numerical type used for non integer values. 
""" if not input_string: return cls(non_int_type=non_int_type) @@ -621,7 +766,7 @@ def from_string(cls, input_string, non_int_type=float): else: reps = False - gen = tokenizer(input_string) + gen = pint_eval.tokenizer(input_string) ret = build_eval_tree(gen).evaluate( partial(cls.eval_token, non_int_type=non_int_type) ) @@ -668,17 +813,17 @@ def __setstate__(self, state): super().__setstate__(state[:-1]) self.scale = state[-1] - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, ParserHelper): return self.scale == other.scale and super().__eq__(other) elif isinstance(other, str): return self == ParserHelper.from_string(other, self._non_int_type) elif isinstance(other, Number): return self.scale == other and not len(self._d) - else: - return self.scale == 1 and super().__eq__(other) - def operate(self, items, op=operator.iadd, cleanup=True): + return self.scale == 1 and super().__eq__(other) + + def operate(self, items, op=operator.iadd, cleanup: bool = True): d = udict(self._d) for key, value in items: d[key] = op(d[key], value) @@ -692,15 +837,15 @@ def operate(self, items, op=operator.iadd, cleanup=True): def __str__(self): tmp = "{%s}" % ", ".join( - ["'{}': {}".format(key, value) for key, value in sorted(self._d.items())] + [f"'{key}': {value}" for key, value in sorted(self._d.items())] ) - return "{} {}".format(self.scale, tmp) + return f"{self.scale} {tmp}" def __repr__(self): tmp = "{%s}" % ", ".join( - ["'{}': {}".format(key, value) for key, value in sorted(self._d.items())] + [f"'{key}': {value}" for key, value in sorted(self._d.items())] ) - return "".format(self.scale, tmp) + return f"" def __mul__(self, other): if isinstance(other, str): @@ -754,7 +899,8 @@ def __rtruediv__(self, other): #: List of regex substitution pairs. _subs_re_list = [ - ("\N{DEGREE SIGN}", " degree"), + (r"Δ°", "Δdeg"), # needs to be before the "degree" rule + ("\N{DEGREE SIGN}", "degree"), (r"([\w\.\-\+\*\\\^])\s+", r"\1 "), # merge multiple spaces (r"({}) squared", r"\1**2"), # Handle square and cube (r"({}) cubed", r"\1**3"), @@ -765,7 +911,7 @@ def __rtruediv__(self, other): r"\b([0-9]+\.?[0-9]*)(?=[e|E][a-zA-Z]|[a-df-zA-DF-Z])", r"\1*", ), # Handle numberLetter for multiplication - (r"([\w\.\-])\s+(?=\w)", r"\1*"), # Handle space for multiplication + (r"([\w\.\)])\s+(?=[\w\(])", r"\1*"), # Handle space for multiplication ] #: Compiles the regex and replace {} by a regex that matches an identifier. @@ -823,21 +969,22 @@ def __new__(cls, *args, **kwargs): inst._REGISTRY = application_registry.get() return inst - def _check(self, other) -> bool: + def _check(self, other: Any) -> bool: """Check if the other object use a registry and if so that it is the same registry. Parameters ---------- - other : - + other Returns ------- - type - other don't use a registry and raise ValueError if other don't use the - same unit registry. + bool + Raises + ------ + ValueError + if other don't use the same unit registry. 
""" if self._REGISTRY is getattr(other, "_REGISTRY", None): return True @@ -856,40 +1003,44 @@ class PrettyIPython: default_format: str - def _repr_html_(self): - if "~" in self.default_format: - return "{:~H}".format(self) - else: - return "{:H}".format(self) + def _repr_html_(self) -> str: + if "~" in self._REGISTRY.formatter.default_format: + return f"{self:~H}" + return f"{self:H}" - def _repr_latex_(self): - if "~" in self.default_format: - return "${:~L}$".format(self) - else: - return "${:L}$".format(self) + def _repr_latex_(self) -> str: + if "~" in self._REGISTRY.formatter.default_format: + return f"${self:~L}$" + return f"${self:L}$" - def _repr_pretty_(self, p, cycle): - if "~" in self.default_format: - p.text("{:~P}".format(self)) + def _repr_pretty_(self, p, cycle: bool): + # if cycle: + if "~" in self._REGISTRY.formatter.default_format: + p.text(f"{self:~P}") else: - p.text("{:P}".format(self)) + p.text(f"{self:P}") + # else: + # p.pretty(self.magnitude) + # p.text(" ") + # p.pretty(self.units) def to_units_container( - unit_like: Union[UnitLike, Quantity], registry: Optional[UnitRegistry] = None + unit_like: QuantityOrUnitLike, registry: UnitRegistry | None = None ) -> UnitsContainer: """Convert a unit compatible type to a UnitsContainer. Parameters ---------- - unit_like : - - registry : - (Default value = None) + unit_like + Quantity or Unit to infer the plain units from. + registry + If provided, uses the registry's UnitsContainer and parse_unit_name. If None, + uses the registry attached to unit_like. Returns ------- - + UnitsContainer """ mro = type(unit_like).mro() if UnitsContainer in mro: @@ -898,7 +1049,10 @@ def to_units_container( return unit_like._units elif str in mro: if registry: - return registry._parse_units(unit_like) + # TODO: document how to whether to lift preprocessing loop out to caller + for p in registry.preprocessors: + unit_like = p(unit_like) + return registry.parse_units_as_container(unit_like) else: return ParserHelper.from_string(unit_like) elif dict in mro: @@ -909,17 +1063,16 @@ def to_units_container( def infer_base_unit( - unit_like: Union[UnitLike, Quantity], registry: Optional[UnitRegistry] = None + unit_like: QuantityOrUnitLike, registry: UnitRegistry | None = None ) -> UnitsContainer: """ Given a Quantity or UnitLike, give the UnitsContainer for it's plain units. Parameters ---------- - unit_like : Union[UnitLike, Quantity] + unit_like Quantity or Unit to infer the plain units from. - - registry: Optional[UnitRegistry] + registry If provided, uses the registry's UnitsContainer and parse_unit_name. If None, uses the registry attached to unit_like. @@ -954,7 +1107,7 @@ def infer_base_unit( return registry.UnitsContainer(nonzero_dict) -def getattr_maybe_raise(self, item): +def getattr_maybe_raise(obj: Any, item: str): """Helper function invoked at start of all overridden ``__getattr__``. Raise AttributeError if the user tries to ask for a _ or __ attribute, @@ -963,39 +1116,25 @@ def getattr_maybe_raise(self, item): Parameters ---------- - item : string - Item to be found. - - - Returns - ------- + item + attribute to be found. 
+ Raises + ------ + AttributeError """ # Double-underscore attributes are tricky to detect because they are - # automatically prefixed with the class name - which may be a subclass of self + # automatically prefixed with the class name - which may be a subclass of obj if ( item.endswith("__") or len(item.lstrip("_")) == 0 or (item.startswith("_") and not item.lstrip("_")[0].isdigit()) ): - raise AttributeError("%r object has no attribute %r" % (self, item)) - - -def iterable(y) -> bool: - """Check whether or not an object can be iterated over. - - Vendored from numpy under the terms of the BSD 3-Clause License. (Copyright - (c) 2005-2019, NumPy Developers.) + raise AttributeError(f"{obj!r} object has no attribute {item!r}") - Parameters - ---------- - value : - Input object. - type : - object - y : - """ +def iterable(y: Any) -> bool: + """Check whether or not an object can be iterated over.""" try: iter(y) except TypeError: @@ -1003,18 +1142,8 @@ def iterable(y) -> bool: return True -def sized(y) -> bool: - """Check whether or not an object has a defined length. - - Parameters - ---------- - value : - Input object. - type : - object - y : - - """ +def sized(y: Any) -> bool: + """Check whether or not an object has a defined length.""" try: len(y) except TypeError: @@ -1022,37 +1151,20 @@ def sized(y) -> bool: return True -@functools.lru_cache( - maxsize=None -) # TODO: replace with cache when Python 3.8 is dropped. -def _build_type(class_name: str, bases): - return type(class_name, bases, dict()) - - -def build_dependent_class(registry_class, class_name: str, attribute_name: str) -> Type: - """Creates a class specifically for the given registry that - subclass all the classes named by the registry bases in a - specific attribute - - 1. List the 'attribute_name' attribute for each of the bases of the registry class. - 2. Use this list as bases for the new class - 3. Add the provided registry as the class registry. - - """ - bases = ( - getattr(base, attribute_name) - for base in inspect.getmro(registry_class) - if attribute_name in base.__dict__ - ) - bases = tuple(dict.fromkeys(bases, None).keys()) - if len(bases) == 1 and bases[0].__name__ == class_name: - return bases[0] - return _build_type(class_name, bases) - - -def create_class_with_registry(registry, base_class) -> Type: +def create_class_with_registry( + registry: UnitRegistry, base_class: type[TT] +) -> type[TT]: """Create new class inheriting from base_class and filling _REGISTRY class attribute with an actual instanced registry. """ - return type(base_class.__name__, tuple((base_class,)), dict(_REGISTRY=registry)) + class_body = { + "__module__": "pint", + "_REGISTRY": registry, + } + + return types.new_class( + base_class.__name__, + bases=(base_class,), + exec_body=lambda ns: ns.update(class_body), + ) diff --git a/pyproject.toml b/pyproject.toml index 771af682d..ba30d56ab 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,217 @@ +[project] +name = "Pint" +authors = [{ name = "Hernan E. Grecco", email = "hernan.grecco@gmail.com" }, { name = "Valispace", email = "support@valispace.com"}] +dynamic = ["version"] +license = { text = "BSD" } +description = "Physical quantities module" +readme = "README.rst" +maintainers = [ + { name = "Hernan E. 
Grecco", email = "hernan.grecco@gmail.com" }, + { name = "Jules Chéron", email = "julescheron@gmail.com" }, + { name = "Valispace", email = "support@valispace.com"}, +] +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: BSD License", + "Operating System :: MacOS :: MacOS X", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX", + "Programming Language :: Python", + "Topic :: Scientific/Engineering", + "Topic :: Software Development :: Libraries", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", +] +requires-python = ">=3.11" +keywords = ["physical", "quantities", "unit", "conversion", "science"] +dependencies = [ + "platformdirs>=2.1.0", + "typing_extensions>=4.0.0", + "flexcache>=0.3", + "flexparser>=0.4", +] + +[tool.hatch.build] +packages = ["pint"] +include = ["pint/default_en.txt", "pint/constants_en.txt"] + +[project.optional-dependencies] +test = ["pytest", "pytest-cov", "pytest-subtests", "pytest-benchmark"] +test-mpl = ["pytest-mpl"] +test-all = ["pint[test, test-mpl]"] +codspeed = ["pint[test-all]", "pytest-codspeed"] +numpy = ["numpy >= 1.23"] +uncertainties = ["uncertainties >= 3.1.6"] +babel = ["babel <= 2.8"] +pandas = ["pint-pandas >= 0.3"] +xarray = ["xarray"] +dask = ["dask"] +mip = ["mip >= 1.13"] +matplotlib = ["matplotlib"] +all = [ + "pint[numpy,uncertainties,babel,pandas,pandas,xarray,dask,mip,matplotlib]", +] +docs = [ + "sphinx>=6,<8.2", + "ipython<=8.12", + "nbsphinx", + "jupyter_client", + "ipykernel", + "graphviz", + "pooch", + "sparse", + "Serialize", + "pygments>=2.4", + "sphinx-book-theme>=1.1.0", + "sphinx_copybutton", + "sphinx_design", + "docutils", #==0.14", + "commonmark==0.8.1", + "recommonmark==0.5.0", + "babel", + "sciform", +] + +[project.urls] +Homepage = "https://github.com/hgrecco/pint" +Documentation = "https://pint.readthedocs.io/" + [build-system] -requires = ["setuptools>=41", "wheel", "setuptools_scm[toml]>=3.4.3"] -build-backend = "setuptools.build_meta" +requires = ["hatchling", "hatch-vcs"] +build-backend = "hatchling.build" + +[tool.hatch.version] +source = "vcs" + +[tool.uv] +cache-keys = [{ file = "pyproject.toml" }, { git = true }] + +[tool.pytest.ini_options] +addopts = "--import-mode=importlib" +xfail_strict = true +pythonpath = "." + +[tool.ruff.format] +docstring-code-format = true + +[tool.ruff.lint] +extend-select = ["I"] + +[tool.pixi.project] +channels = ["https://fast.prefix.dev/conda-forge"] +platforms = ['osx-arm64', 'linux-64', 'win-64'] + +[tool.pixi.tasks] + +[tool.pixi.pypi-dependencies] +pint = { path = ".", editable = true } + +[tool.pixi.environments] +dev = { features = ["numpy", "dev", "py313"], solve-group = "default" } +lint = { features = ["lint"], no-default-feature = true } +build = { features = ["build"], no-default-feature = true } +typecheck = { features = ["typecheck"], solve-group = "default" } +test = { features = ["test"], solve-group = "default" } +test-all = { features = ["test-all"], solve-group = "default" } +numpy = { features = ["numpy"], solve-group = "default" } +codspeed = { features = ["codspeed"], solve-group = "default" } +docs = { features = [ + "docs", + "numpy", + "mip", + "matplotlib", + "dask", + "xarray", + "test", + "py311", +] } +# When pint[all] works in pixi, this will be easier. 
+all = { features = [ + "test", + "test-mpl", + "codspeed", + "numpy", + "uncertainties", + "babel", + "pandas", + "xarray", + "dask", + "mip", + "matplotlib", +], solve-group = "default" } + +test-py311 = ["test", "py311"] +test-py312 = ["test", "py312"] +test-py313 = ["test", "py313"] +test-py311-numpy = ["numpy", "test", "py311"] +test-py312-numpy = ["numpy", "test", "py312"] +test-py313-numpy = ["numpy", "test", "py313"] +test-py311-all = ["all", "test", "py311"] +test-py312-all = ["all", "test", "py312"] +test-py313-all = ["all", "test", "py313"] + +[tool.pixi.feature.dev.dependencies] +tomlkit = "*" + +[tool.pixi.feature.lint.dependencies] +pre-commit = "*" +pre-commit-hooks = "*" +taplo = "*" +ruff = "*" +mdformat = "*" +mdformat-ruff = "*" + +[tool.pixi.feature.lint.tasks] +pre-commit-install = "pre-commit install" +lint = "pre-commit run" + +[tool.pixi.feature.build.dependencies] +uv = "*" + +[tool.pixi.feature.build.tasks] +build = "uv build" +publish = "uv publish" + +[tool.pixi.feature.test.tasks] +test = "pytest --benchmark-skip" +bench = "pytest --benchmark-only" + +[tool.pixi.feature.typecheck.dependencies] +pyright = "*" +pip = "*" + +[tool.pixi.feature.typecheck.tasks] +typecheck = "pyright" + +[tool.pixi.feature.docs.pypi-dependencies] +pint = { path = ".", editable = true } + +[tool.pixi.feature.docs.tasks] +docbuild = "sphinx-build -n -j auto -b html -d build/doctrees docs build/html" +doctest = "sphinx-build -a -j auto -b doctest -d build/doctrees docs build/doctest" + +[tool.pixi.feature.docs.dependencies] +pandoc = "*" + +[tool.pixi.feature.py311.dependencies] +python = "3.11.*" + +[tool.pixi.feature.py312.dependencies] +python = "3.12.*" + +[tool.pixi.feature.py313.dependencies] +python = "3.13.*" + +[tool.pyright] +include = ["pint"] +exclude = ["pint/testsuite"] -[tool.setuptools_scm] +[tool.pyright.defineConstant] +HAS_BABEL = true +HAS_UNCERTAINTIES = true +HAS_NUMPY = true +HAS_MIP = true +HAS_DASK = true diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 000000000..c62365819 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,4 @@ +platformdirs>=2.1.0 +typing_extensions>=4.0.0 +flexcache>=0.3 +flexparser>=0.3 diff --git a/requirements_docs.txt b/requirements_docs.txt index 683292c2d..adc93409a 100644 --- a/requirements_docs.txt +++ b/requirements_docs.txt @@ -1,12 +1,12 @@ -sphinx>4 -ipython +sphinx<8 +ipython<=8.12 matplotlib +mip>=1.13 nbsphinx numpy pytest jupyter_client ipykernel -ipython graphviz xarray pooch @@ -15,6 +15,8 @@ dask[complete] setuptools>=41.2 Serialize pygments>=2.4 -sphinx-book-theme==0.3.3 +sphinx-book-theme>=1.1.0 sphinx_copybutton sphinx_design +typing_extensions +sciform diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 887309c59..000000000 --- a/setup.cfg +++ /dev/null @@ -1,79 +0,0 @@ -[metadata] -name = Pint -author = Hernan E. 
Grecco -author_email = hernan.grecco@gmail.com -license = BSD -description = Physical quantities module -long_description = file: README.rst -keywords = physical, quantities, unit, conversion, science -url = https://github.com/hgrecco/pint -classifiers = - Development Status :: 4 - Beta - Intended Audience :: Developers - Intended Audience :: Science/Research - License :: OSI Approved :: BSD License - Operating System :: MacOS :: MacOS X - Operating System :: Microsoft :: Windows - Operating System :: POSIX - Programming Language :: Python - Topic :: Scientific/Engineering - Topic :: Software Development :: Libraries - Programming Language :: Python :: 3.8 - Programming Language :: Python :: 3.9 - Programming Language :: Python :: 3.10 - Programming Language :: Python :: 3.11 - -[options] -packages = pint -zip_safe = True -include_package_data = True -python_requires = >=3.8 -setup_requires = setuptools; setuptools_scm -scripts = pint/pint-convert - -[options.extras_require] -numpy = numpy >= 1.19.5 -uncertainties = uncertainties >= 3.1.6 -babel = babel <= 2.8 -pandas = pint-pandas >= 0.3 -xarray = xarray -dask = dask -test = - pytest - pytest-mpl - pytest-cov - pytest-subtests - packaging - -[options.package_data] -pint = default_en.txt; constants_en.txt; py.typed - -[build-system] -requires = ["setuptools", "setuptools_scm", "wheel"] - -[flake8] -ignore= - # whitespace before ':' - doesn't work well with black - E203 - E402 - # line too long - let black worry about that - E501 - # do not assign a lambda expression, use a def - E731 - # line break before binary operator - W503 -exclude= - build - -[isort] -default_section=THIRDPARTY -known_first_party=pint -multi_line_output=3 -include_trailing_comma=True -force_grid_wrap=0 -use_parentheses=True -line_length=88 - -[zest.releaser] -python-file-with-version = version.py -create-wheel = yes diff --git a/setup.py b/setup.py deleted file mode 100644 index f4f9665a5..000000000 --- a/setup.py +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env python3 -from setuptools import setup - -if __name__ == "__main__": - setup() diff --git a/version.py b/version.py deleted file mode 100644 index c9114ddb6..000000000 --- a/version.py +++ /dev/null @@ -1,6 +0,0 @@ -# This is just for zest.releaser. Do not touch -# flake8: noqa - -# fmt: off -__version__ = '0.21.dev0' -# fmt: on
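With setup.py, setup.cfg, and the zest.releaser version.py removed in favour of hatchling plus hatch-vcs, the package version is derived from the VCS tag at build time. A minimal runtime check (not part of the patch); the printed value is only an example.

# Illustrative sketch only; works for any installed distribution.
from importlib.metadata import version

print(version("Pint"))   # e.g. "0.24" for an installed release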