63 changes: 28 additions & 35 deletions .github/workflows/tests.yml
@@ -8,8 +8,6 @@ on:
branches:
- main



# Cancel running workflows for updated PRs
# https://turso.tech/blog/simple-trick-to-save-environment-and-money-when-using-github-actions
concurrency:
@@ -26,7 +24,6 @@ concurrency:
# enforces that test run just once per OS / floatX setting.

jobs:

changes:
name: "Check for changes"
runs-on: ubuntu-latest
@@ -56,8 +53,7 @@ jobs:
if: ${{ needs.changes.outputs.changes == 'true' }}
strategy:
matrix:
os: [ubuntu-latest]
floatx: [float64]
linker: [cvm, numba]
python-version: ["3.14"]
test-subset:
- |
@@ -145,10 +141,10 @@ jobs:
tests/dims/test_model.py
fail-fast: false
runs-on: ${{ matrix.os }}
runs-on: ubuntu-latest
env:
TEST_SUBSET: ${{ matrix.test-subset }}
PYTENSOR_FLAGS: floatX=${{ matrix.floatx }}
PYTENSOR_FLAGS: linker=${{ matrix.linker }}
defaults:
run:
shell: bash -leo pipefail {0}
@@ -175,18 +171,17 @@ jobs:
- name: Upload coverage to Codecov
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
with:
token: ${{ secrets.CODECOV_TOKEN }} # use token for more robust uploads
token: ${{ secrets.CODECOV_TOKEN }}
env_vars: TEST_SUBSET
name: ${{ matrix.os }} ${{ matrix.floatx }}
name: Ubuntu py${{ matrix.python-version }} linker=${{ matrix.linker }}
fail_ci_if_error: false

windows:
needs: changes
if: ${{ needs.changes.outputs.changes == 'true' }}
strategy:
matrix:
os: [windows-latest]
floatx: [float64]
linker: [cvm, numba]
python-version: ["3.11"]
test-subset:
- tests/variational/test_approximations.py tests/variational/test_callbacks.py tests/variational/test_inference.py tests/variational/test_opvi.py tests/test_initial_point.py
@@ -195,10 +190,10 @@ jobs:
- tests/step_methods/test_metropolis.py tests/step_methods/test_slicer.py tests/step_methods/hmc/test_nuts.py tests/step_methods/test_compound.py tests/step_methods/hmc/test_hmc.py tests/step_methods/test_state.py

fail-fast: false
runs-on: ${{ matrix.os }}
runs-on: windows-latest
env:
TEST_SUBSET: ${{ matrix.test-subset }}
PYTENSOR_FLAGS: floatX=${{ matrix.floatx }}
PYTENSOR_FLAGS: linker=${{ matrix.linker }}
defaults:
run:
shell: bash -leo pipefail {0}
@@ -225,18 +220,17 @@ jobs:
- name: Upload coverage to Codecov
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
with:
token: ${{ secrets.CODECOV_TOKEN }} # use token for more robust uploads
token: ${{ secrets.CODECOV_TOKEN }}
env_vars: TEST_SUBSET
name: ${{ matrix.os }} ${{ matrix.floatx }}
name: Windows py${{ matrix.python-version }} linker=${{ matrix.linker }}
fail_ci_if_error: false

macos:
needs: changes
if: ${{ needs.changes.outputs.changes == 'true' }}
strategy:
matrix:
os: [macos-latest]
floatx: [float64]
linker: [cvm, numba]
python-version: ["3.14"]
test-subset:
- |
@@ -253,10 +247,10 @@ jobs:
tests/backends/test_zarr.py
tests/variational/test_updates.py
fail-fast: false
runs-on: ${{ matrix.os }}
runs-on: macos-latest
env:
TEST_SUBSET: ${{ matrix.test-subset }}
PYTENSOR_FLAGS: floatX=${{ matrix.floatx }}
PYTENSOR_FLAGS: linker=${{ matrix.linker }}
defaults:
run:
shell: bash -leo pipefail {0}
@@ -285,16 +279,15 @@ jobs:
with:
token: ${{ secrets.CODECOV_TOKEN }} # use token for more robust uploads
env_vars: TEST_SUBSET
name: ${{ matrix.os }} ${{ matrix.floatx }}
name: MacOS py${{ matrix.python-version }} linker=${{ matrix.linker }}
fail_ci_if_error: false

alternative_backends:
needs: changes
if: ${{ needs.changes.outputs.changes == 'true' }}
strategy:
matrix:
os: [ubuntu-latest]
floatx: [float64]
linker: [cvm, numba]
# nutpie depends on PyMC, and it will require an extra release cycle to support
# the next PyMC release and therefore Python 3.14.
python-version: ["3.13"]
@@ -305,10 +298,10 @@ jobs:
tests/sampling/test_mcmc_external.py
fail-fast: false
runs-on: ${{ matrix.os }}
runs-on: ubuntu-latest
env:
TEST_SUBSET: ${{ matrix.test-subset }}
PYTENSOR_FLAGS: floatX=${{ matrix.floatx }}
PYTENSOR_FLAGS: linker=${{ matrix.linker }}
defaults:
run:
shell: bash -leo pipefail {0}
@@ -337,24 +330,24 @@ jobs:
with:
token: ${{ secrets.CODECOV_TOKEN }} # use token for more robust uploads
env_vars: TEST_SUBSET
name: Alternative backend tests - ${{ matrix.os }} ${{ matrix.floatx }}
name: Alternative backends py${{ matrix.python-version }} linker=${{ matrix.linker }}
fail_ci_if_error: false

float32:
needs: changes
if: ${{ needs.changes.outputs.changes == 'true' }}
strategy:
matrix:
linker: [cvm, numba]
os: [windows-latest]
floatx: [float32]
python-version: ["3.14"]
test-subset:
- tests/sampling/test_mcmc.py tests/ode/test_ode.py tests/ode/test_utils.py tests/distributions/test_transform.py
fail-fast: false
runs-on: ${{ matrix.os }}
env:
TEST_SUBSET: ${{ matrix.test-subset }}
PYTENSOR_FLAGS: floatX=${{ matrix.floatx }}
PYTENSOR_FLAGS: floatX=float32
defaults:
run:
shell: bash -leo pipefail {0}
@@ -383,19 +376,19 @@ jobs:
with:
token: ${{ secrets.CODECOV_TOKEN }} # use token for more robust uploads
env_vars: TEST_SUBSET
name: ${{ matrix.os }} ${{ matrix.floatx }}
name: float32 ${{ matrix.os }} py${{ matrix.python-version }} linker=${{ matrix.linker }}
fail_ci_if_error: false

all_tests:
if: ${{ always() }}
runs-on: ubuntu-latest
needs: [ changes, ubuntu, windows, macos, alternative_backends, float32 ]
needs: [changes, ubuntu, windows, macos, alternative_backends, float32]
steps:
- name: Check build matrix status
if: ${{ needs.changes.outputs.changes == 'true' &&
( needs.ubuntu.result != 'success' ||
needs.windows.result != 'success' ||
needs.macos.result != 'success' ||
needs.alternative_backends.result != 'success' ||
needs.float32.result != 'success' ) }}
( needs.ubuntu.result != 'success' ||
needs.windows.result != 'success' ||
needs.macos.result != 'success' ||
needs.alternative_backends.result != 'success' ||
needs.float32.result != 'success' ) }}
run: exit 1
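
The tests.yml changes above drop the one-element `os`/`floatx` matrix axes (hardcoding `runs-on` per job) and instead parametrize the jobs over `linker: [cvm, numba]`, forwarded to PyTensor through `PYTENSOR_FLAGS: linker=...`; only the dedicated `float32` job still sets `floatX`. A minimal local sketch of how that flag reaches PyTensor, assuming the installed PyTensor accepts `numba` as a `linker` value (as the new matrix does):

```python
# Minimal sketch (not part of the PR): mimic the workflow's
#   PYTENSOR_FLAGS: linker=${{ matrix.linker }}
# environment variable and confirm PyTensor picked it up.
import os

# PYTENSOR_FLAGS is only read at import time, so set it before importing pytensor.
os.environ["PYTENSOR_FLAGS"] = "linker=numba"  # or "linker=cvm", the other matrix entry

import pytensor

# Expected to print the linker requested above.
print(pytensor.config.linker)
```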
41 changes: 23 additions & 18 deletions pymc/dims/distributions/scalar.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import pytensor.xtensor as ptx
import pytensor.xtensor.random as pxr
import pytensor.xtensor.random as ptxr

from pytensor.xtensor import as_xtensor

@@ -23,7 +23,7 @@
)
from pymc.distributions.continuous import Beta as RegularBeta
from pymc.distributions.continuous import Gamma as RegularGamma
from pymc.distributions.continuous import HalfStudentTRV, flat, halfflat
from pymc.distributions.continuous import HalfCauchyRV, HalfStudentTRV, flat, halfflat


def _get_sigma_from_either_sigma_or_tau(*, sigma, tau):
@@ -40,23 +40,23 @@ def _get_sigma_from_either_sigma_or_tau(*, sigma, tau):


class Flat(DimDistribution):
xrv_op = pxr.as_xrv(flat)
xrv_op = ptxr.as_xrv(flat)

@classmethod
def dist(cls, **kwargs):
return super().dist([], **kwargs)


class HalfFlat(PositiveDimDistribution):
xrv_op = pxr.as_xrv(halfflat, [], ())
xrv_op = ptxr.as_xrv(halfflat, [], ())

@classmethod
def dist(cls, **kwargs):
return super().dist([], **kwargs)


class Normal(DimDistribution):
xrv_op = pxr.normal
xrv_op = ptxr.normal

@classmethod
def dist(cls, mu=0, sigma=None, *, tau=None, **kwargs):
@@ -65,7 +65,7 @@ def dist(cls, mu=0, sigma=None, *, tau=None, **kwargs):


class HalfNormal(PositiveDimDistribution):
xrv_op = pxr.halfnormal
xrv_op = ptxr.halfnormal

@classmethod
def dist(cls, sigma=None, *, tau=None, **kwargs):
@@ -74,7 +74,7 @@ def dist(cls, sigma=None, *, tau=None, **kwargs):


class LogNormal(PositiveDimDistribution):
xrv_op = pxr.lognormal
xrv_op = ptxr.lognormal

@classmethod
def dist(cls, mu=0, sigma=None, *, tau=None, **kwargs):
@@ -83,7 +83,7 @@ def dist(cls, mu=0, sigma=None, *, tau=None, **kwargs):


class StudentT(DimDistribution):
xrv_op = pxr.t
xrv_op = ptxr.t

@classmethod
def dist(cls, nu, mu=0, sigma=None, *, lam=None, **kwargs):
Expand All @@ -102,28 +102,33 @@ def xrv_op(self, nu, sigma, core_dims=None, extra_dims=None, rng=None):
nu = as_xtensor(nu)
sigma = as_xtensor(sigma)
core_rv = HalfStudentTRV.rv_op(nu=nu.values, sigma=sigma.values).owner.op
xop = pxr.as_xrv(core_rv)
xop = ptxr.as_xrv(core_rv)
return xop(nu, sigma, core_dims=core_dims, extra_dims=extra_dims, rng=rng)


class Cauchy(DimDistribution):
xrv_op = pxr.cauchy
xrv_op = ptxr.cauchy

@classmethod
def dist(cls, alpha, beta, **kwargs):
return super().dist([alpha, beta], **kwargs)


class HalfCauchy(PositiveDimDistribution):
xrv_op = pxr.halfcauchy

@classmethod
def dist(cls, beta, **kwargs):
return super().dist([0.0, beta], **kwargs)
return super().dist([beta], **kwargs)

@classmethod
def xrv_op(self, beta, core_dims, extra_dims=None, rng=None):
beta = as_xtensor(beta)
core_rv = HalfCauchyRV.rv_op(beta=beta.values).owner.op
xop = ptxr.as_xrv(core_rv)
return xop(beta, core_dims=core_dims, extra_dims=extra_dims, rng=rng)


class Beta(UnitDimDistribution):
xrv_op = pxr.beta
xrv_op = ptxr.beta

@classmethod
def dist(cls, alpha=None, beta=None, *, mu=None, sigma=None, nu=None, **kwargs):
@@ -132,15 +137,15 @@ def dist(cls, alpha=None, beta=None, *, mu=None, sigma=None, nu=None, **kwargs):


class Laplace(DimDistribution):
xrv_op = pxr.laplace
xrv_op = ptxr.laplace

@classmethod
def dist(cls, mu=0, b=1, **kwargs):
return super().dist([mu, b], **kwargs)


class Exponential(PositiveDimDistribution):
xrv_op = pxr.exponential
xrv_op = ptxr.exponential

@classmethod
def dist(cls, lam=None, *, scale=None, **kwargs):
@@ -154,7 +159,7 @@ def dist(cls, lam=None, *, scale=None, **kwargs):


class Gamma(PositiveDimDistribution):
xrv_op = pxr.gamma
xrv_op = ptxr.gamma

@classmethod
def dist(cls, alpha=None, beta=None, *, mu=None, sigma=None, **kwargs):
@@ -173,7 +178,7 @@ def dist(cls, alpha=None, beta=None, *, mu=None, sigma=None, **kwargs):


class InverseGamma(PositiveDimDistribution):
xrv_op = pxr.invgamma
xrv_op = ptxr.invgamma

@classmethod
def dist(cls, alpha=None, beta=None, *, mu=None, sigma=None, **kwargs):
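
The scalar.py changes above rename the `pytensor.xtensor.random` alias from `pxr` to `ptxr` and rework `HalfCauchy` along the lines of `HalfStudentT`: instead of pointing `xrv_op` at the generic `ptxr.halfcauchy` op (the likely reason `dist` previously had to inject a `0.0` location), the class now builds the beta-parametrized core `HalfCauchyRV` and lifts it with `as_xrv` at call time. A stripped-down sketch of that lifting pattern, using a hypothetical helper name and generic keyword parameters rather than code from the PR:

```python
# Sketch of the wrap-a-core-RV pattern used by HalfStudentT and, after this PR, HalfCauchy.
# `make_xrv` and `core_rv_cls` are illustrative names, not part of the PyMC codebase.
import pytensor.xtensor.random as ptxr
from pytensor.xtensor import as_xtensor


def make_xrv(core_rv_cls, core_dims=None, extra_dims=None, rng=None, **params):
    # Wrap each parameter as an xtensor variable; `.values` then exposes the plain
    # tensor the core RV constructor expects.
    params = {name: as_xtensor(value) for name, value in params.items()}
    # Build the core (dims-unaware) random variable and grab its Op...
    core_rv = core_rv_cls.rv_op(**{name: p.values for name, p in params.items()}).owner.op
    # ...then lift it to an xtensor-aware Op that understands named dimensions.
    xop = ptxr.as_xrv(core_rv)
    return xop(*params.values(), core_dims=core_dims, extra_dims=extra_dims, rng=rng)
```

Calling this as `make_xrv(HalfCauchyRV, beta=beta, core_dims=core_dims)` mirrors what the new `HalfCauchy.xrv_op` above does inline.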