diff --git a/pytensor/graph/basic.py b/pytensor/graph/basic.py
index 512f0ef3ab..6dc297e33d 100644
--- a/pytensor/graph/basic.py
+++ b/pytensor/graph/basic.py
@@ -270,11 +270,11 @@ def clone_with_new_inputs(
 
         # Some Ops like Alloc require the node to always be rebuilt in non-strict mode
         # as the output type depends on the input values and not just their types
-        output_type_depends_on_input_value = self.op._output_type_depends_on_input_value
+        data_dependent_output_shape = self.op.data_dependent_output_shape
         for i, (curr, new) in enumerate(zip(self.inputs, new_inputs, strict=True)):
             # Check if the input type changed or if the Op has output types that depend on input values
-            if (curr.type != new.type) or output_type_depends_on_input_value:
+            if (curr.type != new.type) or data_dependent_output_shape:
                 # In strict mode, the cloned graph is assumed to be mathematically equivalent to the original one.
                 # We only need to rebuild a node when the new input has a different, but compatible, type.
                 # This can happen e.g., when we provide a new input with a more specialized static shape.
diff --git a/pytensor/graph/op.py b/pytensor/graph/op.py
index 690bb44df5..db90b348c1 100644
--- a/pytensor/graph/op.py
+++ b/pytensor/graph/op.py
@@ -195,7 +195,7 @@ class Op(MetaObject):
     itypes: Sequence["Type"] | None = None
     otypes: Sequence["Type"] | None = None
 
-    _output_type_depends_on_input_value = False
+    data_dependent_output_shape = False
     """
     Whether the static output type depends on the inferred value of one of the inputs.
     (e.g, via constant folding or static shape inference).
diff --git a/pytensor/tensor/basic.py b/pytensor/tensor/basic.py
index 26bd34692b..b981679823 100644
--- a/pytensor/tensor/basic.py
+++ b/pytensor/tensor/basic.py
@@ -1362,7 +1362,7 @@ def triu_indices_from(
 
 
 class Eye(Op):
-    _output_type_depends_on_input_value = True
+    data_dependent_output_shape = True
     __props__ = ("dtype",)
 
     def __init__(self, dtype=None):
@@ -1577,7 +1577,7 @@ class Alloc(COp):
     """
 
     _f16_ok = True
-    _output_type_depends_on_input_value = True
+    data_dependent_output_shape = True
     __props__ = ()
 
@@ -4213,7 +4213,7 @@ def perform(self, node, inputs, outputs):
 class AllocEmpty(COp):
     """Implement Alloc on the cpu, but without initializing memory."""
 
-    _output_type_depends_on_input_value = True
+    data_dependent_output_shape = True
     __props__ = ("dtype",)
     params_type = ParamsType(typecode=int32)
diff --git a/pytensor/tensor/random/op.py b/pytensor/tensor/random/op.py
index c76d250c9e..b20b96cea3 100644
--- a/pytensor/tensor/random/op.py
+++ b/pytensor/tensor/random/op.py
@@ -41,7 +41,7 @@ class RandomVariable(Op):
 
     """
 
-    _output_type_depends_on_input_value = True
+    data_dependent_output_shape = True
     __props__ = ("name", "signature", "dtype", "inplace")
 
     default_output = 1
diff --git a/pytensor/tensor/shape.py b/pytensor/tensor/shape.py
index 8913d6fb4d..38bda6fb61 100644
--- a/pytensor/tensor/shape.py
+++ b/pytensor/tensor/shape.py
@@ -398,7 +398,7 @@ class SpecifyShape(COp):
     view_map = {0: [0]}
     __props__ = ()
     _f16_ok = True
-    _output_type_depends_on_input_value = True
+    data_dependent_output_shape = True
 
     def make_node(self, x, *shape):
         x = ptb.as_tensor_variable(x)
@@ -619,7 +619,7 @@ class Reshape(COp):
     view_map = {0: [0]}  # output 0 is potentially aliased to inputs [0]
     _f16_ok = True
 
-    _output_type_depends_on_input_value = True
+    data_dependent_output_shape = True
     check_input = False
     __props__ = ("ndim",)
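
For context, a minimal sketch of how a downstream `Op` subclass would opt into the renamed `data_dependent_output_shape` flag so that `Apply.clone_with_new_inputs` rebuilds its nodes in non-strict mode. Only the flag name comes from this diff; `ArangeLike`, its input, and its `perform` body are hypothetical.

```python
import numpy as np

import pytensor.tensor as pt
from pytensor.graph.basic import Apply
from pytensor.graph.op import Op
from pytensor.tensor.type import TensorType


class ArangeLike(Op):
    """Hypothetical Op whose output *shape* depends on the input's value."""

    # Renamed flag from this diff: the static output type cannot be trusted
    # across input substitutions, so clone_with_new_inputs rebuilds the node
    # (in non-strict mode) instead of reusing the previously inferred type.
    data_dependent_output_shape = True

    __props__ = ()

    def make_node(self, stop):
        stop = pt.as_tensor_variable(stop)
        # Output length is only known when `stop` is a constant, hence data-dependent.
        out = TensorType(dtype="int64", shape=(None,))()
        return Apply(self, [stop], [out])

    def perform(self, node, inputs, output_storage):
        (stop,) = inputs
        output_storage[0][0] = np.arange(int(stop), dtype="int64")
```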