@@ -1253,41 +1253,52 @@ def test_avoid_useless_subtensors(self):
     @pytest.mark.parametrize("linker", ["cvm", "py"])
     def test_perform(self, linker):
-        a = pytensor.shared(5)
+        a = pytensor.shared(np.full((3, 1, 1), 5))
+        s_0 = iscalar("s_0")
         s_1 = iscalar("s_1")
-        shape = (s_1, 1)
+        shape = (s_0, s_1, 1)

         bcast_res = broadcast_to(a, shape)
-        assert bcast_res.broadcastable == (False, True)
+        assert bcast_res.broadcastable == (False, False, True)

         bcast_fn = pytensor.function(
-            [s_1], bcast_res, mode=Mode(optimizer=None, linker=linker)
+            [s_0, s_1], bcast_res, mode=Mode(optimizer=None, linker=linker)
         )
         bcast_fn.vm.allow_gc = False

-        bcast_at = bcast_fn(4)
-        bcast_np = np.broadcast_to(5, (4, 1))
+        bcast_at = bcast_fn(3, 4)
+        bcast_np = np.broadcast_to(5, (3, 4, 1))

         assert np.array_equal(bcast_at, bcast_np)

-        bcast_var = bcast_fn.maker.fgraph.outputs[0].owner.inputs[0]
-        bcast_in = bcast_fn.vm.storage_map[a]
-        bcast_out = bcast_fn.vm.storage_map[bcast_var]
+        with pytest.raises(ValueError):
+            bcast_fn(5, 4)

         if linker != "py":
+            bcast_var = bcast_fn.maker.fgraph.outputs[0].owner.inputs[0]
+            bcast_in = bcast_fn.vm.storage_map[a]
+            bcast_out = bcast_fn.vm.storage_map[bcast_var]
             assert np.shares_memory(bcast_out[0], bcast_in[0])

+    def test_make_node_error_handling(self):
+        with pytest.raises(
+            ValueError,
+            match="Broadcast target shape has 1 dims, which is shorter than input with 2 dims",
+        ):
+            broadcast_to(at.zeros((3, 4)), (5,))
+
     @pytest.mark.skipif(
         not config.cxx, reason="G++ not available, so we need to skip this test."
     )
-    def test_memory_leak(self):
+    @pytest.mark.parametrize("valid", (True, False))
+    def test_memory_leak(self, valid):
         import gc
         import tracemalloc

         from pytensor.link.c.cvm import CVM

         n = 100_000
-        x = pytensor.shared(np.ones(n, dtype=np.float64))
+        x = pytensor.shared(np.ones((1, n), dtype=np.float64))
         y = broadcast_to(x, (5, n))

         f = pytensor.function([], y, mode=Mode(optimizer=None, linker="cvm"))
@@ -1303,8 +1314,17 @@ def test_memory_leak(self):
         blocks_last = None
         block_diffs = []
         for i in range(1, 50):
-            x.set_value(np.ones(n))
-            _ = f()
+            if valid:
+                x.set_value(np.ones((1, n)))
+                _ = f()
+            else:
+                x.set_value(np.ones((2, n)))
+                try:
+                    _ = f()
+                except ValueError:
+                    pass
+                else:
+                    raise RuntimeError("Should have failed")
             _ = gc.collect()
             blocks_i, _ = tracemalloc.get_traced_memory()
             if blocks_last is not None:
@@ -1313,7 +1333,7 @@ def test_memory_leak(self):
             blocks_last = blocks_i

         tracemalloc.stop()
-        assert np.allclose(np.mean(block_diffs), 0)
+        assert np.all(np.array(block_diffs) <= (0 + 1e-8))

     @pytest.mark.parametrize(
         "fn,input_dims",