1 parent fb641de commit 5998c5e
torchrl/objectives/value/utils.py
@@ -286,7 +286,9 @@ def _split_and_pad_sequence(
 
     # int16 supports length up to 32767
     dtype = (
-        torch.int16 if tensor.shape[-2] < torch.iinfo(torch.int16).max else torch.int32
+        torch.int16
+        if tensor.size(time_dim) < torch.iinfo(torch.int16).max
+        else torch.int32
     )
     arange = torch.arange(max_seq_len, device=tensor.device, dtype=dtype).unsqueeze(0)
     mask = arange < splits.unsqueeze(1)
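
For context, a minimal sketch of the corrected dtype selection. The tensor shape and time_dim value below are hypothetical, chosen only to show why hardcoding shape[-2] could read the wrong dimension:

import torch

# Hypothetical [batch, time] tensor whose time dimension is dim 1, not -2.
# The old expression tensor.shape[-2] would read the batch size (2) and keep
# int16 even though the sequence exceeds 32767; tensor.size(time_dim) reads
# the actual sequence length and falls back to int32.
tensor = torch.zeros(2, 40_000)
time_dim = 1

dtype = (
    torch.int16
    if tensor.size(time_dim) < torch.iinfo(torch.int16).max
    else torch.int32
)
print(dtype)  # torch.int32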