|
|
|
@@ -79,7 +79,7 @@ def bool_index_compat(x, mask):
|
|
|
|
|
- For versions below 2.6, the tensor and mask are converted to NumPy arrays, the indexing
|
|
|
|
|
operation is performed using NumPy, and the result is converted back to a PaddlePaddle tensor.
|
|
|
|
|
"""
|
|
|
|
|
if satisfy_paddle_version("2.6") or isinstance(mask, (int, list)):
|
|
|
|
|
if satisfy_paddle_version("2.6") or isinstance(mask, (int, list, slice)):
|
|
|
|
|
return x[mask]
|
|
|
|
|
else:
|
|
|
|
|
x_np = x.cpu().numpy()[mask.cpu().numpy()]
|
|
|
|
@@ -389,6 +389,18 @@ def chdir(newdir: typing.Union[Path, str]):
|
|
|
|
|
os.chdir(curdir)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def move_to_device(data, device):
    """Place *data* on the requested device.

    Args:
        data: A paddle tensor (or tensor-convertible data; the `'cpu'` and
            `'gpu'`/`'cuda'` branches pass it through ``paddle.to_tensor``,
            while the fallback branch assumes it already has a ``.to``
            method — presumably a ``paddle.Tensor``; confirm with callers).
        device: Target device spec. ``None`` or ``""`` means "leave as is";
            ``'cpu'`` pins to host memory; ``'gpu'`` or ``'cuda'`` selects
            GPU 0; anything else (e.g. ``'gpu:1'``/``'cuda:1'``) is passed
            to ``data.to`` after mapping the CUDA-style name to paddle's
            ``gpu`` naming.

    Returns:
        The data on the requested device; the input object unchanged when
        *device* is ``None`` or empty.
    """
    if device is None or device == "":
        # No target given: leave the data wherever it already lives.
        return data
    elif device == 'cpu':
        return paddle.to_tensor(data, place=paddle.CPUPlace())
    elif device in ('gpu', 'cuda'):
        # BUG FIX: paddle.CUDAPlace requires an integer device id; the
        # original no-arg call raised at runtime. Default to GPU 0.
        return paddle.to_tensor(data, place=paddle.CUDAPlace(0))
    else:
        # Paddle names devices "gpu:N", not CUDA-style "cuda:N".
        device = device.replace("cuda", "gpu") if "cuda" in device else device
        return data.to(device)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def prepare_batch(batch: typing.Union[dict, list, paddle.Tensor],
|
|
|
|
|
device: str="cpu"):
|
|
|
|
|
"""Moves items in a batch (typically generated by a DataLoader as a list
|
|
|
|
@@ -413,12 +425,14 @@ def prepare_batch(batch: typing.Union[dict, list, paddle.Tensor],
|
|
|
|
|
batch = flatten(batch)
|
|
|
|
|
for key, val in batch.items():
|
|
|
|
|
try:
|
|
|
|
|
batch[key] = val.to(device)
|
|
|
|
|
# batch[key] = val.to(device)
|
|
|
|
|
batch[key] = move_to_device(val, device)
|
|
|
|
|
except:
|
|
|
|
|
pass
|
|
|
|
|
batch = unflatten(batch)
|
|
|
|
|
elif paddle.is_tensor(batch):
|
|
|
|
|
batch = batch.to(device)
|
|
|
|
|
# batch = batch.to(device)
|
|
|
|
|
batch = move_to_device(batch, device)
|
|
|
|
|
elif isinstance(batch, list):
|
|
|
|
|
for i in range(len(batch)):
|
|
|
|
|
try:
|
|
|
|
|