You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
run_owod.sh 第 1、2、3 步没有问题。自定义数据集 task1 有 25 类,task2 有 5 类。第 4 步训练 task1 时没有问题,但训练 task2 时在 `cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.num_classes)` 处报错:RuntimeError: shape '[16, -1, 32]' is invalid for input of size 44800
具体如下:
Traceback (most recent call last):
File "/home/wl/code/opensource/open_detect/YOLO-UniOW/./tools/train_owod.py", line 188, in
main()
File "/home/wl/code/opensource/open_detect/YOLO-UniOW/./tools/train_owod.py", line 183, in main
model = runner.train_loop.run() # type: ignore
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/mmengine/runner/loops.py", line 96, in run
self.run_epoch()
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/mmengine/runner/loops.py", line 112, in run_epoch
self.run_iter(idx, data_batch)
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/mmengine/runner/loops.py", line 128, in run_iter
outputs = self.runner.model.train_step(
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/mmengine/model/wrappers/distributed.py", line 121, in train_step
losses = self._run_forward(data, mode='loss')
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/mmengine/model/wrappers/distributed.py", line 161, in _run_forward
results = self(**data, mode=mode)
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/torch/nn/parallel/distributed.py", line 1519, in forward
else self._run_ddp_forward(*inputs, **kwargs)
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/torch/nn/parallel/distributed.py", line 1355, in _run_ddp_forward
return self.module(*inputs, **kwargs) # type: ignore[index]
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/mmdet/models/detectors/base.py", line 92, in forward
return self.loss(inputs, data_samples)
File "/home/wl/code/opensource/open_detect/YOLO-UniOW/yolo_world/models/detectors/yolo_world_owod.py", line 95, in loss
losses = self.bbox_head.loss(img_feats, txt_feats,
File "/home/wl/code/opensource/open_detect/YOLO-UniOW/yolo_world/models/dense_heads/yolov10_world_head.py", line 376, in loss
losses = self.loss_by_feat([one2many_loss_inputs, one2one_loss_inputs])
File "/home/wl/code/opensource/open_detect/YOLO-UniOW/yolo_world/models/dense_heads/yolov10_world_head.py", line 471, in loss_by_feat
losses = self.one2many_loss_by_feat(*one2many_loss_inputs)
File "/home/wl/code/opensource/open_detect/YOLO-UniOW/third_party/mmyolo/mmyolo/models/dense_heads/yolov10_head.py", line 542, in one2many_loss_by_feat
flatten_cls_preds = [
File "/home/wl/code/opensource/open_detect/YOLO-UniOW/third_party/mmyolo/mmyolo/models/dense_heads/yolov10_head.py", line 543, in
cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.num_classes)
RuntimeError: shape '[16, -1, 32]' is invalid for input of size 44800
The text was updated successfully, but these errors were encountered:
run_owod.sh 第 1、2、3 步没有问题。自定义数据集 task1 有 25 类,task2 有 5 类。第 4 步训练 task1 时没有问题,但训练 task2 时在 `cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.num_classes)` 处报错:RuntimeError: shape '[16, -1, 32]' is invalid for input of size 44800
具体如下:
Traceback (most recent call last):
File "/home/wl/code/opensource/open_detect/YOLO-UniOW/./tools/train_owod.py", line 188, in
main()
File "/home/wl/code/opensource/open_detect/YOLO-UniOW/./tools/train_owod.py", line 183, in main
model = runner.train_loop.run() # type: ignore
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/mmengine/runner/loops.py", line 96, in run
self.run_epoch()
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/mmengine/runner/loops.py", line 112, in run_epoch
self.run_iter(idx, data_batch)
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/mmengine/runner/loops.py", line 128, in run_iter
outputs = self.runner.model.train_step(
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/mmengine/model/wrappers/distributed.py", line 121, in train_step
losses = self._run_forward(data, mode='loss')
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/mmengine/model/wrappers/distributed.py", line 161, in _run_forward
results = self(**data, mode=mode)
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/torch/nn/parallel/distributed.py", line 1519, in forward
else self._run_ddp_forward(*inputs, **kwargs)
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/torch/nn/parallel/distributed.py", line 1355, in _run_ddp_forward
return self.module(*inputs, **kwargs) # type: ignore[index]
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/root/anaconda3/envs/yolouniow/lib/python3.9/site-packages/mmdet/models/detectors/base.py", line 92, in forward
return self.loss(inputs, data_samples)
File "/home/wl/code/opensource/open_detect/YOLO-UniOW/yolo_world/models/detectors/yolo_world_owod.py", line 95, in loss
losses = self.bbox_head.loss(img_feats, txt_feats,
File "/home/wl/code/opensource/open_detect/YOLO-UniOW/yolo_world/models/dense_heads/yolov10_world_head.py", line 376, in loss
losses = self.loss_by_feat([one2many_loss_inputs, one2one_loss_inputs])
File "/home/wl/code/opensource/open_detect/YOLO-UniOW/yolo_world/models/dense_heads/yolov10_world_head.py", line 471, in loss_by_feat
losses = self.one2many_loss_by_feat(*one2many_loss_inputs)
File "/home/wl/code/opensource/open_detect/YOLO-UniOW/third_party/mmyolo/mmyolo/models/dense_heads/yolov10_head.py", line 542, in one2many_loss_by_feat
flatten_cls_preds = [
File "/home/wl/code/opensource/open_detect/YOLO-UniOW/third_party/mmyolo/mmyolo/models/dense_heads/yolov10_head.py", line 543, in
cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.num_classes)
RuntimeError: shape '[16, -1, 32]' is invalid for input of size 44800
The text was updated successfully, but these errors were encountered: