When batch normalization is enabled (it needs more than one sample per step) and drop_last=False, training fails on the last batch of an epoch whenever the number of training samples modulo the mini-batch size is 1, because that final batch contains only a single sample (full traceback below).
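The underlying behaviour is easy to reproduce with plain PyTorch, independent of velovi; a minimal sketch (the 256 here is just the encoder's hidden width reported in the traceback):

```python
# Minimal sketch of the failure mode in plain PyTorch (independent of velovi):
# BatchNorm1d in training mode cannot compute batch statistics from one sample.
import torch
import torch.nn as nn

bn = nn.BatchNorm1d(num_features=256)
bn.train()  # training mode, so batch statistics are computed per forward pass

single_sample = torch.randn(1, 256)  # a final batch of size 1, as in the traceback
try:
    bn(single_sample)
except ValueError as err:
    print(err)  # Expected more than 1 value per channel when training, got input size torch.Size([1, 256])
```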
```
Traceback (most recent call last):
File "/cluster/home/ouliang/daisy/rna_velocity/allen_scvelo.py", line 31, in
vae.train()
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/velovi/_model.py", line 195, in train
return runner()
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/scvi/train/_trainrunner.py", line 99, in __call__
self.trainer.fit(self.training_plan, self.data_splitter)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/scvi/train/_trainer.py", line 186, in fit
super().fit(*args, **kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/trainer/trainer.py", line 532, in fit
call._call_and_handle_interrupt(
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/trainer/call.py", line 43, in _call_and_handle_interrupt
return trainer_fn(*args, **kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/trainer/trainer.py", line 571, in _fit_impl
self._run(model, ckpt_path=ckpt_path)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/trainer/trainer.py", line 980, in _run
results = self._run_stage()
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/trainer/trainer.py", line 1023, in _run_stage
self.fit_loop.run()
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/loops/fit_loop.py", line 202, in run
self.advance()
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/loops/fit_loop.py", line 355, in advance self.epoch_loop.run(self._data_fetcher)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/loops/training_epoch_loop.py", line 133, in run
self.advance(data_fetcher)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/loops/training_epoch_loop.py", line 219, in advance
batch_output = self.automatic_optimization.run(trainer.optimizers[0], kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/loops/optimization/automatic.py", line 188, in run
self._optimizer_step(kwargs.get("batch_idx", 0), closure)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/loops/optimization/automatic.py", line 266, in _optimizer_step
call._call_lightning_module_hook(
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/trainer/call.py", line 146, in _call_lightning_module_hook
output = fn(*args, **kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/core/module.py", line 1276, in optimizer_step
optimizer.step(closure=optimizer_closure)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/core/optimizer.py", line 161, in step
step_output = self._strategy.optimizer_step(self._optimizer, closure, **kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/strategies/strategy.py", line 231, in optimizer_step
return self.precision_plugin.optimizer_step(optimizer, model=model, closure=closure, **kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/plugins/precision/precision_plugin.py", line 116, in optimizer_step
return optimizer.step(closure=closure, **kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/torch/optim/optimizer.py", line 280, in wrapper
out = func(*args, **kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/torch/optim/optimizer.py", line 33, in _use_grad
ret = func(self, *args, **kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/torch/optim/adamw.py", line 148, in step
loss = closure()
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/plugins/precision/precision_plugin.py", line 103, in _wrap_closure
closure_result = closure()
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/loops/optimization/automatic.py", line 142, in __call__
self._result = self.closure(*args, **kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context return func(*args, **kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/loops/optimization/automatic.py", line 128, in closure
step_output = self._step_fn()
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/loops/optimization/automatic.py", line 315, in _training_step
training_step_output = call._call_strategy_hook(trainer, "training_step", *kwargs.values())
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/trainer/call.py", line 294, in _call_strategy_hook
output = fn(*args, **kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/lightning/pytorch/strategies/strategy.py", line 380, in training_step
return self.model.training_step(*args, **kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/scvi/train/_trainingplans.py", line 342, in training_step
_, _, scvi_loss = self.forward(batch, loss_kwargs=self.loss_kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/scvi/train/_trainingplans.py", line 278, in forward
return self.module(*args, **kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/scvi/module/base/_decorators.py", line 32, in auto_transfer_args
return fn(self, *args, **kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/scvi/module/base/_base_module.py", line 205, in forward
return _generic_forward(
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/scvi/module/base/_base_module.py", line 749, in _generic_forward
inference_outputs = module.inference(**inference_inputs, **inference_kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/scvi/module/base/_decorators.py", line 32, in auto_transfer_args
return fn(self, *args, **kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/velovi/_module.py", line 367, in inference
qz_m, qz_v, z = self.z_encoder(encoder_input)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/scvi/nn/_base_components.py", line 286, in forward
q = self.encoder(x, *cat_list)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/scvi/nn/_base_components.py", line 175, in forward
x = layer(x)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/torch/nn/modules/batchnorm.py", line 171, in forward
return F.batch_norm(
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/torch/nn/functional.py", line 2448, in batch_norm
_verify_batch_size(input.size())
File "/cluster/home/ouliang/mambaforge/lib/python3.10/site-packages/torch/nn/functional.py", line 2416, in _verify_batch_size
raise ValueError("Expected more than 1 value per channel when training, got input size {}".format(size))
ValueError: Expected more than 1 value per channel when training, got input size torch.Size([1, 256])
```
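Until the data splitter drops (or merges) a trailing single-sample batch, a workaround is to pick a mini-batch size for which the training-set size leaves a remainder other than 1. A rough sketch, assuming vae.train() accepts a batch_size argument as other scvi-tools models do, and using adata for the AnnData object the model was set up with:

```python
# Workaround sketch (assumptions: vae.train() forwards batch_size to the data
# splitter, and adata is the AnnData object the model was set up with).
# Note: train_size < 1.0 changes the effective number of training cells, so this
# only approximates the split scvi-tools actually uses.
n_cells = adata.n_obs
batch_size = 256
if n_cells % batch_size == 1:
    batch_size -= 1  # shift the remainder away from 1 so no batch holds a single cell
vae.train(batch_size=batch_size)
```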
Please fix this problem when you get a chance.