diff --git a/src/lightning/pytorch/CHANGELOG.md b/src/lightning/pytorch/CHANGELOG.md
index e3b847490a4c6..6ce3c05b54d44 100644
--- a/src/lightning/pytorch/CHANGELOG.md
+++ b/src/lightning/pytorch/CHANGELOG.md
@@ -232,6 +232,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 - Fixed exporting `__version__` in `__init__` ([#19221](https://github.com/Lightning-AI/lightning/pull/19221))
 
+- Fixed an issue preventing the user from loading a GPU-trained model with `model.load_from_checkpoint()` on a CPU-only machine with a CPU-only PyTorch installation ([#19024](https://github.com/Lightning-AI/lightning/pull/19024))
+
+
 ## [2.1.3] - 2023-12-21
 
 ### Changed
diff --git a/src/lightning/pytorch/core/saving.py b/src/lightning/pytorch/core/saving.py
index 09d888c56bdcd..0a7293079eb26 100644
--- a/src/lightning/pytorch/core/saving.py
+++ b/src/lightning/pytorch/core/saving.py
@@ -96,6 +96,8 @@ def _load_from_checkpoint(
 
         device = next((t for t in state_dict.values() if isinstance(t, torch.Tensor)), torch.tensor(0)).device
         assert isinstance(model, pl.LightningModule)
+        if device.type == "cpu" and model.device.type == "cpu":
+            return model
         return model.to(device)
 
     raise NotImplementedError(f"Unsupported {cls}")
diff --git a/src/lightning/pytorch/strategies/single_device.py b/src/lightning/pytorch/strategies/single_device.py
index 30224cb90be0b..cf45a1bf49755 100644
--- a/src/lightning/pytorch/strategies/single_device.py
+++ b/src/lightning/pytorch/strategies/single_device.py
@@ -76,7 +76,8 @@ def root_device(self) -> torch.device:
     @override
     def model_to_device(self) -> None:
         assert self.model is not None, "self.model must be set before self.model.to()"
-        self.model.to(self.root_device)
+        if self.model.device.type != self.root_device.type:
+            self.model.to(self.root_device)
 
     @property
     @override
diff --git a/src/lightning/pytorch/strategies/strategy.py b/src/lightning/pytorch/strategies/strategy.py
index 0a0f52e906dd5..205cfffa8cd24 100644
--- a/src/lightning/pytorch/strategies/strategy.py
+++ b/src/lightning/pytorch/strategies/strategy.py
@@ -533,7 +533,8 @@ def teardown(self) -> None:
 
         if self.lightning_module is not None:
             log.debug(f"{self.__class__.__name__}: moving model to CPU")
-            self.lightning_module.cpu()
+            if self.lightning_module.device.type != "cpu":
+                self.lightning_module.cpu()
         self.precision_plugin.teardown()
         assert self.accelerator is not None
         self.accelerator.teardown()
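
For context, the user-facing scenario this patch unblocks is loading a GPU-trained checkpoint on a machine whose PyTorch build has no CUDA support. The sketch below is illustrative only and not part of the diff; `BoringLitModel` and the checkpoint path are hypothetical placeholders.

```python
import torch
import lightning.pytorch as pl


class BoringLitModel(pl.LightningModule):
    """Hypothetical stand-in for any LightningModule that was trained on a GPU."""

    def __init__(self) -> None:
        super().__init__()
        self.layer = torch.nn.Linear(32, 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.layer(x)


# On a CPU-only machine with a CPU-only PyTorch install, map the checkpoint to CPU.
# Before this fix, the redundant device moves that the patch above now skips could
# make this call fail even though every tensor already lives on CPU.
model = BoringLitModel.load_from_checkpoint("gpu_trained.ckpt", map_location="cpu")
model.eval()
```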