
Source code for pytorch_lightning.loggers.wandb

# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Weights and Biases
------------------
"""
import os
from argparse import Namespace
from typing import Any, Dict, List, Optional, Union

import torch.nn as nn

try:
    import wandb
    from wandb.wandb_run import Run
except ImportError:  # pragma: no-cover
    wandb = None
    Run = None

from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_experiment
from pytorch_lightning.utilities import rank_zero_only


class WandbLogger(LightningLoggerBase):
    r"""
    Log using `Weights and Biases <https://www.wandb.com/>`_. Install it with pip:

    .. code-block:: bash

        pip install wandb

    Args:
        name: Display name for the run.
        save_dir: Path where data is saved.
        offline: Run offline (data can be streamed later to wandb servers).
        id: Sets the version, mainly used to resume a previous run.
        anonymous: Enables or explicitly disables anonymous logging.
        version: Sets the version, mainly used to resume a previous run.
        project: The name of the project to which this run will belong.
        log_model: Save checkpoints in wandb dir to upload on W&B servers.
        experiment: WandB experiment object.
        \**kwargs: Additional arguments like `entity`, `group`, `tags`, etc. used by
            :func:`wandb.init` can be passed as keyword arguments in this logger.

    Example::

        from pytorch_lightning.loggers import WandbLogger
        from pytorch_lightning import Trainer
        wandb_logger = WandbLogger()
        trainer = Trainer(logger=wandb_logger)

    See Also:
        - `Tutorial <https://app.wandb.ai/cayush/pytorchlightning/reports/
          Use-Pytorch-Lightning-with-Weights-%26-Biases--Vmlldzo2NjQ1Mw>`__
          on how to use W&B with Pytorch Lightning.

    """

    def __init__(
        self,
        name: Optional[str] = None,
        save_dir: Optional[str] = None,
        offline: bool = False,
        id: Optional[str] = None,
        anonymous: bool = False,
        version: Optional[str] = None,
        project: Optional[str] = None,
        log_model: bool = False,
        experiment=None,
        **kwargs
    ):
        if wandb is None:
            raise ImportError('You want to use `wandb` logger which is not installed yet,'  # pragma: no-cover
                              ' install it with `pip install wandb`.')
        super().__init__()
        self._name = name
        self._save_dir = save_dir
        self._anonymous = 'allow' if anonymous else None
        self._id = version or id
        self._project = project
        self._experiment = experiment
        self._offline = offline
        self._log_model = log_model
        self._kwargs = kwargs

    def __getstate__(self):
        state = self.__dict__.copy()
        # args needed to reload correct experiment
        state['_id'] = self._experiment.id if self._experiment is not None else None
        # cannot be pickled
        state['_experiment'] = None
        return state

    @property
    @rank_zero_experiment
    def experiment(self) -> Run:
        r"""
        Actual wandb object. To use wandb features in your
        :class:`~pytorch_lightning.core.lightning.LightningModule` do the following.

        Example::

            self.logger.experiment.some_wandb_function()

        """
        if self._experiment is None:
            if self._offline:
                os.environ['WANDB_MODE'] = 'dryrun'
            self._experiment = wandb.init(
                name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous,
                reinit=True, id=self._id, resume='allow', **self._kwargs)
        # save checkpoints in wandb dir to upload on W&B servers
        if self._log_model:
            self._save_dir = self._experiment.dir
        return self._experiment

    def watch(self, model: nn.Module, log: str = 'gradients', log_freq: int = 100):
        self.experiment.watch(model, log=log, log_freq=log_freq)

    @rank_zero_only
    def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
        params = self._convert_params(params)
        params = self._flatten_dict(params)
        self.experiment.config.update(params, allow_val_change=True)

    @rank_zero_only
    def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
        assert rank_zero_only.rank == 0, 'experiment tried to log from global_rank != 0'
        self.experiment.log({'global_step': step, **metrics} if step is not None else metrics)

    @property
    def save_dir(self) -> Optional[str]:
        return self._save_dir

    @property
    def name(self) -> Optional[str]:
        # don't create an experiment if we don't have one
        return self._experiment.project_name() if self._experiment else self._name

    @property
    def version(self) -> Optional[str]:
        # don't create an experiment if we don't have one
        return self._experiment.id if self._experiment else self._id
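For reference, a minimal usage sketch (not part of the module above): it assumes `wandb` and `pytorch_lightning` are installed, and `MyLitModel` is a placeholder for your own LightningModule. As the docstring describes, extra keyword arguments such as `entity`, `group`, and `tags` are forwarded to `wandb.init()` through ``**kwargs``.

    # Minimal usage sketch; `MyLitModel` is hypothetical.
    from pytorch_lightning import Trainer
    from pytorch_lightning.loggers import WandbLogger

    wandb_logger = WandbLogger(
        name='baseline-run',      # display name shown in the W&B UI
        project='my-project',     # W&B project this run belongs to
        offline=True,             # log locally; stream to wandb servers later
        tags=['baseline'],        # forwarded to wandb.init() via **kwargs
    )

    model = MyLitModel()

    # Ask W&B to track gradients of the model every 100 steps (wraps wandb.watch).
    wandb_logger.watch(model, log='gradients', log_freq=100)

    trainer = Trainer(logger=wandb_logger, max_epochs=3)
    trainer.fit(model)  # hyperparameters and metrics flow through the logger

To resume a previous run, pass its run id through `version` (or `id`); as the `experiment` property shows, the logger hands it to `wandb.init()` together with `resume='allow'`.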
