
Commit

remove old configs leftover from removal of py3.5/py2 (#1551)
* rm old configs leftover from removal of py3.5

* black format
Jeff Yang authored Jan 11, 2021
1 parent 3c8b781 commit 9e4a44a
Showing 19 changed files with 31 additions and 214 deletions.
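
Aside from deleting .travis.yml and bumping the conda recipe from python>=3.5 to python>=3.6, most of the 31 additions below come from the "black format" pass: a trailing comma is added after **kwargs in multi-line signatures, a style Python only accepts in function definitions from 3.6 onward. A minimal before/after sketch (hypothetical function names, not taken from this commit):

    from typing import Any

    # While Python 3.5 was supported, the comma had to be omitted:
    # "def f(..., **kwargs: Any,):" is a SyntaxError on 3.5 and older.
    def setup_something_old(
        trainer,
        log_every_iters: int = 100,
        **kwargs: Any
    ):
        ...

    # Once the project targets Python >= 3.6 only, black's "magic trailing
    # comma" is legal, so the formatter adds it after **kwargs:
    def setup_something_new(
        trainer,
        log_every_iters: int = 100,
        **kwargs: Any,
    ):
        ...
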
175 changes: 0 additions & 175 deletions .travis.yml

This file was deleted.

1 change: 0 additions & 1 deletion conda.recipe/build_and_upload.sh
@@ -26,5 +26,4 @@ conda config --set anaconda_upload no
 conda build --no-test --output-folder conda_build conda.recipe -c pytorch
 
 # Upload to Anaconda
-# We could use --all but too much platforms to uploaded
 ls conda_build/*/*.tar.bz2 | xargs -I {} anaconda -v -t $ANACONDA_TOKEN upload -u $UPLOAD_USER {}
4 changes: 2 additions & 2 deletions conda.recipe/meta.yaml
@@ -15,12 +15,12 @@ build:
 # https://conda.io/docs/user-guide/tasks/build-packages/define-metadata.html#export-runtime-requirements
 requirements:
   build:
-    - python>=3.5
+    - python>=3.6
     - setuptools
     - pytorch>=1.3
 
   run:
-    - python>=3.5
+    - python>=3.6
     - pytorch>=1.3
 
 test:
2 changes: 1 addition & 1 deletion examples/contrib/cifar10/main.py
@@ -151,7 +151,7 @@ def run(
     nproc_per_node=None,
     stop_iteration=None,
     with_trains=False,
-    **spawn_kwargs
+    **spawn_kwargs,
 ):
     """Main entry to train an model on CIFAR10 dataset.
24 changes: 12 additions & 12 deletions ignite/contrib/engines/common.py
@@ -46,7 +46,7 @@ def setup_common_training_handlers(
     stop_on_nan: bool = True,
     clear_cuda_cache: bool = True,
     save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> None:
     """Helper method to setup trainer with common handlers (it also supports distributed configuration):
@@ -149,7 +149,7 @@ def _setup_common_training_handlers(
     stop_on_nan: bool = True,
     clear_cuda_cache: bool = True,
     save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> None:
     if output_path is not None and save_handler is not None:
         raise ValueError(
@@ -237,7 +237,7 @@ def _setup_common_distrib_training_handlers(
     stop_on_nan: bool = True,
     clear_cuda_cache: bool = True,
     save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> None:
 
     _setup_common_training_handlers(
@@ -331,7 +331,7 @@ def setup_tb_logging(
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
     log_every_iters: int = 100,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> TensorboardLogger:
     """Method to setup TensorBoard logging on trainer and a list of evaluators. Logged metrics are:
@@ -363,7 +363,7 @@ def setup_visdom_logging(
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
     log_every_iters: int = 100,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> VisdomLogger:
     """Method to setup Visdom logging on trainer and a list of evaluators. Logged metrics are:
@@ -394,7 +394,7 @@ def setup_mlflow_logging(
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
     log_every_iters: int = 100,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> MLflowLogger:
     """Method to setup MLflow logging on trainer and a list of evaluators. Logged metrics are:
@@ -425,7 +425,7 @@ def setup_neptune_logging(
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
     log_every_iters: int = 100,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> NeptuneLogger:
     """Method to setup Neptune logging on trainer and a list of evaluators. Logged metrics are:
@@ -456,7 +456,7 @@ def setup_wandb_logging(
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
     log_every_iters: int = 100,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> WandBLogger:
     """Method to setup WandB logging on trainer and a list of evaluators. Logged metrics are:
@@ -487,7 +487,7 @@ def setup_plx_logging(
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
     log_every_iters: int = 100,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> PolyaxonLogger:
     """Method to setup Polyaxon logging on trainer and a list of evaluators. Logged metrics are:
@@ -518,7 +518,7 @@ def setup_trains_logging(
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
     log_every_iters: int = 100,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> TrainsLogger:
     """Method to setup Trains logging on trainer and a list of evaluators. Logged metrics are:
@@ -560,7 +560,7 @@ def gen_save_best_models_by_val_score(
     n_saved: int = 3,
     trainer: Optional[Engine] = None,
     tag: str = "val",
-    **kwargs: Any
+    **kwargs: Any,
 ) -> Checkpoint:
     """Method adds a handler to ``evaluator`` to save ``n_saved`` of best models based on the metric
     (named by ``metric_name``) provided by ``evaluator`` (i.e. ``evaluator.state.metrics[metric_name]``).
@@ -619,7 +619,7 @@ def save_best_model_by_val_score(
     n_saved: int = 3,
     trainer: Optional[Engine] = None,
     tag: str = "val",
-    **kwargs: Any
+    **kwargs: Any,
 ) -> Checkpoint:
     """Method adds a handler to ``evaluator`` to save on a disk ``n_saved`` of best models based on the metric
     (named by ``metric_name``) provided by ``evaluator`` (i.e. ``evaluator.state.metrics[metric_name]``).
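
The hunks above show only the tails of these signatures. For context, a hedged usage sketch of two of the touched helpers; argument names not visible in the hunks (output_path, model, metric_name) are assumptions about ignite's API, not something this commit states:

    import torch
    from ignite.engine import Engine
    from ignite.contrib.engines import common

    model = torch.nn.Linear(2, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    trainer = Engine(lambda engine, batch: batch)    # dummy engines for illustration
    evaluator = Engine(lambda engine, batch: batch)

    # TensorBoard logging on the trainer and one evaluator; any extra keyword
    # arguments would travel through **kwargs to the TensorboardLogger constructor.
    tb_logger = common.setup_tb_logging(
        output_path="/tmp/tb-logs",
        trainer=trainer,
        optimizers=optimizer,
        evaluators={"validation": evaluator},
        log_every_iters=100,
    )

    # Keep the 3 best checkpoints ranked by evaluator.state.metrics["accuracy"];
    # extra keyword arguments would be forwarded to the Checkpoint handler.
    best_ckpt_handler = common.save_best_model_by_val_score(
        output_path="/tmp/checkpoints",
        evaluator=evaluator,
        model=model,
        metric_name="accuracy",
        n_saved=3,
        trainer=trainer,
        tag="val",
    )
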
2 changes: 1 addition & 1 deletion ignite/contrib/handlers/param_scheduler.py
@@ -579,7 +579,7 @@ def simulate_values( # type: ignore[override]
         schedulers: List[ParamScheduler],
         durations: List[int],
         param_names: Optional[Union[List[str], Tuple[str]]] = None,
-        **kwargs: Any
+        **kwargs: Any,
     ) -> List[List[int]]:
         """Method to simulate scheduled values during num_events events.
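
The signature above matches ConcatScheduler.simulate_values. A hedged sketch of how it can be called to preview parameter values without running an engine; the two cyclical schedulers and the event counts are illustrative, not from the diff:

    import torch
    from ignite.contrib.handlers import ConcatScheduler, LinearCyclicalScheduler

    optimizer = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
    warmup = LinearCyclicalScheduler(optimizer, "lr", start_value=0.0, end_value=0.1, cycle_size=20)
    cooldown = LinearCyclicalScheduler(optimizer, "lr", start_value=0.1, end_value=0.0, cycle_size=20)

    # Simulate the concatenated schedule over 50 events; returns
    # [event_index, value] pairs, handy for plotting a schedule up front.
    values = ConcatScheduler.simulate_values(
        num_events=50,
        schedulers=[warmup, cooldown],
        durations=[25],  # switch from warmup to cooldown after 25 events
    )
    print(values[:3])
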
2 changes: 1 addition & 1 deletion ignite/contrib/handlers/tqdm_logger.py
@@ -104,7 +104,7 @@ def __init__(
         self,
         persist: bool = False,
         bar_format: str = "{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]",
-        **tqdm_kwargs: Any
+        **tqdm_kwargs: Any,
     ) -> None:
 
         try:
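
For reference, a hedged sketch of the ProgressBar constructor touched above: keywords beyond persist and bar_format (here ncols) pass through **tqdm_kwargs to tqdm; the dummy engine and the run() call are illustrative only:

    from ignite.contrib.handlers import ProgressBar
    from ignite.engine import Engine

    trainer = Engine(lambda engine, batch: batch)  # dummy process function

    pbar = ProgressBar(
        persist=True,
        bar_format="{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}",
        ncols=80,  # forwarded to tqdm via **tqdm_kwargs
    )
    pbar.attach(trainer)
    trainer.run(range(100), max_epochs=2)
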
2 changes: 1 addition & 1 deletion ignite/contrib/handlers/trains_logger.py
@@ -638,7 +638,7 @@ def __init__(
         output_uri: Optional[str] = None,
         dirname: Optional[str] = None,
         *args: Any,
-        **kwargs: Any
+        **kwargs: Any,
     ) -> None:
 
         self._setup_check_trains(logger, output_uri)
2 changes: 1 addition & 1 deletion ignite/contrib/handlers/visdom_logger.py
@@ -145,7 +145,7 @@ def __init__(
         port: Optional[int] = None,
         num_workers: int = 1,
         raise_exceptions: bool = True,
-        **kwargs: Any
+        **kwargs: Any,
     ):
         try:
             import visdom
2 changes: 1 addition & 1 deletion ignite/distributed/comp_models/horovod.py
@@ -129,7 +129,7 @@ def spawn( # type: ignore[override]
         nproc_per_node: int = 1,
         hosts: Optional[str] = None,
         backend: str = HOROVOD,
-        **kwargs: Any
+        **kwargs: Any,
     ) -> None:
         c1 = "nnodes" in kwargs and kwargs["nnodes"] > 1
         c2 = "node_rank" in kwargs and kwargs["node_rank"] > 0
2 changes: 1 addition & 1 deletion ignite/distributed/comp_models/native.py
@@ -283,7 +283,7 @@ def spawn( # type: ignore[override]
         master_addr: str = "127.0.0.1",
         master_port: int = 2222,
         backend: str = "nccl",
-        **kwargs: Any
+        **kwargs: Any,
     ) -> None:
         world_size = nnodes * nproc_per_node
 
2 changes: 1 addition & 1 deletion ignite/distributed/comp_models/xla.py
@@ -113,7 +113,7 @@ def spawn( # type: ignore[override]
         nnodes: int = 1,
         node_rank: int = 0,
         backend: str = XLA_TPU,
-        **kwargs: Any
+        **kwargs: Any,
     ) -> None:
         if "start_method" not in kwargs:
             kwargs["start_method"] = "fork"
4 changes: 2 additions & 2 deletions ignite/distributed/launcher.py
@@ -181,7 +181,7 @@ def __init__(
         node_rank: Optional[int] = None,
         master_addr: Optional[str] = None,
         master_port: Optional[int] = None,
-        **spawn_kwargs: Any
+        **spawn_kwargs: Any,
     ) -> None:
         if backend is not None:
             if backend not in idist.available_backends():
@@ -216,7 +216,7 @@ def _setup_spawn_params(
         node_rank: Optional[int] = None,
         master_addr: Optional[str] = None,
         master_port: Optional[int] = None,
-        **spawn_kwargs: Any
+        **spawn_kwargs: Any,
     ) -> Dict:
         if nproc_per_node < 1:
             raise ValueError(f"Argument nproc_per_node should positive, but given {nproc_per_node}")
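
To show where **spawn_kwargs ends up, a hedged usage sketch of the Parallel launcher based on ignite's documented idist.Parallel API; the gloo backend, the config dict, and the training function are illustrative choices, not part of this commit:

    import ignite.distributed as idist

    def training(local_rank, config):
        # executed once per spawned process
        print(idist.get_rank(), "- lr =", config["lr"])

    if __name__ == "__main__":
        config = {"lr": 0.01}
        # nproc_per_node is validated by _setup_spawn_params; any extra keyword
        # arguments would be collected into **spawn_kwargs.
        with idist.Parallel(backend="gloo", nproc_per_node=2) as parallel:
            parallel.run(training, config)
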