Skip to content

Commit 5dcee96

Browse files
louis-shevfdev-5
authored and committed
Chenglu/black (#2378)
* Updated black to 21.12b0 and fixed codebase for others folders (#2367) * Updated black to 21.12b0 and fixed codebase for others folders * Updated black * formatting fixed (#2372) * fix: remove trailing commas that would mislead black formatting * autopep8 fix * fix: left out trailing comma * chg: change black version * fix: test_deprecated assert failed with additional newline * fix: typo * fix: fix func_no_reasons test assert failed Co-authored-by: louis-she <louis-she@users.noreply.github.com> Co-authored-by: vfdev <vfdev.5@gmail.com> Co-authored-by: louis-she <louis-she@users.noreply.github.com>
1 parent d8f8bf4 commit 5dcee96

File tree

100 files changed

+325
-434
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

100 files changed

+325
-434
lines changed

.github/workflows/trigger_circle_ci.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ def trigger_new_pipeline(data, headers):
2424
"https://circleci.com/api/v2/project/gh/pytorch/ignite/pipeline", data=json.dumps(data), headers=headers
2525
)
2626
assert_result(result, 201)
27-
output = get_output(result.text, ["id",])
27+
output = get_output(result.text, ["id"])
2828
return output["id"]
2929

3030

@@ -46,7 +46,7 @@ def get_workflow_id(pipeline_id, headers):
4646
while True:
4747
result = requests.get(f"https://circleci.com/api/v2/pipeline/{pipeline_id}/workflow", headers=headers)
4848
assert_result(result, 200)
49-
output = get_output(result.text, ["items",])
49+
output = get_output(result.text, ["items"])
5050
items = output["items"]
5151
if len(items) > 1:
5252
raise RuntimeError(f"Incorrect number of workflow ids: {len(items)} != 1\n" f"items: {items}")

.pre-commit-config.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ repos:
1515
exclude_types: ["python", "jupyter", "shell", "gitignore"]
1616

1717
- repo: https://github.com/python/black
18-
rev: 19.10b0
18+
rev: 21.12b0
1919
hooks:
2020
- id: black
2121
language_version: python3.8

CONTRIBUTING.md

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -109,11 +109,11 @@ If you modify the code, you will most probably also need to code some tests to e
109109

110110
- naming convention for files `test_*.py`, e.g. `test_precision.py`
111111
- naming of testing functions `def test_*`, e.g. `def test_precision_on_random_data()`
112-
- if test function should run on GPU, please **make sure to add `cuda`** in the test name, e.g. `def test_something_on_cuda()`.
112+
- if test function should run on GPU, please **make sure to add `cuda`** in the test name, e.g. `def test_something_on_cuda()`.
113113
Additionally, we may want to decorate it with `@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")`.
114114
For more examples, please see https://github.com/pytorch/ignite/blob/master/tests/ignite/engine/test_create_supervised.py
115-
- if test function checks distributed configuration, we have to mark the test as `@pytest.mark.distributed` and additional
116-
conditions depending on the intended checks. For example, please see
115+
- if test function checks distributed configuration, we have to mark the test as `@pytest.mark.distributed` and additional
116+
conditions depending on the intended checks. For example, please see
117117
https://github.com/pytorch/ignite/blob/master/tests/ignite/metrics/test_accuracy.py
118118

119119

@@ -131,7 +131,7 @@ format and check codebase for compliance with PEP8.
131131
If you choose not to use pre-commit, you can take advantage of IDE extensions configured to black format or invoke
132132
black manually to format files and commit them.
133133

134-
To install `flake8`, `black==19.10b0`, `isort==5.7.0` and `mypy`, please run
134+
To install `flake8`, `black==21.12b0`, `isort==5.7.0` and `mypy`, please run
135135
```bash
136136
bash ./tests/run_code_style.sh install
137137
```

docker/test_image.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ def main():
3333
print(traceback.format_exc())
3434
"""
3535
try:
36-
out = client.containers.run(args.image, f"python -c '{try_except_cmd}'", auto_remove=True, stderr=True,)
36+
out = client.containers.run(args.image, f"python -c '{try_except_cmd}'", auto_remove=True, stderr=True)
3737
assert isinstance(out, bytes), type(out)
3838
out = out.decode("utf-8").strip()
3939

examples/contrib/cifar10/main.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -223,11 +223,11 @@ def get_dataflow(config):
223223

224224
# Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
225225
train_loader = idist.auto_dataloader(
226-
train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True,
226+
train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True
227227
)
228228

229229
test_loader = idist.auto_dataloader(
230-
test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False,
230+
test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False
231231
)
232232
return train_loader, test_loader
233233

examples/contrib/cifar10/utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
]
1414
)
1515

16-
test_transform = Compose([ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),])
16+
test_transform = Compose([ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
1717

1818

1919
def get_train_test_datasets(path):

examples/contrib/cifar10_qat/main.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -208,11 +208,11 @@ def get_dataflow(config):
208208

209209
# Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
210210
train_loader = idist.auto_dataloader(
211-
train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True,
211+
train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True
212212
)
213213

214214
test_loader = idist.auto_dataloader(
215-
test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False,
215+
test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False
216216
)
217217
return train_loader, test_loader
218218

examples/contrib/cifar10_qat/utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
]
1818
)
1919

20-
test_transform = Compose([ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),])
20+
test_transform = Compose([ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
2121

2222

2323
def get_train_test_datasets(path):

examples/contrib/transformers/main.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -228,11 +228,11 @@ def get_dataflow(config):
228228

229229
# Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
230230
train_loader = idist.auto_dataloader(
231-
train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True,
231+
train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True
232232
)
233233

234234
test_loader = idist.auto_dataloader(
235-
test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False,
235+
test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False
236236
)
237237
return train_loader, test_loader
238238

@@ -246,7 +246,7 @@ def initialize(config):
246246
# Adapt model for distributed settings if configured
247247
model = idist.auto_model(model)
248248

249-
optimizer = optim.AdamW(model.parameters(), lr=config["learning_rate"], weight_decay=config["weight_decay"],)
249+
optimizer = optim.AdamW(model.parameters(), lr=config["learning_rate"], weight_decay=config["weight_decay"])
250250
optimizer = idist.auto_optim(optimizer)
251251
criterion = nn.BCEWithLogitsLoss()
252252

examples/gan/dcgan.py

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@
3535

3636

3737
class Net(nn.Module):
38-
""" A base class for both generator and the discriminator.
38+
"""A base class for both generator and the discriminator.
3939
Provides a common weight initialization scheme.
4040
4141
"""
@@ -56,7 +56,7 @@ def forward(self, x):
5656

5757

5858
class Generator(Net):
59-
""" Generator network.
59+
"""Generator network.
6060
6161
Args:
6262
nf (int): Number of filters in the second-to-last deconv layer
@@ -95,7 +95,7 @@ def forward(self, x):
9595

9696

9797
class Discriminator(Net):
98-
""" Discriminator network.
98+
"""Discriminator network.
9999
100100
Args:
101101
nf (int): Number of filters in the first conv layer.
@@ -133,9 +133,7 @@ def forward(self, x):
133133

134134

135135
def check_manual_seed(seed):
136-
""" If manual seed is not specified, choose a random one and communicate it to the user.
137-
138-
"""
136+
"""If manual seed is not specified, choose a random one and communicate it to the user."""
139137

140138
seed = seed or random.randint(1, 10000)
141139
random.seed(seed)
@@ -311,8 +309,8 @@ def step(engine, batch):
311309
@trainer.on(Events.ITERATION_COMPLETED(every=PRINT_FREQ))
312310
def print_logs(engine):
313311
fname = os.path.join(output_dir, LOGS_FNAME)
314-
columns = ["iteration",] + list(engine.state.metrics.keys())
315-
values = [str(engine.state.iteration),] + [str(round(value, 5)) for value in engine.state.metrics.values()]
312+
columns = ["iteration"] + list(engine.state.metrics.keys())
313+
values = [str(engine.state.iteration)] + [str(round(value, 5)) for value in engine.state.metrics.values()]
316314

317315
with open(fname, "a") as f:
318316
if f.tell() == 0:

0 commit comments

Comments (0)