Skip to content

Commit

Permalink
lint
Browse files — browse the repository at this point in the history
  • Loading branch information
wkpark committed Oct 12, 2024
1 parent f71f241 commit 02ecb34
Showing 1 changed file with 5 additions and 5 deletions.
10 changes: 5 additions & 5 deletions cuda_malloc.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,9 +39,9 @@ def enum_display_devices():
else:
gpu_names = set()
out = subprocess.check_output(['nvidia-smi', '-L'])
for l in out.split(b'\n'):
if len(l) > 0:
gpu_names.add(l.decode('utf-8').split(' (UUID')[0])
for line in out.split(b'\n'):
if len(line) > 0:
gpu_names.add(line.decode('utf-8').split(' (UUID')[0])
return gpu_names

blacklist = {"GeForce GTX TITAN X", "GeForce GTX 980", "GeForce GTX 970", "GeForce GTX 960", "GeForce GTX 950", "GeForce 945M",
Expand All @@ -55,7 +55,7 @@ def enum_display_devices():
def cuda_malloc_supported():
try:
names = get_gpu_names()
except:
except Exception:
names = set()
for x in names:
if "NVIDIA" in x:
Expand All @@ -82,7 +82,7 @@ def cuda_malloc_supported():
version = module.__version__
if int(version[0]) >= 2: #enable by default for torch version 2.0 and up
args.cuda_malloc = cuda_malloc_supported()
except:
except Exception:
pass


Expand Down

0 comments on commit 02ecb34

Please sign in to comment.