-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy paththreadGPU.py
122 lines (109 loc) · 4.9 KB
/
threadGPU.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
import os
import sys
import logging
import subprocess
import traceback
from pynvml import *
import nvidia_smi
from time import sleep
from threading import Thread
class utilizationGPU(Thread):
    """
    Class generating a parallel thread to monitor the GPU usage.

    Initialize with:
        thread = utilizationGPU(print_time = int      # Frequency (seconds) of printing the average and current GPU usage
                                print_current = bool  # In addition to average, print current usage
                                time_step = float)    # Time step (seconds) for sampling
    Start thread with:
        thread.start()
    Will print usage and memory average over the last "print_time" seconds.
    Stop the thread with:
        thread.stopLoop() # Important ! : Otherwise the loop will not be stopped properly
        thread.join()     # Classic
    """
    def __init__(self,print_time=60,print_current=False,time_step=0.01):
        # Call the Thread class's init function
        super(utilizationGPU,self).__init__()
        self.print_time = print_time        # seconds between average printouts
        self.print_current = print_current  # also print instantaneous usage at each printout
        self.time_step = time_step          # sampling period in seconds
        self.GPUs = []                      # NVML device handles
        self.occAvgTot = []                 # per-device utilization accumulator (whole run)
        self.occAvgStep = []                # per-device utilization accumulator (since last print)
        self.memAvgTot = []                 # per-device memory accumulator (whole run)
        self.memAvgStep = []                # per-device memory accumulator (since last print)
        self.running = True
        # Keep deviceCount consistent with the handles actually collected, so run()
        # is safe even when NVML initialization fails partway through.
        self.deviceCount = 0
        try:
            nvmlInit()
            self.deviceCount = nvmlDeviceGetCount()
            # Get list of handles #
            logging.info("[GPU] Detected devices are :")
            for i in range(self.deviceCount):
                handle = nvmlDeviceGetHandleByIndex(i)
                self.GPUs.append(handle)
                logging.info("[GPU] ..... Device %d : %s"%(i, nvmlDeviceGetName(handle)))
                # Records #
                self.occAvgTot.append(0)
                self.occAvgStep.append(0)
                self.memAvgTot.append(0)
                self.memAvgStep.append(0)
            logging.info("[GPU] Will print usage every %d seconds"%self.print_time)
        except Exception as e:
            logging.error("[GPU] *** Caught exception: %s : %s"%(str(e.__class__),str(e)))
            traceback.print_exc()
            # Only monitor the devices whose handles were successfully obtained.
            self.deviceCount = len(self.GPUs)
    # Override the run function of Thread class
    def run(self):
        """Sampling loop: poll GPU utilization every time_step seconds until stopLoop()."""
        counter = 0        # total samples since start
        print_counter = 0  # samples since the last average printout
        # Number of samples per printout; at least 1 so the per-step divisor below
        # can never be zero even if time_step > print_time.
        samples_per_print = max(1, int(self.print_time/self.time_step))
        while self.running:
            # One utilization sample per device #
            res = [nvmlDeviceGetUtilizationRates(self.GPUs[i]) for i in range(self.deviceCount)]
            # Print every self.print_time #
            if print_counter >= samples_per_print:
                # Print current #
                if self.print_current:
                    s = "\t[GPU] "
                    for i in range(self.deviceCount):
                        s += "Device %d %s : utilization : %d%%, memory : %d%%\t"%(i, nvmlDeviceGetName(self.GPUs[i]),res[i].gpu,res[i].memory)
                    logging.info(s)
                # Print avg #
                if self.print_time<60:
                    logging.info("\n[GPU] Occupation over the last %d seconds"%self.print_time)
                else:
                    minutes = self.print_time//60
                    seconds = self.print_time%60
                    logging.info("\n[GPU] Occupation over the last %d minutes, %d seconds"%(minutes,seconds))
                s = "[GPU] "
                for i in range(self.deviceCount):
                    # Accumulators hold utilization*time_step; dividing by elapsed
                    # time (samples * time_step) yields the average percentage.
                    self.occAvgStep[i] /= (print_counter*self.time_step)
                    self.memAvgStep[i] /= (print_counter*self.time_step)
                    s += "Device %d %s : utilization : %d%%, memory : %d%%\t"%(i, nvmlDeviceGetName(self.GPUs[i]),self.occAvgStep[i],self.memAvgStep[i])
                    # Reinitialize average #
                    self.occAvgStep[i] = 0
                    self.memAvgStep[i] = 0
                logging.info(s)
                # reset printing counter #
                print_counter = 0
            # Add to total and step #
            for i in range(self.deviceCount):
                self.occAvgTot[i] += res[i].gpu*self.time_step
                self.occAvgStep[i] += res[i].gpu*self.time_step
                self.memAvgTot[i] += res[i].memory*self.time_step
                self.memAvgStep[i] += res[i].memory*self.time_step
            # Sleep and counters #
            print_counter += 1
            counter += 1
            sleep(self.time_step)
        # Print total average; skip if the loop was stopped before any sample
        # was taken (avoids division by zero).
        if counter > 0:
            logging.info("[GPU] Average occupation over whole period")
            s = "[GPU] "
            for i in range(self.deviceCount):
                self.occAvgTot[i] /= (counter*self.time_step)
                self.memAvgTot[i] /= (counter*self.time_step)
                s += "Device %d %s : utilization : %d%%, memory : %d%%\t"%(i, nvmlDeviceGetName(self.GPUs[i]),self.occAvgTot[i],self.memAvgTot[i])
            logging.info(s)
    def stopLoop(self):
        """Signal the sampling loop in run() to exit; call before join()."""
        self.running = False