File _service:extract_file:tau-2.31.1-python2to3.patch of Package tau-intel-mvapich2 (Revision a3cbd9b468cca1fe85d083f880c314df)
--- a/apex/src/scripts/consolidate.py (original)
--- b/apex/src/scripts/consolidate.py (refactored)

#!/usr/bin/python

-from __future__ import print_function
+
import os.path
import sys
import glob

rowdata[period] = {}
total = 0
for c in colnames:
- if c not in line.keys():
+ if c not in list(line.keys()):
rowdata[period][c] = 0
continue
if c not in rowdata[period]:
--- a/apex/src/scripts/task_scatterplot.py (original)
--- b/apex/src/scripts/task_scatterplot.py (refactored)

try:
mytup = (float(row[0]),float(row[1]))
except ValueError as e:
- print(index, " Bad row: ", row)
+ print((index, " Bad row: ", row))
continue
if row[2] not in dictionary:
dictionary[row[2]] = [mytup]
else:
dictionary[row[2]].append(mytup)
- print "Parsed", index, "samples"
+ print("Parsed", index, "samples")

#resize the figure
# Get current size

numplots = min(len(dictionary), 10)
for key in sorted(dictionary, key=lambda key: len(dictionary[key]), reverse=True):
index = index + 1
- print "Plotting", key
+ print("Plotting", key)
axes = pl.subplot(((numplots+1)/2), 2, index)
timestamps = np.array([x[0] for x in dictionary[key]])
values = np.array([x[1] for x in dictionary[key]])

pl.xlabel("seconds from program start")
if index >= numplots:
break
-print "Rendering..."
+print("Rendering...")
pl.tight_layout()
#pl.show()
pl.savefig("image.png")
--- a/etc/TAU-CSV-To-HeatMap.py (original)
--- b/etc/TAU-CSV-To-HeatMap.py (refactored)

csvSource = GetActiveSource()

if(('FileName' in dir(csvSource) and csvSource.FileName[0].lower().endswith(".csv")) is False):
- raise TypeError, "This macro requires a .csv file to be selected in the pipeline browser."
+ raise TypeError("This macro requires a .csv file to be selected in the pipeline browser.")

data = servermanager.Fetch(csvSource,0)
col0 = data.GetColumn(0)
--- a/examples/test.py (original)
--- b/examples/test.py (refactored)

def shell_command(command,errorMessage):
#command initiated where this script is ran
try:
- print command
+ print(command)
subprocess.check_call(command, stderr=subprocess.STDOUT, shell=True)
errorStatus=0
except :
- print errorMessage
+ print(errorMessage)
pass
errorStatus=1
return errorStatus

def dumpclean(obj):
if type(obj) == dict:
- for k, v in obj.items():
+ for k, v in list(obj.items()):
if hasattr(v, '__iter__'):
- print k
+ print(k)
dumpclean(v)
else:
- print '%s : %s' % (k, v)
+ print('%s : %s' % (k, v))
elif type(obj) == list:
for v in obj:
if hasattr(v, '__iter__'):
dumpclean(v)
else:
- print v
+ print(v)
else:
- print obj
+ print(obj)

def resultMeaning(result):
if result == 0 :

parentDir=os.getcwd()

- directories = filter(os.path.isdir, os.listdir(os.getcwd()))
+ directories = list(filter(os.path.isdir, os.listdir(os.getcwd())))
makeResults=dict((dirs,{"make":0,"build.sh":0}) for dirs in directories)
# 0,1 normal, -1 if doesn't exist
for currentDir in directories:
os.chdir(currentDir)
- print "*** CD into ", currentDir
+ print("*** CD into ", currentDir)
isBuild = os.path.isfile("build.sh")
isMake = os.path.isfile("Makefile")
if isMake :

target.write("#dir, make, build.sh \n")
target.write("\n")

- for dir,resultDict in makeResults.iteritems():
+ for dir,resultDict in makeResults.items():
result=[]
result.append(dir + " , ")
- for type,code in resultDict.iteritems():
+ for type,code in resultDict.items():
result.append(resultMeaning(code)+ " , ")
#print '%s: %s: %s' % (dir,type,resultMeaning(result))}
result.append("\n")
--- a/examples/gpu/pycuda/matmult.py (original)
--- b/examples/gpu/pycuda/matmult.py (refactored)

cuda.memcpy_htod(d_a, a)
cuda.memcpy_htod(d_b, b)

- print "threads:", number_of_threads, "blocks: ", number_of_blocks
+ print("threads:", number_of_threads, "blocks: ", number_of_blocks)

multiply_matrices = multiply_source.get_function("multiply_matrices")
multiply_matrices_shared_blocks = multiply_source.get_function("multiply_matrices_shared_blocks")
--- a/examples/mpi4py-c++-f90/samarc.py (original)
--- b/examples/mpi4py-c++-f90/samarc.py (refactored)

try:
import samint
except ImportError:
- print "Import Error: have to run with MPI"
+ print("Import Error: have to run with MPI")

class samarc:

self.inpfile=startfile

if not os.path.isfile(startfile):
- print 'Error: Could not find file %s' % startfile
+ print('Error: Could not find file %s' % startfile)
return None

samint.samarcInit(self.inpfile)
--- a/examples/mpi4py-c++-f90/samarcrun.py (original)
--- b/examples/mpi4py-c++-f90/samarcrun.py (refactored)

for n in range(nsteps):
step = step + 1

- print "---------------------------------------------------"
- print " step: ", step, "\ttime: ", simtime
+ print("---------------------------------------------------")
+ print(" step: ", step, "\ttime: ", simtime)

# flow solve
offBodySolver.runStep(simtime,dt)
--- a/examples/plugin/adios2_trace/mpi_reader.py (original)
--- b/examples/plugin/adios2_trace/mpi_reader.py (refactored)

def process_file(filename):
filename = filename.replace('.sst', '')
- print ("Opening:", filename)
+ print(("Opening:", filename))
with adios2.open(filename, "r", MPI.COMM_SELF, engine_type=engine) as fh:
for fstep in fh:
# inspect variables in current step
step_vars = fstep.available_variables()
# print variables information
- for name, info in step_vars.items():
- print(filename, "variable_name: " + name)
- for key, value in info.items():
- print(filename, "\t" + key + ": " + value)
+ for name, info in list(step_vars.items()):
+ print((filename, "variable_name: " + name))
+ for key, value in list(info.items()):
+ print((filename, "\t" + key + ": " + value))
print("\n")
# read the variable!
dummy = fstep.read(name)
# track current step
step = fstep.current_step()
- print(filename, "Step = ", step)
+ print((filename, "Step = ", step))

if __name__ == '__main__':
comm = MPI.COMM_WORLD
--- a/examples/plugin/adios2_trace/reader.py (original)
--- b/examples/plugin/adios2_trace/reader.py (refactored)

def process_file(filename):
filename = filename.replace('.sst', '')
- print ("Opening:", filename)
+ print(("Opening:", filename))
with adios2.open(filename, "r", MPI.COMM_SELF, engine_type=engine) as fh:
for fstep in fh:
# inspect variables in current step
step_vars = fstep.available_variables()
# print variables information
- for name, info in step_vars.items():
- print(filename, "variable_name: " + name)
- for key, value in info.items():
- print(filename, "\t" + key + ": " + value)
+ for name, info in list(step_vars.items()):
+ print((filename, "variable_name: " + name))
+ for key, value in list(info.items()):
+ print((filename, "\t" + key + ": " + value))
print("\n")
# read the variable!
dummy = fstep.read(name)
# track current step
step = fstep.current_step()
- print(filename, "Step = ", step)
+ print((filename, "Step = ", step))

if __name__ == '__main__':
#time.sleep(2)
--- a/examples/plugin/adios2_trace/reader_single.py (original)
--- b/examples/plugin/adios2_trace/reader_single.py (refactored)

def process_file(filename):
filename = filename.replace('.sst', '')
- print ("Opening:", filename)
+ print(("Opening:", filename))
with adios2.open(filename, "r", MPI.COMM_SELF, engine_type=engine) as fh:
for fstep in fh:
# inspect variables in current step
step_vars = fstep.available_variables()
# print variables information
- for name, info in step_vars.items():
- print(filename, "variable_name: " + name)
- for key, value in info.items():
- print(filename, "\t" + key + ": " + value)
+ for name, info in list(step_vars.items()):
+ print((filename, "variable_name: " + name))
+ for key, value in list(info.items()):
+ print((filename, "\t" + key + ": " + value))
print("\n")
# read the variable!
dummy = fstep.read(name)
# track current step
step = fstep.current_step()
- print(filename, "Step = ", step)
+ print((filename, "Step = ", step))

if __name__ == '__main__':
#time.sleep(2)
--- a/examples/plugin/sqlite3/nosql-parser.py (original)
--- b/examples/plugin/sqlite3/nosql-parser.py (refactored)

rows = c.execute(sql)
desc = c.description
column_names = [col[0] for col in desc]
- data = [dict(zip(column_names, row)) for row in c.fetchall()]
+ data = [dict(list(zip(column_names, row))) for row in c.fetchall()]
return data

def get_counters(conn):

rows = c.execute(sql)
desc = c.description
column_names = [col[0] for col in desc]
- data = [dict(zip(column_names, row)) for row in c.fetchall()]
+ data = [dict(list(zip(column_names, row))) for row in c.fetchall()]
return data

if __name__ == '__main__':
--- a/examples/plugin/sqlite3/parser.py (original)
--- b/examples/plugin/sqlite3/parser.py (refactored)

rows = c.execute('SELECT id, name, created FROM trial')
desc = c.description
column_names = [col[0] for col in desc]
- data = [dict(zip(column_names, row)) for row in c.fetchall()]
+ data = [dict(list(zip(column_names, row))) for row in c.fetchall()]
return data

def get_metadata(conn):

rows = c.execute('SELECT trial, name, value FROM metadata')
desc = c.description
column_names = [col[0] for col in desc]
- data = [dict(zip(column_names, row)) for row in c.fetchall()]
+ data = [dict(list(zip(column_names, row))) for row in c.fetchall()]
return data

def get_threads(conn):

rows = c.execute('SELECT id, node_rank, thread_rank FROM thread')
desc = c.description
column_names = [col[0] for col in desc]
- data = [dict(zip(column_names, row)) for row in c.fetchall()]
+ data = [dict(list(zip(column_names, row))) for row in c.fetchall()]
return data

def get_metrics(conn):

rows = c.execute('SELECT trial, name FROM metric')
desc = c.description
column_names = [col[0] for col in desc]
- data = [dict(zip(column_names, row)) for row in c.fetchall()]
+ data = [dict(list(zip(column_names, row))) for row in c.fetchall()]
return data

def get_timers(conn):

rows = c.execute('SELECT id, trial, parent, short_name FROM timer')
desc = c.description
column_names = [col[0] for col in desc]
- data = [dict(zip(column_names, row)) for row in c.fetchall()]
+ data = [dict(list(zip(column_names, row))) for row in c.fetchall()]
return data

def get_timer_values(conn):

rows = c.execute('SELECT timer, metric, thread, value FROM timer_value')
desc = c.description
column_names = [col[0] for col in desc]
- data = [dict(zip(column_names, row)) for row in c.fetchall()]
+ data = [dict(list(zip(column_names, row))) for row in c.fetchall()]
return data

def get_counters(conn):

rows = c.execute('SELECT id, trial, name FROM counter')
desc = c.description
column_names = [col[0] for col in desc]
- data = [dict(zip(column_names, row)) for row in c.fetchall()]
+ data = [dict(list(zip(column_names, row))) for row in c.fetchall()]
return data

def get_counter_values(conn):

rows = c.execute('SELECT counter, timer, thread, sample_count, maximum_value, minimum_value, mean_value, sum_of_squares FROM counter_value')
desc = c.description
column_names = [col[0] for col in desc]
- data = [dict(zip(column_names, row)) for row in c.fetchall()]
+ data = [dict(list(zip(column_names, row))) for row in c.fetchall()]
return data

def write_callgraph(timers):

dot.node(str(timer['id']), timer['short_name'])
if timer['parent'] != None:
dot.edge(str(timer['id']), str(timer['parent']))
- print(dot.source)
+ print((dot.source))
#dot.render('callgraph.gv', view=True)

if __name__ == '__main__':
--- a/examples/pyspark/als.py (original)
--- b/examples/pyspark/als.py (refactored)

This example requires numpy (http://www.numpy.org/)
"""
-from __future__ import print_function
+

import sys

usb = sc.broadcast(us)

for i in range(ITERATIONS):
- ms = sc.parallelize(range(M), partitions) \
+ ms = sc.parallelize(list(range(M)), partitions) \
.map(lambda x: update(x, usb.value, Rb.value)) \
.collect()
# collect() returns a list, so array ends up being

ms = matrix(np.array(ms)[:, :, 0])
msb = sc.broadcast(ms)

- us = sc.parallelize(range(U), partitions) \
+ us = sc.parallelize(list(range(U)), partitions) \
.map(lambda x: update(x, msb.value, Rb.value.T)) \
.collect()
us = matrix(np.array(us)[:, :, 0])
--- a/examples/python/cpi.py (original)
--- b/examples/python/cpi.py (refactored)

def prn_pi(pi, PI):
message = "pi is approximately %.16f, error is %.16f"
- print (message % (pi, abs(pi - PI)))
+ print((message % (pi, abs(pi - PI))))

comm = MPI.COMM_WORLD
nprocs = comm.Get_size()
--- a/examples/python/firstprime.py (original)
--- b/examples/python/firstprime.py (refactored)

# Found factor. not prime
break # move on to next number
elif potentialfactor >= sqrtno:
- print("The first prime number after {} is {}".format(x,current))
+ print(("The first prime number after {} is {}".format(x,current)))
return current
current += 1
--- a/examples/python/hello-mpi.py (original)
--- b/examples/python/hello-mpi.py (refactored)

comm = MPI.COMM_WORLD

-print ("Hello! I'm rank %d from %d running in total..." % (comm.rank, comm.size))
+print(("Hello! I'm rank %d from %d running in total..." % (comm.rank, comm.size)))

comm.Barrier() # wait for everybody to synchronize _here_
--- a/examples/sos/pycoolrgui/pycoolr-plot/clr_utils.py (original)
--- b/examples/sos/pycoolrgui/pycoolr-plot/clr_utils.py (refactored)

for jt in jtext:
try:
j = json.loads(jt)
- except ValueError, e:
+ except ValueError as e:
continue
ret.append(j)
--- a/examples/sos/pycoolrgui/pycoolr-plot/coolr-back.py (original)
--- b/examples/sos/pycoolrgui/pycoolr-plot/coolr-back.py (refactored)

#!/usr/bin/env python

#import sys, os, re, _thread, signal
-import sys, os, re, thread, signal
+import sys, os, re, _thread, signal
#from cStringIO import StringIO
from io import StringIO
import subprocess

with open(cfgfn) as f:
cfgtmp = json.load(f)
# override if cfg defines any
- for k in cfgtmp.keys():
+ for k in list(cfgtmp.keys()):
cfg[k] = cfgtmp[k]
# override if specifed as cmd option
- for k in ocfg.keys():
+ for k in list(ocfg.keys()):
cfg[k] = ocfg[k]

targetnode = os.environ['PYCOOLR_NODE']
#targetnode = cfg['masternode']
if len(enclaves) == 0:
- if cfg.has_key('enclaves'):
+ if 'enclaves' in cfg:
enclaves = cfg['enclaves']

#print 'masternode:', cfg['masternode']
-print('targetnode:', targetnode)
-print('enclaves:', enclaves)
+print(('targetnode:', targetnode))
+print(('enclaves:', enclaves))

if len(appcfgfn) > 0:
with open(appcfgfn) as f:
appcfg = json.load(f)
- for k in appcfg.keys():
+ for k in list(appcfg.keys()):
cfg[k] = appcfg[k]

- if not (cfg.has_key('appname') and cfg.has_key('appsamples')):
- print("Please double check %s: appname or appsamples tags" % appcfgfn)
+ if not ('appname' in cfg and 'appsamples' in cfg):
+ print(("Please double check %s: appname or appsamples tags" % appcfgfn))
sys.exit(1)

try:
logf = open(cfg["outputfn"], 'w', 0) # unbuffered write
except:
- print('unable to open', cfg["outputfn"])
+ print(('unable to open', cfg["outputfn"]))

#if not fakemode:

params['enclaves'] = enclaves

if sys.version_info[0] < 3:
- import Tkinter
+ import tkinter
#from Tkinter import *
- import tkFileDialog
- import tkFont
- from tkFont import Font
+ import tkinter.filedialog
+ import tkinter.font
+ from tkinter.font import Font
#from Tkinter.FileDialog import askopenfilename
else:
import tkinter

def __init__(self, master, row=2, col=3):

# Create a container
- self.frame = Tkinter.Frame(master,width=200,height=100)
+ self.frame = tkinter.Frame(master,width=200,height=100)
# Create 2 buttons
#self.button_left = Tkinter.Button(frame,text="< Decrease Slope",
# command=self.decrease)

self.nbsamples = params['cfg']['nbsamples']
#self.nbcvars = params['cfg']['nbcvars']
self.listmetrics = params['cfg']['metrics']
- print("self.listmetrics",self.listmetrics)
+ print(("self.listmetrics",self.listmetrics))
#self.listsamples = params['cfg']['appsamples']
self.nbGraphs = params['cfg']['nbgraphs']

self.btncvarsupdate = None

self.metrics = params['cfg']['metrics']
- print("self.metrics", self.metrics)
+ print(("self.metrics", self.metrics))
#self.ranks = params['cfg']['ranks']
self.ranks = [None] * self.nbsamples
self.procs = [None] * self.nbsamples

self.listFontFamily = ['serif', 'sans-serif', 'cursive', 'fantasy', 'monospace']
self.listFontWeight = ['ultralight','light','normal','regular','book','medium','roman','semibold','demibold','demi','bold','heavy','extra bold','black']

- self.list_fonts = list( tkFont.families() )
+ self.list_fonts = list( tkinter.font.families() )

self.selectedFontPolicy = None
self.selectedFontSize = None
self.selectedFontWeight = None

# create a custom font
- self.customFont = tkFont.Font(family="Helvetica", size=12)
+ self.customFont = tkinter.font.Font(family="Helvetica", size=12)

for idx in range(params['cfg']['nbgraphs']):
self.listUsedGraphs.append(-1)

try:
root.config(menu=menubar)
except AttributeError as attErr:
- print('menu Exception: ', type(attErr), attErr)
+ print(('menu Exception: ', type(attErr), attErr))

#self.winPvars()
#self.winCvars()

self.subSpawn()
def try_execute(self, c, statement, parameters=None):
- print ("statement: ", statement)
- print ("parameters: ", parameters)
+ print(("statement: ", statement))
+ print(("parameters: ", parameters))
try:
if parameters:
c.execute(statement,parameters);
else:
c.execute(statement);
except sqlite3.Error as e:
- print("database error...", e.args[0])
+ print(("database error...", e.args[0]))

def open_connection(self):
global conn
# check for file to exist
#print ("Checking for file: ", sqlite_file)
- print("Checking for file: ", self.sosdbfile)
+ print(("Checking for file: ", self.sosdbfile))
while not os.path.exists(self.sosdbfile):
- print("Waiting on file: ", self.sosdbfile)
+ print(("Waiting on file: ", self.sosdbfile))
time.sleep(1)

#print("Connecting to: ", sqlite_file)
- print("Connecting to: ", self.sosdbfile)
+ print(("Connecting to: ", self.sosdbfile))
# Connecting to the database file
#conn = sqlite3.connect(sqlite_file)
#fd = os.open(sqlite_file, os.O_RDONLY)

def browsefontpolicy(self):
print('browsefontpolicy')
- fontpolicydiag = tkFileDialog.askopenfilename(filetypes=[("Text files","*.fft")])
+ fontpolicydiag = tkinter.filedialog.askopenfilename(filetypes=[("Text files","*.fft")])

def onselectFontPolicy(self,evt):
w = evt.widget
selection = w.curselection()
value = w.get(selection[0])
self.selectedFontPolicy = value
- print('select font: ', value)
+ print(('select font: ', value))

def onselectFontSize(self, evt):
print('select font size')

selection = w.curselection()
value = w.get(selection[0])
self.selectedFontSize = value
- print('select font: ', value)
+ print(('select font: ', value))

def onselectFontWeight(self, evt):
print('select font weight')

selection = w.curselection()
value = w.get(selection[0])
self.selectedFontWeight = value
- print('select font: ', value)
+ print(('select font: ', value))

def loadFontPolicy(self):
fontpolicydiag = askopenfilename(filetypes=(("*.fft")))

#f1 = Tk.Frame(pvarswin,width=150,height=100)
s1 = Tk.Scrollbar(self.f1)
#l1 = Tk.Listbox(f1,selectmode='multiple',width=80,height=40)
- print("self.listmetrics",self.listmetrics)
- print("len(self.listmetrics)",len(self.listmetrics))
- print("self.nbsamples",self.nbsamples)
+ print(("self.listmetrics",self.listmetrics))
+ print(("len(self.listmetrics)",len(self.listmetrics)))
+ print(("self.nbsamples",self.nbsamples))
for i in range(self.nbsamples): self.l1.insert(i, self.listmetrics[i])
s1.config(command = self.l1.yview)
self.l1.config(yscrollcommand = s1.set)

def clearplot(self,idxGraph):

- print('clearplot: idxGraph=', idxGraph)
+ print(('clearplot: idxGraph=', idxGraph))
ax = self.ax[idxGraph]
ax.cla()
#ax.clf()

total_val=total_val+ref4
num_vals=num_vals+1
mean_val=total_val/num_vals
- print('display record ref4='+str(ref4))
+ print(('display record ref4='+str(ref4)))
self.data_lr[i].add(t,ref4)
#self.data_lr[i].add(t,mean_val)
goodrecord=1

try:
ax.cla()
except Exception as errCla:
- print('update_gui: Error cla(): ', type(errCla), errCla)
+ print(('update_gui: Error cla(): ', type(errCla), errCla))

ax.set_xlim([t-gxsec, t])
#print 'get x and y'

#print 'parse graphs'
metric_value = max(sample[1],0)
- print("metric_value",metric_value)
+ print(("metric_value",metric_value))
numeric = re.search(r'\d+', metric_value)
metric_value_num = numeric.group()
metric_value_float = float(metric_value_num)

try:
ax.cla()
except Exception as errCla:
- print('update_gui: Error cla(): ', type(errCla), errCla)
+ print(('update_gui: Error cla(): ', type(errCla), errCla))

ax.set_xlim([pack_time-gxsec, pack_time])
#print 'get x and y'

try:
ax.cla()
except Exception as errCla:
- print('update_gui: Error cla(): ', type(errCla), errCla)
+ print(('update_gui: Error cla(): ', type(errCla), errCla))

ax.set_xlim([pack_time-gxsec, pack_time])
#print 'get x and y'

try:
j = json.loads(payload)
except ValueError as e:
- print('Failed to load json data: %s' %e)
+ print(('Failed to load json data: %s' %e))
continue
#return False

if 'node' not in e and\
'sample' not in e and\
'time' not in e:
- print('Ignore this invalid sample:', json.dumps(e))
+ print(('Ignore this invalid sample:', json.dumps(e)))
continue

#print 'set timestamp'

try:
self.canvas.draw()
except Exception as errDraw:
- print('Error drawing canvas: ', type(errDraw), errDraw)
+ print(('Error drawing canvas: ', type(errDraw), errDraw))
#plt.draw()

profile_t7 = time.time()

all_rows = c.fetchall()
ts = np.array([x[0] for x in all_rows])
min_timestamp = ts[0]
- print("min timestamp: ", min_timestamp)
+ print(("min timestamp: ", min_timestamp))
def get_min_timestamp(self):

sql_statement = ("SELECT min(time_pack) FROM viewCombined;")
print("get_min_timestamp Executing query")

- print("sql statement: ", sql_statement)
+ print(("sql statement: ", sql_statement))
#self.try_execute(c, sql_statement)
os.environ['SOS_SQL'] = sql_statement
sos_bin_path = os.environ.get('SOS_BIN_DIR')
- print('SOS BIN path: ', sos_bin_path)
+ print(('SOS BIN path: ', sos_bin_path))
os.system('cd '+ sos_bin_path)
- print('current dir: ', os.getcwd())
+ print(('current dir: ', os.getcwd()))
# Redirect stdout of passed command into a string

soscmd = sos_bin_path + "/demo_app_silent --sql SOS_SQL"
- print('soscmd: ', soscmd)
+ print(('soscmd: ', soscmd))
tmp_res_min_ts_sql = subprocess.check_output(soscmd, shell=True)

#self.res_min_ts_sql = tmp_res_min_ts_sql.splitlines()
- print('get min ts: tmp res sql=', tmp_res_min_ts_sql)
+ print(('get min ts: tmp res sql=', tmp_res_min_ts_sql))
res_min_ts_sql = tmp_res_min_ts_sql.splitlines()
- print("List of result SQL MIN TS: ", res_min_ts_sql)
+ print(("List of result SQL MIN TS: ", res_min_ts_sql))
min_ts_rows = res_min_ts_sql[1].split(",")
- print("List of result SQL MIN TS values: ", min_ts_rows)
+ print(("List of result SQL MIN TS values: ", min_ts_rows))
# Remove first element of SQL result
#ts = np.array([x[0] for x in min_ts_rows])
str_min_timestamp = min_ts_rows[0].replace('\"', '')

sql_statement = ("SELECT value_name, value, time_pack FROM viewCombined WHERE value_name LIKE '" + metric+ "'")
#sql_statement = ("SELECT * FROM viewCombined WHERE value_name LIKE '" + metric+ "'")

- print("sql statement: ", sql_statement )
+ print(("sql statement: ", sql_statement ))
#self.try_execute(c, sql_statement)
os.environ['SOS_SQL'] = sql_statement
sos_bin_path = os.environ.get('SOS_BIN_DIR')
- print('SOS BIN path: ', sos_bin_path)
+ print(('SOS BIN path: ', sos_bin_path))
os.system('cd '+ sos_bin_path)
- print('current dir: ', os.getcwd())
+ print(('current dir: ', os.getcwd()))
# Redirect stdout of passed command into a string

soscmd = sos_bin_path + "/demo_app_silent --sql SOS_SQL"
- print('soscmd: ', soscmd)
+ print(('soscmd: ', soscmd))
tmp_res_sql = subprocess.check_output(soscmd, shell=True)

self.try_execute(c, sql_statement)

#print 'stdout of SOS demo: ', sys.stdout
#self.res_sql = resultstdout.getvalue()
- print('tmp res_sql: ', tmp_res_sql)
+ print(('tmp res_sql: ', tmp_res_sql))

self.res_sql = tmp_res_sql.splitlines()
# REmove first element of SQL result
self.res_sql.pop(0)

for item_sql in self.res_sql:
- print('res sql: ', item_sql)
+ print(('res sql: ', item_sql))

# Call demo with SQL statement given as argument and store standard output

while self.ranks.size == 0:
time.sleep(1)
self.ranks,self.procs = self.get_ranks(self.conn)
- print ("ranks: ", self.ranks)
+ print(("ranks: ", self.ranks))

# get the number of nodes
self.nodes,self.noderanks = self.get_nodes(self.conn)
while self.nodes.size == 0:
time.sleep(1)
nodes,self.noderanks = self.get_nodes(self.conn)
- print ("nodes: ", self.nodes)
+ print(("nodes: ", self.nodes))

self.get_min_timestamp_db(self.conn)
#resize the figure

print('SOS: Execute demo app')
sos_path = os.environ.get('SOS_BUILD_DIR')
self.sos_bin_path = sos_path+"/bin"
- print('SOS BIN PATH: ', self.sos_bin_path)
+ print(('SOS BIN PATH: ', self.sos_bin_path))
os.system("cd "+ self.sos_bin_path)

self.opendb()

- print("metrics: ", self.metrics)
+ print(("metrics: ", self.metrics))
#self.get_min_timestamp()
while True:

countsamples = 0
for sample in self.rows[j]:
params['ts'] = 0
- print 'PYCOOLR sample: ', sample
+ print('PYCOOLR sample: ', sample)
#self.req_sql(self.conn, self.ranks, self.rows)
profile_t2 = time.time()
self.lock.acquire()

if self.listRecordSample[i] != -1:
j = self.listRecordSample[i]

- print('readsosmetrics: i=%d, j=%d' %(i,j))
+ print(('readsosmetrics: i=%d, j=%d' %(i,j)))

#rank = self.ranks[j]
#rank2 = self.ranks2[j]

self.rows[j] = self.conn.fetchall()
#print 'rows: ', self.rows[j]
if len(self.rows[j]) <= 0:
- print("Error: query returned no rows.",)
+ print(("Error: query returned no rows.",))
else:
goodrecord = 1

payload.strip()
- print('payload =',payload)
+ print(('payload =',payload))
try:
j = json.loads(payload)
except ValueError as e:
- print('Failed to load json data: %s' %e)
+ print(('Failed to load json data: %s' %e))
continue
#return False

if 'node' not in e and\
'sample' not in e and\
'time' not in e:
- print('Ignore this invalid sample:', json.dumps(e))
+ print(('Ignore this invalid sample:', json.dumps(e)))
continue

#print 'set timestamp'

print('subSpawn: load beacon subscriber library')
envlibpath = os.environ['PYCOOLR_LIBPATH']
libarbjsonbeep = cdll.LoadLibrary(envlibpath+'/libarbitraryjsonbeepmulsub.so')
- thread.start_new_thread(self.subscribe,(libarbjsonbeep,))
- thread.start_new_thread(self.readEvents,(libarbjsonbeep,))
+ _thread.start_new_thread(self.subscribe,(libarbjsonbeep,))
+ _thread.start_new_thread(self.readEvents,(libarbjsonbeep,))
#thread.start_new_thread(self.readEvents,(libarbjsonbeep,))
except Exception as errThread:
- print("Error: unable to start thread: ", errThread)
+ print(("Error: unable to start thread: ", errThread))

elif self.tool == "sos":
try:
#thread.start_new_thread(self.subscribe,(libarbjsonbeep,))
- thread.start_new_thread(self.readsosmetrics,())
+ _thread.start_new_thread(self.readsosmetrics,())
#thread.start_new_thread(self.readsosmetrics_db,())

except Exception as errThread:
- print('Error: unable to start thread: ', errThread)
+ print(('Error: unable to start thread: ', errThread))

self.refresh_plot()

self.selectedcvarsvalues[i] = self.listcvarsentry[i].get()
strcvarsvalues += self.selectedcvarsvalues[i]
- print('numselectedcvars=%d, index=%d' % (self.numselectedcvars, i))
+ print(('numselectedcvars=%d, index=%d' % (self.numselectedcvars, i)))
if i+1 < self.numselectedcvars:
strcvarsmetrics += ","
strcvarsvalues += ","

#self.strcvars += "="
#self.strcvars += self.selectedcvarsvalues[i]
#strcvars += ","
- print('numselectedcvars=%d, index=%d' % (self.numselectedcvars, i))
+ print(('numselectedcvars=%d, index=%d' % (self.numselectedcvars, i)))
if i+1 < self.numselectedcvars:
strcvarsmetrics += ","
strcvarsvalues += ","

#self.strcvars += ":"
self.strcvars += strcvarsvalues

- print("strcvarsmetrics: ", strcvarsmetrics)
- print("strcvarsvalues: ", strcvarsvalues)
- print("strcvars: ", self.strcvars)
+ print(("strcvarsmetrics: ", strcvarsmetrics))
+ print(("strcvarsvalues: ", strcvarsvalues))
+ print(("strcvars: ", self.strcvars))

# Test if we have to communicate MPI_T CVARS in a Publish/Subscribe mode
if cvars_comm_mode == "pub":
for i in range(len(selection)):
value = w.get(selection[i])
- print("selection:", selection, ": '%s'" % value)
+ print(("selection:", selection, ": '%s'" % value))
self.selectedcvarsmetrics[i] = value

if self.listlabelcvarsmetric:

self.listcvarsarrayindexentry = [None] * len(selection)
self.listcvarsarrayindex = [None] * len(selection)

- print('selection: ', selection)
- print('range selection: ', range(len(selection)))
-
- for cvaritem, cvarindex in zip(selection, range(len(selection))):
+ print(('selection: ', selection))
+ print(('range selection: ', list(range(len(selection)))))
+
+ for cvaritem, cvarindex in zip(selection, list(range(len(selection)))):

value = w.get(selection[cvarindex])
- print('len selection: ', len(selection))
- print('value of item %d: %s ' % (cvarindex, value))
- print('cvaritem: ', cvaritem)
- print('cvarindex= ', cvarindex)
- print('cvarsindexrow= ', self.cvarsindexrow)
-
- print('cfg cvars array:', self.listcfgcvarsarray[0])
+ print(('len selection: ', len(selection)))
+ print(('value of item %d: %s ' % (cvarindex, value)))
+ print(('cvaritem: ', cvaritem))
+ print(('cvarindex= ', cvarindex))
+ print(('cvarsindexrow= ', self.cvarsindexrow))
+
+ print(('cfg cvars array:', self.listcfgcvarsarray[0]))
if value == self.listcfgcvarsarray[0]:

self.listlabelcvarsmetric[cvarindex]=Tk.Label(self.stepCvarsUpdate, text=value)

#print "selection:", selection, ": '%s'" % value

listintselection = [int (i) for i in selection]
- print('listintselection: ', listintselection)
+ print(('listintselection: ', listintselection))

for i in range(self.nbsamples):
if (self.listSamplesAllocated[i] > -1) and (i not in listintselection):

if self.listSamplesAllocated[j] == -1:
#index = int(j)
self.listUsedGraphs[i] = j
- print('graph %d allocated to sample %d' % (i, j))
+ print(('graph %d allocated to sample %d' % (i, j)))
self.listRecordSample[i] = j
self.listSamplesAllocated[j] = i
break

# Mark current graph as used
self.listUsedGraphs[j] = 1
# Record the current graph as plotting the current sample
- print('Record Sample %d for graph %d' %(index,j))
+ print(('Record Sample %d for graph %d' %(index,j)))
self.listRecordSample[j] = index

# Mark current sample as allocated to the current graph

self.canvas.draw()
#self.frame.update()
except Exception as errDraw:
- print('refresh_plot: Error drawing canvas: ', type(errDraw), errDraw)
+ print(('refresh_plot: Error drawing canvas: ', type(errDraw), errDraw))
self.lock.release()

self.root.after(1000,self.refresh_plot)

try:
self.canvas.draw()
except Exception as errDraw:
- print('Error drawing canvas: ', type(errDraw), errDraw)
+ print(('Error drawing canvas: ', type(errDraw), errDraw))

def checkfn(self, idx, text):
print('checkfn')
- print('Check index=%d text=%s' % (idx,text))
+ print(('Check index=%d text=%s' % (idx,text)))
#print 'Size of listbtnchecked[]= ', len(self.listbtnchecked)
#self.listbtnchecked[idx] = 1

# print 'nothing'

-root = Tkinter.Tk()
+root = tkinter.Tk()

app = Coolrsub(root,2,3)
root.mainloop()
--- a/examples/sos/pycoolrgui/pycoolr-plot/coolr-launch.py (original)
--- b/examples/sos/pycoolrgui/pycoolr-plot/coolr-launch.py (refactored)

try:
opts, args = getopt.getopt(sys.argv[1:],
shortopt, longopt)
-except getopt.GetoptError, err:
- print err
+except getopt.GetoptError as err:
+ print(err)
usage()
sys.exit(1)

elif o in ("--ncols"):
ocfg["ncols"]=int(a)
elif o in ("--list"):
- print ''
- print '[available graph modules]'
- print ''
+ print('')
+ print('[available graph modules]')
+ print('')
for i in cfg["modnames"]:
- print i
- print ''
- print ''
+ print(i)
+ print('')
+ print('')
sys.exit(0)
elif o in ("--mods"):
ocfg["modnames"] = a.split(",")

if len(args) < 1:
- print ''
- print 'No config file is specified. Enabled the fake mode.'
- print ''
+ print('')
+ print('No config file is specified. Enabled the fake mode.')
+ print('')
cfg["masternode"] = "frontend"
cfg["drawexternal"] = "no"
cfg["drawacpipwr"] = "no"

with open(cfgfn) as f:
cfgtmp = json.load(f)
# override if cfg defines any
- for k in cfgtmp.keys():
+ for k in list(cfgtmp.keys()):
cfg[k] = cfgtmp[k]
# override if specifed as cmd option
- for k in ocfg.keys():
+ for k in list(ocfg.keys()):
cfg[k] = ocfg[k]

if len(targetnode) == 0 :
targetnode = cfg['masternode']
if len(enclaves) == 0:
- if cfg.has_key('enclaves'):
+ if 'enclaves' in cfg:
enclaves = cfg['enclaves']

-print 'masternode:', cfg['masternode']
-print 'targetnode:', targetnode
-print 'enclaves:', enclaves
+print('masternode:', cfg['masternode'])
+print('targetnode:', targetnode)
+print('enclaves:', enclaves)

if len(appcfgfn) > 0:
with open(appcfgfn) as f:
appcfg = json.load(f)
- for k in appcfg.keys():
+ for k in list(appcfg.keys()):
cfg[k] = appcfg[k]

- if not (cfg.has_key('appname') and cfg.has_key('appsamples')):
- print "Please double check %s: appname or appsamples tags" % appcfgfn
+ if not ('appname' in cfg and 'appsamples' in cfg):
+ print("Please double check %s: appname or appsamples tags" % appcfgfn)
sys.exit(1)

try:
logf = open(cfg["outputfn"], 'w', 0) # unbuffered write
except:
- print 'unable to open', cfg["outputfn"]
-
-print >>logf, json.dumps(info)
+ print('unable to open', cfg["outputfn"])
+
+print(json.dumps(info), file=logf)

#if not fakemode:
# querycmds = cfg['querycmds']

if sys.version_info[0] < 3:
- import Tkinter as Tk
+ import tkinter as Tk
else:
import tkinter as Tk

#root.after(2000,mainLoop)

ngraphs = len(params['cfg']['appsamples'])
-print 'samples ', params['cfg']['appsamples']
+print('samples ', params['cfg']['appsamples'])
data_lr = [listrotate2D(length=params['lrlen']) for i in range(ngraphs)]

#fig = plt.figure( figsize=(cfg["figwidth"],cfg["figheight"]) )
--- a/examples/sos/pycoolrgui/pycoolr-plot/coolr-sos-db.py (original)
--- b/examples/sos/pycoolrgui/pycoolr-plot/coolr-sos-db.py (refactored)

#!/usr/bin/env python

-import sys, os, re, thread, signal
+import sys, os, re, _thread, signal
import multiprocessing
import json
import sqlite3

try:
opts, args = getopt.getopt(sys.argv[1:],
shortopt, longopt)
-except getopt.GetoptError, err:
- print err
+except getopt.GetoptError as err:
+ print(err)
usage()
sys.exit(1)

elif o in ("--ncols"):
ocfg["ncols"]=int(a)
elif o in ("--list"):
- print ''
- print '[available graph modules]'
- print ''
+ print('')
+ print('[available graph modules]')
+ print('')
for i in cfg["modnames"]:
- print i
- print ''
- print ''
+ print(i)
+ print('')
+ print('')
sys.exit(0)
elif o in ("--mods"):
ocfg["modnames"] = a.split(",")

if len(args) < 1:
- print ''
- print 'No config file is specified. Enabled the fake mode.'
- print ''
+ print('')
+ print('No config file is specified. Enabled the fake mode.')
+ print('')
cfg["masternode"] = "frontend"
cfg["drawexternal"] = "no"
cfg["drawacpipwr"] = "no"

with open(cfgfn) as f:
cfgtmp = json.load(f)
# override if cfg defines any
- for k in cfgtmp.keys():
+ for k in list(cfgtmp.keys()):
cfg[k] = cfgtmp[k]
# override if specifed as cmd option
- for k in ocfg.keys():
+ for k in list(ocfg.keys()):
cfg[k] = ocfg[k]

targetnode = os.environ['PYCOOLR_NODE']
#targetnode = cfg['masternode']
if len(enclaves) == 0:
- if cfg.has_key('enclaves'):
+ if 'enclaves' in cfg:
enclaves = cfg['enclaves']

#print 'masternode:', cfg['masternode']
-print 'targetnode:', targetnode
-print 'enclaves:', enclaves
+print('targetnode:', targetnode)
+print('enclaves:', enclaves)

if len(appcfgfn) > 0:
with open(appcfgfn) as f:
appcfg = json.load(f)
- for k in appcfg.keys():
+ for k in list(appcfg.keys()):
cfg[k] = appcfg[k]

- if not (cfg.has_key('appname') and cfg.has_key('appsamples')):
- print "Please double check %s: appname or appsamples tags" % appcfgfn
+ if not ('appname' in cfg and 'appsamples' in cfg):
+ print("Please double check %s: appname or appsamples tags" % appcfgfn)
sys.exit(1)

try:
logf = open(cfg["outputfn"], 'w', 0) # unbuffered write
except:
- print 'unable to open', cfg["outputfn"]
+ print('unable to open', cfg["outputfn"])

#if not fakemode:

params['enclaves'] = enclaves

if sys.version_info[0] < 3:
- import Tkinter
+ import tkinter
#from Tkinter import *
- import tkFileDialog
- import tkFont
- from tkFont import Font
+ import tkinter.filedialog
+ import tkinter.font
+ from tkinter.font import Font
#from Tkinter.FileDialog import askopenfilename
else:
import tkinter
def __init__(self, master, row=2, col=3):

# Create a container
- self.frame = Tkinter.Frame(master,width=200,height=100)
+ self.frame = tkinter.Frame(master,width=200,height=100)
# Create 2 buttons
#self.button_left = Tkinter.Button(frame,text="< Decrease Slope",
# command=self.decrease)

self.listFontFamily = ['serif', 'sans-serif', 'cursive', 'fantasy', 'monospace']
self.listFontWeight = ['ultralight','light','normal','regular','book','medium','roman','semibold','demibold','demi','bold','heavy','extra bold','black']

- self.list_fonts = list( tkFont.families() )
+ self.list_fonts = list( tkinter.font.families() )

self.selectedFontPolicy = None
self.selectedFontSize = None
self.selectedFontWeight = None

# create a custom font
- self.customFont = tkFont.Font(family="Helvetica", size=12)
+ self.customFont = tkinter.font.Font(family="Helvetica", size=12)

for idx in range(params['cfg']['nbgraphs']):
self.listUsedGraphs.append(-1)

try:
root.config(menu=menubar)
except AttributeError as attErr:
- print 'menu Exception: ', type(attErr), attErr
+ print('menu Exception: ', type(attErr), attErr)

#self.winPvars()
#self.winCvars()

self.subSpawn(sos_db_file)

def try_execute(self, c, statement, parameters=None):
- print ("statement: ", statement)
- print ("parameters: ", parameters)
+ print(("statement: ", statement))
+ print(("parameters: ", parameters))
try:
if parameters:
c.execute(statement,parameters);
else:
c.execute(statement);
except sqlite3.Error as e:
- print("database error.....", e.args[0])
+ print(("database error.....", e.args[0]))

def open_connection(self,sqlite_file):
global conn
# check for file to exist
#print ("Checking for file: ", sqlite_file)
- print ("Checking for file: ", "sosd.00000.db")
+ print(("Checking for file: ", "sosd.00000.db"))
while not os.path.exists("sosd.00000.db"):
- print ("Waiting on file: ", sqlite_file)
+ print(("Waiting on file: ", sqlite_file))
time.sleep(1)

#print("Connecting to: ", sqlite_file)
- print("Connecting to: ", "sosd.00000.db")
+ print(("Connecting to: ", "sosd.00000.db"))
# Connecting to the database file
#conn = sqlite3.connect(sqlite_file)
#fd = os.open(sqlite_file, os.O_RDONLY)

return c

def btnfontsupdate(self):
- print 'Update font'
+ print('Update font')
if self.selectedFontPolicy or self.selectedFontSize or self.selectedFontWeight:
matplotlib.rcParams.update({'font.size': self.selectedFontSize, 'font.family': self.selectedFontPolicy})
#self.customFont.configure(family=self.selectedFontPolicy)

matplotlib.rc('font', **font)

def ckbtnFontBold(self):
- print 'Bold selected'
+ print('Bold selected')

def ckbtnFontItalic(self):
- print 'Italic selected'
+ print('Italic selected')

def ckbtnFontUnderline(self):
- print 'Underline selected'
+ print('Underline selected')

def browsefontpolicy(self):
- print 'browsefontpolicy'
- fontpolicydiag = tkFileDialog.askopenfilename(filetypes=[("Text files","*.fft")])
+ print('browsefontpolicy')
+ fontpolicydiag = tkinter.filedialog.askopenfilename(filetypes=[("Text files","*.fft")])

def onselectFontPolicy(self,evt):
w = evt.widget
selection = w.curselection()
value = w.get(selection[0])
self.selectedFontPolicy = value
- print 'select font: ', value
+ print('select font: ', value)

def onselectFontSize(self, evt):
- print 'select font size'
+ print('select font size')
w = evt.widget
selection = w.curselection()
value = w.get(selection[0])
self.selectedFontSize = value
- print 'select font: ', value
+ print('select font: ', value)

def onselectFontWeight(self, evt):
- print 'select font weight'
+ print('select font weight')
w = evt.widget
selection = w.curselection()
value = w.get(selection[0])
self.selectedFontWeight = value
- print 'select font: ', value
+ print('select font: ', value)

def loadFontPolicy(self):
fontpolicydiag = askopenfilename(filetypes=(("*.fft")))

#def cvarsmenu(self):

def fontmenu(self):
- print 'nothing'
+ print('nothing')

self.paramswin = Tk.Tk()
self.paramswin.title("Fonts: family, size and weight")
def clearplot(self,idxGraph):

- print 'clearplot: idxGraph=', idxGraph
+ print('clearplot: idxGraph=', idxGraph)
ax = self.ax[idxGraph]
ax.cla()
#ax.clf()

total_val=total_val+ref4
num_vals=num_vals+1
mean_val=total_val/num_vals
- print 'display record ref4='+str(ref4)
+ print('display record ref4='+str(ref4))
self.data_lr[i].add(t,ref4)
#self.data_lr[i].add(t,mean_val)
goodrecord=1

if goodrecord==0:
- print 'bad record'
+ print('bad record')
return

def updategui3(self, params, rows):

total_val=0
num_vals=0

- print '[PYCOOLR] Starting update gui'
+ print('[PYCOOLR] Starting update gui')
#if sample['node'] == params['targetnode'] and sample['sample'] == 'tau':
#
# data handling

#print("Fetching rows.")
rows[j] = self.conn.fetchall()
- print 'row: ', rows[j]
+ print('row: ', rows[j])
if len(rows[j]) <= 0:
- print("Error: query returned no rows.",)
+ print(("Error: query returned no rows.",))
else:
goodrecord = 1

try:
ax.cla()
except Exception as errCla:
- print 'update_gui: Error cla(): ', type(errCla), errCla
+ print('update_gui: Error cla(): ', type(errCla), errCla)

#ax.set_xlim([t-gxsec, t])
#print 'get x and y'

try:
ax.cla()
except Exception as errCla:
- print 'update_gui: Error cla(): ', type(errCla), errCla
+ print('update_gui: Error cla(): ', type(errCla), errCla)

ax.set_xlim([pack_time-gxsec, pack_time])
#print 'get x and y'

graphs = [None, None, None, None, None, None]
axises = [None, None, None, None, None, None]

- print '[PYCOOLR] Starting update gui'
+ print('[PYCOOLR] Starting update gui')
#if sample['node'] == params['targetnode'] and sample['sample'] == 'tau':
#
# data handling

#print("Fetching rows.")
rows[j] = self.conn.fetchall()
- print 'rows: ', rows[j]
+ print('rows: ', rows[j])
if len(rows[j]) <= 0:
- print("Error: query returned no rows.",)
+ print(("Error: query returned no rows.",))
else:
goodrecord = 1

graph = {}

if newplot:
- print 'newplot True'
+ print('newplot True')
axis = pl.subplot(1)
axis.set_title("Time per iteration");
graph[r] = (pl.plot(pack_time, metric_values, marker='*', linestyle='-', label=str(r))[0])

pl.ylabel("Seconds")
pl.xlabel("Timestamp")
else:
- print 'newplot False'
+ print('newplot False')
#axis = pl.subplot(321)
graph[r].set_data(pack_time, metric_values)
axis.relim() # Recalculate limits

try:
ax.cla()
except Exception as errCla:
- print 'update_gui: Error cla(): ', type(errCla), errCla
+ print('update_gui: Error cla(): ', type(errCla), errCla)

ax.set_xlim([t-gxsec, t])
#print 'get x and y'
def subscribe(self,libarbjsonbeep):

- print 'start thread with Subscribe'
+ print('start thread with Subscribe')

listargs = ['MEMORY','NODE_POWER_WATTS','MPI_T_PVAR']

def publish(self,libarbpubcvars):

- print 'start thread with Publish'
+ print('start thread with Publish')

#listargs = ['MEMORY','NODE_POWER_WATTS','MPI_T_PVAR']

payload += resultPayload[j]

payload.strip()
- print 'payload =',payload
+ print('payload =',payload)
try:
j = json.loads(payload)
except ValueError as e:
- print 'Failed to load json data: %s' %e
+ print('Failed to load json data: %s' %e)
continue
#return False

if 'node' not in e and\
'sample' not in e and\
'time' not in e:
- print 'Ignore this invalid sample:', json.dumps(e)
+ print('Ignore this invalid sample:', json.dumps(e))
continue

#print 'set timestamp'

#print 'finished parsing listEvents'
#draw to refresh plotting
#layout.canvas.draw()
- print 'draw canvas'
+ print('draw canvas')
try:
self.canvas.draw()
except Exception as errDraw:
- print 'Error drawing canvas: ', type(errDraw), errDraw
+ print('Error drawing canvas: ', type(errDraw), errDraw)
#plt.draw()

profile_t7 = time.time()

all_rows = c.fetchall()
ts = np.array([x[0] for x in all_rows])
min_timestamp = ts[0]
- print("min timestamp: ", min_timestamp)
+ print(("min timestamp: ", min_timestamp))

def req_sql(self, c, ranks, ranks2, group_column, metric):
- print 'req_sql entering'
+ print('req_sql entering')
for r in ranks:
sql_statement = ("SELECT distinct tbldata.name, tblvals.val, tblvals.time_pack, tblpubs.comm_rank FROM tblvals INNER JOIN tbldata ON tblvals.guid = tbldata.guid INNER JOIN tblpubs ON tblpubs.guid = tbldata.pub_guid WHERE tblvals.guid IN (SELECT guid FROM tbldata WHERE tbldata.name LIKE '" + metric + "') AND tblpubs." + group_column)
"""

sql_statement = (sql_statement + " like '" + r + "' and tblvals.val > 0 order by tblvals.time_pack;")

#params = [metric,r]
- print "Executing query: ", sql_statement,
+ print("Executing query: ", sql_statement, end=' ')
self.try_execute(c, sql_statement)
- print "Done. "
+ print("Done. ")

#print("Fetching rows.")
all_rows = c.fetchall()
if len(all_rows) <= 0:
- print("Error: query returned no rows.",)
- print(sql_statement, params)
+ print(("Error: query returned no rows.",))
+ print((sql_statement, params))

#print("Making numpy array of: metric_values")
metric_values = np.array([max(x[1],0) for x in all_rows])

while self.ranks.size == 0:
time.sleep(1)
self.ranks,self.procs = self.get_ranks(self.conn)
- print ("ranks: ", self.ranks)
+ print(("ranks: ", self.ranks))

# get the number of nodes
self.nodes,self.noderanks = self.get_nodes(self.conn)
while self.nodes.size == 0:
time.sleep(1)
nodes,self.noderanks = self.get_nodes(self.conn)
- print ("nodes: ", self.nodes)
+ print(("nodes: ", self.nodes))

self.get_min_timestamp(self.conn)
#resize the figure

def readsosmetrics(self,arguments):

- print 'readsosmetrics'
+ print('readsosmetrics')
profile_t1 = time.time()
self.opendb(arguments)

- print 'after opening db, read db and plot ....'
+ print('after opening db, read db and plot ....')

while True:

if self.listRecordSample[i] != -1:
j = self.listRecordSample[i]

- print 'readsosmetrics: i=%d, j=%d' %(i,j)
+ print('readsosmetrics: i=%d, j=%d' %(i,j))

#rank = self.ranks[j]
#rank2 = self.ranks2[j]

self.rows[j] = self.conn.fetchall()
#print 'rows: ', self.rows[j]
if len(self.rows[j]) <= 0:
- print("Error: query returned no rows.",)
+ print(("Error: query returned no rows.",))
else:
goodrecord = 1
def readEvents(self,libarbjsonbeep):
1687
1688
- print '[PYCOOLR] readEvents begin'
1689
+ print('[PYCOOLR] readEvents begin')
1690
1691
low_index = 0
1692
high_index = 0
1693
1694
try:
1695
j = json.loads(payload)
1696
except ValueError as e:
1697
- print 'Failed to load json data: %s' %e
1698
+ print('Failed to load json data: %s' %e)
1699
continue
1700
#return False
1701
1702
1703
if 'node' not in e and\
1704
'sample' not in e and\
1705
'time' not in e:
1706
- print 'Ignore this invalid sample:', json.dumps(e)
1707
+ print('Ignore this invalid sample:', json.dumps(e))
1708
continue
1709
1710
#print 'set timestamp'
1711
1712
1713
def subSpawn(self,arguments):
1714
1715
- print 'subSpawn: load beacon subscriber library'
1716
+ print('subSpawn: load beacon subscriber library')
1717
envlibpath = os.environ['PYCOOLR_LIBPATH']
1718
libarbjsonbeep = cdll.LoadLibrary(envlibpath+'/libarbitraryjsonbeepmulsub.so')
1719
1720
1721
1722
try:
1723
#thread.start_new_thread(self.subscribe,(libarbjsonbeep,))
1724
- thread.start_new_thread(self.readsosmetrics,(arguments,))
1725
+ _thread.start_new_thread(self.readsosmetrics,(arguments,))
1726
1727
except Exception as errThread:
1728
- print 'Error: unable to start thread: ', errThread
1729
+ print('Error: unable to start thread: ', errThread)
1730
1731
self.refresh_plot()
1732
#self.readEvents(libarbjsonbeep)
1733
1734
#print "selection:", selection, ": '%s'" % value
1735
1736
listintselection = [int (i) for i in selection]
1737
- print 'listintselection: ', listintselection
1738
+ print('listintselection: ', listintselection)
1739
1740
for i in range(self.nbsamples):
1741
if (self.listSamplesAllocated[i] > -1) and (i not in listintselection):
1742
1743
if self.listSamplesAllocated[j] == -1:
1744
#index = int(j)
1745
self.listUsedGraphs[i] = j
1746
- print 'graph %d allocated to sample %d' % (i, j)
1747
+ print('graph %d allocated to sample %d' % (i, j))
1748
self.listRecordSample[i] = j
1749
self.listSamplesAllocated[j] = i
1750
break
1751
1752
# Mark current graph as used
1753
self.listUsedGraphs[j] = 1
1754
# Record the current graph as plotting the current sample
1755
- print 'Record Sample %d for graph %d' %(index,j)
1756
+ print('Record Sample %d for graph %d' %(index,j))
1757
self.listRecordSample[j] = index
1758
1759
# Mark current sample as allocated to the current graph
1760
1761
self.canvas.draw()
1762
#self.frame.update()
1763
except Exception as errDraw:
1764
- print 'refresh_plot: Error drawing canvas: ', type(errDraw), errDraw
1765
+ print('refresh_plot: Error drawing canvas: ', type(errDraw), errDraw)
1766
self.lock.release()
1767
1768
self.root.after(1000,self.refresh_plot)
1769
1770
def updatebtn(self):
1771
- print 'update buttonupdate button'
1772
+ print('update buttonupdate button')
1773
try:
1774
self.canvas.draw()
1775
except Exception as errDraw:
1776
- print 'Error drawing canvas: ', type(errDraw), errDraw
1777
+ print('Error drawing canvas: ', type(errDraw), errDraw)
1778
1779
def checkfn(self, idx, text):
1780
- print 'checkfn'
1781
- print 'Check index=%d text=%s' % (idx,text)
1782
+ print('checkfn')
1783
+ print('Check index=%d text=%s' % (idx,text))
1784
#print 'Size of listbtnchecked[]= ', len(self.listbtnchecked)
1785
#self.listbtnchecked[idx] = 1
1786
1787
1788
# print 'nothing'
1789
1790
1791
-root = Tkinter.Tk()
1792
+root = tkinter.Tk()
1793
1794
app = Coolrsub(root,2,3)
1795
root.mainloop()
1796
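Illustration (not part of the patch): the hunks above replace the removed Python 2 "thread" module with Python 3's "_thread" in the start_new_thread() calls. A minimal, self-contained sketch of that call next to the usually preferred threading.Thread API; the worker function, its tag argument and the sleeps are made up for the example and do not come from pycoolr.

    import _thread
    import threading
    import time

    def worker(tag):
        # stand-in for readsosmetrics/readEvents; just sleeps and reports
        time.sleep(0.1)
        print(tag, 'finished')

    # direct Python 3 spelling of the old thread.start_new_thread(...)
    _thread.start_new_thread(worker, ('via _thread',))

    # higher-level equivalent that is easier to join and supervise
    t = threading.Thread(target=worker, args=('via threading',), daemon=True)
    t.start()
    t.join()

    time.sleep(0.3)  # give the _thread worker time to finish before the interpreter exits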
--- a/examples/sos/pycoolrgui/pycoolr-plot/coolr.py (original)
1797
--- b/examples/sos/pycoolrgui/pycoolr-plot/coolr.py (refactored)
1798
1799
#!/usr/bin/env python
1800
1801
#import sys, os, re, _thread, signal
1802
-import sys, os, re, thread, signal
1803
+import sys, os, re, _thread, signal
1804
#from cStringIO import StringIO
1805
from io import StringIO
1806
import subprocess
1807
1808
with open(cfgfn) as f:
1809
cfgtmp = json.load(f)
1810
# override if cfg defines any
1811
- for k in cfgtmp.keys():
1812
+ for k in list(cfgtmp.keys()):
1813
cfg[k] = cfgtmp[k]
1814
# override if specifed as cmd option
1815
- for k in ocfg.keys():
1816
+ for k in list(ocfg.keys()):
1817
cfg[k] = ocfg[k]
1818
1819
1820
1821
targetnode = os.environ['PYCOOLR_NODE']
1822
#targetnode = cfg['masternode']
1823
if len(enclaves) == 0:
1824
- if cfg.has_key('enclaves'):
1825
+ if 'enclaves' in cfg:
1826
enclaves = cfg['enclaves']
1827
1828
#print 'masternode:', cfg['masternode']
1829
-print('targetnode:', targetnode)
1830
-print('enclaves:', enclaves)
1831
+print(('targetnode:', targetnode))
1832
+print(('enclaves:', enclaves))
1833
1834
if len(appcfgfn) > 0:
1835
with open(appcfgfn) as f:
1836
appcfg = json.load(f)
1837
- for k in appcfg.keys():
1838
+ for k in list(appcfg.keys()):
1839
cfg[k] = appcfg[k]
1840
1841
- if not (cfg.has_key('appname') and cfg.has_key('appsamples')):
1842
- print("Please double check %s: appname or appsamples tags" % appcfgfn)
1843
+ if not ('appname' in cfg and 'appsamples' in cfg):
1844
+ print(("Please double check %s: appname or appsamples tags" % appcfgfn))
1845
sys.exit(1)
1846
1847
1848
1849
try:
1850
logf = open(cfg["outputfn"], 'w', 0) # unbuffered write
1851
except:
1852
- print('unable to open', cfg["outputfn"])
1853
+ print(('unable to open', cfg["outputfn"]))
1854
1855
1856
#if not fakemode:
1857
1858
params['enclaves'] = enclaves
1859
1860
if sys.version_info[0] < 3:
1861
- import Tkinter
1862
+ import tkinter
1863
#from Tkinter import *
1864
- import tkFileDialog
1865
- import tkFont
1866
- from tkFont import Font
1867
+ import tkinter.filedialog
1868
+ import tkinter.font
1869
+ from tkinter.font import Font
1870
#from Tkinter.FileDialog import askopenfilename
1871
else:
1872
import tkinter
1873
1874
def __init__(self, master, row=2, col=3):
1875
1876
# Create a container
1877
- self.frame = Tkinter.Frame(master,width=200,height=100)
1878
+ self.frame = tkinter.Frame(master,width=200,height=100)
1879
# Create 2 buttons
1880
#self.button_left = Tkinter.Button(frame,text="< Decrease Slope",
1881
# command=self.decrease)
1882
1883
sql_statement = ("SELECT MAX(frame) FROM tblVals;")
1884
self.try_execute(self.conn, sql_statement)
1885
query_result = self.conn.fetchall()
1886
- print("query_result", query_result[0][0])
1887
+ print(("query_result", query_result[0][0]))
1888
if type(query_result[0][0]) == int:
1889
frame = int(query_result[0][0])
1890
- print("frame", frame)
1891
+ print(("frame", frame))
1892
self.metricsDB = ""
1893
#Get the metric's names
1894
#sql_statement = ("SELECT distinct(value_name), comm_rank FROM viewCombined where frame > 0 ORDER BY value_name, comm_rank;")
1895
1896
params['cfg']['units'] = ["KB" if (metric[0].find("KB") > -1) else "counts" for metric in self.metrics]
1897
params['cfg']['units'] = ["#Events" if (metric[0].find("NumEvents") > -1) else units for metric,units in zip(self.metrics,params['cfg']['units'])]
1898
print("---------------------------------------------------------------------------------------------------------")
1899
- print("self.nbsamples",self.nbsamples)
1900
- print("self.listmetrics",self.listmetrics)
1901
+ print(("self.nbsamples",self.nbsamples))
1902
+ print(("self.listmetrics",self.listmetrics))
1903
1904
1905
1906
#self.metrics = params['cfg']['metrics']
1907
- print("self.metrics", self.metrics)
1908
+ print(("self.metrics", self.metrics))
1909
#self.ranks = params['cfg']['ranks']
1910
self.ranks = [None] * self.nbsamples
1911
self.procs = [None] * self.nbsamples
1912
1913
self.listFontFamily = ['serif', 'sans-serif', 'cursive', 'fantasy', 'monospace']
1914
self.listFontWeight = ['ultralight','light','normal','regular','book','medium','roman','semibold','demibold','demi','bold','heavy','extra bold','black']
1915
1916
- self.list_fonts = list( tkFont.families() )
1917
+ self.list_fonts = list( tkinter.font.families() )
1918
1919
self.selectedFontPolicy = None
1920
self.selectedFontSize = None
1921
self.selectedFontWeight = None
1922
1923
# create a custom font
1924
- self.customFont = tkFont.Font(family="Helvetica", size=12)
1925
+ self.customFont = tkinter.font.Font(family="Helvetica", size=12)
1926
1927
for idx in range(params['cfg']['nbgraphs']):
1928
self.listUsedGraphs.append(-1)
1929
1930
try:
1931
root.config(menu=menubar)
1932
except AttributeError as attErr:
1933
- print('menu Exception: ', type(attErr), attErr)
1934
+ print(('menu Exception: ', type(attErr), attErr))
1935
1936
#self.winPvars()
1937
#self.winCvars()
1938
1939
self.subSpawn()
1940
1941
def try_execute(self, c, statement, parameters=None):
1942
- print ("statement: ", statement)
1943
- print ("parameters: ", parameters)
1944
+ print(("statement: ", statement))
1945
+ print(("parameters: ", parameters))
1946
try:
1947
if parameters:
1948
c.execute(statement,parameters);
1949
else:
1950
c.execute(statement);
1951
except sqlite3.Error as e:
1952
- print("database error...", e.args[0])
1953
+ print(("database error...", e.args[0]))
1954
1955
def open_connection(self):
1956
global conn
1957
# check for file to exist
1958
#print ("Checking for file: ", sqlite_file)
1959
- print("Checking for file: ", self.sosdbfile)
1960
+ print(("Checking for file: ", self.sosdbfile))
1961
while not os.path.exists(self.sosdbfile):
1962
- print("Waiting on file: ", self.sosdbfile)
1963
+ print(("Waiting on file: ", self.sosdbfile))
1964
time.sleep(1)
1965
1966
#print("Connecting to: ", sqlite_file)
1967
- print("Connecting to: ", self.sosdbfile)
1968
+ print(("Connecting to: ", self.sosdbfile))
1969
# Connecting to the database file
1970
#conn = sqlite3.connect(sqlite_file)
1971
#fd = os.open(sqlite_file, os.O_RDONLY)
1972
1973
1974
def browsefontpolicy(self):
1975
print('browsefontpolicy')
1976
- fontpolicydiag = tkFileDialog.askopenfilename(filetypes=[("Text files","*.fft")])
1977
+ fontpolicydiag = tkinter.filedialog.askopenfilename(filetypes=[("Text files","*.fft")])
1978
1979
def onselectFontPolicy(self,evt):
1980
w = evt.widget
1981
selection = w.curselection()
1982
value = w.get(selection[0])
1983
self.selectedFontPolicy = value
1984
- print('select font: ', value)
1985
+ print(('select font: ', value))
1986
1987
def onselectFontSize(self, evt):
1988
print('select font size')
1989
1990
selection = w.curselection()
1991
value = w.get(selection[0])
1992
self.selectedFontSize = value
1993
- print('select font: ', value)
1994
+ print(('select font: ', value))
1995
1996
def onselectFontWeight(self, evt):
1997
print('select font weight')
1998
1999
selection = w.curselection()
2000
value = w.get(selection[0])
2001
self.selectedFontWeight = value
2002
- print('select font: ', value)
2003
+ print(('select font: ', value))
2004
2005
def loadFontPolicy(self):
2006
fontpolicydiag = askopenfilename(filetypes=(("*.fft")))
2007
2008
#f1 = Tk.Frame(pvarswin,width=150,height=100)
2009
s1 = Tk.Scrollbar(self.f1)
2010
#l1 = Tk.Listbox(f1,selectmode='multiple',width=80,height=40)
2011
- print("self.listmetrics",self.listmetrics)
2012
- print("len(self.listmetrics)",len(self.listmetrics))
2013
+ print(("self.listmetrics",self.listmetrics))
2014
+ print(("len(self.listmetrics)",len(self.listmetrics)))
2015
2016
2017
for i in range(self.nbsamples):
2018
2019
2020
def clearplot(self,idxGraph):
2021
2022
- print('clearplot: idxGraph=', idxGraph)
2023
+ print(('clearplot: idxGraph=', idxGraph))
2024
ax = self.ax[idxGraph]
2025
ax.cla()
2026
#ax.clf()
2027
2028
total_val=total_val+ref4
2029
num_vals=num_vals+1
2030
mean_val=total_val/num_vals
2031
- print('display record ref4='+str(ref4))
2032
+ print(('display record ref4='+str(ref4)))
2033
self.data_lr[i].add(t,ref4)
2034
#self.data_lr[i].add(t,mean_val)
2035
goodrecord=1
2036
2037
try:
2038
ax.cla()
2039
except Exception as errCla:
2040
- print('update_gui: Error cla(): ', type(errCla), errCla)
2041
+ print(('update_gui: Error cla(): ', type(errCla), errCla))
2042
2043
ax.set_xlim([t-gxsec, t])
2044
#print 'get x and y'
2045
2046
2047
#print 'parse graphs'
2048
metric_value = max(sample[1],0)
2049
- print("metric_value",metric_value)
2050
+ print(("metric_value",metric_value))
2051
numeric = re.search(r'\d+', metric_value)
2052
metric_value_num = numeric.group()
2053
metric_value_float = float(metric_value_num)
2054
2055
try:
2056
ax.cla()
2057
except Exception as errCla:
2058
- print('update_gui: Error cla(): ', type(errCla), errCla)
2059
+ print(('update_gui: Error cla(): ', type(errCla), errCla))
2060
2061
ax.set_xlim([pack_time-gxsec, pack_time])
2062
#print 'get x and y'
2063
2064
try:
2065
ax.cla()
2066
except Exception as errCla:
2067
- print('update_gui: Error cla(): ', type(errCla), errCla)
2068
+ print(('update_gui: Error cla(): ', type(errCla), errCla))
2069
2070
ax.set_xlim([pack_time-gxsec, pack_time])
2071
#print 'get x and y'
2072
2073
try:
2074
j = json.loads(payload)
2075
except ValueError as e:
2076
- print('Failed to load json data: %s' %e)
2077
+ print(('Failed to load json data: %s' %e))
2078
continue
2079
#return False
2080
2081
2082
if 'node' not in e and\
2083
'sample' not in e and\
2084
'time' not in e:
2085
- print('Ignore this invalid sample:', json.dumps(e))
2086
+ print(('Ignore this invalid sample:', json.dumps(e)))
2087
continue
2088
2089
#print 'set timestamp'
2090
2091
try:
2092
self.canvas.draw()
2093
except Exception as errDraw:
2094
- print('Error drawing canvas: ', type(errDraw), errDraw)
2095
+ print(('Error drawing canvas: ', type(errDraw), errDraw))
2096
#plt.draw()
2097
2098
profile_t7 = time.time()
2099
2100
all_rows = c.fetchall()
2101
ts = np.array([x[0] for x in all_rows])
2102
min_timestamp = ts[0]
2103
- print("min timestamp: ", min_timestamp)
2104
+ print(("min timestamp: ", min_timestamp))
2105
2106
2107
def get_min_timestamp(self):
2108
2109
sql_statement = ("SELECT min(time_pack) FROM viewCombined;")
2110
print("get_min_timestamp Executing query")
2111
2112
- print("sql statement: ", sql_statement)
2113
+ print(("sql statement: ", sql_statement))
2114
#self.try_execute(c, sql_statement)
2115
os.environ['SOS_SQL'] = sql_statement
2116
sos_bin_path = os.environ.get('SOS_BIN_DIR')
2117
- print('SOS BIN path: ', sos_bin_path)
2118
+ print(('SOS BIN path: ', sos_bin_path))
2119
os.system('cd '+ sos_bin_path)
2120
- print('current dir: ', os.getcwd())
2121
+ print(('current dir: ', os.getcwd()))
2122
# Redirect stdout of passed command into a string
2123
2124
soscmd = sos_bin_path + "/demo_app_silent --sql SOS_SQL"
2125
- print('soscmd: ', soscmd)
2126
+ print(('soscmd: ', soscmd))
2127
tmp_res_min_ts_sql = subprocess.check_output(soscmd, shell=True)
2128
2129
#self.res_min_ts_sql = tmp_res_min_ts_sql.splitlines()
2130
- print('get min ts: tmp res sql=', tmp_res_min_ts_sql)
2131
+ print(('get min ts: tmp res sql=', tmp_res_min_ts_sql))
2132
res_min_ts_sql = tmp_res_min_ts_sql.splitlines()
2133
- print("List of result SQL MIN TS: ", res_min_ts_sql)
2134
+ print(("List of result SQL MIN TS: ", res_min_ts_sql))
2135
min_ts_rows = res_min_ts_sql[1].split(",")
2136
- print("List of result SQL MIN TS values: ", min_ts_rows)
2137
+ print(("List of result SQL MIN TS values: ", min_ts_rows))
2138
# Remove first element of SQL result
2139
#ts = np.array([x[0] for x in min_ts_rows])
2140
str_min_timestamp = min_ts_rows[0].replace('\"', '')
2141
2142
sql_statement = ("SELECT value_name, value, time_pack FROM viewCombined WHERE value_name LIKE '" + metric+ "'")
2143
#sql_statement = ("SELECT * FROM viewCombined WHERE value_name LIKE '" + metric+ "'")
2144
2145
- print("sql statement: ", sql_statement )
2146
+ print(("sql statement: ", sql_statement ))
2147
#self.try_execute(c, sql_statement)
2148
os.environ['SOS_SQL'] = sql_statement
2149
sos_bin_path = os.environ.get('SOS_BIN_DIR')
2150
- print('SOS BIN path: ', sos_bin_path)
2151
+ print(('SOS BIN path: ', sos_bin_path))
2152
os.system('cd '+ sos_bin_path)
2153
- print('current dir: ', os.getcwd())
2154
+ print(('current dir: ', os.getcwd()))
2155
# Redirect stdout of passed command into a string
2156
2157
soscmd = sos_bin_path + "/demo_app_silent --sql SOS_SQL"
2158
- print('soscmd: ', soscmd)
2159
+ print(('soscmd: ', soscmd))
2160
tmp_res_sql = subprocess.check_output(soscmd, shell=True)
2161
2162
self.try_execute(c, sql_statement)
2163
2164
#print 'stdout of SOS demo: ', sys.stdout
2165
#self.res_sql = resultstdout.getvalue()
2166
- print('tmp res_sql: ', tmp_res_sql)
2167
+ print(('tmp res_sql: ', tmp_res_sql))
2168
2169
self.res_sql = tmp_res_sql.splitlines()
2170
# REmove first element of SQL result
2171
self.res_sql.pop(0)
2172
2173
for item_sql in self.res_sql:
2174
- print('res sql: ', item_sql)
2175
+ print(('res sql: ', item_sql))
2176
2177
2178
# Call demo with SQL statement given as argument and store standard output
2179
2180
print('req_sql entering')
2181
self.res_sql = ""
2182
for elem in metric:
2183
- print("elem: ", elem)
2184
- print("metric",metric[0])
2185
+ print(("elem: ", elem))
2186
+ print(("metric",metric[0]))
2187
sql_statement = ("SELECT value_name, value, time_pack, max(frame) FROM viewCombined WHERE value_name LIKE '" + metric[0]+ "' AND comm_rank="+ str(metric[1]) +" group by value_name;")
2188
#sql_statement = ("SELECT * FROM viewCombined WHERE value_name LIKE '" + metric+ "'")
2189
2190
2191
while self.ranks.size == 0:
2192
time.sleep(1)
2193
self.ranks,self.procs = self.get_ranks(self.conn)
2194
- print ("ranks: ", self.ranks)
2195
+ print(("ranks: ", self.ranks))
2196
2197
# get the number of nodes
2198
self.nodes,self.noderanks = self.get_nodes(self.conn)
2199
while self.nodes.size == 0:
2200
time.sleep(1)
2201
nodes,self.noderanks = self.get_nodes(self.conn)
2202
- print ("nodes: ", self.nodes)
2203
+ print(("nodes: ", self.nodes))
2204
2205
self.get_min_timestamp_db(self.conn)
2206
#resize the figure
2207
2208
print('SOS: Execute demo app')
2209
sos_path = os.environ.get('SOS_BUILD_DIR')
2210
self.sos_bin_path = sos_path+"/bin"
2211
- print('SOS BIN PATH: ', self.sos_bin_path)
2212
+ print(('SOS BIN PATH: ', self.sos_bin_path))
2213
os.system("cd "+ self.sos_bin_path)
2214
2215
2216
2217
2218
self.opendb()
2219
2220
- print("metrics: ", self.metrics)
2221
+ print(("metrics: ", self.metrics))
2222
#self.get_min_timestamp()
2223
2224
while True:
2225
2226
countsamples = 0
2227
for sample in self.rows[j]:
2228
params['ts'] = 0
2229
- print 'PYCOOLR sample: ', sample
2230
+ print('PYCOOLR sample: ', sample)
2231
#self.req_sql(self.conn, self.ranks, self.rows)
2232
profile_t2 = time.time()
2233
self.lock.acquire()
2234
2235
if self.listRecordSample[i] != -1:
2236
j = self.listRecordSample[i]
2237
2238
- print('readsosmetrics: i=%d, j=%d' %(i,j))
2239
+ print(('readsosmetrics: i=%d, j=%d' %(i,j)))
2240
2241
#rank = self.ranks[j]
2242
#rank2 = self.ranks2[j]
2243
2244
self.rows[j] = self.conn.fetchall()
2245
#print 'rows: ', self.rows[j]
2246
if len(self.rows[j]) <= 0:
2247
- print("Error: query returned no rows.",)
2248
+ print(("Error: query returned no rows.",))
2249
else:
2250
goodrecord = 1
2251
2252
2253
2254
2255
payload.strip()
2256
- print('payload =',payload)
2257
+ print(('payload =',payload))
2258
try:
2259
j = json.loads(payload)
2260
except ValueError as e:
2261
- print('Failed to load json data: %s' %e)
2262
+ print(('Failed to load json data: %s' %e))
2263
continue
2264
#return False
2265
2266
2267
if 'node' not in e and\
2268
'sample' not in e and\
2269
'time' not in e:
2270
- print('Ignore this invalid sample:', json.dumps(e))
2271
+ print(('Ignore this invalid sample:', json.dumps(e)))
2272
continue
2273
2274
#print 'set timestamp'
2275
2276
print('subSpawn: load beacon subscriber library')
2277
envlibpath = os.environ['PYCOOLR_LIBPATH']
2278
libarbjsonbeep = cdll.LoadLibrary(envlibpath+'/libarbitraryjsonbeepmulsub.so')
2279
- thread.start_new_thread(self.subscribe,(libarbjsonbeep,))
2280
- thread.start_new_thread(self.readEvents,(libarbjsonbeep,))
2281
+ _thread.start_new_thread(self.subscribe,(libarbjsonbeep,))
2282
+ _thread.start_new_thread(self.readEvents,(libarbjsonbeep,))
2283
#thread.start_new_thread(self.readEvents,(libarbjsonbeep,))
2284
except Exception as errThread:
2285
- print("Error: unable to start thread: ", errThread)
2286
+ print(("Error: unable to start thread: ", errThread))
2287
2288
elif self.tool == "sos":
2289
try:
2290
#thread.start_new_thread(self.subscribe,(libarbjsonbeep,))
2291
- thread.start_new_thread(self.readsosmetrics,())
2292
+ _thread.start_new_thread(self.readsosmetrics,())
2293
#thread.start_new_thread(self.readsosmetrics_db,())
2294
2295
except Exception as errThread:
2296
- print('Error: unable to start thread: ', errThread)
2297
+ print(('Error: unable to start thread: ', errThread))
2298
2299
2300
self.refresh_plot()
2301
2302
2303
self.selectedcvarsvalues[i] = self.listcvarsentry[i].get()
2304
strcvarsvalues += self.selectedcvarsvalues[i]
2305
- print('numselectedcvars=%d, index=%d' % (self.numselectedcvars, i))
2306
+ print(('numselectedcvars=%d, index=%d' % (self.numselectedcvars, i)))
2307
if i+1 < self.numselectedcvars:
2308
strcvarsmetrics += ","
2309
strcvarsvalues += ","
2310
2311
#self.strcvars += "="
2312
#self.strcvars += self.selectedcvarsvalues[i]
2313
#strcvars += ","
2314
- print('numselectedcvars=%d, index=%d' % (self.numselectedcvars, i))
2315
+ print(('numselectedcvars=%d, index=%d' % (self.numselectedcvars, i)))
2316
if i+1 < self.numselectedcvars:
2317
strcvarsmetrics += ","
2318
strcvarsvalues += ","
2319
2320
#self.strcvars += ":"
2321
self.strcvars += strcvarsvalues
2322
2323
- print("strcvarsmetrics: ", strcvarsmetrics)
2324
- print("strcvarsvalues: ", strcvarsvalues)
2325
- print("strcvars: ", self.strcvars)
2326
+ print(("strcvarsmetrics: ", strcvarsmetrics))
2327
+ print(("strcvarsvalues: ", strcvarsvalues))
2328
+ print(("strcvars: ", self.strcvars))
2329
2330
# Test if we have to communicate MPI_T CVARS in a Publish/Subscribe mode
2331
if cvars_comm_mode == "pub":
2332
2333
2334
for i in range(len(selection)):
2335
value = w.get(selection[i])
2336
- print("selection:", selection, ": '%s'" % value)
2337
+ print(("selection:", selection, ": '%s'" % value))
2338
self.selectedcvarsmetrics[i] = value
2339
2340
if self.listlabelcvarsmetric:
2341
2342
self.listcvarsarrayindexentry = [None] * len(selection)
2343
self.listcvarsarrayindex = [None] * len(selection)
2344
2345
- print('selection: ', selection)
2346
- print('range selection: ', range(len(selection)))
2347
-
2348
- for cvaritem, cvarindex in zip(selection, range(len(selection))):
2349
+ print(('selection: ', selection))
2350
+ print(('range selection: ', list(range(len(selection)))))
2351
+
2352
+ for cvaritem, cvarindex in zip(selection, list(range(len(selection)))):
2353
2354
value = w.get(selection[cvarindex])
2355
- print('len selection: ', len(selection))
2356
- print('value of item %d: %s ' % (cvarindex, value))
2357
- print('cvaritem: ', cvaritem)
2358
- print('cvarindex= ', cvarindex)
2359
- print('cvarsindexrow= ', self.cvarsindexrow)
2360
-
2361
- print('cfg cvars array:', self.listcfgcvarsarray[0])
2362
+ print(('len selection: ', len(selection)))
2363
+ print(('value of item %d: %s ' % (cvarindex, value)))
2364
+ print(('cvaritem: ', cvaritem))
2365
+ print(('cvarindex= ', cvarindex))
2366
+ print(('cvarsindexrow= ', self.cvarsindexrow))
2367
+
2368
+ print(('cfg cvars array:', self.listcfgcvarsarray[0]))
2369
if value == self.listcfgcvarsarray[0]:
2370
2371
self.listlabelcvarsmetric[cvarindex]=Tk.Label(self.stepCvarsUpdate, text=value)
2372
2373
#print "selection:", selection, ": '%s'" % value
2374
2375
listintselection = [int (i) for i in selection]
2376
- print('listintselection: ', listintselection)
2377
- print('listSamplesAllocated: ', self.listSamplesAllocated)
2378
- print('nbsamples', self.nbsamples)
2379
- print('len(self.listSamplesAllocated)', len(self.listSamplesAllocated))
2380
+ print(('listintselection: ', listintselection))
2381
+ print(('listSamplesAllocated: ', self.listSamplesAllocated))
2382
+ print(('nbsamples', self.nbsamples))
2383
+ print(('len(self.listSamplesAllocated)', len(self.listSamplesAllocated)))
2384
2385
for i in range(self.nbsamples):
2386
if (self.listSamplesAllocated[i] > -1) and (i not in listintselection):
2387
2388
if self.listSamplesAllocated[j] == -1:
2389
#index = int(j)
2390
self.listUsedGraphs[i] = j
2391
- print('graph %d allocated to sample %d' % (i, j))
2392
+ print(('graph %d allocated to sample %d' % (i, j)))
2393
self.listRecordSample[i] = j
2394
self.listSamplesAllocated[j] = i
2395
break
2396
2397
# Mark current graph as used
2398
self.listUsedGraphs[j] = 1
2399
# Record the current graph as plotting the current sample
2400
- print('Record Sample %d for graph %d' %(index,j))
2401
+ print(('Record Sample %d for graph %d' %(index,j)))
2402
self.listRecordSample[j] = index
2403
2404
# Mark current sample as allocated to the current graph
2405
2406
self.canvas.draw()
2407
#self.frame.update()
2408
except Exception as errDraw:
2409
- print('refresh_plot: Error drawing canvas: ', type(errDraw), errDraw)
2410
+ print(('refresh_plot: Error drawing canvas: ', type(errDraw), errDraw))
2411
self.lock.release()
2412
2413
self.root.after(1000,self.refresh_plot)
2414
2415
try:
2416
self.canvas.draw()
2417
except Exception as errDraw:
2418
- print('Error drawing canvas: ', type(errDraw), errDraw)
2419
+ print(('Error drawing canvas: ', type(errDraw), errDraw))
2420
2421
def checkfn(self, idx, text):
2422
print('checkfn')
2423
- print('Check index=%d text=%s' % (idx,text))
2424
+ print(('Check index=%d text=%s' % (idx,text)))
2425
#print 'Size of listbtnchecked[]= ', len(self.listbtnchecked)
2426
#self.listbtnchecked[idx] = 1
2427
2428
2429
# print 'nothing'
2430
2431
2432
-root = Tkinter.Tk()
2433
+root = tkinter.Tk()
2434
2435
app = Coolrsub(root,2,3)
2436
root.mainloop()
2437
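Illustration (not part of the patch): coolr.py above drops cfg.has_key(...) in favour of the "in" operator and wraps keys() in list(). A small sketch of both idioms; the cfg dictionary and its contents here are invented stand-ins, not the pycoolr configuration.

    cfg = {'masternode': 'frontend', 'enclaves': ['nodeA', 'nodeB']}

    # dict.has_key() is gone in Python 3; membership tests use "in"
    if 'enclaves' in cfg:
        print('enclaves:', cfg['enclaves'])

    # keys()/items()/values() return live views in Python 3; 2to3 adds list()
    # defensively, which is only required when the dict changes during iteration
    for k in list(cfg.keys()):
        cfg.setdefault(k + '_copy', cfg[k])

    print(sorted(cfg.keys()))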
--- a/examples/sos/pycoolrgui/pycoolr-plot/init_coolr.py (original)
2438
--- b/examples/sos/pycoolrgui/pycoolr-plot/init_coolr.py (refactored)
2439
2440
try:
2441
opts, args = getopt.getopt(sys.argv[1:],
2442
shortopt, longopt)
2443
-except getopt.GetoptError, err:
2444
- print err
2445
+except getopt.GetoptError as err:
2446
+ print(err)
2447
usage()
2448
sys.exit(1)
2449
2450
2451
elif o in ("--ncols"):
2452
ocfg["ncols"]=int(a)
2453
elif o in ("--list"):
2454
- print ''
2455
- print '[available graph modules]'
2456
- print ''
2457
+ print('')
2458
+ print('[available graph modules]')
2459
+ print('')
2460
for i in cfg["modnames"]:
2461
- print i
2462
- print ''
2463
- print ''
2464
+ print(i)
2465
+ print('')
2466
+ print('')
2467
sys.exit(0)
2468
elif o in ("--mods"):
2469
ocfg["modnames"] = a.split(",")
2470
2471
if len(args) < 1:
2472
- print ''
2473
- print 'No config file is specified. Enabled the fake mode.'
2474
- print ''
2475
+ print('')
2476
+ print('No config file is specified. Enabled the fake mode.')
2477
+ print('')
2478
cfg["masternode"] = "frontend"
2479
cfg["drawexternal"] = "no"
2480
cfg["drawacpipwr"] = "no"
2481
2482
with open(cfgfn) as f:
2483
cfgtmp = json.load(f)
2484
# override if cfg defines any
2485
- for k in cfgtmp.keys():
2486
+ for k in list(cfgtmp.keys()):
2487
cfg[k] = cfgtmp[k]
2488
# override if specifed as cmd option
2489
- for k in ocfg.keys():
2490
+ for k in list(ocfg.keys()):
2491
cfg[k] = ocfg[k]
2492
2493
if len(targetnode) == 0 :
2494
targetnode = cfg['masternode']
2495
if len(enclaves) == 0:
2496
- if cfg.has_key('enclaves'):
2497
+ if 'enclaves' in cfg:
2498
enclaves = cfg['enclaves']
2499
2500
-print 'masternode:', cfg['masternode']
2501
-print 'targetnode:', targetnode
2502
-print 'enclaves:', enclaves
2503
+print('masternode:', cfg['masternode'])
2504
+print('targetnode:', targetnode)
2505
+print('enclaves:', enclaves)
2506
2507
if len(appcfgfn) > 0:
2508
with open(appcfgfn) as f:
2509
appcfg = json.load(f)
2510
- for k in appcfg.keys():
2511
+ for k in list(appcfg.keys()):
2512
cfg[k] = appcfg[k]
2513
2514
- if not (cfg.has_key('appname') and cfg.has_key('appsamples')):
2515
- print "Please double check %s: appname or appsamples tags" % appcfgfn
2516
+ if not ('appname' in cfg and 'appsamples' in cfg):
2517
+ print("Please double check %s: appname or appsamples tags" % appcfgfn)
2518
sys.exit(1)
2519
2520
2521
2522
try:
2523
logf = open(cfg["outputfn"], 'w', 0) # unbuffered write
2524
except:
2525
- print 'unable to open', cfg["outputfn"]
2526
+ print('unable to open', cfg["outputfn"])
2527
2528
-print >>logf, json.dumps(info)
2529
+print(json.dumps(info), file=logf)
2530
2531
#if not fakemode:
2532
# querycmds = cfg['querycmds']
2533
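Illustration (not part of the patch): init_coolr.py above rewrites "print >>logf, json.dumps(info)" as print(..., file=logf). A self-contained sketch that writes to a temporary file instead of cfg["outputfn"]; note that the surrounding open(..., 'w', 0) call would itself raise ValueError on Python 3, because unbuffered mode is only allowed for binary files, so line buffering is used here.

    import json
    import os
    import tempfile

    info = {'appname': 'demo', 'nbgraphs': 6}   # made-up payload
    path = os.path.join(tempfile.gettempdir(), 'coolr_info.json')

    # line-buffered text output is the closest Python 3 equivalent of open(fn, 'w', 0)
    with open(path, 'w', buffering=1) as logf:
        print(json.dumps(info), file=logf)      # py2: print >>logf, json.dumps(info)

    with open(path) as check:
        print(check.read().strip())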
--- a/examples/sos/pycoolrgui/pycoolr-plot/layout.py (original)
2534
--- b/examples/sos/pycoolrgui/pycoolr-plot/layout.py (refactored)
2535
2536
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
2537
2538
if sys.version_info[0] < 3:
2539
- import Tkinter as Tk
2540
+ import tkinter as Tk
2541
else:
2542
import tkinter as Tk
2543
2544
--- a/examples/sos/pycoolrgui/pycoolr-plot/listrotate.py (original)
2545
--- b/examples/sos/pycoolrgui/pycoolr-plot/listrotate.py (refactored)
2546
2547
lr = listrotate2D(5)
2548
for i in range(8):
2549
lr.add(i,i)
2550
- print lr.getlistx()
2551
- print lr.getlisty()
2552
- print lr.getlistr()
2553
- print
2554
+ print(lr.getlistx())
2555
+ print(lr.getlisty())
2556
+ print(lr.getlistr())
2557
+ print()
2558
2559
- print '------------'
2560
+ print('------------')
2561
lr = listrotate1D(5)
2562
for i in range(8):
2563
lr.add(i)
2564
- print lr.getlist()
2565
- print lr.getlistr()
2566
- print
2567
+ print(lr.getlist())
2568
+ print(lr.getlistr())
2569
+ print()
2570
--- a/src/Profile/ltau.py (original)
2571
--- b/src/Profile/ltau.py (refactored)
2572
2573
fullname = os.path.join(dirname, 'profile.doc')
2574
if os.path.exists(fullname):
2575
sts = os.system('${PAGER-more} ' + fullname)
2576
- if sts: print '*** Pager exit status:', sts
2577
+ if sts: print('*** Pager exit status:', sts)
2578
break
2579
else:
2580
- print 'Sorry, can\'t find the help file "profile.doc"',
2581
- print 'along the Python search path.'
2582
+ print('Sorry, can\'t find the help file "profile.doc"', end=' ')
2583
+ print('along the Python search path.')
2584
2585
2586
2587
2588
self.simulate_call(cmd)
2589
sys.setprofile(self.dispatcher)
2590
try:
2591
- exec cmd in globals, locals
2592
+ exec(cmd, globals, locals)
2593
finally:
2594
sys.setprofile(None)
2595
return self
2596
2597
if __name__ == '__main__':
2598
usage = "tau.py scriptfile [arg] ..."
2599
if not sys.argv[1:]:
2600
- print "Usage: ", usage
2601
+ print("Usage: ", usage)
2602
sys.exit(2)
2603
2604
class ProfileParser(OptionParser):
2605
2606
sys.path.insert(0, os.path.dirname(sys.argv[0]))
2607
run('execfile(%r)' % (sys.argv[0],), options.outfile, options.sort)
2608
else:
2609
- print "Usage: ", usage
2610
+ print("Usage: ", usage)
2611
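Illustration (not part of the patch): ltau.py above turns the "exec cmd in globals, locals" statement into the exec() function. A minimal sketch with throwaway namespaces; cmd and the variable names are invented for the example.

    cmd = "result = x * 2"
    glob_ns = {}
    loc_ns = {'x': 21}

    # Python 2: exec cmd in glob_ns, loc_ns
    exec(cmd, glob_ns, loc_ns)

    print(loc_ns['result'])   # 42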
--- a/src/Profile/tau_python_wrapper.py (original)
2612
--- b/src/Profile/tau_python_wrapper.py (refactored)
2613
2614
Runs MODULE with automatic Python instrumentation.
2615
"""
2616
2617
-from __future__ import print_function
2618
+
2619
import os
2620
import sys
2621
2622
--- a/src/python/tau_profile_parser.py (original)
2623
--- b/src/python/tau_profile_parser.py (refactored)
2624
2625
Parses a set of TAU profile files and yields multi-indexed Pandas dataframes for the
2626
interval and atomic events.
2627
"""
2628
-from __future__ import print_function
2629
+
2630
import csv
2631
import glob
2632
import mmap
2633
--- a/tests/programs/python/firstprime.py (original)
2634
--- b/tests/programs/python/firstprime.py (refactored)
2635
2636
# Found factor. not prime
2637
break # move on to next number
2638
elif potentialfactor >= sqrtno:
2639
- print("The first prime number after {} is {}".format(x,current))
2640
+ print(("The first prime number after {} is {}".format(x,current)))
2641
return current
2642
current += 1
2643
2644
--- a/tools/src/tau_prof2json.py (original)
2645
--- b/tools/src/tau_prof2json.py (refactored)
2646
2647
"""A python script to convert TAU profiles to JSON
2648
"""
2649
2650
-from __future__ import print_function
2651
+
2652
import os
2653
import sys
2654
import argparse
2655
2656
start_time_stamp = app_dict["Starting Timestamp"]
2657
end_time_stamp = None
2658
if "Ending Timestamp" not in app_dict:
2659
- end_time_stamp = str(long(start_time_stamp) + max_inclusive)
2660
+ end_time_stamp = str(int(start_time_stamp) + max_inclusive)
2661
else:
2662
end_time_stamp = app_dict["Ending Timestamp"]
2663
- if workflow_start == 0 or workflow_start > long(start_time_stamp):
2664
- workflow_start = long(start_time_stamp)
2665
- if workflow_end == 0 or workflow_end < long(end_time_stamp):
2666
- workflow_end = long(end_time_stamp)
2667
+ if workflow_start == 0 or workflow_start > int(start_time_stamp):
2668
+ workflow_start = int(start_time_stamp)
2669
+ if workflow_end == 0 or workflow_end < int(end_time_stamp):
2670
+ workflow_end = int(end_time_stamp)
2671
local_time = app_dict["Local Time"]
2672
if start_time_stamp != None and have_workflow_file:
2673
for wc in workflow_dict["Workflow Component"]:
2674
2675
timer["process index"] = int(node)
2676
timer["thread index"] = int(thread)
2677
timer["Function"] = function_name
2678
- timer["Calls"] = long(tokens[0])
2679
- timer["Subroutines"] = long(tokens[1])
2680
- timer["Exclusive Time"] = long(tokens[2])
2681
- timer["Inclusive Time"] = long(tokens[3])
2682
+ timer["Calls"] = int(tokens[0])
2683
+ timer["Subroutines"] = int(tokens[1])
2684
+ timer["Exclusive Time"] = int(tokens[2])
2685
+ timer["Inclusive Time"] = int(tokens[3])
2686
group = tokens[5]
2687
# handle the ADIOS special case
2688
if "ADIOS" in function_name:
2689
if "ADIOS" not in group_totals:
2690
- group_totals["ADIOS"] = long(tokens[2])
2691
- group_counts["ADIOS"] = long(tokens[0])
2692
- else:
2693
- group_totals["ADIOS"] = group_totals["ADIOS"] + long(tokens[2])
2694
- group_counts["ADIOS"] = group_counts["ADIOS"] + long(tokens[0])
2695
+ group_totals["ADIOS"] = int(tokens[2])
2696
+ group_counts["ADIOS"] = int(tokens[0])
2697
+ else:
2698
+ group_totals["ADIOS"] = group_totals["ADIOS"] + int(tokens[2])
2699
+ group_counts["ADIOS"] = group_counts["ADIOS"] + int(tokens[0])
2700
else:
2701
if group not in group_totals:
2702
- group_totals[group] = long(tokens[2])
2703
- group_counts[group] = long(tokens[0])
2704
- else:
2705
- group_totals[group] = group_totals[group] + long(tokens[2])
2706
- group_counts[group] = group_counts[group] + long(tokens[0])
2707
+ group_totals[group] = int(tokens[2])
2708
+ group_counts[group] = int(tokens[0])
2709
+ else:
2710
+ group_totals[group] = group_totals[group] + int(tokens[2])
2711
+ group_counts[group] = group_counts[group] + int(tokens[0])
2712
data["Timers"].append(timer)
2713
- if max_inclusive < long(tokens[3]):
2714
- max_inclusive = long(tokens[3])
2715
+ if max_inclusive < int(tokens[3]):
2716
+ max_inclusive = int(tokens[3])
2717
return max_inclusive
2718
2719
def extract_group_totals():
2720
2721
application_metadata["aggr_io_read_bytes"] = read_bytes
2722
if write_bytes > 0:
2723
application_metadata["aggr_io_write_bytes"] = write_bytes
2724
- application_metadata["total_time"] = long(application_metadata["end-timestamp"]) - long(application_metadata["start-timestamp"])
2725
+ application_metadata["total_time"] = int(application_metadata["end-timestamp"]) - int(application_metadata["start-timestamp"])
2726
2727
def parse_aggregates(node, context, thread, infile, data):
2728
aggregates = infile.readline()
2729
2730
counter["process index"] = int(node)
2731
counter["thread index"] = int(thread)
2732
counter["Counter"] = counter_name
2733
- counter["Num Events"] = long(tokens[0])
2734
+ counter["Num Events"] = int(tokens[0])
2735
counter["Max Value"] = float(tokens[1])
2736
counter["Min Value"] = float(tokens[2])
2737
counter["Mean Value"] = float(tokens[3])
2738
counter["SumSqr Value"] = float(tokens[4])
2739
if "Message size for " in counter_name:
2740
- value = long(tokens[0]) * float(tokens[3])
2741
+ value = int(tokens[0]) * float(tokens[3])
2742
c_name = "Collective_Bytes"
2743
if c_name not in group_totals:
2744
group_totals[c_name] = value
2745
else:
2746
group_totals[c_name] = group_totals[c_name] + value
2747
if counter_name == "Message size sent to all nodes":
2748
- value = long(tokens[0]) * float(tokens[3])
2749
+ value = int(tokens[0]) * float(tokens[3])
2750
c_name = "Send_Bytes"
2751
if c_name not in group_totals:
2752
group_totals[c_name] = value
2753
else:
2754
group_totals[c_name] = group_totals[c_name] + value
2755
if counter_name == "Message size received from all nodes":
2756
- value = long(tokens[0]) * float(tokens[3])
2757
+ value = int(tokens[0]) * float(tokens[3])
2758
c_name = "Recv_Bytes"
2759
if c_name not in group_totals:
2760
group_totals[c_name] = value
2761
else:
2762
group_totals[c_name] = group_totals[c_name] + value
2763
if counter_name == "Bytes Read":
2764
- value = long(tokens[0]) * float(tokens[3])
2765
+ value = int(tokens[0]) * float(tokens[3])
2766
c_name = "Read_Bytes"
2767
if c_name not in group_totals:
2768
group_totals[c_name] = value
2769
else:
2770
group_totals[c_name] = group_totals[c_name] + value
2771
if counter_name == "Bytes Written":
2772
- value = long(tokens[0]) * float(tokens[3])
2773
+ value = int(tokens[0]) * float(tokens[3])
2774
c_name = "Write_Bytes"
2775
if c_name not in group_totals:
2776
group_totals[c_name] = value
2777
else:
2778
group_totals[c_name] = group_totals[c_name] + value
2779
if counter_name == "ADIOS data size":
2780
- value = long(tokens[0]) * float(tokens[3])
2781
+ value = int(tokens[0]) * float(tokens[3])
2782
c_name = "ADIOS_data_size"
2783
if c_name not in group_totals:
2784
group_totals[c_name] = value
2785
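Illustration (not part of the patch): the long() calls above become int(), since Python 3 has a single arbitrary-precision integer type. A sketch using a fabricated profile record in place of real TAU tokens.

    # one made-up "calls subroutines exclusive inclusive profilecalls group" record
    tokens = '12 3 4567 8910 0 TAU_USER'.split()

    timer = {
        'Calls': int(tokens[0]),             # was long(tokens[0]) on Python 2
        'Subroutines': int(tokens[1]),
        'Exclusive Time': int(tokens[2]),
        'Inclusive Time': int(tokens[3]),
    }

    print(timer)
    print(type(timer['Inclusive Time']))     # <class 'int'>, even for very large values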
--- a/tools/src/tau_prof_to_json.py (original)
2786
--- b/tools/src/tau_prof_to_json.py (refactored)
2787
2788
"""A python script to convert TAU profiles to JSON
2789
"""
2790
2791
-from __future__ import print_function
2792
+
2793
import os
2794
import sys
2795
import argparse
2796
2797
timer["process index"] = int(node)
2798
timer["thread index"] = int(thread)
2799
timer["Function"] = function_name
2800
- timer["Calls"] = long(tokens[0])
2801
- timer["Subroutines"] = long(tokens[1])
2802
- timer["Exclusive Time"] = long(tokens[2])
2803
- timer["Inclusive Time"] = long(tokens[3])
2804
+ timer["Calls"] = int(tokens[0])
2805
+ timer["Subroutines"] = int(tokens[1])
2806
+ timer["Exclusive Time"] = int(tokens[2])
2807
+ timer["Inclusive Time"] = int(tokens[3])
2808
data["Timers"].append(timer)
2809
2810
def parse_aggregates(node, context, thread, infile, data):
2811
2812
counter["process index"] = int(node)
2813
counter["thread index"] = int(thread)
2814
counter["Counter"] = counter_name
2815
- counter["Num Events"] = long(tokens[0])
2816
+ counter["Num Events"] = int(tokens[0])
2817
counter["Max Value"] = float(tokens[1])
2818
counter["Min Value"] = float(tokens[2])
2819
counter["Mean Value"] = float(tokens[3])
2820
--- a/tools/src/tau_resolve_addresses.py (original)
2821
--- b/tools/src/tau_resolve_addresses.py (refactored)
2822
2823
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2824
"""
2825
2826
-from __future__ import with_statement
2827
+
2828
2829
import re
2830
import os
2831
2832
from optparse import OptionParser
2833
from subprocess import Popen, PIPE
2834
from threading import Thread
2835
-from Queue import Queue, Empty
2836
+from queue import Queue, Empty
2837
from xml.sax import saxutils
2838
2839
USAGE = """
2840
2841
self.exe = exe
2842
self.cmdstr = ' '.join(cmd)
2843
if not os.path.exists(self.exe):
2844
- print 'WARNING: %r not found. Addresses in this binary will not be resolved.' % self.exe
2845
+ print('WARNING: %r not found. Addresses in this binary will not be resolved.' % self.exe)
2846
self.p = self.q = self.t = None
2847
else:
2848
self.p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=1)
2849
2850
self.t = Thread(target=Addr2Line.enqueue_output, args=(self.p.stdout, self.q))
2851
self.t.daemon = True
2852
self.t.start()
2853
- print 'New process: %s' % self.cmdstr
2854
+ print('New process: %s' % self.cmdstr)
2855
2856
def close(self):
2857
if self.p:
2858
2859
exe = match.group(2)
2860
addr = match.group(3)
2861
if exe == 'UNKNOWN':
2862
- for p in self.pipes.itervalues():
2863
+ for p in self.pipes.values():
2864
resolved = p.resolve(addr)
2865
if resolved[0] != 'UNRESOLVED':
2866
break
2867
2868
return match.group(0)
2869
2870
# Extract lines from memory and rewrite
2871
- print 'New thread: %s' % self.name
2872
+ print('New thread: %s' % self.name)
2873
t0 = time.clock()
2874
j = 1
2875
- for i in xrange(self.firstline, self.lastline):
2876
+ for i in range(self.firstline, self.lastline):
2877
if j % ITERS_PER_REPORT == 0:
2878
timespan = time.clock() - t0
2879
time_per_iter = timespan / ITERS_PER_REPORT
2880
eta = (self.linecount - j) * time_per_iter
2881
etadate = time.ctime(time.time() + eta)
2882
- print '%s: %d records in %f seconds, ETA %f seconds (%s)' % (self.name, ITERS_PER_REPORT, timespan, eta, etadate)
2883
+ print('%s: %d records in %f seconds, ETA %f seconds (%s)' % (self.name, ITERS_PER_REPORT, timespan, eta, etadate))
2884
if eta > 1000:
2885
- print 'This is going to take a long time. Maybe use the --jobs option? See --help'
2886
+ print('This is going to take a long time. Maybe use the --jobs option? See --help')
2887
t0 = time.clock()
2888
lineno = self.unresolved[i]
2889
span = self.linespan[lineno]
2890
2891
line = self.mm[start:stop]
2892
try:
2893
result = (lineno, re.sub(PATTERN, repl, line))
2894
- except Addr2LineError, e:
2895
- print e.value
2896
+ except Addr2LineError as e:
2897
+ print(e.value)
2898
break
2899
self.results.append(result)
2900
j += 1
2901
2902
with open(outfile, 'wb') as fout:
2903
2904
# Scan events from input file
2905
- print 'Scanning %r' % infile
2906
+ print('Scanning %r' % infile)
2907
all_exes = set()
2908
linespan = list()
2909
unresolved = list()
2910
2911
break
2912
if j % ITERS_PER_REPORT == 0:
2913
timespan = time.clock() - t0
2914
- print 'Scanned %d lines in %f seconds' % (j, timespan)
2915
+ print('Scanned %d lines in %f seconds' % (j, timespan))
2916
linespan.append((offset, offset + len(line)))
2917
offset += len(line)
2918
match = re.search(PATTERN, line)
2919
2920
2921
# "Rewind" the input file and report
2922
fin.seek(0, 0)
2923
- print 'Found %d executables in profile' % len(all_exes)
2924
- print 'Found %d unresolved addresses' % linecount
2925
+ print('Found %d executables in profile' % len(all_exes))
2926
+ print('Found %d unresolved addresses' % linecount)
2927
if jobs > linecount:
2928
jobs = linecount
2929
- print 'Reducing jobs to %d' % jobs
2930
+ print('Reducing jobs to %d' % jobs)
2931
2932
# Build list of executables to search
2933
all_exes |= fallback_exes
2934
if not all_exes:
2935
- print 'ERROR: No executables or other binary objects specified. See --help.'
2936
+ print('ERROR: No executables or other binary objects specified. See --help.')
2937
sys.exit(1)
2938
2939
# Calculate work division
2940
2941
if jobs > 1:
2942
chunklen = linecount / jobs
2943
chunkrem = linecount % jobs
2944
- for i in xrange(jobs):
2945
+ for i in range(jobs):
2946
count = chunklen
2947
if i < chunkrem:
2948
count += 1
2949
chunks.append((start, count))
2950
start += count
2951
- print '%d workers process %d records, %d process %d records' % (chunkrem, chunklen+1, (jobs-chunkrem), chunklen)
2952
+ print('%d workers process %d records, %d process %d records' % (chunkrem, chunklen+1, (jobs-chunkrem), chunklen))
2953
else:
2954
chunks = [(0, linecount)]
2955
- print 'One thread will process %d records' % linecount
2956
+ print('One thread will process %d records' % linecount)
2957
2958
# Launch worker processes
2959
mm = mmap(fin.fileno(), 0)
2960
workers = list()
2961
- for i in xrange(jobs):
2962
+ for i in range(jobs):
2963
w = Worker(addr2line, all_exes, mm, unresolved, linespan, chunks[i])
2964
w.start()
2965
workers.append(w)
2966
2967
# Process worker output
2968
for rank, w in enumerate(workers):
2969
w.join()
2970
- print '%s (%d/%d) completed' % (w.name, rank, len(workers))
2971
+ print('%s (%d/%d) completed' % (w.name, rank, len(workers)))
2972
for lineno, line in w.results:
2973
if i < lineno:
2974
start = linespan[i][0]
2975
stop = linespan[lineno-1][1]
2976
- print 'writing lines %d:%d' % (i, lineno-1)
2977
+ print('writing lines %d:%d' % (i, lineno-1))
2978
fout.write(mm[start:stop])
2979
i = lineno
2980
fout.write(line)
2981
i += 1
2982
2983
# Write out remainder of file
2984
- print 'Address resolution complete, writing metrics to file...'
2985
+ print('Address resolution complete, writing metrics to file...')
2986
start = linespan[i-1][1]
2987
fin.seek(start, 0)
2988
for line in fin:
2989
2990
2991
for infile in files:
2992
outfile = os.path.join(outdir, os.path.basename(infile))
2993
- print '%s => %s' % (infile, outfile)
2994
+ print('%s => %s' % (infile, outfile))
2995
try:
2996
tauprofile_xml(infile, outfile, options)
2997
except IOError:
2998
- print 'Invalid input or output file. Check command arguments'
2999
+ print('Invalid input or output file. Check command arguments')
3000
sys.exit(1)
3001
3002
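Illustration (not part of the patch): tau_resolve_addresses.py above picks up three renames at once: Queue becomes queue, xrange() becomes range(), and "except E, e" becomes "except E as e". A toy queue-draining sketch, unrelated to the Addr2Line workers themselves.

    from queue import Queue, Empty   # py2: from Queue import Queue, Empty

    q = Queue()
    for i in range(3):               # py2 xrange() no longer exists
        q.put(i)

    try:
        while True:
            print('got', q.get_nowait())
    except Empty as err:             # Python 3 requires the "as" form
        print('queue drained:', type(err).__name__)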
--- a/tools/src/tausamp_constructParaverTrace.py (original)
3003
--- b/tools/src/tausamp_constructParaverTrace.py (refactored)
3004
3005
mpiCallerType = 70000000
3006
3007
def usage():
3008
- print "\nUsage: process.py [-m --mpi] [-c --callpath]\n"
3009
- print "Where:"
3010
- print "\t-m, --mpi : keep MPI events"
3011
- print "\t-c, --callpath : keep callpath\n"
3012
+ print("\nUsage: process.py [-m --mpi] [-c --callpath]\n")
3013
+ print("Where:")
3014
+ print("\t-m, --mpi : keep MPI events")
3015
+ print("\t-c, --callpath : keep callpath\n")
3016
sys.exit(1)
3017
3018
def getFileExtents(infname):
3019
global startTimestamp
3020
global endTimestamp
3021
- print "Pre-processing", infname, "..."
3022
+ print("Pre-processing", infname, "...")
3023
input = open(infname, 'r')
3024
localStartTimestamp = 0
3025
localEndTimestamp = 0
3026
3027
appl = 1
3028
thread = 1
3029
eventSet = set()
3030
- print "Processing", infname, "..."
3031
+ print("Processing", infname, "...")
3032
input = open(infname, 'r')
3033
currentCallpath = ""
3034
for line in input:
3035
3036
traceFile.write(endEvent)
3037
3038
def sortedDictValues(adict):
3039
- items = adict.items()
3040
+ items = list(adict.items())
3041
items.sort()
3042
return [value for key, value in items]
3043
3044
3045
pcfFile.write("EVENT_TYPE\n")
3046
pcfFile.write("9 50000001 MPI Point-to-point\n")
3047
pcfFile.write("VALUES\n")
3048
- for (k,v) in mpiValues.items():
3049
+ for (k,v) in list(mpiValues.items()):
3050
if mpiTypes[k] == 50000001:
3051
pcfFile.write(str(v) + " " + str(k) + "\n")
3052
pcfFile.write("0 End\n")
3053
3054
pcfFile.write("EVENT_TYPE\n")
3055
pcfFile.write("9 50000002 MPI Collective Comm\n")
3056
pcfFile.write("VALUES\n")
3057
- for (k,v) in mpiValues.items():
3058
+ for (k,v) in list(mpiValues.items()):
3059
if mpiTypes[k] == 50000002:
3060
pcfFile.write(str(v) + " " + str(k) + "\n")
3061
pcfFile.write("0 End\n")
3062
3063
pcfFile.write("EVENT_TYPE\n")
3064
pcfFile.write("9 50000003 MPI Other\n")
3065
pcfFile.write("VALUES\n")
3066
- for (k,v) in mpiValues.items():
3067
+ for (k,v) in list(mpiValues.items()):
3068
if mpiTypes[k] == 50000003:
3069
pcfFile.write(str(v) + " " + str(k) + "\n")
3070
pcfFile.write("0 End\n")
3071
pcfFile.write("\n\n")
3072
3073
- sortedList = sorted(counterMap.iteritems(), key=itemgetter(1))
3074
+ sortedList = sorted(iter(counterMap.items()), key=itemgetter(1))
3075
pcfFile.write("EVENT_TYPE\n")
3076
for i in sortedList:
3077
pcfFile.write("7 " + str(i[1]) + " " + str(i[0]) + "\n")
3078
3079
pcfFile.write("13 Gradient 13\n")
3080
pcfFile.write("14 Gradient 14\n\n\n")
3081
3082
- sortedList = sorted(callpathMap.iteritems(), key=itemgetter(1))
3083
- pcfFile.write("EVENT_TYPE\n")
3084
- for (k,v) in callDepthMap.items():
3085
+ sortedList = sorted(iter(callpathMap.items()), key=itemgetter(1))
3086
+ pcfFile.write("EVENT_TYPE\n")
3087
+ for (k,v) in list(callDepthMap.items()):
3088
pcfFile.write("0 " + str(k) + " " + v + "\n")
3089
pcfFile.write("VALUES\n")
3090
for i in sortedList:
3091
pcfFile.write(str(i[1]) + " " + str(i[0]) + "\n")
3092
pcfFile.write("\n\n")
3093
3094
- sortedList = sorted(callpathMap.iteritems(), key=itemgetter(1))
3095
+ sortedList = sorted(iter(callpathMap.items()), key=itemgetter(1))
3096
pcfFile.write("EVENT_TYPE\n")
3097
pcfFile.write("0 60000019 User function\n")
3098
pcfFile.write("VALUES\n")
3099
3100
pcfFile.write("2 CPU Bursts\n\n")
3101
3102
pcfFile.close()
3103
- print pcfname, "mapping file created"
3104
+ print(pcfname, "mapping file created")
3105
3106
def writeRowFile(numFiles):
3107
rowfname = "tracefile.row"
3108
3109
rowFile.write("\n\n")
3110
rowFile.write("LEVEL NODE SIZE " + str(1) + " \nunknown\n")
3111
rowFile.close()
3112
- print rowfname, "row file created"
3113
+ print(rowfname, "row file created")
3114
3115
def createTraceFile(tracefname, numFiles):
3116
traceFile = open(tracefname, 'w')
3117
3118
processFile(infname, traceFile)
3119
3120
traceFile.close()
3121
- print tracefname, "trace file created"
3122
+ print(tracefname, "trace file created")
3123
3124
writePcfFile(callpathMap)
3125
writeRowFile(numFiles);
3126
3127
- print negatives, "negative values ignored out of", total, "total values"
3128
+ print(negatives, "negative values ignored out of", total, "total values")
3129
3130
if __name__ == "__main__":
3131
main(sys.argv[1:])
3132
--- a/tools/src/tausamp_extract4folding.py (original)
3133
--- b/tools/src/tausamp_extract4folding.py (refactored)
3134
3135
total = 0
3136
3137
def usage():
3138
- print "\nUsage: process.py [-m --mpi] [-c --callpath]\n"
3139
- print "Where:"
3140
- print "\t-m, --mpi : keep MPI events"
3141
- print "\t-c, --callpath : keep callpath\n"
3142
+ print("\nUsage: process.py [-m --mpi] [-c --callpath]\n")
3143
+ print("Where:")
3144
+ print("\t-m, --mpi : keep MPI events")
3145
+ print("\t-c, --callpath : keep callpath\n")
3146
sys.exit(1)
3147
3148
def parseArgs(argv):
3149
3150
outputFile.write(tmp)
3151
3152
def sortedDictValues(adict):
3153
- items = adict.items()
3154
+ items = list(adict.items())
3155
items.sort()
3156
return [value for key, value in items]
3157
3158
3159
if infname.startswith("ebstrace.processed."):
3160
outfname = infname.replace("processed", "extracted")
3161
outputFile = open(outfname, 'w')
3162
- print infname, "-->",
3163
+ print(infname, "-->", end=' ')
3164
processFile(infname, outputFile)
3165
outputFile.close()
3166
newOutfname = outfname.replace(".0.0.0", "." + node + "." + str(thread))
3167
- print newOutfname
3168
+ print(newOutfname)
3169
os.rename(outfname, newOutfname)
3170
3171
outfname = "ebstrace.extracted.maps.txt"
3172
outputFile = open(outfname, 'w')
3173
3174
- sortedList = sorted(callpathMap.iteritems(), key=itemgetter(1))
3175
+ sortedList = sorted(iter(callpathMap.items()), key=itemgetter(1))
3176
outputFile.write("# function map \n")
3177
for i in sortedList:
3178
outputFile.write(str(i[1]) + " " + str(i[0]) + "\n")
3179
3180
- sortedList = sorted(counterMap.iteritems(), key=itemgetter(1))
3181
+ sortedList = sorted(iter(counterMap.items()), key=itemgetter(1))
3182
outputFile.write("# metric map \n")
3183
for i in sortedList:
3184
outputFile.write(str(i[1]) + " " + str(i[0]) + "\n")
3185
3186
outputFile.close()
3187
- print outfname, "mapping file created"
3188
- print negatives, "negative values ignored out of", total, "total values"
3189
+ print(outfname, "mapping file created")
3190
+ print(negatives, "negative values ignored out of", total, "total values")
3191
3192
if __name__ == "__main__":
3193
main(sys.argv[1:])
3194
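Illustration (not part of the patch): tausamp_extract4folding.py above shows 2to3's sorted(iter(d.items()), ...) spelling of the old iteritems() call and the end=' ' form of a trailing-comma print. A sketch over a made-up callpath map; plain d.items() is sufficient on Python 3.

    from operator import itemgetter

    callpathMap = {'main': 2, 'MPI_Init()': 1, 'compute': 3}   # invented example

    for name, index in sorted(callpathMap.items(), key=itemgetter(1)):
        print(name, '-->', end=' ')   # py2: print name, "-->",
        print(index)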
--- a/tools/src/perfexplorer/cqos/big.py (original)
3195
--- b/tools/src/perfexplorer/cqos/big.py (refactored)
3196
3197
import sys
3198
import time
3199
-import commands
3200
+import subprocess
3201
3202
True = 1
3203
False = 0
3204
3205
#for gridx in ['16', '32']:
3206
for gridx in ['64']:
3207
gridy=gridx
3208
- gridsize = `gridx` + 'x' + `gridy`
3209
+ gridsize = repr(gridx) + 'x' + repr(gridy)
3210
#for pc in ['jacobi', 'bjacobi', 'none', 'sor', 'asm', 'cholesky']:
3211
getrec = 'java -cp /home/khuck/tau2/tools/src/perfexplorer/classifier.jar:/home/khuck/.ParaProf/weka.jar cqos.CQoSClassifier /tmp/classifier.nosplit.' + classifier + ' lidvelocity:' + lidvelocity + ' gridsize:' + gridx + 'x' + gridx + ' grashof:' + grashof
3212
# get KSP recommendation
3213
#print getrec
3214
- (status, output) = commands.getstatusoutput(getrec)
3215
+ (status, output) = subprocess.getstatusoutput(getrec)
3216
#print output
3217
if output.startswith('fgmres'):
3218
ksp='fgmres'
3219
3220
ksp='bcgs'
3221
if output.startswith('tfqmr'):
3222
ksp='tfqmr'
3223
- print ksp
3224
+ print(ksp)
3225
# get PC recommendation
3226
getrec = 'java -cp /home/khuck/tau2/tools/src/perfexplorer/classifier.jar:/home/khuck/.ParaProf/weka.jar cqos.CQoSClassifier /tmp/classifier.pc.' + classifier + ' lidvelocity:' + lidvelocity + ' gridsize:' + gridx + 'x' + gridx + ' grashof:' + grashof + ' ksp:' + ksp
3227
#print getrec
3228
- (status, output) = commands.getstatusoutput(getrec)
3229
+ (status, output) = subprocess.getstatusoutput(getrec)
3230
#for pc in ['jacobi', 'bjacobi', 'none', 'sor', 'asm', 'cholesky']:
3231
#print output
3232
if output.startswith('jacobi'):
3233
3234
if output.startswith('icc'):
3235
pc='icc'
3236
procs='1'
3237
- print pc
3238
+ print(pc)
3239
3240
# make directories for results
3241
dirname = 'ex27'+'-'+ procs + '-' + 'x' + gridx + '-' + 'y' + gridy + '-' + 'lid' + lidvelocity + '-' + 'grh' + grashof + '-' + 'srtol' + snesrtol + '-' + 'krtol' + ksprtol + '-' + 'snes' + snes + '-' + 'pc' + pc
3242
- print dirname
3243
+ print(dirname)
3244
createdir= 'mkdir /home/khuck/data/petsc/' + classifier + '/' + dirname
3245
#print createdir
3246
- commands.getstatusoutput(createdir)
3247
- commands.getstatusoutput(createdir + '/default')
3248
- commands.getstatusoutput(createdir + '/' + ksp)
3249
+ subprocess.getstatusoutput(createdir)
3250
+ subprocess.getstatusoutput(createdir + '/default')
3251
+ subprocess.getstatusoutput(createdir + '/' + ksp)
3252
# run with default solver
3253
mycommand = '$MPIEXEC -np ' + procs + ' /home/khuck/src/petsc/metadata/ex27 ' ' -snes_type ' + snes + ' -ksp_type gmres -pc_type ilu -lidvelocity ' + lidvelocity + ' -da_grid_x ' + gridx + ' -da_grid_y ' + gridx + ' -print -snes_monitor -grashof ' + grashof + ' -cfl_ini ' + cflini + ' -snes_rtol ' + snesrtol + ' -ksp_rtol ' + ksprtol + ' -preload off' + ' >& /home/khuck/data/petsc/' + classifier + '/' + dirname + '/default.log'
3254
# print mycommand
3255
start = time.time()
3256
- (status, output) = commands.getstatusoutput(mycommand)
3257
+ (status, output) = subprocess.getstatusoutput(mycommand)
3258
end = time.time()
3259
default = end - start
3260
- print 'DEFAULT: ', default
3261
- commands.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/default/.')
3262
+ print('DEFAULT: ', default)
3263
+ subprocess.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/default/.')
3264
# run with recommendation
3265
mycommand = '$MPIEXEC -np ' + procs + ' /home/khuck/src/petsc/metadata/ex27 ' ' -snes_type ' + snes + ' -ksp_type ' + ksp + ' -pc_type ' + pc + ' -lidvelocity ' + lidvelocity + ' -da_grid_x ' + gridx + ' -da_grid_y ' + gridx + ' -print -snes_monitor -grashof ' + grashof + ' -cfl_ini ' + cflini + ' -snes_rtol ' + snesrtol + ' -ksp_rtol ' + ksprtol + ' -preload off' + ' >& /home/khuck/data/petsc/' + classifier + '/' + dirname + '/' + classifier + '.log'
3266
start = time.time()
3267
- (status, output) = commands.getstatusoutput(mycommand)
3268
+ (status, output) = subprocess.getstatusoutput(mycommand)
3269
end = time.time()
3270
recommended = end - start
3271
# print output
3272
- print 'RECOMMENDED: ', recommended
3273
- commands.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/' + ksp + '/.')
3274
+ print('RECOMMENDED: ', recommended)
3275
+ subprocess.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/' + ksp + '/.')
3276
#f.write('lidvelocity, grashof, gridsize, ksp, pc, default, recommended\n')
3277
- f.write(lidvelocity + ',' + grashof + ',' + gridsize + ',' + ksp + ',' + pc + ',' + `default` + ',' + `recommended` + '\n')
3278
+ f.write(lidvelocity + ',' + grashof + ',' + gridsize + ',' + ksp + ',' + pc + ',' + repr(default) + ',' + repr(recommended) + '\n')
3279
f.flush()
3280
3281
f.close()
3282
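Illustration (not part of the patch): big.py above swaps the removed "commands" module for subprocess.getstatusoutput() and backtick repr syntax for repr(). A sketch that runs a harmless echo command instead of the PETSc/MPI jobs.

    import subprocess

    # py2: (status, output) = commands.getstatusoutput(cmd)
    status, output = subprocess.getstatusoutput('echo 64x64')
    print(status, output)

    gridx = gridy = '64'
    # py2 backticks were repr(); note that repr() keeps the quotes around strings
    print(repr(gridx) + 'x' + repr(gridy))   # "'64'x'64'"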
--- a/tools/src/perfexplorer/cqos/c60.py (original)
3283
--- b/tools/src/perfexplorer/cqos/c60.py (refactored)
3284
3285
global inApp
3286
global inExp
3287
global fileName
3288
- print "getting parameters..."
3289
+ print("getting parameters...")
3290
parameterMap = PerfExplorerModel.getModel().getScriptParameters()
3291
keys = parameterMap.keySet()
3292
for key in keys:
3293
- print key, parameterMap.get(key)
3294
+ print(key, parameterMap.get(key))
3295
config = parameterMap.get("config")
3296
tmp = parameterMap.get("app")
3297
if tmp != None:
3298
3299
tmp = parameterMap.get("fileName")
3300
if tmp != None:
3301
fileName = tmp
3302
- print "...done."
3303
+ print("...done.")
3304
3305
def loadTrials():
3306
global results
3307
global inExp
3308
- print "loading trials for experiment..."
3309
+ print("loading trials for experiment...")
3310
Utilities.setSession(config)
3311
trials = Utilities.getTrialsForExperiment(inApp, inExp)
3312
for trial in trials:
3313
loaded = TrialMeanResult(trial)
3314
results.add(loaded)
3315
- print "...done."
3316
+ print("...done.")
3317
return results
3318
3319
def loadExperiments():
3320
global results
3321
global inExp
3322
- print "loading experiments..."
3323
+ print("loading experiments...")
3324
Utilities.setSession(config)
3325
experiments = Utilities.getExperimentsForApplication(inApp)
3326
for experiment in experiments:
3327
inExp = experiment.getName();
3328
- print "processing experiment: ", inExp
3329
+ print("processing experiment: ", inExp)
3330
results = loadTrials()
3331
- print "...done."
3332
+ print("...done.")
3333
return results
3334
3335
def buildClassifier(results):
3336
- print "building classifier..."
3337
+ print("building classifier...")
3338
metadataFields = HashSet()
3339
metadataFields.add("molecule name")
3340
metadataFields.add("basis set")
3341
3342
classifier.setClassifierType(CQoSClassifierOperation.ALTERNATING_DECISION_TREE)
3343
classifier.processData()
3344
classifier.writeClassifier(fileName + ".adt")
3345
- print classifier.crossValidateModel()
3346
+ print(classifier.crossValidateModel())
3347
test(classifier)
3348
classifier.setClassifierType(CQoSClassifierOperation.NAIVE_BAYES)
3349
classifier.processData()
3350
classifier.writeClassifier(fileName + ".nb")
3351
- print classifier.crossValidateModel()
3352
+ print(classifier.crossValidateModel())
3353
test(classifier)
3354
classifier.setClassifierType(CQoSClassifierOperation.RANDOM_TREE)
3355
classifier.processData()
3356
classifier.writeClassifier(fileName + ".rt")
3357
- print classifier.crossValidateModel()
3358
+ print(classifier.crossValidateModel())
3359
test(classifier)
3360
classifier.setClassifierType(CQoSClassifierOperation.SUPPORT_VECTOR_MACHINE)
3361
classifier.processData()
3362
classifier.writeClassifier(fileName + ".svm")
3363
- print classifier.crossValidateModel()
3364
+ print(classifier.crossValidateModel())
3365
test(classifier)
3366
classifier.setClassifierType(CQoSClassifierOperation.J48)
3367
classifier.processData()
3368
classifier.writeClassifier(fileName + ".j48")
3369
- print classifier.crossValidateModel()
3370
+ print(classifier.crossValidateModel())
3371
test(classifier)
3372
classifier.setClassifierType(CQoSClassifierOperation.MULTILAYER_PERCEPTRON)
3373
classifier.processData()
3374
classifier.writeClassifier(fileName + ".mp")
3375
- print classifier.crossValidateModel()
3376
+ print(classifier.crossValidateModel())
3377
test(classifier)
3378
classifier.writeClassifier(fileName)
3379
- print "...done."
3380
+ print("...done.")
3381
return classifier
3382
3383
def test(classifier):
3384
3385
inputFields.put("core count", "8")
3386
inputFields.put("mplevl", mp)
3387
if classifier.getClass(inputFields) == "DIRECT":
3388
- print inputFields, "Direct / Conventional: ", classifier.getClass(inputFields), classifier.getConfidence()
3389
+ print(inputFields, "Direct / Conventional: ", classifier.getClass(inputFields), classifier.getConfidence())
3390
3391
3392
-print "--------------- JPython test script start ------------"
3393
+print("--------------- JPython test script start ------------")
3394
3395
getParameters()
3396
3397
-print "getting trials..."
3398
+print("getting trials...")
3399
3400
results = loadTrials()
3401
#results = loadExperiments()
3402
3403
-print "...done."
3404
-print "Total Trials:", results.size()
3405
+print("...done.")
3406
+print("Total Trials:", results.size())
3407
3408
classifier = buildClassifier(results)
3409
#test(classifier)
3410
3411
-print "---------------- JPython test script end -------------"
3412
+print("---------------- JPython test script end -------------")
3413
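A note on the pattern above: these CQoS scripts are Jython scripts driven by PerfExplorer, and the refactoring rewrites every Python 2 print statement as a print() call. A minimal sketch of the before/after behaviour, with a made-up value standing in for the PerfExplorer objects:

    # Python 2 statement form (the removed lines):
    #     print "Total Trials:", results.size()
    # Python 3 function form (the added lines):
    print("Total Trials:", 42)   # prints: Total Trials: 42
    # Caution: on Python 2, print("a", 42) with several arguments would have
    # printed the tuple ('a', 42); only the single-argument form behaves
    # identically under both interpreters.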
--- a/tools/src/perfexplorer/cqos/chemistry.new.py (original)
3414
--- b/tools/src/perfexplorer/cqos/chemistry.new.py (refactored)
3415
3416
global inApp
3417
global inExp
3418
global fileName
3419
- print "getting parameters..."
3420
+ print("getting parameters...")
3421
parameterMap = PerfExplorerModel.getModel().getScriptParameters()
3422
keys = parameterMap.keySet()
3423
for key in keys:
3424
- print key, parameterMap.get(key)
3425
+ print(key, parameterMap.get(key))
3426
config = parameterMap.get("config")
3427
tmp = parameterMap.get("app")
3428
if tmp != None:
3429
3430
tmp = parameterMap.get("fileName")
3431
if tmp != None:
3432
fileName = tmp
3433
- print "...done."
3434
+ print("...done.")
3435
3436
def loadTrials():
3437
global results
3438
global inExp
3439
- print "loading trials for experiment..."
3440
+ print("loading trials for experiment...")
3441
Utilities.setSession(config)
3442
trials = Utilities.getTrialsForExperiment(inApp, inExp)
3443
for trial in trials:
3444
loaded = TrialMeanResult(trial)
3445
results.add(loaded)
3446
- print "...done."
3447
+ print("...done.")
3448
return results
3449
3450
def loadExperiments():
3451
global results
3452
global inExp
3453
- print "loading experiments..."
3454
+ print("loading experiments...")
3455
Utilities.setSession(config)
3456
experiments = Utilities.getExperimentsForApplication(inApp)
3457
for experiment in experiments:
3458
inExp = experiment.getName();
3459
- print "processing experiment: ", inExp
3460
+ print("processing experiment: ", inExp)
3461
results = loadTrials()
3462
- print "...done."
3463
+ print("...done.")
3464
return results
3465
3466
def buildClassifier(results):
3467
- print "building classifier..."
3468
+ print("building classifier...")
3469
metadataFields = HashSet()
3470
# metadataFields.add("basis set")
3471
# ALEX metadataFields.add("NUCLEAR REPULSION ENERGY"); # molecule dependent
3472
3473
classifier.setClassifierType(CQoSClassifierOperation.ALTERNATING_DECISION_TREE)
3474
classifier.processData()
3475
classifier.writeClassifier(fileName + ".adt")
3476
- print classifier.crossValidateModel()
3477
+ print(classifier.crossValidateModel())
3478
test(classifier)
3479
classifier.setClassifierType(CQoSClassifierOperation.NAIVE_BAYES)
3480
classifier.processData()
3481
classifier.writeClassifier(fileName + ".nb")
3482
- print classifier.crossValidateModel()
3483
+ print(classifier.crossValidateModel())
3484
test(classifier)
3485
classifier.setClassifierType(CQoSClassifierOperation.RANDOM_TREE)
3486
classifier.processData()
3487
classifier.writeClassifier(fileName + ".rt")
3488
- print classifier.crossValidateModel()
3489
+ print(classifier.crossValidateModel())
3490
test(classifier)
3491
classifier.setClassifierType(CQoSClassifierOperation.SUPPORT_VECTOR_MACHINE)
3492
classifier.processData()
3493
classifier.writeClassifier(fileName + ".svm")
3494
- print classifier.crossValidateModel()
3495
+ print(classifier.crossValidateModel())
3496
test(classifier)
3497
classifier.setClassifierType(CQoSClassifierOperation.J48)
3498
classifier.processData()
3499
classifier.writeClassifier(fileName + ".j48")
3500
- print classifier.crossValidateModel()
3501
+ print(classifier.crossValidateModel())
3502
test(classifier)
3503
classifier.setClassifierType(CQoSClassifierOperation.MULTILAYER_PERCEPTRON)
3504
classifier.processData()
3505
classifier.writeClassifier(fileName + ".mp")
3506
- print classifier.crossValidateModel()
3507
+ print(classifier.crossValidateModel())
3508
test(classifier)
3509
classifier.writeClassifier(fileName)
3510
- print "...done."
3511
+ print("...done.")
3512
return classifier
3513
3514
def test(classifier):
3515
3516
inputFields.put("core count", "8")
3517
inputFields.put("mplevl", mp)
3518
if classifier.getClass(inputFields) == "DIRECT":
3519
- print inputFields, "Direct / Conventional: ", classifier.getClass(inputFields), classifier.getConfidence()
3520
- print ""
3521
+ print(inputFields, "Direct / Conventional: ", classifier.getClass(inputFields), classifier.getConfidence())
3522
+ print("")
3523
3524
-print "--------------- JPython test script start ------------"
3525
+print("--------------- JPython test script start ------------")
3526
3527
getParameters()
3528
3529
-print "getting trials..."
3530
+print("getting trials...")
3531
3532
#results = loadTrials()
3533
results = loadExperiments()
3534
3535
-print "...done."
3536
-print "Total Trials:", results.size()
3537
+print("...done.")
3538
+print("Total Trials:", results.size())
3539
3540
classifier = buildClassifier(results)
3541
#test(classifier)
3542
3543
-print "---------------- JPython test script end -------------"
3544
+print("---------------- JPython test script end -------------")
3545
--- a/tools/src/perfexplorer/cqos/chemistry.py (original)
3546
--- b/tools/src/perfexplorer/cqos/chemistry.py (refactored)
3547
3548
global inApp
3549
global inExp
3550
global fileName
3551
- print "getting parameters..."
3552
+ print("getting parameters...")
3553
parameterMap = PerfExplorerModel.getModel().getScriptParameters()
3554
keys = parameterMap.keySet()
3555
for key in keys:
3556
- print key, parameterMap.get(key)
3557
+ print(key, parameterMap.get(key))
3558
config = parameterMap.get("config")
3559
tmp = parameterMap.get("app")
3560
if tmp != None:
3561
3562
tmp = parameterMap.get("fileName")
3563
if tmp != None:
3564
fileName = tmp
3565
- print "...done."
3566
+ print("...done.")
3567
3568
def loadTrials():
3569
global results
3570
global inExp
3571
- print "loading trials for experiment..."
3572
+ print("loading trials for experiment...")
3573
Utilities.setSession(config)
3574
trials = Utilities.getTrialsForExperiment(inApp, inExp)
3575
for trial in trials:
3576
loaded = TrialMeanResult(trial)
3577
results.add(loaded)
3578
- print "...done."
3579
+ print("...done.")
3580
return results
3581
3582
def loadExperiments():
3583
global results
3584
global inExp
3585
- print "loading experiments..."
3586
+ print("loading experiments...")
3587
Utilities.setSession(config)
3588
experiments = Utilities.getExperimentsForApplication(inApp)
3589
for experiment in experiments:
3590
inExp = experiment.getName();
3591
- print "processing experiment: ", inExp
3592
+ print("processing experiment: ", inExp)
3593
results = loadTrials()
3594
- print "...done."
3595
+ print("...done.")
3596
return results
3597
3598
def buildClassifier(results):
3599
- print "building classifier..."
3600
+ print("building classifier...")
3601
metadataFields = HashSet()
3602
metadataFields.add("molecule name")
3603
metadataFields.add("basis set")
3604
3605
classifier.setClassifierType(CQoSClassifierOperation.ALTERNATING_DECISION_TREE)
3606
classifier.processData()
3607
classifier.writeClassifier(fileName + ".adt")
3608
- print classifier.crossValidateModel()
3609
+ print(classifier.crossValidateModel())
3610
test(classifier)
3611
classifier.setClassifierType(CQoSClassifierOperation.NAIVE_BAYES)
3612
classifier.processData()
3613
classifier.writeClassifier(fileName + ".nb")
3614
- print classifier.crossValidateModel()
3615
+ print(classifier.crossValidateModel())
3616
#test(classifier)
3617
classifier.setClassifierType(CQoSClassifierOperation.RANDOM_TREE)
3618
classifier.processData()
3619
classifier.writeClassifier(fileName + ".rt")
3620
- print classifier.crossValidateModel()
3621
+ print(classifier.crossValidateModel())
3622
#test(classifier)
3623
classifier.setClassifierType(CQoSClassifierOperation.SUPPORT_VECTOR_MACHINE)
3624
classifier.processData()
3625
classifier.writeClassifier(fileName + ".svm")
3626
- print classifier.crossValidateModel()
3627
+ print(classifier.crossValidateModel())
3628
#test(classifier)
3629
classifier.setClassifierType(CQoSClassifierOperation.J48)
3630
classifier.processData()
3631
classifier.writeClassifier(fileName + ".j48")
3632
- print classifier.crossValidateModel()
3633
+ print(classifier.crossValidateModel())
3634
#test(classifier)
3635
classifier.setClassifierType(CQoSClassifierOperation.MULTILAYER_PERCEPTRON)
3636
classifier.processData()
3637
classifier.writeClassifier(fileName + ".mp")
3638
- print classifier.crossValidateModel()
3639
+ print(classifier.crossValidateModel())
3640
#test(classifier)
3641
classifier.writeClassifier(fileName)
3642
- print "...done."
3643
+ print("...done.")
3644
return classifier
3645
3646
def test(classifier):
3647
3648
inputFields.put("core count", "8")
3649
inputFields.put("mplevl", mp)
3650
if classifier.getClass(inputFields) == "DIRECT":
3651
- print inputFields, "Direct / Conventional: ", classifier.getClass(inputFields), classifier.getConfidence()
3652
+ print(inputFields, "Direct / Conventional: ", classifier.getClass(inputFields), classifier.getConfidence())
3653
3654
3655
-print "--------------- JPython test script start ------------"
3656
+print("--------------- JPython test script start ------------")
3657
3658
getParameters()
3659
3660
-print "getting trials..."
3661
+print("getting trials...")
3662
3663
#results = loadTrials()
3664
results = loadExperiments()
3665
3666
-print "...done."
3667
-print "Total Trials:", results.size()
3668
+print("...done.")
3669
+print("Total Trials:", results.size())
3670
3671
classifier = buildClassifier(results)
3672
#test(classifier)
3673
3674
-print "---------------- JPython test script end -------------"
3675
+print("---------------- JPython test script end -------------")
3676
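The build/validate blocks in buildClassifier differ only in the classifier type and the output suffix, which is why the converted print(classifier.crossValidateModel()) line recurs six times per script. Purely as an illustration (not part of the patch), the same sequence can be sketched as a loop, with a stub standing in for the Jython CQoSClassifierOperation object:

    class StubClassifier:
        # Hypothetical stand-in that mirrors the methods used in the diff.
        def setClassifierType(self, ctype): self.ctype = ctype
        def processData(self): pass
        def writeClassifier(self, name): print("wrote", name)
        def crossValidateModel(self): return "cross-validation results for " + self.ctype

    classifier = StubClassifier()
    fileName = "classifier"
    for ctype, suffix in [("ADT", ".adt"), ("NAIVE_BAYES", ".nb"), ("J48", ".j48")]:
        classifier.setClassifierType(ctype)
        classifier.processData()
        classifier.writeClassifier(fileName + suffix)
        print(classifier.crossValidateModel())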
--- a/tools/src/perfexplorer/cqos/cqos.py (original)
3677
--- b/tools/src/perfexplorer/cqos/cqos.py (refactored)
3678
3679
global inApp
3680
global inExp
3681
global fileName
3682
- print "getting parameters..."
3683
+ print("getting parameters...")
3684
parameterMap = PerfExplorerModel.getModel().getScriptParameters()
3685
keys = parameterMap.keySet()
3686
for key in keys:
3687
- print key, parameterMap.get(key)
3688
+ print(key, parameterMap.get(key))
3689
config = parameterMap.get("config")
3690
inApp = parameterMap.get("app")
3691
# inExp = parameterMap.get("exp")
3692
fileName = parameterMap.get("fileName")
3693
- print "...done."
3694
+ print("...done.")
3695
3696
def loadTrials():
3697
- print "loading data..."
3698
+ print("loading data...")
3699
Utilities.setSession(config)
3700
trials = Utilities.getTrialsForExperiment(inApp, inExp)
3701
- print "...done."
3702
+ print("...done.")
3703
return trials
3704
3705
def loadExperiments():
3706
- print "loading data..."
3707
+ print("loading data...")
3708
Utilities.setSession(config)
3709
experiments = Utilities.getExperimentsForApplication(inApp)
3710
- print "...done."
3711
+ print("...done.")
3712
return experiments
3713
3714
def buildClassifier(results):
3715
- print "building classifier..."
3716
+ print("building classifier...")
3717
metadataFields = HashSet()
3718
metadataFields.add("molecule name")
3719
metadataFields.add("basis set")
3720
3721
#classifier.setClassifierType(CQoSClassifierOperation.SUPPORT_VECTOR_MACHINE)
3722
classifier.processData()
3723
classifier.writeClassifier(fileName)
3724
- print "...done."
3725
+ print("...done.")
3726
3727
3728
-print "--------------- JPython test script start ------------"
3729
+print("--------------- JPython test script start ------------")
3730
3731
getParameters()
3732
results = ArrayList()
3733
3734
-print "getting trials..."
3735
+print("getting trials...")
3736
3737
# trials = loadTrials()
3738
# for trial in trials:
3739
3740
experiments = loadExperiments()
3741
for experiment in experiments:
3742
inExp = experiment.getName();
3743
- print "processing experiment: ", inExp
3744
+ print("processing experiment: ", inExp)
3745
trials = loadTrials()
3746
for trial in trials:
3747
loaded = TrialMeanResult(trial)
3748
results.add(loaded)
3749
3750
-print "...done."
3751
-print "Total Trials:", results.size()
3752
+print("...done.")
3753
+print("Total Trials:", results.size())
3754
3755
buildClassifier(results)
3756
3757
-print "---------------- JPython test script end -------------"
3758
+print("---------------- JPython test script end -------------")
3759
--- a/tools/src/perfexplorer/cqos/evaluate.py (original)
3760
--- b/tools/src/perfexplorer/cqos/evaluate.py (refactored)
3761
3762
global inApp
3763
global inExp
3764
global fileName
3765
- print "getting parameters..."
3766
+ print("getting parameters...")
3767
parameterMap = PerfExplorerModel.getModel().getScriptParameters()
3768
keys = parameterMap.keySet()
3769
for key in keys:
3770
- print key, parameterMap.get(key)
3771
+ print(key, parameterMap.get(key))
3772
config = parameterMap.get("config")
3773
inApp = parameterMap.get("app")
3774
inExp = parameterMap.get("exp")
3775
fileName = parameterMap.get("fileName")
3776
- print "...done."
3777
+ print("...done.")
3778
3779
def testClassifier(classifier):
3780
bcgs = 0
3781
3782
className = classifier.getClass(inputFields)
3783
confidence = classifier.getConfidence()
3784
if className == "bcgs":
3785
- print inputFields
3786
+ print(inputFields)
3787
bcgs+=1
3788
if className == "fgmres":
3789
fgmres+=1
3790
3791
if className == "tfqmr":
3792
tfqmr+=1
3793
3794
- print "bcgs", bcgs
3795
- print "fgmres", fgmres
3796
- print "gmres", gmres
3797
- print "tfqmr", tfqmr
3798
+ print("bcgs", bcgs)
3799
+ print("fgmres", fgmres)
3800
+ print("gmres", gmres)
3801
+ print("tfqmr", tfqmr)
3802
3803
-print "--------------- JPython test script start ------------"
3804
+print("--------------- JPython test script start ------------")
3805
3806
getParameters()
3807
3808
-print "TESTING J48"
3809
+print("TESTING J48")
3810
classifier = CQoSClassifierOperation.readClassifier(fileName + ".j48")
3811
testClassifier(classifier)
3812
3813
-print "TESTING MP"
3814
+print("TESTING MP")
3815
classifier = CQoSClassifierOperation.readClassifier(fileName + ".mp")
3816
testClassifier(classifier)
3817
3818
-print "TESTING NB"
3819
+print("TESTING NB")
3820
classifier = CQoSClassifierOperation.readClassifier(fileName + ".nb")
3821
testClassifier(classifier)
3822
3823
-print "TESTING SVM"
3824
+print("TESTING SVM")
3825
classifier = CQoSClassifierOperation.readClassifier(fileName + ".svm")
3826
testClassifier(classifier)
3827
3828
-print "---------------- JPython test script end -------------"
3829
+print("---------------- JPython test script end -------------")
3830
--- a/tools/src/perfexplorer/cqos/linear_solver.py (original)
3831
--- b/tools/src/perfexplorer/cqos/linear_solver.py (refactored)
3832
3833
global inApp
3834
global inExp
3835
global fileName
3836
- print "getting parameters..."
3837
+ print("getting parameters...")
3838
parameterMap = PerfExplorerModel.getModel().getScriptParameters()
3839
keys = parameterMap.keySet()
3840
for key in keys:
3841
- print key, parameterMap.get(key)
3842
+ print(key, parameterMap.get(key))
3843
config = parameterMap.get("config")
3844
inApp = parameterMap.get("app")
3845
inExp = parameterMap.get("exp")
3846
fileName = parameterMap.get("fileName")
3847
- print "...done."
3848
+ print("...done.")
3849
3850
def loadTrials():
3851
- print "loading data..."
3852
+ print("loading data...")
3853
Utilities.setSession(config)
3854
trials = Utilities.getTrialsForExperiment(inApp, inExp)
3855
- print "...done."
3856
+ print("...done.")
3857
return trials
3858
3859
def loadExperiments():
3860
- print "loading data..."
3861
+ print("loading data...")
3862
Utilities.setSession(config)
3863
experiments = Utilities.getExperimentsForApplication(inApp)
3864
- print "...done."
3865
+ print("...done.")
3866
return experiments
3867
3868
def buildClassifier(results):
3869
- print "building classifier..."
3870
+ print("building classifier...")
3871
metadataFields = HashSet()
3872
3873
# general properties
3874
3875
start = time.clock()
3876
classifier = CQoSClassifierOperation(results, "P_WALL_CLOCK_TIME", metadataFields, "ksp")
3877
end = time.clock()
3878
- print end - start, " seconds to initialize classifier"
3879
+ print(end - start, " seconds to initialize classifier")
3880
classifier.setClassifierType(CQoSClassifierOperation.J48)
3881
start = time.clock()
3882
classifier.processData()
3883
end = time.clock()
3884
- print end - start, " seconds to build classifier"
3885
- print "validating classifier..."
3886
- start = time.clock()
3887
- print classifier.crossValidateModel()
3888
- end = time.clock()
3889
- print end - start, " seconds to validate classifier"
3890
+ print(end - start, " seconds to build classifier")
3891
+ print("validating classifier...")
3892
+ start = time.clock()
3893
+ print(classifier.crossValidateModel())
3894
+ end = time.clock()
3895
+ print(end - start, " seconds to validate classifier")
3896
classifier.writeClassifier(fileName + ".j48")
3897
classifier.setClassifierType(CQoSClassifierOperation.NAIVE_BAYES)
3898
start = time.clock()
3899
classifier.processData()
3900
end = time.clock()
3901
- print end - start, " seconds to build classifier"
3902
- print "validating classifier..."
3903
- start = time.clock()
3904
- print classifier.crossValidateModel()
3905
- end = time.clock()
3906
- print end - start, " seconds to validate classifier"
3907
+ print(end - start, " seconds to build classifier")
3908
+ print("validating classifier...")
3909
+ start = time.clock()
3910
+ print(classifier.crossValidateModel())
3911
+ end = time.clock()
3912
+ print(end - start, " seconds to validate classifier")
3913
classifier.writeClassifier(fileName + ".nb")
3914
classifier.setClassifierType(CQoSClassifierOperation.SUPPORT_VECTOR_MACHINE)
3915
start = time.clock()
3916
classifier.processData()
3917
end = time.clock()
3918
- print end - start, " seconds to build classifier"
3919
- print "validating classifier..."
3920
- start = time.clock()
3921
- print classifier.crossValidateModel()
3922
- end = time.clock()
3923
- print end - start, " seconds to validate classifier"
3924
+ print(end - start, " seconds to build classifier")
3925
+ print("validating classifier...")
3926
+ start = time.clock()
3927
+ print(classifier.crossValidateModel())
3928
+ end = time.clock()
3929
+ print(end - start, " seconds to validate classifier")
3930
classifier.writeClassifier(fileName + ".svm")
3931
classifier.setClassifierType(CQoSClassifierOperation.MULTILAYER_PERCEPTRON)
3932
start = time.clock()
3933
classifier.processData()
3934
end = time.clock()
3935
- print end - start, " seconds to build classifier"
3936
- print "validating classifier..."
3937
- start = time.clock()
3938
- print classifier.crossValidateModel()
3939
- end = time.clock()
3940
- print end - start, " seconds to validate classifier"
3941
+ print(end - start, " seconds to build classifier")
3942
+ print("validating classifier...")
3943
+ start = time.clock()
3944
+ print(classifier.crossValidateModel())
3945
+ end = time.clock()
3946
+ print(end - start, " seconds to validate classifier")
3947
classifier.writeClassifier(fileName + ".mp")
3948
- print "...done."
3949
+ print("...done.")
3950
return classifier
3951
3952
def testClassifier(classifier):
3953
3954
className = classifier.getClass(inputFields)
3955
confidence = classifier.getConfidence()
3956
if confidence != "bcgs":
3957
- print inputFields
3958
- print "\tSolver: ", className, confidence
3959
-
3960
-
3961
-
3962
-print "--------------- JPython test script start ------------"
3963
+ print(inputFields)
3964
+ print("\tSolver: ", className, confidence)
3965
+
3966
+
3967
+
3968
+print("--------------- JPython test script start ------------")
3969
3970
getParameters()
3971
results = ArrayList()
3972
3973
-print "getting trials..."
3974
+print("getting trials...")
3975
start = time.clock()
3976
3977
trials = loadTrials()
3978
index = 1
3979
totalTrials = trials.size()
3980
for trial in trials:
3981
- print "\rLoading trial ", index, "of", totalTrials,
3982
+ print("\rLoading trial ", index, "of", totalTrials, end=' ')
3983
loaded = TrialMeanResult(trial)
3984
"""
3985
# important - split the trial, because it's iterative, and each iteration
3986
3987
# loaded = TrialMeanResult(trial)
3988
# results.add(loaded)
3989
3990
-print "...done."
3991
+print("...done.")
3992
end = time.clock()
3993
-print end - start, " seconds to load data"
3994
-print "Total Trials:", results.size()
3995
+print(end - start, " seconds to load data")
3996
+print("Total Trials:", results.size())
3997
3998
classifier = buildClassifier(results)
3999
# classifier = CQoSClassifierOperation.readClassifier(fileName + ".j48")
4000
# testClassifier(classifier)
4001
4002
-print "---------------- JPython test script end -------------"
4003
+print("---------------- JPython test script end -------------")
4004
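In linear_solver.py the trailing comma in the old print "\rLoading trial ", index, "of", totalTrials, suppressed the newline so the progress line overwrites itself; the refactored line keeps that behaviour with end=' '. A minimal, self-contained sketch of the same idiom (the trial count here is made up):

    import sys, time

    totalTrials = 5
    for index in range(1, totalTrials + 1):
        # end=' ' keeps the cursor on the current line, like the Python 2 trailing comma
        print("\rLoading trial ", index, "of", totalTrials, end=' ')
        sys.stdout.flush()
        time.sleep(0.1)
    print()   # final newline once loading is finished

Separately, the unchanged time.clock() calls in this script were removed in Python 3.8; time.perf_counter() is the usual replacement, but that change is outside the scope of this mechanical patch.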
--- a/tools/src/perfexplorer/cqos/loopksp.py (original)
4005
--- b/tools/src/perfexplorer/cqos/loopksp.py (refactored)
4006
4007
import sys
4008
import time
4009
-import commands
4010
+import subprocess
4011
4012
True = 1
4013
False = 0
4014
4015
for kspmaxit in ['200', '400', '600']:
4016
for gridx in ['16', '32']:
4017
gridy=gridx
4018
- gridsize = `gridx` + 'x' + `gridy`
4019
+ gridsize = repr(gridx) + 'x' + repr(gridy)
4020
#for pc in ['jacobi', 'bjacobi', 'none', 'sor', 'asm', 'cholesky']:
4021
for ksprtol in ['1.000000e-04', '1.000000e-05']:
4022
getrec = 'java -cp /home/khuck/tau2/tools/src/perfexplorer/classifier.jar:/home/khuck/.ParaProf/weka.jar cqos.CQoSClassifier /tmp/classifier.nosplit.mp lidvelocity:' + lidvelocity + ' gridsize:' + gridx + 'x' + gridx + ' grashof:' + grashof + ' cflini:' + cflini + ' snesrtol:' + snesrtol + ' kspmaxit:' + kspmaxit + ' ksprtol:' + ksprtol
4023
# get KSP recommendation
4024
#print getrec
4025
- (status, output) = commands.getstatusoutput(getrec)
4026
+ (status, output) = subprocess.getstatusoutput(getrec)
4027
#print output
4028
if output.startswith('fgmres'):
4029
ksp='fgmres'
4030
4031
ksp='bcgs'
4032
if output.startswith('tfqmr'):
4033
ksp='tfqmr'
4034
- print ksp
4035
+ print(ksp)
4036
"""
4037
# get PC recommendation
4038
getrec = 'java -cp /home/khuck/tau2/tools/src/perfexplorer/classifier.jar:/home/khuck/.ParaProf/weka.jar cqos.CQoSClassifier /tmp/classifier.pc.mp lidvelocity:' + lidvelocity + ' gridsize:' + gridx + 'x' + gridx + ' grashof:' + grashof + ' cflini:' + cflini + ' snesrtol:' + snesrtol + ' kspmaxit:' + kspmaxit + ' ksp:' + ksp + ' ksprtol:' + ksprtol
4039
4040
4041
# make directories for results
4042
dirname = 'ex27'+'-'+ procs + '-' + 'x' + gridx + '-' + 'y' + gridy + '-' + 'lid' + lidvelocity + '-' + 'grh' + grashof + '-' + 'srtol' + snesrtol + '-' + 'krtol' + ksprtol + '-' + 'snes' + snes
4043
- print dirname
4044
+ print(dirname)
4045
createdir= 'mkdir /home/khuck/data/petsc/' + classifier + '/' + dirname
4046
#print createdir
4047
- commands.getstatusoutput(createdir)
4048
- commands.getstatusoutput(createdir + '/default')
4049
- commands.getstatusoutput(createdir + '/' + ksp)
4050
+ subprocess.getstatusoutput(createdir)
4051
+ subprocess.getstatusoutput(createdir + '/default')
4052
+ subprocess.getstatusoutput(createdir + '/' + ksp)
4053
# run with default solver
4054
mycommand = '$MPIEXEC -np ' + procs + ' /home/khuck/src/petsc/metadata/ex27 ' ' -snes_type ' + snes + ' -lidvelocity ' + lidvelocity + ' -da_grid_x ' + gridx + ' -da_grid_y ' + gridx + ' -print -snes_monitor -snes_monitor_metadata -grashof ' + grashof + ' -cfl_ini ' + cflini + ' -snes_rtol ' + snesrtol + ' -ksp_rtol ' + ksprtol + ' -preload off' + ' >& /home/khuck/data/petsc/' + classifier + '/' + dirname + '/default.log'
4055
# print mycommand
4056
start = time.time()
4057
- (status, output) = commands.getstatusoutput(mycommand)
4058
+ (status, output) = subprocess.getstatusoutput(mycommand)
4059
end = time.time()
4060
default = end - start
4061
- print 'DEFAULT: ', default
4062
- commands.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/default/.')
4063
+ print('DEFAULT: ', default)
4064
+ subprocess.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/default/.')
4065
# run with recommendation
4066
mycommand = '$MPIEXEC -np ' + procs + ' /home/khuck/src/petsc/metadata/ex27 ' ' -snes_type ' + snes + ' -ksp_type ' + ksp + ' -lidvelocity ' + lidvelocity + ' -da_grid_x ' + gridx + ' -da_grid_y ' + gridx + ' -print -snes_monitor -snes_monitor_metadata -grashof ' + grashof + ' -cfl_ini ' + cflini + ' -snes_rtol ' + snesrtol + ' -ksp_rtol ' + ksprtol + ' -preload off' + ' >& /home/khuck/data/petsc/' + classifier + '/' + dirname + '/' + classifier + '.log'
4067
start = time.time()
4068
- (status, output) = commands.getstatusoutput(mycommand)
4069
+ (status, output) = subprocess.getstatusoutput(mycommand)
4070
end = time.time()
4071
recommended = end - start
4072
# print output
4073
- print 'RECOMMENDED: ', recommended
4074
- commands.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/' + ksp + '/.')
4075
- f.write(lidvelocity + ',' + snesrtol + ',' + grashof + ',' + cflini + ',' + kspmaxit + ',' + gridsize + ',' + ksprtol + ',' + ksp + ',' + `default` + ',' + `recommended` + '\n')
4076
+ print('RECOMMENDED: ', recommended)
4077
+ subprocess.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/' + ksp + '/.')
4078
+ f.write(lidvelocity + ',' + snesrtol + ',' + grashof + ',' + cflini + ',' + kspmaxit + ',' + gridsize + ',' + ksprtol + ',' + ksp + ',' + repr(default) + ',' + repr(recommended) + '\n')
4079
f.flush()
4080
4081
f.close()
4082
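loopksp.py (and the other loop scripts below) swap the removed commands module for subprocess; subprocess.getstatusoutput() is the Python 3 equivalent and likewise runs its argument through a shell and returns an (exit status, output) pair. A minimal sketch, with a harmless echo standing in for the real java -cp ... classifier invocation:

    import subprocess

    # Equivalent of the old commands.getstatusoutput(getrec)
    status, output = subprocess.getstatusoutput('echo fgmres')
    ksp = 'bcgs'
    if output.startswith('fgmres'):
        ksp = 'fgmres'
    if output.startswith('tfqmr'):
        ksp = 'tfqmr'
    print(status, ksp)

One thing the automated refactoring does not catch: the unchanged context lines True = 1 and False = 0 are a SyntaxError on Python 3, where True and False are keywords, so these loop scripts still need a manual edit before they run under a Python 3 interpreter.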
--- a/tools/src/perfexplorer/cqos/looppc.py (original)
4083
--- b/tools/src/perfexplorer/cqos/looppc.py (refactored)
4084
4085
import sys
4086
import time
4087
-import commands
4088
+import subprocess
4089
4090
True = 1
4091
False = 0
4092
4093
for kspmaxit in ['200', '400', '600']:
4094
for gridx in ['16', '32']:
4095
gridy=gridx
4096
- gridsize = `gridx` + 'x' + `gridy`
4097
+ gridsize = repr(gridx) + 'x' + repr(gridy)
4098
#for pc in ['jacobi', 'bjacobi', 'none', 'sor', 'asm', 'cholesky']:
4099
for ksprtol in ['1.000000e-04', '1.000000e-05']:
4100
getrec = 'java -cp /home/khuck/tau2/tools/src/perfexplorer/classifier.jar:/home/khuck/.ParaProf/weka.jar cqos.CQoSClassifier /tmp/classifier.nosplit.mp lidvelocity:' + lidvelocity + ' gridsize:' + gridx + 'x' + gridx + ' grashof:' + grashof + ' cflini:' + cflini + ' snesrtol:' + snesrtol + ' kspmaxit:' + kspmaxit + ' ksprtol:' + ksprtol
4101
# get KSP recommendation
4102
#print getrec
4103
- (status, output) = commands.getstatusoutput(getrec)
4104
+ (status, output) = subprocess.getstatusoutput(getrec)
4105
#print output
4106
if output.startswith('fgmres'):
4107
ksp='fgmres'
4108
4109
ksp='bcgs'
4110
if output.startswith('tfqmr'):
4111
ksp='tfqmr'
4112
- print ksp
4113
+ print(ksp)
4114
# get PC recommendation
4115
getrec = 'java -cp /home/khuck/tau2/tools/src/perfexplorer/classifier.jar:/home/khuck/.ParaProf/weka.jar cqos.CQoSClassifier /tmp/classifier.pc.mp lidvelocity:' + lidvelocity + ' gridsize:' + gridx + 'x' + gridx + ' grashof:' + grashof + ' cflini:' + cflini + ' snesrtol:' + snesrtol + ' kspmaxit:' + kspmaxit + ' ksp:' + ksp + ' ksprtol:' + ksprtol
4116
#print getrec
4117
- (status, output) = commands.getstatusoutput(getrec)
4118
+ (status, output) = subprocess.getstatusoutput(getrec)
4119
#for pc in ['jacobi', 'bjacobi', 'none', 'sor', 'asm', 'cholesky']:
4120
#print output
4121
if output.startswith('jacobi'):
4122
4123
if output.startswith('icc'):
4124
pc='icc'
4125
procs='1'
4126
- print pc
4127
+ print(pc)
4128
4129
# make directories for results
4130
dirname = 'ex27'+'-'+ procs + '-' + 'x' + gridx + '-' + 'y' + gridy + '-' + 'lid' + lidvelocity + '-' + 'grh' + grashof + '-' + 'srtol' + snesrtol + '-' + 'krtol' + ksprtol + '-' + 'snes' + snes + '-' + 'pc' + pc
4131
- print dirname
4132
+ print(dirname)
4133
createdir= 'mkdir /home/khuck/data/petsc/' + classifier + '/' + dirname
4134
#print createdir
4135
- commands.getstatusoutput(createdir)
4136
- commands.getstatusoutput(createdir + '/default')
4137
- commands.getstatusoutput(createdir + '/' + ksp)
4138
+ subprocess.getstatusoutput(createdir)
4139
+ subprocess.getstatusoutput(createdir + '/default')
4140
+ subprocess.getstatusoutput(createdir + '/' + ksp)
4141
# run with default solver
4142
mycommand = '$MPIEXEC -np ' + procs + ' /home/khuck/src/petsc/metadata/ex27 ' ' -snes_type ' + snes + ' -ksp_type gmres -pc_type ilu -lidvelocity ' + lidvelocity + ' -da_grid_x ' + gridx + ' -da_grid_y ' + gridx + ' -print -snes_monitor -snes_monitor_metadata -grashof ' + grashof + ' -cfl_ini ' + cflini + ' -snes_rtol ' + snesrtol + ' -ksp_rtol ' + ksprtol + ' -preload off' + ' >& /home/khuck/data/petsc/' + classifier + '/' + dirname + '/default.log'
4143
# print mycommand
4144
start = time.time()
4145
- (status, output) = commands.getstatusoutput(mycommand)
4146
+ (status, output) = subprocess.getstatusoutput(mycommand)
4147
end = time.time()
4148
default = end - start
4149
- print 'DEFAULT: ', default
4150
- commands.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/default/.')
4151
+ print('DEFAULT: ', default)
4152
+ subprocess.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/default/.')
4153
# run with recommendation
4154
mycommand = '$MPIEXEC -np ' + procs + ' /home/khuck/src/petsc/metadata/ex27 ' ' -snes_type ' + snes + ' -ksp_type ' + ksp + ' -pc_type ' + pc + ' -lidvelocity ' + lidvelocity + ' -da_grid_x ' + gridx + ' -da_grid_y ' + gridx + ' -print -snes_monitor -snes_monitor_metadata -grashof ' + grashof + ' -cfl_ini ' + cflini + ' -snes_rtol ' + snesrtol + ' -ksp_rtol ' + ksprtol + ' -preload off' + ' >& /home/khuck/data/petsc/' + classifier + '/' + dirname + '/' + classifier + '.log'
4155
start = time.time()
4156
- (status, output) = commands.getstatusoutput(mycommand)
4157
+ (status, output) = subprocess.getstatusoutput(mycommand)
4158
end = time.time()
4159
recommended = end - start
4160
# print output
4161
- print 'RECOMMENDED: ', recommended
4162
- commands.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/' + ksp + '/.')
4163
- f.write(lidvelocity + ',' + snesrtol + ',' + grashof + ',' + cflini + ',' + kspmaxit + ',' + gridsize + ',' + ksprtol + ',' + pc + ',' + ksp + ',' + `default` + ',' + `recommended` + '\n')
4164
+ print('RECOMMENDED: ', recommended)
4165
+ subprocess.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/' + ksp + '/.')
4166
+ f.write(lidvelocity + ',' + snesrtol + ',' + grashof + ',' + cflini + ',' + kspmaxit + ',' + gridsize + ',' + ksprtol + ',' + pc + ',' + ksp + ',' + repr(default) + ',' + repr(recommended) + '\n')
4167
f.flush()
4168
4169
f.close()
4170
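The backtick expressions around gridx, gridy, default and recommended become repr() calls, which is the direct equivalent. Since gridx and gridy are already strings, repr() keeps their quotes in the result, exactly as the backticks did; a small sketch of what ends up in gridsize and in the results file:

    gridx = '16'
    gridy = gridx
    default = 1.5          # stands in for the measured wall-clock time

    gridsize = repr(gridx) + 'x' + repr(gridy)
    print(gridsize)        # prints '16'x'16' - the inner quotes come from repr() on a str
    print(repr(default))   # prints 1.5
    # str(gridx) + 'x' + str(gridy) would give the plainer 16x16, but that would
    # change the strings written to the CSV line, so the patch keeps repr().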
--- a/tools/src/perfexplorer/cqos/loopy.py (original)
4171
--- b/tools/src/perfexplorer/cqos/loopy.py (refactored)
4172
4173
import sys
4174
import time
4175
-import commands
4176
+import subprocess
4177
4178
True = 1
4179
False = 0
4180
4181
for kspmaxit in ['200', '400', '600']:
4182
for gridx in ['16', '32']:
4183
gridy=gridx
4184
- gridsize = `gridx` + 'x' + `gridy`
4185
+ gridsize = repr(gridx) + 'x' + repr(gridy)
4186
#for pc in ['jacobi', 'ilu', 'bjacobi', 'none', 'sor', 'asm', 'cholesky', 'icc']:
4187
for pc in ['jacobi', 'bjacobi', 'none', 'sor', 'asm', 'cholesky']:
4188
#for matrixsize in ['15876x15876', '3844x3844']:
4189
4190
getrec = 'java -cp /home/khuck/tau2/tools/src/perfexplorer/classifier.jar:/home/khuck/.ParaProf/weka.jar cqos.CQoSClassifier /tmp/classifier.nosplit.mp lidvelocity:' + lidvelocity + ' gridsize:' + gridx + 'x' + gridx + ' grashof:' + grashof + ' cflini:' + cflini + ' snesrtol:' + snesrtol + ' kspmaxit:' + kspmaxit + ' pc:' + pc2 + ' ksprtol:' + ksprtol
4191
# get recommendation
4192
#print getrec
4193
- (status, output) = commands.getstatusoutput(getrec)
4194
+ (status, output) = subprocess.getstatusoutput(getrec)
4195
#print output
4196
if output.startswith('fgmres'):
4197
ksp='fgmres'
4198
4199
ksp='bcgs'
4200
if output.startswith('tfqmr'):
4201
ksp='tfqmr'
4202
- print ksp
4203
+ print(ksp)
4204
# make directories for results
4205
dirname = 'ex27'+'-'+ procs + '-' + 'x' + gridx + '-' + 'y' + gridy + '-' + 'lid' + lidvelocity + '-' + 'grh' + grashof + '-' + 'srtol' + snesrtol + '-' + 'krtol' + ksprtol + '-' + 'snes' + snes + '-' + 'pc' + pc
4206
- print dirname
4207
+ print(dirname)
4208
createdir= 'mkdir /home/khuck/data/petsc/' + classifier + '/' + dirname
4209
#print createdir
4210
- commands.getstatusoutput(createdir)
4211
- commands.getstatusoutput(createdir + '/default')
4212
- commands.getstatusoutput(createdir + '/' + ksp)
4213
+ subprocess.getstatusoutput(createdir)
4214
+ subprocess.getstatusoutput(createdir + '/default')
4215
+ subprocess.getstatusoutput(createdir + '/' + ksp)
4216
# run with default solver
4217
mycommand = '$MPIEXEC -np ' + procs + ' /home/khuck/src/petsc/metadata/ex27 ' ' -snes_type ' + snes + ' -ksp_type ' + ksp + ' -lidvelocity ' + lidvelocity + ' -da_grid_x ' + gridx + ' -da_grid_y ' + gridx + ' -print -snes_monitor -snes_monitor_metadata -grashof ' + grashof + ' -cfl_ini ' + cflini + ' -snes_rtol ' + snesrtol + ' -ksp_rtol ' + ksprtol + ' -pc_type ' + pc + ' -preload off' + ' >& /home/khuck/data/petsc/' + classifier + '/' + dirname + '/default.log'
4218
# print mycommand
4219
start = time.time()
4220
- (status, output) = commands.getstatusoutput(mycommand)
4221
+ (status, output) = subprocess.getstatusoutput(mycommand)
4222
end = time.time()
4223
default = end - start
4224
- print 'DEFAULT: ', default
4225
- commands.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/default/.')
4226
+ print('DEFAULT: ', default)
4227
+ subprocess.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/default/.')
4228
# run with recommendation
4229
mycommand = '$MPIEXEC -np ' + procs + ' /home/khuck/src/petsc/metadata/ex27 ' ' -snes_type ' + snes + ' -ksp_type ' + ksp + ' -lidvelocity ' + lidvelocity + ' -da_grid_x ' + gridx + ' -da_grid_y ' + gridx + ' -print -snes_monitor -snes_monitor_metadata -grashof ' + grashof + ' -cfl_ini ' + cflini + ' -snes_rtol ' + snesrtol + ' -ksp_rtol ' + ksprtol + ' -pc_type ' + pc + ' -preload off' + ' >& /home/khuck/data/petsc/' + classifier + '/' + dirname + '/' + classifier + '.log'
4230
start = time.time()
4231
- (status, output) = commands.getstatusoutput(mycommand)
4232
+ (status, output) = subprocess.getstatusoutput(mycommand)
4233
end = time.time()
4234
recommended = end - start
4235
# print output
4236
- print 'RECOMMENDED: ', recommended
4237
- commands.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/' + ksp + '/.')
4238
- f.write(lidvelocity + ',' + snesrtol + ',' + grashof + ',' + cflini + ',' + kspmaxit + ',' + gridsize + ',' + pc + ',' + ksprtol + ',' + ksp + ',' + `default` + ',' + `recommended` + '\n')
4239
+ print('RECOMMENDED: ', recommended)
4240
+ subprocess.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/' + ksp + '/.')
4241
+ f.write(lidvelocity + ',' + snesrtol + ',' + grashof + ',' + cflini + ',' + kspmaxit + ',' + gridsize + ',' + pc + ',' + ksprtol + ',' + ksp + ',' + repr(default) + ',' + repr(recommended) + '\n')
4242
4243
f.close()
4244
4245
--- a/tools/src/perfexplorer/cqos/loopy2.py (original)
4246
--- b/tools/src/perfexplorer/cqos/loopy2.py (refactored)
4247
4248
import sys
4249
import time
4250
-import commands
4251
+import subprocess
4252
4253
True = 1
4254
False = 0
4255
4256
for kspmaxit in ['200', '400', '600']:
4257
for gridx in ['16', '32']:
4258
gridy=gridx
4259
- gridsize = `gridx` + 'x' + `gridy`
4260
+ gridsize = repr(gridx) + 'x' + repr(gridy)
4261
#for pc in ['jacobi', 'bjacobi', 'none', 'sor', 'asm', 'cholesky']:
4262
for ksprtol in ['1.000000e-04', '1.000000e-05']:
4263
getrec = 'java -cp /home/khuck/tau2/tools/src/perfexplorer/classifier.jar:/home/khuck/.ParaProf/weka.jar cqos.CQoSClassifier /tmp/classifier.nosplit.mp lidvelocity:' + lidvelocity + ' gridsize:' + gridx + 'x' + gridx + ' grashof:' + grashof + ' cflini:' + cflini + ' snesrtol:' + snesrtol + ' kspmaxit:' + kspmaxit + ' ksprtol:' + ksprtol
4264
# get KSP recommendation
4265
#print getrec
4266
- (status, output) = commands.getstatusoutput(getrec)
4267
+ (status, output) = subprocess.getstatusoutput(getrec)
4268
#print output
4269
if output.startswith('fgmres'):
4270
ksp='fgmres'
4271
4272
ksp='bcgs'
4273
if output.startswith('tfqmr'):
4274
ksp='tfqmr'
4275
- print ksp
4276
+ print(ksp)
4277
"""
4278
# get PC recommendation
4279
getrec = 'java -cp /home/khuck/tau2/tools/src/perfexplorer/classifier.jar:/home/khuck/.ParaProf/weka.jar cqos.CQoSClassifier /tmp/classifier.pc.mp lidvelocity:' + lidvelocity + ' gridsize:' + gridx + 'x' + gridx + ' grashof:' + grashof + ' cflini:' + cflini + ' snesrtol:' + snesrtol + ' kspmaxit:' + kspmaxit + ' ksp:' + ksp + ' ksprtol:' + ksprtol
4280
4281
4282
# make directories for results
4283
dirname = 'ex27'+'-'+ procs + '-' + 'x' + gridx + '-' + 'y' + gridy + '-' + 'lid' + lidvelocity + '-' + 'grh' + grashof + '-' + 'srtol' + snesrtol + '-' + 'krtol' + ksprtol + '-' + 'snes' + snes
4284
- print dirname
4285
+ print(dirname)
4286
createdir= 'mkdir /home/khuck/data/petsc/' + classifier + '/' + dirname
4287
#print createdir
4288
- commands.getstatusoutput(createdir)
4289
- commands.getstatusoutput(createdir + '/default')
4290
- commands.getstatusoutput(createdir + '/' + ksp)
4291
+ subprocess.getstatusoutput(createdir)
4292
+ subprocess.getstatusoutput(createdir + '/default')
4293
+ subprocess.getstatusoutput(createdir + '/' + ksp)
4294
# run with default solver
4295
mycommand = '$MPIEXEC -np ' + procs + ' /home/khuck/src/petsc/metadata/ex27 ' ' -snes_type ' + snes + ' -lidvelocity ' + lidvelocity + ' -da_grid_x ' + gridx + ' -da_grid_y ' + gridx + ' -print -snes_monitor -snes_monitor_metadata -grashof ' + grashof + ' -cfl_ini ' + cflini + ' -snes_rtol ' + snesrtol + ' -ksp_rtol ' + ksprtol + ' -preload off' + ' >& /home/khuck/data/petsc/' + classifier + '/' + dirname + '/default.log'
4296
# print mycommand
4297
start = time.time()
4298
- (status, output) = commands.getstatusoutput(mycommand)
4299
+ (status, output) = subprocess.getstatusoutput(mycommand)
4300
end = time.time()
4301
default = end - start
4302
- print 'DEFAULT: ', default
4303
- commands.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/default/.')
4304
+ print('DEFAULT: ', default)
4305
+ subprocess.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/default/.')
4306
# run with recommendation
4307
mycommand = '$MPIEXEC -np ' + procs + ' /home/khuck/src/petsc/metadata/ex27 ' ' -snes_type ' + snes + ' -ksp_type ' + ksp + ' -lidvelocity ' + lidvelocity + ' -da_grid_x ' + gridx + ' -da_grid_y ' + gridx + ' -print -snes_monitor -snes_monitor_metadata -grashof ' + grashof + ' -cfl_ini ' + cflini + ' -snes_rtol ' + snesrtol + ' -ksp_rtol ' + ksprtol + ' -preload off' + ' >& /home/khuck/data/petsc/' + classifier + '/' + dirname + '/' + classifier + '.log'
4308
start = time.time()
4309
- (status, output) = commands.getstatusoutput(mycommand)
4310
+ (status, output) = subprocess.getstatusoutput(mycommand)
4311
end = time.time()
4312
recommended = end - start
4313
# print output
4314
- print 'RECOMMENDED: ', recommended
4315
- commands.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/' + ksp + '/.')
4316
- f.write(lidvelocity + ',' + snesrtol + ',' + grashof + ',' + cflini + ',' + kspmaxit + ',' + gridsize + ',' + ksprtol + ',' + ksp + ',' + `default` + ',' + `recommended` + '\n')
4317
+ print('RECOMMENDED: ', recommended)
4318
+ subprocess.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/' + ksp + '/.')
4319
+ f.write(lidvelocity + ',' + snesrtol + ',' + grashof + ',' + cflini + ',' + kspmaxit + ',' + gridsize + ',' + ksprtol + ',' + ksp + ',' + repr(default) + ',' + repr(recommended) + '\n')
4320
f.flush()
4321
4322
f.close()
4323
--- a/tools/src/perfexplorer/cqos/optimizer.py (original)
4324
--- b/tools/src/perfexplorer/cqos/optimizer.py (refactored)
4325
4326
global inApp
4327
global inExp
4328
global fileName
4329
- print "getting parameters..."
4330
+ print("getting parameters...")
4331
parameterMap = PerfExplorerModel.getModel().getScriptParameters()
4332
keys = parameterMap.keySet()
4333
for key in keys:
4334
- print key, parameterMap.get(key)
4335
+ print(key, parameterMap.get(key))
4336
config = parameterMap.get("config")
4337
inApp = parameterMap.get("app")
4338
inExp = parameterMap.get("exp")
4339
fileName = parameterMap.get("fileName")
4340
- print "...done."
4341
+ print("...done.")
4342
4343
def loadTrials():
4344
- print "loading data..."
4345
+ print("loading data...")
4346
Utilities.setSession(config)
4347
trials = Utilities.getTrialsForExperiment(inApp, inExp)
4348
- print "...done."
4349
+ print("...done.")
4350
return trials
4351
4352
def loadExperiments():
4353
- print "loading data..."
4354
+ print("loading data...")
4355
Utilities.setSession(config)
4356
experiments = Utilities.getExperimentsForApplication(inApp)
4357
- print "...done."
4358
+ print("...done.")
4359
return experiments
4360
4361
def buildClassifier(results):
4362
- print "building classifier..."
4363
+ print("building classifier...")
4364
metadataFields = HashSet()
4365
metadataFields.add("Time")
4366
metadataFields.add("A")
4367
4368
# for performance
4369
classifier = LinearOptimizerOperation(results, "Time", metadataFields, "Time")
4370
classifier.processData()
4371
- print "...done."
4372
+ print("...done.")
4373
return classifier
4374
4375
4376
-print "--------------- JPython test script start ------------"
4377
+print("--------------- JPython test script start ------------")
4378
4379
#getParameters()
4380
results = ArrayList()
4381
4382
-print "getting trials..."
4383
+print("getting trials...")
4384
4385
trials = loadTrials()
4386
for trial in trials:
4387
loaded = TrialMeanResult(trial)
4388
results.add(loaded)
4389
4390
-print "...done."
4391
-print "Total Trials:", results.size()
4392
+print("...done.")
4393
+print("Total Trials:", results.size())
4394
4395
classifier = buildClassifier(results)
4396
r = classifier.getCoefficients()
4397
4398
-print r
4399
+print(r)
4400
for a in range(0,11):
4401
for b in range(0,11):
4402
for c in range(0,11):
4403
if a+b+c == 10:
4404
inputFields = HashMap()
4405
- inputFields.put("A", `a`)
4406
- inputFields.put("B", `b`)
4407
- inputFields.put("C", `c`)
4408
- print a, b, c, " = ", classifier.classifyInstance(inputFields)
4409
+ inputFields.put("A", repr(a))
4410
+ inputFields.put("B", repr(b))
4411
+ inputFields.put("C", repr(c))
4412
+ print(a, b, c, " = ", classifier.classifyInstance(inputFields))
4413
4414
-print "---------------- JPython test script end -------------"
4415
+print("---------------- JPython test script end -------------")
4416
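In optimizer.py the converted inputFields.put("A", repr(a)) lines hand loop counters to a Java HashMap as strings; for small non-negative integers repr() and str() produce the same text, so the output matches the old backtick version. A sketch of the enumeration with a hypothetical stand-in for classifier.classifyInstance():

    # Hypothetical stand-in for LinearOptimizerOperation.classifyInstance()
    def classify_instance(fields):
        return sum(int(v) for v in fields.values())

    for a in range(0, 11):
        for b in range(0, 11):
            for c in range(0, 11):
                if a + b + c == 10:
                    inputFields = {"A": repr(a), "B": repr(b), "C": repr(c)}
                    print(a, b, c, " = ", classify_instance(inputFields))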
--- a/tools/src/perfexplorer/cqos/pc.py (original)
4417
--- b/tools/src/perfexplorer/cqos/pc.py (refactored)
4418
4419
global inApp
4420
global inExp
4421
global fileName
4422
- print "getting parameters..."
4423
+ print("getting parameters...")
4424
parameterMap = PerfExplorerModel.getModel().getScriptParameters()
4425
keys = parameterMap.keySet()
4426
for key in keys:
4427
- print key, parameterMap.get(key)
4428
+ print(key, parameterMap.get(key))
4429
config = parameterMap.get("config")
4430
inApp = parameterMap.get("app")
4431
inExp = parameterMap.get("exp")
4432
fileName = parameterMap.get("fileName")
4433
- print "...done."
4434
+ print("...done.")
4435
4436
def loadTrials():
4437
- print "loading data..."
4438
+ print("loading data...")
4439
Utilities.setSession(config)
4440
trials = Utilities.getTrialsForExperiment(inApp, inExp)
4441
- print "...done."
4442
+ print("...done.")
4443
return trials
4444
4445
def loadExperiments():
4446
- print "loading data..."
4447
+ print("loading data...")
4448
Utilities.setSession(config)
4449
experiments = Utilities.getExperimentsForApplication(inApp)
4450
- print "...done."
4451
+ print("...done.")
4452
return experiments
4453
4454
def buildClassifier(results):
4455
- print "building classifier..."
4456
+ print("building classifier...")
4457
metadataFields = HashSet()
4458
4459
# general properties
4460
4461
start = time.clock()
4462
classifier = CQoSClassifierOperation(results, "P_WALL_CLOCK_TIME", metadataFields, "pc")
4463
end = time.clock()
4464
- print end - start, " seconds to initialize classifier"
4465
+ print(end - start, " seconds to initialize classifier")
4466
classifier.setClassifierType(CQoSClassifierOperation.J48)
4467
start = time.clock()
4468
classifier.processData()
4469
end = time.clock()
4470
- print end - start, " seconds to build classifier"
4471
- print "validating classifier..."
4472
- start = time.clock()
4473
- print classifier.crossValidateModel()
4474
- end = time.clock()
4475
- print end - start, " seconds to validate classifier"
4476
+ print(end - start, " seconds to build classifier")
4477
+ print("validating classifier...")
4478
+ start = time.clock()
4479
+ print(classifier.crossValidateModel())
4480
+ end = time.clock()
4481
+ print(end - start, " seconds to validate classifier")
4482
classifier.writeClassifier(fileName + ".j48")
4483
classifier.setClassifierType(CQoSClassifierOperation.NAIVE_BAYES)
4484
start = time.clock()
4485
classifier.processData()
4486
end = time.clock()
4487
- print end - start, " seconds to build classifier"
4488
- print "validating classifier..."
4489
- start = time.clock()
4490
- print classifier.crossValidateModel()
4491
- end = time.clock()
4492
- print end - start, " seconds to validate classifier"
4493
+ print(end - start, " seconds to build classifier")
4494
+ print("validating classifier...")
4495
+ start = time.clock()
4496
+ print(classifier.crossValidateModel())
4497
+ end = time.clock()
4498
+ print(end - start, " seconds to validate classifier")
4499
classifier.writeClassifier(fileName + ".nb")
4500
classifier.setClassifierType(CQoSClassifierOperation.SUPPORT_VECTOR_MACHINE)
4501
start = time.clock()
4502
classifier.processData()
4503
end = time.clock()
4504
- print end - start, " seconds to build classifier"
4505
- print "validating classifier..."
4506
- start = time.clock()
4507
- print classifier.crossValidateModel()
4508
- end = time.clock()
4509
- print end - start, " seconds to validate classifier"
4510
+ print(end - start, " seconds to build classifier")
4511
+ print("validating classifier...")
4512
+ start = time.clock()
4513
+ print(classifier.crossValidateModel())
4514
+ end = time.clock()
4515
+ print(end - start, " seconds to validate classifier")
4516
classifier.writeClassifier(fileName + ".svm")
4517
classifier.setClassifierType(CQoSClassifierOperation.MULTILAYER_PERCEPTRON)
4518
start = time.clock()
4519
classifier.processData()
4520
end = time.clock()
4521
- print end - start, " seconds to build classifier"
4522
- print "validating classifier..."
4523
- start = time.clock()
4524
- print classifier.crossValidateModel()
4525
- end = time.clock()
4526
- print end - start, " seconds to validate classifier"
4527
+ print(end - start, " seconds to build classifier")
4528
+ print("validating classifier...")
4529
+ start = time.clock()
4530
+ print(classifier.crossValidateModel())
4531
+ end = time.clock()
4532
+ print(end - start, " seconds to validate classifier")
4533
classifier.writeClassifier(fileName + ".mp")
4534
- print "...done."
4535
+ print("...done.")
4536
return classifier
4537
4538
def testClassifier(classifier):
4539
4540
className = classifier.getClass(inputFields)
4541
confidence = classifier.getConfidence()
4542
if confidence != "bcgs":
4543
- print inputFields
4544
- print "\tSolver: ", className, confidence
4545
-
4546
-
4547
-
4548
-print "--------------- JPython test script start ------------"
4549
+ print(inputFields)
4550
+ print("\tSolver: ", className, confidence)
4551
+
4552
+
4553
+
4554
+print("--------------- JPython test script start ------------")
4555
4556
getParameters()
4557
results = ArrayList()
4558
4559
-print "getting trials..."
4560
+print("getting trials...")
4561
start = time.clock()
4562
4563
trials = loadTrials()
4564
index = 1
4565
totalTrials = trials.size()
4566
for trial in trials:
4567
- print "\rLoading trial ", index, "of", totalTrials,
4568
+ print("\rLoading trial ", index, "of", totalTrials, end=' ')
4569
loaded = TrialMeanResult(trial)
4570
"""
4571
# important - split the trial, because it's iterative, and each iteration
4572
4573
# loaded = TrialMeanResult(trial)
4574
# results.add(loaded)
4575
4576
-print "...done."
4577
+print("...done.")
4578
end = time.clock()
4579
-print end - start, " seconds to load data"
4580
-print "Total Trials:", results.size()
4581
+print(end - start, " seconds to load data")
4582
+print("Total Trials:", results.size())
4583
4584
classifier = buildClassifier(results)
4585
# classifier = CQoSClassifierOperation.readClassifier(fileName + ".j48")
4586
# testClassifier(classifier)
4587
4588
-print "---------------- JPython test script end -------------"
4589
+print("---------------- JPython test script end -------------")
4590
--- a/tools/src/perfexplorer/cqos/recreate.py (original)
4591
--- b/tools/src/perfexplorer/cqos/recreate.py (refactored)
4592
4593
import sys
4594
import time
4595
-import commands
4596
+import subprocess
4597
4598
True = 1
4599
False = 0
4600
4601
for grashof in ['100', '500', '1000']:
4602
for gridx in ['16', '32']:
4603
gridy=gridx
4604
- gridsize = `gridx` + 'x' + `gridy`
4605
+ gridsize = repr(gridx) + 'x' + repr(gridy)
4606
#for pc in ['jacobi', 'bjacobi', 'none', 'sor', 'asm', 'cholesky']:
4607
getrec = 'java -cp /home/khuck/tau2/tools/src/perfexplorer/classifier.jar:/home/khuck/.ParaProf/weka.jar cqos.CQoSClassifier /tmp/classifier.nosplit.' + classifier + ' lidvelocity:' + lidvelocity + ' gridsize:' + gridx + 'x' + gridx + ' grashof:' + grashof
4608
# get KSP recommendation
4609
#print getrec
4610
- (status, output) = commands.getstatusoutput(getrec)
4611
+ (status, output) = subprocess.getstatusoutput(getrec)
4612
#print output
4613
if output.startswith('fgmres'):
4614
ksp='fgmres'
4615
4616
ksp='bcgs'
4617
if output.startswith('tfqmr'):
4618
ksp='tfqmr'
4619
- print ksp
4620
+ print(ksp)
4621
# get PC recommendation
4622
getrec = 'java -cp /home/khuck/tau2/tools/src/perfexplorer/classifier.jar:/home/khuck/.ParaProf/weka.jar cqos.CQoSClassifier /tmp/classifier.pc.' + classifier + ' lidvelocity:' + lidvelocity + ' gridsize:' + gridx + 'x' + gridx + ' grashof:' + grashof + ' ksp:' + ksp
4623
#print getrec
4624
- (status, output) = commands.getstatusoutput(getrec)
4625
+ (status, output) = subprocess.getstatusoutput(getrec)
4626
#for pc in ['jacobi', 'bjacobi', 'none', 'sor', 'asm', 'cholesky']:
4627
#print output
4628
if output.startswith('jacobi'):
4629
4630
if output.startswith('icc'):
4631
pc='icc'
4632
procs='1'
4633
- print pc
4634
+ print(pc)
4635
4636
# make directories for results
4637
dirname = 'ex27'+'-'+ procs + '-' + 'x' + gridx + '-' + 'y' + gridy + '-' + 'lid' + lidvelocity + '-' + 'grh' + grashof + '-' + 'srtol' + snesrtol + '-' + 'krtol' + ksprtol + '-' + 'snes' + snes + '-' + 'pc' + pc
4638
- print dirname
4639
+ print(dirname)
4640
createdir= 'mkdir /home/khuck/data/petsc/' + classifier + '/' + dirname
4641
#print createdir
4642
- commands.getstatusoutput(createdir)
4643
- commands.getstatusoutput(createdir + '/default')
4644
- commands.getstatusoutput(createdir + '/' + ksp)
4645
+ subprocess.getstatusoutput(createdir)
4646
+ subprocess.getstatusoutput(createdir + '/default')
4647
+ subprocess.getstatusoutput(createdir + '/' + ksp)
4648
# run with default solver
4649
mycommand = '$MPIEXEC -np ' + procs + ' /home/khuck/src/petsc/metadata/ex27 ' ' -snes_type ' + snes + ' -ksp_type gmres -pc_type ilu -lidvelocity ' + lidvelocity + ' -da_grid_x ' + gridx + ' -da_grid_y ' + gridx + ' -print -snes_monitor -grashof ' + grashof + ' -cfl_ini ' + cflini + ' -snes_rtol ' + snesrtol + ' -ksp_rtol ' + ksprtol + ' -preload off' + ' >& /home/khuck/data/petsc/' + classifier + '/' + dirname + '/default.log'
4650
# print mycommand
4651
start = time.time()
4652
- (status, output) = commands.getstatusoutput(mycommand)
4653
+ (status, output) = subprocess.getstatusoutput(mycommand)
4654
end = time.time()
4655
default = end - start
4656
- print 'DEFAULT: ', default
4657
- commands.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/default/.')
4658
+ print('DEFAULT: ', default)
4659
+ subprocess.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/default/.')
4660
# run with recommendation
4661
mycommand = '$MPIEXEC -np ' + procs + ' /home/khuck/src/petsc/metadata/ex27 ' ' -snes_type ' + snes + ' -ksp_type ' + ksp + ' -pc_type ' + pc + ' -lidvelocity ' + lidvelocity + ' -da_grid_x ' + gridx + ' -da_grid_y ' + gridx + ' -print -snes_monitor -grashof ' + grashof + ' -cfl_ini ' + cflini + ' -snes_rtol ' + snesrtol + ' -ksp_rtol ' + ksprtol + ' -preload off' + ' >& /home/khuck/data/petsc/' + classifier + '/' + dirname + '/' + classifier + '.log'
4662
start = time.time()
4663
- (status, output) = commands.getstatusoutput(mycommand)
4664
+ (status, output) = subprocess.getstatusoutput(mycommand)
4665
end = time.time()
4666
recommended = end - start
4667
# print output
4668
- print 'RECOMMENDED: ', recommended
4669
- commands.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/' + ksp + '/.')
4670
+ print('RECOMMENDED: ', recommended)
4671
+ subprocess.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/' + ksp + '/.')
4672
#f.write('lidvelocity, grashof, gridsize, ksp, pc, default, recommended\n')
4673
- f.write(lidvelocity + ',' + grashof + ',' + gridsize + ',' + ksp + ',' + pc + ',' + `default` + ',' + `recommended` + '\n')
4674
+ f.write(lidvelocity + ',' + grashof + ',' + gridsize + ',' + ksp + ',' + pc + ',' + repr(default) + ',' + repr(recommended) + '\n')
4675
f.flush()
4676
4677
f.close()
4678
--- a/tools/src/perfexplorer/cqos/small.py (original)
4679
--- b/tools/src/perfexplorer/cqos/small.py (refactored)
4680
4681
import sys
4682
import time
4683
-import commands
4684
+import subprocess
4685
4686
True = 1
4687
False = 0
4688
4689
#for gridx in ['16', '32']:
4690
for gridx in ['16']:
4691
gridy=gridx
4692
- gridsize = `gridx` + 'x' + `gridy`
4693
+ gridsize = repr(gridx) + 'x' + repr(gridy)
4694
#for pc in ['jacobi', 'bjacobi', 'none', 'sor', 'asm', 'cholesky']:
4695
getrec = 'java -cp /home/khuck/tau2/tools/src/perfexplorer/classifier.jar:/home/khuck/.ParaProf/weka.jar cqos.CQoSClassifier /tmp/classifier.nosplit.' + classifier + ' lidvelocity:' + lidvelocity + ' gridsize:' + gridx + 'x' + gridx + ' grashof:' + grashof
4696
# get KSP recommendation
4697
#print getrec
4698
- (status, output) = commands.getstatusoutput(getrec)
4699
+ (status, output) = subprocess.getstatusoutput(getrec)
4700
#print output
4701
if output.startswith('fgmres'):
4702
ksp='fgmres'
4703
4704
ksp='bcgs'
4705
if output.startswith('tfqmr'):
4706
ksp='tfqmr'
4707
- print ksp
4708
+ print(ksp)
4709
# get PC recommendation
4710
getrec = 'java -cp /home/khuck/tau2/tools/src/perfexplorer/classifier.jar:/home/khuck/.ParaProf/weka.jar cqos.CQoSClassifier /tmp/classifier.pc.' + classifier + ' lidvelocity:' + lidvelocity + ' gridsize:' + gridx + 'x' + gridx + ' grashof:' + grashof + ' ksp:' + ksp
4711
#print getrec
4712
- (status, output) = commands.getstatusoutput(getrec)
4713
+ (status, output) = subprocess.getstatusoutput(getrec)
4714
#for pc in ['jacobi', 'bjacobi', 'none', 'sor', 'asm', 'cholesky']:
4715
#print output
4716
if output.startswith('jacobi'):
4717
4718
if output.startswith('icc'):
4719
pc='icc'
4720
procs='1'
4721
- print pc
4722
+ print(pc)
4723
4724
# make directories for results
4725
dirname = 'ex27'+'-'+ procs + '-' + 'x' + gridx + '-' + 'y' + gridy + '-' + 'lid' + lidvelocity + '-' + 'grh' + grashof + '-' + 'srtol' + snesrtol + '-' + 'krtol' + ksprtol + '-' + 'snes' + snes + '-' + 'pc' + pc
4726
- print dirname
4727
+ print(dirname)
4728
createdir= 'mkdir /home/khuck/data/petsc/' + classifier + '/' + dirname
4729
#print createdir
4730
- commands.getstatusoutput(createdir)
4731
- commands.getstatusoutput(createdir + '/default')
4732
- commands.getstatusoutput(createdir + '/' + ksp)
4733
+ subprocess.getstatusoutput(createdir)
4734
+ subprocess.getstatusoutput(createdir + '/default')
4735
+ subprocess.getstatusoutput(createdir + '/' + ksp)
4736
# run with default solver
4737
mycommand = '$MPIEXEC -np ' + procs + ' /home/khuck/src/petsc/metadata/ex27 ' ' -snes_type ' + snes + ' -ksp_type gmres -pc_type ilu -lidvelocity ' + lidvelocity + ' -da_grid_x ' + gridx + ' -da_grid_y ' + gridx + ' -print -snes_monitor -grashof ' + grashof + ' -cfl_ini ' + cflini + ' -snes_rtol ' + snesrtol + ' -ksp_rtol ' + ksprtol + ' -preload off' + ' >& /home/khuck/data/petsc/' + classifier + '/' + dirname + '/default.log'
4738
# print mycommand
4739
start = time.time()
4740
- (status, output) = commands.getstatusoutput(mycommand)
4741
+ (status, output) = subprocess.getstatusoutput(mycommand)
4742
end = time.time()
4743
default = end - start
4744
- print 'DEFAULT: ', default
4745
- commands.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/default/.')
4746
+ print('DEFAULT: ', default)
4747
+ subprocess.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/default/.')
4748
# run with recommendation
4749
mycommand = '$MPIEXEC -np ' + procs + ' /home/khuck/src/petsc/metadata/ex27 ' ' -snes_type ' + snes + ' -ksp_type ' + ksp + ' -pc_type ' + pc + ' -lidvelocity ' + lidvelocity + ' -da_grid_x ' + gridx + ' -da_grid_y ' + gridx + ' -print -snes_monitor -grashof ' + grashof + ' -cfl_ini ' + cflini + ' -snes_rtol ' + snesrtol + ' -ksp_rtol ' + ksprtol + ' -preload off' + ' >& /home/khuck/data/petsc/' + classifier + '/' + dirname + '/' + classifier + '.log'
4750
start = time.time()
4751
- (status, output) = commands.getstatusoutput(mycommand)
4752
+ (status, output) = subprocess.getstatusoutput(mycommand)
4753
end = time.time()
4754
recommended = end - start
4755
# print output
4756
- print 'RECOMMENDED: ', recommended
4757
- commands.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/' + ksp + '/.')
4758
+ print('RECOMMENDED: ', recommended)
4759
+ subprocess.getstatusoutput('mv profile.* /home/khuck/data/petsc/' + classifier + '/' + dirname + '/' + ksp + '/.')
4760
#f.write('lidvelocity, grashof, gridsize, ksp, pc, default, recommended\n')
4761
- f.write(lidvelocity + ',' + grashof + ',' + gridsize + ',' + ksp + ',' + pc + ',' + `default` + ',' + `recommended` + '\n')
4762
+ f.write(lidvelocity + ',' + grashof + ',' + gridsize + ',' + ksp + ',' + pc + ',' + repr(default) + ',' + repr(recommended) + '\n')
4763
f.flush()
4764
4765
f.close()
4766
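# Illustrative sketch (not part of the patch): Python 3 dropped the backtick
# repr syntax, so the CSV line above is rewritten with repr(). For the float
# timings involved, repr() and str() produce the same text in Python 3; the
# values below are hypothetical stand-ins for the measured runtimes.
default = 12.34
recommended = 10.5
row = ','.join(['0.9', '1000', '16x16', 'bcgs', 'jacobi', repr(default), repr(recommended)])
print(row)                              # -> 0.9,1000,16x16,bcgs,jacobi,12.34,10.5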
--- a/tools/src/perfexplorer/cqos/test_linear_solver.py (original)
4767
--- b/tools/src/perfexplorer/cqos/test_linear_solver.py (refactored)
4768
4769
global inApp
4770
global inExp
4771
global fileName
4772
- print "getting parameters..."
4773
+ print("getting parameters...")
4774
parameterMap = PerfExplorerModel.getModel().getScriptParameters()
4775
keys = parameterMap.keySet()
4776
for key in keys:
4777
- print key, parameterMap.get(key)
4778
+ print(key, parameterMap.get(key))
4779
config = parameterMap.get("config")
4780
inApp = parameterMap.get("app")
4781
inExp = parameterMap.get("exp")
4782
fileName = parameterMap.get("fileName")
4783
- print "...done."
4784
+ print("...done.")
4785
4786
def testClassifier(classifier):
4787
bcgs = 0
4788
4789
tfqmr+=1
4790
if className == "cg":
4791
cg+=1
4792
- print "bcgs", bcgs
4793
- print "fgmres", fgmres
4794
- print "gmres", gmres
4795
- print "tfqmr", tfqmr
4796
- print "cg", cg
4797
+ print("bcgs", bcgs)
4798
+ print("fgmres", fgmres)
4799
+ print("gmres", gmres)
4800
+ print("tfqmr", tfqmr)
4801
+ print("cg", cg)
4802
4803
4804
4805
-print "--------------- JPython test script start ------------"
4806
+print("--------------- JPython test script start ------------")
4807
4808
getParameters()
4809
4810
-print "TESTING J48"
4811
+print("TESTING J48")
4812
classifier = CQoSClassifierOperation.readClassifier(fileName + ".j48")
4813
testClassifier(classifier)
4814
4815
-print "TESTING MP"
4816
+print("TESTING MP")
4817
classifier = CQoSClassifierOperation.readClassifier(fileName + ".mp")
4818
testClassifier(classifier)
4819
4820
-print "TESTING NB"
4821
+print("TESTING NB")
4822
classifier = CQoSClassifierOperation.readClassifier(fileName + ".nb")
4823
testClassifier(classifier)
4824
4825
-print "TESTING SVM"
4826
+print("TESTING SVM")
4827
classifier = CQoSClassifierOperation.readClassifier(fileName + ".svm")
4828
testClassifier(classifier)
4829
4830
-print "---------------- JPython test script end -------------"
4831
+print("---------------- JPython test script end -------------")
4832
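# Illustrative sketch: most hunks in this patch are the mechanical
# print-statement to print-function rewrite. With comma-separated arguments
# the Python 3 function form keeps the same space-separated output, so the
# tallies printed by testClassifier() look identical after the change.
bcgs, fgmres = 3, 7                     # hypothetical counts
print("bcgs", bcgs)                     # prints: bcgs 3
print("fgmres", fgmres)                 # prints: fgmres 7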
--- a/tools/src/perfexplorer/etc/bluegene.long.py (original)
4833
--- b/tools/src/perfexplorer/etc/bluegene.long.py (refactored)
4834
4835
True = 1
4836
4837
def glue():
4838
- print "doing long run test for ocracoke"
4839
+ print("doing long run test for ocracoke")
4840
# load the trial
4841
Utilities.setSession("perfdmf_test")
4842
trial1 = Utilities.getTrial("gtc_bench", "ocracoke.longrun", "256p_5000ts_100micell")
4843
4844
# grapher.setLogYAxis(True)
4845
grapher.processData()
4846
4847
-print "--------------- JPython test script start ------------"
4848
+print("--------------- JPython test script start ------------")
4849
4850
glue()
4851
4852
# pe.exit()
4853
4854
-print "---------------- JPython test script end -------------"
4855
+print("---------------- JPython test script end -------------")
4856
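# Side note, hedged: several of these Jython scripts open with "True = 1" and
# "False = 0", which the patch leaves untouched. That assignment is legal in
# Python/Jython 2, but True and False are keywords in Python 3 and assigning
# to them is a SyntaxError; a full Python 3 port would simply drop those lines.
flag = True                             # the built-in constants need no definition in Python 3
print(flag, int(flag))                  # prints: True 1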
--- a/tools/src/perfexplorer/etc/bluemult.long.py (original)
4857
--- b/tools/src/perfexplorer/etc/bluemult.long.py (refactored)
4858
4859
True = 1
4860
4861
def glue():
4862
- print "doing long run test for ocracoke"
4863
+ print("doing long run test for ocracoke")
4864
# load the trial
4865
Utilities.setSession("perfdmf_test")
4866
trial1 = Utilities.getTrial("gtc_bench", "superphases", "64")
4867
4868
# grapher.setLogYAxis(True)
4869
grapher.processData()
4870
4871
-print "--------------- JPython test script start ------------"
4872
+print("--------------- JPython test script start ------------")
4873
4874
glue()
4875
4876
# pe.exit()
4877
4878
-print "---------------- JPython test script end -------------"
4879
+print("---------------- JPython test script end -------------")
4880
--- a/tools/src/perfexplorer/etc/bluemultphases.long.py (original)
4881
--- b/tools/src/perfexplorer/etc/bluemultphases.long.py (refactored)
4882
4883
True = 1
4884
4885
def glue():
4886
- print "doing long run test for ocracoke"
4887
+ print("doing long run test for ocracoke")
4888
# load the trial
4889
Utilities.setSession("perfdmf_test")
4890
# trial1 = Utilities.getTrial("gtc_bench", "superphases", "64")
4891
4892
extractor = ExtractEventOperation(result1, events)
4893
extracted = extractor.processData().get(0)
4894
4895
- print "extracted phases..."
4896
+ print("extracted phases...")
4897
4898
# get the Statistics
4899
dostats = BasicStatisticsOperation(extracted, False)
4900
stats = dostats.processData()
4901
4902
- print "got stats..."
4903
+ print("got stats...")
4904
4905
metrics = ArrayList()
4906
metrics.add("BGL_TIMERS")
4907
4908
# grapher.setLogYAxis(True)
4909
grapher.processData()
4910
4911
-print "--------------- JPython test script start ------------"
4912
+print("--------------- JPython test script start ------------")
4913
4914
glue()
4915
4916
# pe.exit()
4917
4918
-print "---------------- JPython test script end -------------"
4919
+print("---------------- JPython test script end -------------")
4920
--- a/tools/src/perfexplorer/etc/characterization.py (original)
4921
--- b/tools/src/perfexplorer/etc/characterization.py (refactored)
4922
4923
from edu.uoregon.tau.perfdmf import *
4924
4925
def glue(pe):
4926
- print "doing glue test"
4927
+ print("doing glue test")
4928
Utilities.setSession("peri_test")
4929
trial1 = Utilities.GetTrial("GTC_s_PAPI", "VN XT3", "004")
4930
trial2 = Utilities.GetTrial("GTC_s_PAPI", "VN XT3", "008")
4931
4932
for thread in diffs.getThreads():
4933
for event in diffs.getEvents():
4934
for metric in diffs.getMetrics():
4935
- print thread , event , metric
4936
+ print(thread , event , metric)
4937
# print diffs.getDataPoint(thread, event, metric, AbstractResult.EXCLUSIVE)
4938
4939
-print "--------------- JPython test script start ------------"
4940
+print("--------------- JPython test script start ------------")
4941
4942
pe = ScriptFacade()
4943
glue(pe)
4944
4945
# pe.exit()
4946
4947
-print "---------------- JPython test script end -------------"
4948
+print("---------------- JPython test script end -------------")
4949
--- a/tools/src/perfexplorer/etc/clusterTest.py (original)
4950
--- b/tools/src/perfexplorer/etc/clusterTest.py (refactored)
4951
4952
True = 1
4953
4954
def glue():
4955
- print "doing cluster test"
4956
+ print("doing cluster test")
4957
# load the trial
4958
Utilities.setSession("peri_s3d")
4959
trial = Utilities.getTrial("S3D", "hybrid-study", "hybrid")
4960
4961
kmeans = KMeansOperation(reduced, metric, AbstractResult.EXCLUSIVE, k)
4962
kmeans.processData()
4963
4964
-print "--------------- JPython test script start ------------"
4965
+print("--------------- JPython test script start ------------")
4966
4967
glue()
4968
4969
# pe.exit()
4970
4971
-print "---------------- JPython test script end -------------"
4972
+print("---------------- JPython test script end -------------")
4973
--- a/tools/src/perfexplorer/etc/computeDilation.py (original)
4974
--- b/tools/src/perfexplorer/etc/computeDilation.py (refactored)
4975
4976
inTrial = ""
4977
4978
def load():
4979
- print "loading data..."
4980
+ print("loading data...")
4981
parameterMap = PerfExplorerModel.getModel().getScriptParameters()
4982
keys = parameterMap.keySet()
4983
for key in keys:
4984
- print key, parameterMap.get(key)
4985
+ print(key, parameterMap.get(key))
4986
config = parameterMap.get("config")
4987
inApp = parameterMap.get("app")
4988
inExp = parameterMap.get("exp")
4989
4990
Utilities.setSession(config)
4991
#trial = Utilities.getTrial("s3d", "intrepid-c2h4-spacemap", "1728")
4992
trial = Utilities.getTrial(inApp, inExp, inTrial)
4993
- print "...done."
4994
+ print("...done.")
4995
return trial
4996
4997
def computeDilation(networkSize, senderCoords, receiverCoords):
4998
4999
5000
return distX+distY+distZ
5001
5002
-print "--------------- JPython test script start ------------"
5003
+print("--------------- JPython test script start ------------")
5004
trial = load()
5005
start = time.clock()
5006
-print "getting thread metadata"
5007
+print("getting thread metadata")
5008
metadata = TrialThreadMetadata(trial)
5009
-print "getting common metadata"
5010
+print("getting common metadata")
5011
commonMetadata = TrialMetadata(trial)
5012
end = time.clock()
5013
-print "metadata time:", end-start, "seconds"
5014
+print("metadata time:", end-start, "seconds")
5015
5016
networkSize = commonMetadata.getCommonAttributes().get("BGP Size")
5017
tauConfig = commonMetadata.getCommonAttributes().get("TAU Config")
5018
5019
5020
start = time.clock()
5021
if haveSendData:
5022
- print "TAU_EACH_SEND data found."
5023
+ print("TAU_EACH_SEND data found.")
5024
start = time.clock()
5025
input = TrialResult(trial)
5026
end = time.clock()
5027
- print "loading time:", end-start, "seconds"
5028
+ print("loading time:", end-start, "seconds")
5029
for thread in input.getThreads():
5030
for event in input.getUserEvents():
5031
senderCoords = metadata.getNameValue(thread, "BGP Coords")
5032
5033
data.addColumn(0, dilation/6.0, dilation/6.0)
5034
5035
end = time.clock()
5036
-print "computation time:", end-start, "seconds"
5037
+print("computation time:", end-start, "seconds")
5038
5039
avgDilation = float(dilation) / float(count)
5040
-print avgDilation, worst
5041
+print(avgDilation, worst)
5042
PerfExplorerHistogramChart.doHistogram(data);
5043
5044
-print "---------------- JPython test script end -------------"
5045
+print("---------------- JPython test script end -------------")
5046
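# Illustrative sketch only: the body of computeDilation() is largely elided in
# this hunk, but the visible "return distX+distY+distZ" and the "BGP Coords"
# metadata suggest a per-axis hop count between torus coordinates. The helper
# below is a hypothetical reconstruction of that idea, not the script's code;
# time.perf_counter() stands in for time.clock(), which Python 3.8 removed.
import time

def torus_hops(size, sender, receiver):
    # shortest distance around each ring of an X x Y x Z torus, summed
    return sum(min(abs(s - r), n - abs(s - r))
               for n, s, r in zip(size, sender, receiver))

start = time.perf_counter()
print(torus_hops((8, 8, 8), (0, 1, 2), (7, 1, 5)))        # prints: 4
print("computation time:", time.perf_counter() - start, "seconds")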
--- a/tools/src/perfexplorer/etc/correlation.py (original)
5047
--- b/tools/src/perfexplorer/etc/correlation.py (refactored)
5048
5049
from java.util import ArrayList
5050
5051
def glue():
5052
- print "doing correlation test"
5053
+ print("doing correlation test")
5054
Utilities.setSession("peri_gtc")
5055
trial = Utilities.getTrial("GTC", "ocracoke-O5", "2048")
5056
result = TrialResult(trial)
5057
5058
for event in result.getEvents():
5059
for metric in result.getMetrics():
5060
for thread in result.getThreads():
5061
- print event, CorrelationResult.typeToString(thread), metric, ":", AbstractResult.typeToString(type), result.getDataPoint(thread, event, metric, type)
5062
+ print(event, CorrelationResult.typeToString(thread), metric, ":", AbstractResult.typeToString(type), result.getDataPoint(thread, event, metric, type))
5063
5064
5065
-print "--------------- JPython test script start ------------"
5066
+print("--------------- JPython test script start ------------")
5067
5068
glue()
5069
5070
# pe.exit()
5071
5072
-print "---------------- JPython test script end -------------"
5073
+print("---------------- JPython test script end -------------")
5074
--- a/tools/src/perfexplorer/etc/difference.py (original)
5075
--- b/tools/src/perfexplorer/etc/difference.py (refactored)
5076
5077
False = 0
5078
5079
def diffs():
5080
- print "doing phase test for gtc on jaguar"
5081
+ print("doing phase test for gtc on jaguar")
5082
# load the trials
5083
Utilities.setSession("PERI_DB_production")
5084
baseline = Utilities.getTrial("gtc", "jaguar", "64")
5085
5086
diff.addInput(comparison)
5087
diff.processData()
5088
metaDiff = DifferenceMetadataOperation(baseline, comparison)
5089
- print metaDiff.differencesAsString()
5090
+ print(metaDiff.differencesAsString())
5091
5092
return
5093
5094
-print "--------------- JPython test script start ------------"
5095
+print("--------------- JPython test script start ------------")
5096
5097
diffs()
5098
5099
# pe.exit()
5100
5101
-print "---------------- JPython test script end -------------"
5102
+print("---------------- JPython test script end -------------")
5103
--- a/tools/src/perfexplorer/etc/expectation.rules.py (original)
5104
--- b/tools/src/perfexplorer/etc/expectation.rules.py (refactored)
5105
5106
True = 1
5107
5108
def glue():
5109
- print "Comparing performance between two trials, including metadata"
5110
+ print("Comparing performance between two trials, including metadata")
5111
5112
# load the trials
5113
Utilities.setSession("perfdmf_test")
5114
5115
ruleHarness.processRules()
5116
# print ruleHarness.getLog()
5117
5118
-print "--------------- JPython test script start ------------"
5119
+print("--------------- JPython test script start ------------")
5120
5121
glue()
5122
5123
# pe.exit()
5124
5125
-print "---------------- JPython test script end -------------"
5126
+print("---------------- JPython test script end -------------")
5127
--- a/tools/src/perfexplorer/etc/generalcharttest.py (original)
5128
--- b/tools/src/perfexplorer/etc/generalcharttest.py (refactored)
5129
5130
from edu.uoregon.tau.perfexplorer.common import TransformationType
5131
from edu.uoregon.tau.perfexplorer.common import AnalysisType
5132
5133
-print "--------------- JPython test script start ------------"
5134
+print("--------------- JPython test script start ------------")
5135
5136
pe = ScriptFacade()
5137
5138
5139
5140
# pe.exit()
5141
5142
-print "---------------- JPython test script end -------------"
5143
+print("---------------- JPython test script end -------------")
5144
--- a/tools/src/perfexplorer/etc/glue.py (original)
5145
--- b/tools/src/perfexplorer/etc/glue.py (refactored)
5146
5147
from edu.uoregon.tau.perfdmf import Trial
5148
5149
def glue():
5150
- print "doing glue test"
5151
+ print("doing glue test")
5152
Utilities.setSession("peri_gtc")
5153
trial = Utilities.getTrial("GTC", "ocracoke-O2", "64")
5154
trial2 = Utilities.getTrial("GTC", "ocracoke-O2", "128")
5155
5156
for thread in total.getThreads():
5157
for event in total.getEvents():
5158
for metric in total.getMetrics():
5159
- print thread , event , metric
5160
+ print(thread , event , metric)
5161
# print mean.getDataPoint(thread, event, metric, AbstractResult.EXCLUSIVE)
5162
5163
-print "--------------- JPython test script start ------------"
5164
+print("--------------- JPython test script start ------------")
5165
5166
glue()
5167
5168
# pe.exit()
5169
5170
-print "---------------- JPython test script end -------------"
5171
+print("---------------- JPython test script end -------------")
5172
--- a/tools/src/perfexplorer/etc/graphlongrun.py (original)
5173
--- b/tools/src/perfexplorer/etc/graphlongrun.py (refactored)
5174
5175
True = 1
5176
5177
def glue():
5178
- print "doing long run test"
5179
+ print("doing long run test")
5180
# load the trial
5181
Utilities.setSession("perfdmf_test")
5182
trial1 = Utilities.getTrial("gtc_bench", "jaguar.longrun", "64.first")
5183
5184
grapher.processData()
5185
5186
5187
-print "--------------- JPython test script start ------------"
5188
+print("--------------- JPython test script start ------------")
5189
5190
glue()
5191
5192
# pe.exit()
5193
5194
-print "---------------- JPython test script end -------------"
5195
+print("---------------- JPython test script end -------------")
5196
--- a/tools/src/perfexplorer/etc/graphphases.py (original)
5197
--- b/tools/src/perfexplorer/etc/graphphases.py (refactored)
5198
5199
True = 1
5200
5201
def glue():
5202
- print "doing long run test"
5203
+ print("doing long run test")
5204
# load the trial
5205
Utilities.setSession("perfdmf_test")
5206
trial = Utilities.getTrial("gtc_bench", "jaguar.longrun2", "64")
5207
5208
grapher.processData()
5209
5210
5211
-print "--------------- JPython test script start ------------"
5212
+print("--------------- JPython test script start ------------")
5213
5214
glue()
5215
5216
# pe.exit()
5217
5218
-print "---------------- JPython test script end -------------"
5219
+print("---------------- JPython test script end -------------")
5220
--- a/tools/src/perfexplorer/etc/gtc.py (original)
5221
--- b/tools/src/perfexplorer/etc/gtc.py (refactored)
5222
5223
pe.setChartHorizontal(1)
5224
pe.doGeneralChart()
5225
5226
-print "--------------- JPython test script start ------------"
5227
+print("--------------- JPython test script start ------------")
5228
5229
pe = ScriptFacade()
5230
Simple(pe)
5231
5232
5233
# pe.exit()
5234
5235
-print "---------------- JPython test script end -------------"
5236
+print("---------------- JPython test script end -------------")
5237
--- a/tools/src/perfexplorer/etc/heatmap.datasource.py (original)
5238
--- b/tools/src/perfexplorer/etc/heatmap.datasource.py (refactored)
5239
5240
True = 1
5241
False = 0
5242
5243
-print "--------------- JPython test script start ------------"
5244
+print("--------------- JPython test script start ------------")
5245
files = []
5246
#files.append("/home/khuck/tau2/examples/NPB2.3/bin")
5247
#input = DataSourceResult(DataSourceResult.TAUPROFILE, files, False);
5248
5249
start = time.clock()
5250
input = DataSourceResult(DataSourceResult.PPK, files, False);
5251
end = time.clock()
5252
-print "Loaded the data! Time:", end-start, "seconds"
5253
+print("Loaded the data! Time:", end-start, "seconds")
5254
messageHeatMap = BuildMessageHeatMap(input)
5255
messageHeatMap.processData()
5256
-print "---------------- JPython test script end -------------"
5257
+print("---------------- JPython test script end -------------")
5258
--- a/tools/src/perfexplorer/etc/heatmap.py (original)
5259
--- b/tools/src/perfexplorer/etc/heatmap.py (refactored)
5260
5261
True = 1
5262
False = 0
5263
5264
-print "--------------- JPython test script start ------------"
5265
+print("--------------- JPython test script start ------------")
5266
#Utilities.setSession("peris3d")
5267
#trial = Utilities.getTrial("s3d", "intrepid-c2h4-misc", "512_com")
5268
#trial = Utilities.getTrial("s3d", "intrepid-c2h4-misc", "8000_comm_callpath")
5269
5270
input = TrialResult(trial)
5271
messageHeatMap = BuildMessageHeatMap(input)
5272
messageHeatMap.processData()
5273
-print "---------------- JPython test script end -------------"
5274
+print("---------------- JPython test script end -------------")
5275
--- a/tools/src/perfexplorer/etc/hlrs_gtc.py (original)
5276
--- b/tools/src/perfexplorer/etc/hlrs_gtc.py (refactored)
5277
5278
False = 0
5279
5280
def load():
5281
- print "loading data..."
5282
+ print("loading data...")
5283
Utilities.setSession("perfdmf.demo")
5284
trial1 = Utilities.getTrial("gtc", "phases", "jaguar.64")
5285
result = TrialResult(trial1)
5286
- print "...done."
5287
+ print("...done.")
5288
return result
5289
5290
def first(input):
5291
# get the iteration inclusive totals
5292
5293
- print "searching for iteration events (no classpath)..."
5294
+ print("searching for iteration events (no classpath)...")
5295
events = ArrayList()
5296
for event in input.getEvents():
5297
#if event.find("Iteration") >= 0 and input.getEventGroupName(event).find("TAU_PHASE") < 0:
5298
if event.find("Iteration") >= 0 and event.find("=>") < 0:
5299
events.add(event)
5300
- print "...done."
5301
+ print("...done.")
5302
5303
- print "extracting phases..."
5304
+ print("extracting phases...")
5305
extractor = ExtractEventOperation(input, events)
5306
extracted = extractor.processData().get(0)
5307
- print "...done."
5308
+ print("...done.")
5309
5310
return extracted
5311
5312
5313
return input
5314
5315
def second(extracted):
5316
- print "deriving metrics..."
5317
+ print("deriving metrics...")
5318
extracted = deriveStat(extracted, "PAPI_L1_TCA", "PAPI_L1_TCM", DeriveMetricOperation.SUBTRACT)
5319
extracted = deriveStat(extracted, "(PAPI_L1_TCA-PAPI_L1_TCM)", "PAPI_L1_TCA", DeriveMetricOperation.DIVIDE)
5320
extracted = deriveStat(extracted, "PAPI_L1_TCM", "PAPI_L2_TCM", DeriveMetricOperation.SUBTRACT)
5321
extracted = deriveStat(extracted, "(PAPI_L1_TCM-PAPI_L2_TCM)", "PAPI_L1_TCM", DeriveMetricOperation.DIVIDE)
5322
extracted = deriveStat(extracted, "PAPI_FP_INS", "P_WALL_CLOCK_TIME", DeriveMetricOperation.DIVIDE)
5323
extracted = deriveStat(extracted, "PAPI_FP_INS", "PAPI_TOT_INS", DeriveMetricOperation.DIVIDE)
5324
- print "...done."
5325
+ print("...done.")
5326
5327
- print "getting stats..."
5328
+ print("getting stats...")
5329
dostats = BasicStatisticsOperation(extracted, False)
5330
stats = dostats.processData()
5331
- print "...done."
5332
+ print("...done.")
5333
5334
- print "drawing charts..."
5335
+ print("drawing charts...")
5336
for metric in stats.get(0).getMetrics():
5337
if metric != "(PAPI_L1_TCA-PAPI_L1_TCM)" and metric != "(PAPI_L1_TCM-PAPI_L2_TCM)" and metric != "PAPI_TOT_INS" and metric != "PAPI_L1_TCA" and metric != "PAPI_FP_INS":
5338
grapher = DrawMMMGraph(stats)
5339
5340
grapher.setYAxisLabel("Inclusive " + metric);
5341
# grapher.setLogYAxis(True)
5342
grapher.processData()
5343
- print "...done."
5344
+ print("...done.")
5345
5346
extracted = None
5347
stats = None
5348
5349
subsetevents.add("SHIFTI")
5350
5351
for subsetevent in subsetevents:
5352
- print "extracting callpath phases..."
5353
+ print("extracting callpath phases...")
5354
events = ArrayList()
5355
for event in input.getEvents():
5356
if event.find("Iteration") >= 0 and event.rfind(subsetevent) >= 0:
5357
5358
5359
extractor = ExtractEventOperation(input, events)
5360
extracted = extractor.processData().get(0)
5361
- print "...done."
5362
+ print("...done.")
5363
5364
# derive metrics
5365
5366
- print "deriving metrics..."
5367
+ print("deriving metrics...")
5368
extracted = deriveStat(extracted, "PAPI_L1_TCA", "PAPI_L1_TCM", DeriveMetricOperation.SUBTRACT)
5369
extracted = deriveStat(extracted, "(PAPI_L1_TCA-PAPI_L1_TCM)", "PAPI_L1_TCA", DeriveMetricOperation.DIVIDE)
5370
extracted = deriveStat(extracted, "PAPI_L1_TCM", "PAPI_L2_TCM", DeriveMetricOperation.SUBTRACT)
5371
extracted = deriveStat(extracted, "(PAPI_L1_TCM-PAPI_L2_TCM)", "PAPI_L1_TCM", DeriveMetricOperation.DIVIDE)
5372
extracted = deriveStat(extracted, "PAPI_FP_INS", "P_WALL_CLOCK_TIME", DeriveMetricOperation.DIVIDE)
5373
extracted = deriveStat(extracted, "PAPI_FP_INS", "PAPI_TOT_INS", DeriveMetricOperation.DIVIDE)
5374
- print "...done."
5375
+ print("...done.")
5376
5377
# get the Statistics
5378
- print "getting stats..."
5379
+ print("getting stats...")
5380
dostats = BasicStatisticsOperation(extracted, False)
5381
stats = dostats.processData()
5382
- print "...done."
5383
+ print("...done.")
5384
5385
- print "drawing charts..."
5386
+ print("drawing charts...")
5387
for metric in stats.get(0).getMetrics():
5388
if metric == "((PAPI_L1_TCA-PAPI_L1_TCM)/PAPI_L1_TCA)" or metric == "((PAPI_L1_TCM-PAPI_L2_TCM)/PAPI_L1_TCM)":
5389
grapher = DrawMMMGraph(stats)
5390
5391
grapher.setValueType(AbstractResult.INCLUSIVE)
5392
# grapher.setLogYAxis(True)
5393
grapher.processData()
5394
- print "...done."
5395
+ print("...done.")
5396
5397
extracted = None
5398
stats = None
5399
5400
5401
return
5402
5403
-print "--------------- JPython test script start ------------"
5404
+print("--------------- JPython test script start ------------")
5405
5406
loaded = load()
5407
extracted = first(loaded)
5408
5409
loaded = None
5410
System.gc()
5411
5412
-print "---------------- JPython test script end -------------"
5413
+print("---------------- JPython test script end -------------")
5414
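# Assumption, for clarity: hlrs_gtc.py calls a deriveStat() helper whose
# definition falls outside these hunks. The expanded derive-then-merge pattern
# spelled out in jaguar.gtc.phases.py further below suggests it wraps the
# PerfExplorer operations roughly like this (same imports as these scripts):
def deriveStat(extracted, first, second, op):
    derivor = DeriveMetricOperation(extracted, first, second, op)   # e.g. DeriveMetricOperation.DIVIDE
    derived = derivor.processData().get(0)
    merger = MergeTrialsOperation(extracted)
    merger.addInput(derived)
    return merger.processData().get(0)      # input metrics plus the newly derived one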
--- a/tools/src/perfexplorer/etc/jaguar.gtc.phases.chargei.py (original)
5415
--- b/tools/src/perfexplorer/etc/jaguar.gtc.phases.chargei.py (refactored)
5416
5417
False = 0
5418
5419
def glue():
5420
- print "doing phase test for gtc on jaguar"
5421
+ print("doing phase test for gtc on jaguar")
5422
5423
operations = Provenance.getCurrent().getOperations()
5424
result1 = operations.get(0).getInputs().get(0)
5425
for operation in operations:
5426
- print operation.getClass().getName()
5427
+ print(operation.getClass().getName())
5428
if operation.getClass().getName() == "glue.BasicStatisticsOperation":
5429
stats = operation.getOutputs()
5430
5431
5432
# subsetevents.add("PUSHI")
5433
# subsetevents.add("SHIFTI")
5434
5435
- print "got data..."
5436
+ print("got data...")
5437
5438
for subsetevent in subsetevents:
5439
events = ArrayList()
5440
5441
extractor = ExtractEventOperation(result1, events)
5442
extracted = extractor.processData().get(0)
5443
5444
- print "extracted phases..."
5445
+ print("extracted phases...")
5446
5447
# derive metrics
5448
5449
5450
merger.addInput(derived)
5451
extracted = merger.processData().get(0)
5452
5453
- print "derived metrics..."
5454
+ print("derived metrics...")
5455
5456
# get the Statistics
5457
dostats = BasicStatisticsOperation(extracted, False)
5458
stats = dostats.processData()
5459
5460
- print "got stats..."
5461
+ print("got stats...")
5462
5463
for metric in stats.get(0).getMetrics():
5464
if metric != "PAPI_L1_TCA-PAPI_L1_TCM" and metric != "PAPI_L1_TCM-PAPI_L2_TCM":
5465
5466
5467
return
5468
5469
-print "--------------- JPython test script start ------------"
5470
+print("--------------- JPython test script start ------------")
5471
5472
glue()
5473
5474
# pe.exit()
5475
5476
-print "---------------- JPython test script end -------------"
5477
+print("---------------- JPython test script end -------------")
5478
--- a/tools/src/perfexplorer/etc/jaguar.gtc.phases.first.py (original)
5479
--- b/tools/src/perfexplorer/etc/jaguar.gtc.phases.first.py (refactored)
5480
5481
False = 0
5482
5483
def glue():
5484
- print "doing phase test for gtc on jaguar"
5485
+ print("doing phase test for gtc on jaguar")
5486
# load the trial
5487
Utilities.setSession("perigtc")
5488
trial1 = Utilities.getTrial("GTC", "Jaguar Compiler Options", "fastsse")
5489
result1 = TrialResult(trial1)
5490
5491
- print "got the data"
5492
+ print("got the data")
5493
5494
# get the iteration inclusive totals
5495
5496
5497
extractor = ExtractEventOperation(result1, events)
5498
extracted = extractor.processData().get(0)
5499
5500
- print "extracted phases"
5501
+ print("extracted phases")
5502
5503
return
5504
5505
5506
merger.addInput(derived)
5507
extracted = merger.processData().get(0)
5508
5509
- print "derived metrics..."
5510
+ print("derived metrics...")
5511
5512
# get the Statistics
5513
dostats = BasicStatisticsOperation(extracted, False)
5514
stats = dostats.processData()
5515
5516
- print "got stats..."
5517
+ print("got stats...")
5518
5519
return
5520
5521
5522
subsetevents.add("PUSHI")
5523
subsetevents.add("SHIFTI")
5524
5525
- print "got data..."
5526
+ print("got data...")
5527
5528
for subsetevent in subsetevents:
5529
events = ArrayList()
5530
5531
extractor = ExtractEventOperation(result1, events)
5532
extracted = extractor.processData().get(0)
5533
5534
- print "extracted phases..."
5535
+ print("extracted phases...")
5536
5537
# get the Statistics
5538
dostats = BasicStatisticsOperation(extracted, False)
5539
stats = dostats.processData()
5540
5541
- print "got stats..."
5542
+ print("got stats...")
5543
5544
for metric in stats.get(0).getMetrics():
5545
grapher = DrawMMMGraph(stats)
5546
5547
5548
return
5549
5550
-print "--------------- JPython test script start ------------"
5551
+print("--------------- JPython test script start ------------")
5552
5553
glue()
5554
5555
# pe.exit()
5556
5557
-print "---------------- JPython test script end -------------"
5558
+print("---------------- JPython test script end -------------")
5559
--- a/tools/src/perfexplorer/etc/jaguar.gtc.phases.long.py (original)
5560
--- b/tools/src/perfexplorer/etc/jaguar.gtc.phases.long.py (refactored)
5561
5562
False = 0
5563
5564
def glue():
5565
- print "doing phase test for gtc on jaguar"
5566
+ print("doing phase test for gtc on jaguar")
5567
# load the trial
5568
Utilities.setSession("perfdmf_test")
5569
trial1 = Utilities.getTrial("gtc_bench", "jaguar.longrun.sampled.phases", "64")
5570
5571
extractor = ExtractEventOperation(result1, events)
5572
extracted = extractor.processData().get(0)
5573
5574
- print "extracted phases..."
5575
+ print("extracted phases...")
5576
5577
# get the Statistics
5578
dostats = BasicStatisticsOperation(extracted, False)
5579
stats = dostats.processData()
5580
5581
- print "got stats..."
5582
+ print("got stats...")
5583
5584
for metric in stats.get(0).getMetrics():
5585
grapher = DrawMMMGraph(stats)
5586
5587
subsetevents.add("PUSHI")
5588
subsetevents.add("SHIFTI")
5589
5590
- print "got data..."
5591
+ print("got data...")
5592
5593
for subsetevent in subsetevents:
5594
events = ArrayList()
5595
5596
extractor = ExtractEventOperation(result1, events)
5597
extracted = extractor.processData().get(0)
5598
5599
- print "extracted phases..."
5600
+ print("extracted phases...")
5601
5602
# get the Statistics
5603
dostats = BasicStatisticsOperation(extracted, False)
5604
stats = dostats.processData()
5605
5606
- print "got stats..."
5607
+ print("got stats...")
5608
5609
for metric in stats.get(0).getMetrics():
5610
grapher = DrawMMMGraph(stats)
5611
5612
5613
return
5614
5615
-print "--------------- JPython test script start ------------"
5616
+print("--------------- JPython test script start ------------")
5617
5618
glue()
5619
5620
# pe.exit()
5621
5622
-print "---------------- JPython test script end -------------"
5623
+print("---------------- JPython test script end -------------")
5624
--- a/tools/src/perfexplorer/etc/jaguar.gtc.phases.pushi.py (original)
5625
--- b/tools/src/perfexplorer/etc/jaguar.gtc.phases.pushi.py (refactored)
5626
5627
False = 0
5628
5629
def glue():
5630
- print "doing phase test for gtc on jaguar"
5631
+ print("doing phase test for gtc on jaguar")
5632
5633
operations = Provenance.getCurrent().getOperations()
5634
result1 = operations.get(0).getInputs().get(0)
5635
for operation in operations:
5636
- print operation.getClass().getName()
5637
+ print(operation.getClass().getName())
5638
if operation.getClass().getName() == "glue.BasicStatisticsOperation":
5639
stats = operation.getOutputs()
5640
5641
5642
subsetevents.add("PUSHI")
5643
# subsetevents.add("SHIFTI")
5644
5645
- print "got data..."
5646
+ print("got data...")
5647
5648
for subsetevent in subsetevents:
5649
events = ArrayList()
5650
5651
extractor = ExtractEventOperation(result1, events)
5652
extracted = extractor.processData().get(0)
5653
5654
- print "extracted phases..."
5655
+ print("extracted phases...")
5656
5657
# derive metrics
5658
5659
5660
merger.addInput(derived)
5661
extracted = merger.processData().get(0)
5662
5663
- print "derived metrics..."
5664
+ print("derived metrics...")
5665
5666
# get the Statistics
5667
dostats = BasicStatisticsOperation(extracted, False)
5668
stats = dostats.processData()
5669
5670
- print "got stats..."
5671
+ print("got stats...")
5672
5673
for metric in stats.get(0).getMetrics():
5674
if metric != "PAPI_L1_TCA-PAPI_L1_TCM" and metric != "PAPI_L1_TCM-PAPI_L2_TCM":
5675
5676
5677
return
5678
5679
-print "--------------- JPython test script start ------------"
5680
+print("--------------- JPython test script start ------------")
5681
5682
glue()
5683
5684
# pe.exit()
5685
5686
-print "---------------- JPython test script end -------------"
5687
+print("---------------- JPython test script end -------------")
5688
--- a/tools/src/perfexplorer/etc/jaguar.gtc.phases.py (original)
5689
--- b/tools/src/perfexplorer/etc/jaguar.gtc.phases.py (refactored)
5690
5691
False = 0
5692
5693
def glue():
5694
- print "doing phase test for gtc on jaguar"
5695
+ print("doing phase test for gtc on jaguar")
5696
# load the trial
5697
- print "loading the data..."
5698
+ print("loading the data...")
5699
Utilities.setSession("perigtc")
5700
trial1 = Utilities.getTrial("GTC", "Jaguar Compiler Options", "fastsse")
5701
result1 = TrialResult(trial1)
5702
5703
# get the iteration inclusive totals
5704
5705
- print "getting phases..."
5706
+ print("getting phases...")
5707
events = ArrayList()
5708
for event in result1.getEvents():
5709
#if event.find("Iteration") >= 0 and result1.getEventGroupName(event).find("TAU_PHASE") < 0:
5710
if event.find("Iteration") >= 0 and event.find("=>") < 0:
5711
events.add(event)
5712
5713
- print "extracting phases..."
5714
+ print("extracting phases...")
5715
extractor = ExtractEventOperation(result1, events)
5716
extracted = extractor.processData().get(0)
5717
5718
# derive metrics
5719
5720
- print "deriving metrics (1)..."
5721
+ print("deriving metrics (1)...")
5722
derivor = DeriveMetricOperation(extracted, "PAPI_L1_TCA", "PAPI_L1_TCM", DeriveMetricOperation.SUBTRACT)
5723
derived = derivor.processData().get(0)
5724
merger = MergeTrialsOperation(extracted)
5725
merger.addInput(derived)
5726
extracted = merger.processData().get(0)
5727
- print "deriving metrics (2)..."
5728
+ print("deriving metrics (2)...")
5729
derivor = DeriveMetricOperation(extracted, "(PAPI_L1_TCA-PAPI_L1_TCM)", "PAPI_L1_TCA", DeriveMetricOperation.DIVIDE)
5730
derived = derivor.processData().get(0)
5731
merger = MergeTrialsOperation(extracted)
5732
merger.addInput(derived)
5733
extracted = merger.processData().get(0)
5734
- print "deriving metrics (3)..."
5735
+ print("deriving metrics (3)...")
5736
derivor = DeriveMetricOperation(extracted, "PAPI_L1_TCM", "PAPI_L2_TCM", DeriveMetricOperation.SUBTRACT)
5737
derived = derivor.processData().get(0)
5738
merger = MergeTrialsOperation(extracted)
5739
merger.addInput(derived)
5740
extracted = merger.processData().get(0)
5741
- print "deriving metrics (4)..."
5742
+ print("deriving metrics (4)...")
5743
derivor = DeriveMetricOperation(extracted, "(PAPI_L1_TCM-PAPI_L2_TCM)", "PAPI_L1_TCM", DeriveMetricOperation.DIVIDE)
5744
derived = derivor.processData().get(0)
5745
merger = MergeTrialsOperation(extracted)
5746
merger.addInput(derived)
5747
extracted = merger.processData().get(0)
5748
5749
- print "doing stats..."
5750
+ print("doing stats...")
5751
5752
# get the Statistics
5753
dostats = BasicStatisticsOperation(extracted, False)
5754
stats = dostats.processData()
5755
5756
- print "drawing..."
5757
+ print("drawing...")
5758
5759
for metric in stats.get(0).getMetrics():
5760
grapher = DrawMMMGraph(stats)
5761
5762
subsetevents.add("PUSHI")
5763
subsetevents.add("SHIFTI")
5764
5765
- print "got data..."
5766
+ print("got data...")
5767
5768
for subsetevent in subsetevents:
5769
events = ArrayList()
5770
5771
extractor = ExtractEventOperation(result1, events)
5772
extracted = extractor.processData().get(0)
5773
5774
- print "extracted phases..."
5775
+ print("extracted phases...")
5776
5777
# get the Statistics
5778
dostats = BasicStatisticsOperation(extracted, False)
5779
stats = dostats.processData()
5780
5781
- print "got stats..."
5782
+ print("got stats...")
5783
5784
for metric in stats.get(0).getMetrics():
5785
grapher = DrawMMMGraph(stats)
5786
5787
5788
return
5789
5790
-print "--------------- JPython test script start ------------"
5791
+print("--------------- JPython test script start ------------")
5792
5793
glue()
5794
5795
# pe.exit()
5796
5797
-print "---------------- JPython test script end -------------"
5798
+print("---------------- JPython test script end -------------")
5799
--- a/tools/src/perfexplorer/etc/jaguar.gtc.phases.shifti.py (original)
5800
--- b/tools/src/perfexplorer/etc/jaguar.gtc.phases.shifti.py (refactored)
5801
5802
False = 0
5803
5804
def glue():
5805
- print "doing phase test for gtc on jaguar"
5806
+ print("doing phase test for gtc on jaguar")
5807
5808
operations = Provenance.getCurrent().getOperations()
5809
result1 = operations.get(0).getInputs().get(0)
5810
for operation in operations:
5811
- print operation.getClass().getName()
5812
+ print(operation.getClass().getName())
5813
if operation.getClass().getName() == "glue.BasicStatisticsOperation":
5814
stats = operation.getOutputs()
5815
5816
5817
# subsetevents.add("PUSHI")
5818
subsetevents.add("SHIFTI")
5819
5820
- print "got data..."
5821
+ print("got data...")
5822
5823
for subsetevent in subsetevents:
5824
events = ArrayList()
5825
5826
extractor = ExtractEventOperation(result1, events)
5827
extracted = extractor.processData().get(0)
5828
5829
- print "extracted phases..."
5830
+ print("extracted phases...")
5831
5832
# derive metrics
5833
5834
5835
merger.addInput(derived)
5836
extracted = merger.processData().get(0)
5837
5838
- print "derived metrics..."
5839
+ print("derived metrics...")
5840
5841
# get the Statistics
5842
dostats = BasicStatisticsOperation(extracted, False)
5843
stats = dostats.processData()
5844
5845
- print "got stats..."
5846
+ print("got stats...")
5847
5848
for metric in stats.get(0).getMetrics():
5849
if metric != "PAPI_L1_TCA-PAPI_L1_TCM" and metric != "PAPI_L1_TCM-PAPI_L2_TCM":
5850
5851
5852
return
5853
5854
-print "--------------- JPython test script start ------------"
5855
+print("--------------- JPython test script start ------------")
5856
5857
glue()
5858
5859
# pe.exit()
5860
5861
-print "---------------- JPython test script end -------------"
5862
+print("---------------- JPython test script end -------------")
5863
--- a/tools/src/perfexplorer/etc/jaguar.gtc.phases.spaceghost.first.py (original)
5864
--- b/tools/src/perfexplorer/etc/jaguar.gtc.phases.spaceghost.first.py (refactored)
5865
5866
False = 0
5867
5868
def glue():
5869
- print "doing phase test for gtc on jaguar"
5870
+ print("doing phase test for gtc on jaguar")
5871
# load the trial
5872
Utilities.setSession("perfdmf.demo")
5873
trial1 = Utilities.getTrial("gtc_bench", "Jaguar Compiler Options", "fastsse")
5874
result1 = TrialResult(trial1)
5875
5876
- print "got the data"
5877
+ print("got the data")
5878
5879
# get the iteration inclusive totals
5880
5881
5882
extractor = ExtractEventOperation(result1, events)
5883
extracted = extractor.processData().get(0)
5884
5885
- print "extracted phases"
5886
+ print("extracted phases")
5887
5888
# derive metrics
5889
5890
5891
merger.addInput(derived)
5892
extracted = merger.processData().get(0)
5893
5894
- print "derived metrics..."
5895
+ print("derived metrics...")
5896
5897
return
5898
5899
5900
dostats = BasicStatisticsOperation(extracted, False)
5901
stats = dostats.processData()
5902
5903
- print "got stats..."
5904
+ print("got stats...")
5905
5906
"""for metric in stats.get(0).getMetrics():
5907
grapher = DrawMMMGraph(stats)
5908
5909
subsetevents.add("PUSHI")
5910
subsetevents.add("SHIFTI")
5911
5912
- print "got data..."
5913
+ print("got data...")
5914
5915
for subsetevent in subsetevents:
5916
events = ArrayList()
5917
5918
extractor = ExtractEventOperation(result1, events)
5919
extracted = extractor.processData().get(0)
5920
5921
- print "extracted phases..."
5922
+ print("extracted phases...")
5923
5924
# get the Statistics
5925
dostats = BasicStatisticsOperation(extracted, False)
5926
stats = dostats.processData()
5927
5928
- print "got stats..."
5929
+ print("got stats...")
5930
5931
for metric in stats.get(0).getMetrics():
5932
grapher = DrawMMMGraph(stats)
5933
5934
5935
return
5936
5937
-print "--------------- JPython test script start ------------"
5938
+print("--------------- JPython test script start ------------")
5939
5940
glue()
5941
5942
# pe.exit()
5943
5944
-print "---------------- JPython test script end -------------"
5945
+print("---------------- JPython test script end -------------")
5946
--- a/tools/src/perfexplorer/etc/jaguar.gtc.phases.third.py (original)
5947
--- b/tools/src/perfexplorer/etc/jaguar.gtc.phases.third.py (refactored)
5948
5949
False = 0
5950
5951
def glue():
5952
- print "doing phase test for gtc on jaguar"
5953
+ print("doing phase test for gtc on jaguar")
5954
5955
operations = Provenance.getCurrent().getOperations()
5956
result1 = operations.get(0).getInputs().get(0)
5957
for operation in operations:
5958
- print operation.getClass().getName()
5959
+ print(operation.getClass().getName())
5960
if operation.getClass().getName() == "glue.BasicStatisticsOperation":
5961
stats = operation.getOutputs()
5962
5963
5964
subsetevents.add("PUSHI")
5965
subsetevents.add("SHIFTI")
5966
5967
- print "got data..."
5968
+ print("got data...")
5969
5970
for subsetevent in subsetevents:
5971
events = ArrayList()
5972
5973
extractor = ExtractEventOperation(result1, events)
5974
extracted = extractor.processData().get(0)
5975
5976
- print "extracted phases..."
5977
+ print("extracted phases...")
5978
5979
# derive metrics
5980
5981
5982
merger.addInput(derived)
5983
extracted = merger.processData().get(0)
5984
5985
- print "derived metrics..."
5986
+ print("derived metrics...")
5987
5988
# get the Statistics
5989
dostats = BasicStatisticsOperation(extracted, False)
5990
stats = dostats.processData()
5991
5992
- print "got stats..."
5993
+ print("got stats...")
5994
5995
for metric in stats.get(0).getMetrics():
5996
grapher = DrawMMMGraph(stats)
5997
5998
5999
return
6000
6001
-print "--------------- JPython test script start ------------"
6002
+print("--------------- JPython test script start ------------")
6003
6004
glue()
6005
6006
# pe.exit()
6007
6008
-print "---------------- JPython test script end -------------"
6009
+print("---------------- JPython test script end -------------")
6010
--- a/tools/src/perfexplorer/etc/longrun.py (original)
6011
--- b/tools/src/perfexplorer/etc/longrun.py (refactored)
6012
6013
True = 1
6014
6015
def glue():
6016
- print "doing long run test"
6017
+ print("doing long run test")
6018
# load the trial
6019
Utilities.setSession("perfdmf_test")
6020
trial1 = Utilities.getTrial("gtc_bench", "jaguar.longrun", "64.first")
6021
6022
for metric in reduced.getMetrics():
6023
for thread in reduced.getThreads():
6024
if event.find("measurement") >= 0:
6025
- print metric, thread, reduced.getInclusive(thread, event, metric)
6026
+ print(metric, thread, reduced.getInclusive(thread, event, metric))
6027
6028
# do the correlation
6029
correlation = CorrelationOperation(reduced)
6030
6031
grapher.processData()
6032
6033
6034
-print "--------------- JPython test script start ------------"
6035
+print("--------------- JPython test script start ------------")
6036
6037
glue()
6038
6039
# pe.exit()
6040
6041
-print "---------------- JPython test script end -------------"
6042
+print("---------------- JPython test script end -------------")
6043
--- a/tools/src/perfexplorer/etc/lu-interactive.py (original)
6044
--- b/tools/src/perfexplorer/etc/lu-interactive.py (refactored)
6045
6046
returnEvents = {}
6047
events = pe.getEventList(trial, metricIndex)
6048
while events.hasNext():
6049
- event = events.next()
6050
+ event = next(events)
6051
if (groupName == "") or \
6052
(contains == True and event.getGroup().find(groupName) > -1) or \
6053
(contains == False and event.getGroup().find(groupName) < 0):
6054
6055
def findMain(events, metric):
6056
inclusive = 0.0
6057
main = {}
6058
- for key in events.keys():
6059
+ for key in list(events.keys()):
6060
data = events[key]
6061
if data.getInclusive(metric) > inclusive:
6062
inclusive = data.getInclusive(metric)
6063
6064
6065
def sort_by_value(d):
6066
""" Returns the keys of dictionary d sorted by their values """
6067
- items=d.items()
6068
+ items=list(d.items())
6069
backitems=[ [v[1],v[0]] for v in items]
6070
backitems.sort()
6071
return [ backitems[i][1] for i in range(0,len(backitems))]
6072
6073
def pairwiseEvent(baseEvents, otherEvents, i, j, filter):
6074
faster = {}
6075
slower = {}
6076
- for event in baseEvents.keys():
6077
+ for event in list(baseEvents.keys()):
6078
# print event
6079
baseValues = baseEvents[event]
6080
otherValues = otherEvents[event]
6081
6082
def pairwiseEventDerived(baseEvents, otherEvents, i, j, m, n, filter):
6083
faster = {}
6084
slower = {}
6085
- for event in baseEvents.keys():
6086
+ for event in list(baseEvents.keys()):
6087
if filter[event] == 1:
6088
baseValues = baseEvents[event]
6089
otherValues = otherEvents[event]
6090
6091
def mainReport(baseMain, otherMain, baseName, otherName):
6092
if baseMain["inclusive"] > otherMain["inclusive"]:
6093
tmp = "\nSelected trial (" + otherName + ") is relatively faster than baseline trial (" + baseName + ").\n"
6094
- print tmp
6095
+ print(tmp)
6096
percentage = (baseMain["inclusive"] - otherMain["inclusive"]) / otherMain["inclusive"]
6097
fasterSlower = -1
6098
elif baseMain["inclusive"] < otherMain["inclusive"]:
6099
tmp = "\nSelected trial (" + otherName + ") is relatively slower than baseline trial (" + baseName + ").\n"
6100
- print tmp
6101
+ print(tmp)
6102
percentage = (otherMain["inclusive"] - baseMain["inclusive"]) / baseMain["inclusive"]
6103
fasterSlower = 1
6104
else:
6105
- print "\nBaseline trial and second trial have the same execution time."
6106
+ print("\nBaseline trial and second trial have the same execution time.")
6107
fasterSlower = 0
6108
percentage = 0.0
6109
# print "\t", baseName, baseMain["name"], ":", baseMain["inclusive"], "seconds\n",
6110
# print "\t", otherName, otherMain["name"], ":", otherMain["inclusive"], "seconds\n",
6111
- print "\t", baseName, ":", baseMain["inclusive"]/million, "seconds\n",
6112
- print "\t", otherName, ":", otherMain["inclusive"]/million, "seconds\n",
6113
+ print("\t", baseName, ":", baseMain["inclusive"]/million, "seconds\n", end=' ')
6114
+ print("\t", otherName, ":", otherMain["inclusive"]/million, "seconds\n", end=' ')
6115
if fasterSlower > 0:
6116
- print "\t Relative Difference: ", percentage*100, "% slower\n"
6117
+ print("\t Relative Difference: ", percentage*100, "% slower\n")
6118
elif fasterSlower < 0:
6119
- print "\t Relative Difference: ", percentage*100, "% faster\n"
6120
- else:
6121
- print "\t Relative Difference: ", percentage*100, "%\n"
6122
+ print("\t Relative Difference: ", percentage*100, "% faster\n")
6123
+ else:
6124
+ print("\t Relative Difference: ", percentage*100, "%\n")
6125
return fasterSlower
6126
6127
def showSignificantTimeEvents(diffs, type, totalRuntime, significant, baseEvents, x):
6128
6129
percent = ( events[key]/baseEvents[key].getExclusive(x) ) * 100.0
6130
else:
6131
percent = 0.0
6132
- print "\t", key, ":", events[key]/million, "seconds", type.upper(), "than baseline (", percent, "% )"
6133
+ print("\t", key, ":", events[key]/million, "seconds", type.upper(), "than baseline (", percent, "% )")
6134
6135
significant[key] = 1
6136
shown += 1
6137
6138
percent = ( events[key]/baseEvents[key].getExclusive(x) ) * 100.0
6139
else:
6140
percent = 0.0
6141
- print "\t", key, ":", events[key]/million, "million", type, "than baseline (", percent, "% )"
6142
+ print("\t", key, ":", events[key]/million, "million", type, "than baseline (", percent, "% )")
6143
shown += 1
6144
return shown
6145
6146
6147
percent = ( events[key]/(baseGFLOP/baseSeconds) ) * 100.0
6148
else:
6149
percent = 0.0
6150
- print "\t", key, ":", events[key], type, "than baseline (", percent, "% )"
6151
+ print("\t", key, ":", events[key], type, "than baseline (", percent, "% )")
6152
shown += 1
6153
return shown
6154
6155
6156
if baseMain["name"] == otherMain["name"]:
6157
fasterSlower = mainReport(baseMain, otherMain, baseTrial.getName(), otherTrial.getName())
6158
else:
6159
- print "Main events do not match: ", baseMain["name"], ", ", otherMain["name"]
6160
+ print("Main events do not match: ", baseMain["name"], ", ", otherMain["name"])
6161
return
6162
6163
# compare the events for metric i
6164
6165
6166
# tell the user the significant differences
6167
significant = {}
6168
- print "Significant", baseMetrics[baseTime], "differences between trials:\n"
6169
+ print("Significant", baseMetrics[baseTime], "differences between trials:\n")
6170
shown = showSignificantTimeEvents(diffs, "faster", baseMain["inclusive"], significant, baseEvents, baseTime)
6171
if shown > 0:
6172
- print ""
6173
+ print("")
6174
shown += showSignificantTimeEvents(diffs, "slower", baseMain["inclusive"], significant, baseEvents, baseTime)
6175
if shown == 0:
6176
- print "\t None.\n"
6177
+ print("\t None.\n")
6178
6179
# significant is now populated with the names of events that are significant.
6180
# iterate through the metrics to locate possible causes for the time difference.
6181
6182
pass
6183
else:
6184
diffs = pairwiseEvent(baseEvents, otherEvents, x, y, significant)
6185
- print "\nSignificant", baseMetrics[x], "differences between trials:\n"
6186
+ print("\nSignificant", baseMetrics[x], "differences between trials:\n")
6187
shown = showSignificantEvents(diffs, "faster", significant, baseEvents, x)
6188
if shown > 0:
6189
- print ""
6190
+ print("")
6191
shown += showSignificantEvents(diffs, "slower", significant, baseEvents, x)
6192
if shown == 0:
6193
- print "\t None.\n"
6194
+ print("\t None.\n")
6195
if x == baseFlops:
6196
# also do GFLOP/Second per processor
6197
diffs = pairwiseEventDerived(baseEvents, otherEvents, x, y, baseTime, otherTime, significant)
6198
- print "\nSignificant GFLOP/sec per processor differences between trials:\n"
6199
+ print("\nSignificant GFLOP/sec per processor differences between trials:\n")
6200
shown = showSignificantEventsDerived(diffs, "faster", significant, baseEvents, x, baseTime)
6201
if shown > 0:
6202
- print ""
6203
+ print("")
6204
shown += showSignificantEventsDerived(diffs, "slower", significant, baseEvents, x, baseTime)
6205
if shown == 0:
6206
- print "\t None.\n"
6207
+ print("\t None.\n")
6208
x += 1
6209
6210
-print "--------------- JPython test script start ------------"
6211
+print("--------------- JPython test script start ------------")
6212
6213
pe = ScriptFacade()
6214
DoAnalysis(pe)
6215
6216
-print "\n"
6217
-print "---------------- JPython test script end -------------"
6218
+print("\n")
6219
+print("---------------- JPython test script end -------------")
6220
6221
# pe.exit()
6222
6223
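# Illustrative sketch of the remaining 2to3 rewrites seen in lu-interactive.py,
# shown on a plain dict and iterator (the script itself walks Java objects
# through the PerfExplorer API):
d = {"MPI_Recv()": 4.2, "MPI_Send()": 1.3}
for key in list(d.keys()):                  # dict views get wrapped in list()
    pass
items = list(d.items())                     # likewise for .items()
it = iter(d)
event = next(it)                            # obj.next() becomes the next() builtin
print("\t", event, ":", d[event], end=' ')  # old trailing-comma print -> end=' '
print()                                     # finish the line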
--- a/tools/src/perfexplorer/etc/lu.py (original)
6224
--- b/tools/src/perfexplorer/etc/lu.py (refactored)
6225
6226
returnEvents = {}
6227
events = pe.getEventList(trial, metricIndex)
6228
while events.hasNext():
6229
- event = events.next()
6230
+ event = next(events)
6231
# print event.getName()
6232
returnEvents[event.getName()] = event.getMeanSummary()
6233
return returnEvents
6234
6235
def findMain(events, metric):
6236
inclusive = 0.0
6237
main = {}
6238
- for key in events.keys():
6239
+ for key in list(events.keys()):
6240
data = events[key]
6241
if data.getInclusive(metric) > inclusive:
6242
inclusive = data.getInclusive(metric)
6243
6244
6245
def sort_by_value(d):
6246
""" Returns the keys of dictionary d sorted by their values """
6247
- items=d.items()
6248
+ items=list(d.items())
6249
backitems=[ [v[1],v[0]] for v in items]
6250
backitems.sort()
6251
return [ backitems[i][1] for i in range(0,len(backitems))]
6252
6253
def pairwiseEvent(baseEvents, otherEvents, i, j, filter):
6254
faster = {}
6255
slower = {}
6256
- for event in baseEvents.keys():
6257
+ for event in list(baseEvents.keys()):
6258
# print event
6259
baseValues = baseEvents[event]
6260
otherValues = otherEvents[event]
6261
6262
def mainReport(baseMain, otherMain, baseName, otherName):
6263
if baseMain["inclusive"] > otherMain["inclusive"]:
6264
tmp = "\nSelected trial (" + otherName + ") is relatively faster than baseline trial (" + baseName + ").\n"
6265
- print tmp
6266
+ print(tmp)
6267
percentage = (baseMain["inclusive"] - otherMain["inclusive"]) / otherMain["inclusive"]
6268
fasterSlower = -1
6269
elif baseMain["inclusive"] < otherMain["inclusive"]:
6270
tmp = "\nSelected trial (" + otherName + ") is relatively slower than baseline trial (" + baseName + ").\n"
6271
- print tmp
6272
+ print(tmp)
6273
percentage = (otherMain["inclusive"] - baseMain["inclusive"]) / baseMain["inclusive"]
6274
fasterSlower = 1
6275
else:
6276
- print "\nBaseline trial and second trial have the same execution time."
6277
+ print("\nBaseline trial and second trial have the same execution time.")
6278
fasterSlower = 0
6279
percentage = 0.0
6280
# print "\t", baseName, baseMain["name"], ":", baseMain["inclusive"], "seconds\n",
6281
# print "\t", otherName, otherMain["name"], ":", otherMain["inclusive"], "seconds\n",
6282
- print "\t", baseName, ":", baseMain["inclusive"]/million, "seconds\n",
6283
- print "\t", otherName, ":", otherMain["inclusive"]/million, "seconds\n",
6284
+ print("\t", baseName, ":", baseMain["inclusive"]/million, "seconds\n", end=' ')
6285
+ print("\t", otherName, ":", otherMain["inclusive"]/million, "seconds\n", end=' ')
6286
if fasterSlower > 0:
6287
- print "\t Relative Difference: ", percentage*100, "% slower\n"
6288
+ print("\t Relative Difference: ", percentage*100, "% slower\n")
6289
elif fasterSlower < 0:
6290
- print "\t Relative Difference: ", percentage*100, "% faster\n"
6291
- else:
6292
- print "\t Relative Difference: ", percentage*100, "%\n"
6293
+ print("\t Relative Difference: ", percentage*100, "% faster\n")
6294
+ else:
6295
+ print("\t Relative Difference: ", percentage*100, "%\n")
6296
return fasterSlower
6297
6298
def showSignificantTimeEvents(diffs, type, totalRuntime, significant, baseEvents, x):
6299
6300
percent = ( events[key]/baseEvents[key].getExclusive(x) ) * 100.0
6301
else:
6302
percent = 0.0
6303
- print "\t", key, ":", events[key]/million, "seconds", type.upper(), "than baseline (", percent, "% )"
6304
+ print("\t", key, ":", events[key]/million, "seconds", type.upper(), "than baseline (", percent, "% )")
6305
6306
significant[key] = 1
6307
shown += 1
6308
6309
percent = ( events[key]/baseEvents[key].getExclusive(x) ) * 100.0
6310
else:
6311
percent = 0.0
6312
- print "\t", key, ":", events[key]/million, "million", type, "than baseline (", percent, "% )"
6313
+ print("\t", key, ":", events[key]/million, "million", type, "than baseline (", percent, "% )")
6314
shown += 1
6315
return shown
6316
6317
6318
baseEvents = getEvents(pe, baseTrial, baseTime)
6319
6320
# find the main event
6321
- print baseTime
6322
+ print(baseTime)
6323
baseMain = findMain(baseEvents, baseTime)
6324
# print baseMain
6325
6326
6327
# find the time metric
6328
metricMap = mapMetrics(baseMetrics, otherMetrics)
6329
otherTime = metricMap[baseTime]
6330
- print otherTime
6331
+ print(otherTime)
6332
6333
# get all the data for each event
6334
otherEvents = getEvents(pe, otherTrial, otherTime)
6335
6336
if baseMain["name"] == otherMain["name"]:
6337
fasterSlower = mainReport(baseMain, otherMain, baseTrial.getName(), otherTrial.getName())
6338
else:
6339
- print "Main events do not match: ", baseMain["name"], ", ", otherMain["name"]
6340
+ print("Main events do not match: ", baseMain["name"], ", ", otherMain["name"])
6341
return
6342
6343
# compare the events for metric i
6344
6345
6346
# tell the user the significant differences
6347
significant = {}
6348
- print "Significant", baseMetrics[baseTime], "differences between trials:\n"
6349
+ print("Significant", baseMetrics[baseTime], "differences between trials:\n")
6350
shown = showSignificantTimeEvents(diffs, "faster", baseMain["inclusive"], significant, baseEvents, baseTime)
6351
if shown > 0:
6352
- print ""
6353
+ print("")
6354
shown += showSignificantTimeEvents(diffs, "slower", baseMain["inclusive"], significant, baseEvents, baseTime)
6355
if shown == 0:
6356
- print "\t None.\n"
6357
+ print("\t None.\n")
6358
6359
# significant is now populated with the names of events that are significant.
6360
# iterate through the metrics to locate possible causes for the time difference.
6361
6362
pass
6363
else:
6364
diffs = pairwiseEvent(baseEvents, otherEvents, x, y, significant)
6365
- print "\nSignificant", baseMetrics[x], "differences between trials:\n"
6366
+ print("\nSignificant", baseMetrics[x], "differences between trials:\n")
6367
shown = showSignificantEvents(diffs, "faster", significant, baseEvents, x)
6368
if shown > 0:
6369
- print ""
6370
+ print("")
6371
shown += showSignificantEvents(diffs, "slower", significant, baseEvents, x)
6372
if shown == 0:
6373
- print "\t None.\n"
6374
+ print("\t None.\n")
6375
x += 1
6376
6377
-print "--------------- JPython test script start ------------"
6378
+print("--------------- JPython test script start ------------")
6379
6380
pe = ScriptFacade()
6381
DoAnalysis(pe)
6382
6383
-print "\n"
6384
-print "---------------- JPython test script end -------------"
6385
+print("\n")
6386
+print("---------------- JPython test script end -------------")
6387
6388
# pe.exit()
6389
6390
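
Note on the hunks above: a Python 2 print statement ending in a comma suppressed the newline, and 2to3 reproduces that behaviour with print(..., end=' '), which keeps the output on one line but still emits a trailing space. A minimal sketch with made-up values in place of the script's trial names and scaled times:

    name, secs = "baseline", 12.5     # hypothetical stand-ins for baseName and inclusive/million
    # Python 2 wrote:  print "\t", name, ":", secs, "seconds\n",
    print("\t", name, ":", secs, "seconds\n", end=' ')   # 2to3 equivalent: no extra newline, but a trailing space
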
--- a/tools/src/perfexplorer/etc/milc.py (original)
6391
--- b/tools/src/perfexplorer/etc/milc.py (refactored)
6392
6393
pe.doGeneralChart()
6394
6395
6396
-print "--------------- JPython test script start ------------"
6397
+print("--------------- JPython test script start ------------")
6398
6399
pe = ScriptFacade()
6400
TotalExecutionTime(pe)
6401
6402
6403
# pe.exit()
6404
6405
-print "---------------- JPython test script end -------------"
6406
+print("---------------- JPython test script end -------------")
6407
--- a/tools/src/perfexplorer/etc/mm2.py (original)
6408
--- b/tools/src/perfexplorer/etc/mm2.py (refactored)
6409
6410
pe.setChartEventNoCallPath(1)
6411
pe.doGeneralChart()
6412
6413
-print "--------------- JPython test script start ------------"
6414
+print("--------------- JPython test script start ------------")
6415
6416
pe = ScriptFacade()
6417
TotalExecutionTime(pe)
6418
6419
6420
# pe.exit()
6421
6422
-print "---------------- JPython test script end -------------"
6423
+print("---------------- JPython test script end -------------")
6424
--- a/tools/src/perfexplorer/etc/optimization.py (original)
6425
--- b/tools/src/perfexplorer/etc/optimization.py (refactored)
6426
6427
inTrial = ""
6428
6429
def load():
6430
- print "loading data..."
6431
+ print("loading data...")
6432
parameterMap = PerfExplorerModel.getModel().getScriptParameters()
6433
keys = parameterMap.keySet()
6434
for key in keys:
6435
- print key, parameterMap.get(key)
6436
+ print(key, parameterMap.get(key))
6437
config = parameterMap.get("config")
6438
inApp = parameterMap.get("app")
6439
inExp = parameterMap.get("exp")
6440
Utilities.setSession(config)
6441
trials = Utilities.getTrialsForExperiment(inApp, inExp)
6442
- print "...done."
6443
+ print("...done.")
6444
return trials
6445
6446
def extractMain(inputs):
6447
events = ArrayList()
6448
events.add(inputs.get(0).getMainEvent())
6449
6450
- print "extracting main event..."
6451
+ print("extracting main event...")
6452
extractor = ExtractEventOperation(inputs, events)
6453
extracted = extractor.processData()
6454
- print "...done."
6455
+ print("...done.")
6456
6457
return extracted
6458
6459
def getTop3(inputs):
6460
- print "extracting top 3 events..."
6461
+ print("extracting top 3 events...")
6462
reducer = TopXEvents(inputs, "Time", AbstractResult.EXCLUSIVE, 3)
6463
reduced = reducer.processData()
6464
return reduced
6465
6466
def drawGraph(results, inclusive):
6467
- print "drawing charts..."
6468
+ print("drawing charts...")
6469
for metric in results.get(0).getMetrics():
6470
grapher = DrawGraph(results)
6471
metrics = HashSet()
6472
6473
grapher.setYAxisLabel("Exclusive " + metric + " (seconds)")
6474
6475
grapher.processData()
6476
- print "...done."
6477
+ print("...done.")
6478
6479
return
6480
6481
-print "--------------- JPython test script start ------------"
6482
+print("--------------- JPython test script start ------------")
6483
6484
trials = load()
6485
results = ArrayList()
6486
6487
extracted = getTop3(results)
6488
drawGraph(extracted, False)
6489
6490
-print "---------------- JPython test script end -------------"
6491
+print("---------------- JPython test script end -------------")
6492
--- a/tools/src/perfexplorer/etc/params.py (original)
6493
--- b/tools/src/perfexplorer/etc/params.py (refactored)
6494
6495
inTrial = "lu.A.4"
6496
6497
def load():
6498
- print "loading data..."
6499
+ print("loading data...")
6500
Utilities.setSession(config)
6501
trial1 = Utilities.getTrial(inApp, inExp, inTrial)
6502
result = TrialResult(trial1)
6503
- print "...done."
6504
+ print("...done.")
6505
return result
6506
6507
def first(input):
6508
# get the iteration inclusive totals
6509
6510
- print "searching for iteration events (no classpath)..."
6511
+ print("searching for iteration events (no classpath)...")
6512
events = ArrayList()
6513
for event in input.getEvents():
6514
#if event.find("Iteration") >= 0 and input.getEventGroupName(event).find("TAU_PHASE") < 0:
6515
if event.find("MPI_Send") >= 0 and event.find("message size") >= 0:
6516
events.add(event)
6517
- print "...done."
6518
+ print("...done.")
6519
6520
- print "extracting phases..."
6521
+ print("extracting phases...")
6522
extractor = ExtractEventOperation(input, events)
6523
extracted = extractor.processData().get(0)
6524
- print "...done."
6525
+ print("...done.")
6526
6527
return extracted
6528
6529
def second(extracted):
6530
- print "getting stats..."
6531
+ print("getting stats...")
6532
dostats = BasicStatisticsOperation(extracted, False)
6533
stats = dostats.processData()
6534
- print "...done."
6535
+ print("...done.")
6536
6537
- print "drawing charts..."
6538
+ print("drawing charts...")
6539
for metric in stats.get(0).getMetrics():
6540
grapher = DrawMMMGraph(stats)
6541
metrics = HashSet()
6542
6543
grapher.setYAxisLabel("Exclusive " + metric);
6544
# grapher.setLogYAxis(True)
6545
grapher.processData()
6546
- print "...done."
6547
+ print("...done.")
6548
6549
return
6550
6551
-print "--------------- JPython test script start ------------"
6552
+print("--------------- JPython test script start ------------")
6553
6554
loaded = load()
6555
extracted = first(loaded)
6556
second(extracted)
6557
6558
-print "---------------- JPython test script end -------------"
6559
+print("---------------- JPython test script end -------------")
6560
--- a/tools/src/perfexplorer/etc/phases.py (original)
6561
--- b/tools/src/perfexplorer/etc/phases.py (refactored)
6562
6563
inTrial = "lu.A.4"
6564
6565
def load():
6566
- print "loading data..."
6567
+ print("loading data...")
6568
Utilities.setSession(config)
6569
trial1 = Utilities.getTrial(inApp, inExp, inTrial)
6570
result = TrialResult(trial1)
6571
- print "...done."
6572
+ print("...done.")
6573
return result
6574
6575
def first(input):
6576
# get the iteration inclusive totals
6577
6578
- print "searching for iteration events (no classpath)..."
6579
+ print("searching for iteration events (no classpath)...")
6580
events = ArrayList()
6581
for event in input.getEvents():
6582
if event.find("Iteration") >= 0 and event.find("=>") < 0 and event.find("**") < 0:
6583
events.add(event)
6584
- print "...done."
6585
+ print("...done.")
6586
6587
- print "extracting phases..."
6588
+ print("extracting phases...")
6589
extractor = ExtractEventOperation(input, events)
6590
extracted = extractor.processData().get(0)
6591
- print "...done."
6592
+ print("...done.")
6593
6594
return extracted
6595
6596
def second(extracted):
6597
- print "getting stats..."
6598
+ print("getting stats...")
6599
dostats = BasicStatisticsOperation(extracted, False)
6600
stats = dostats.processData()
6601
- print "...done."
6602
+ print("...done.")
6603
6604
- print "drawing charts..."
6605
+ print("drawing charts...")
6606
for metric in stats.get(0).getMetrics():
6607
grapher = DrawMMMGraph(stats)
6608
metrics = HashSet()
6609
6610
grapher.setYAxisLabel("Inclusive " + metric);
6611
# grapher.setLogYAxis(True)
6612
grapher.processData()
6613
- print "...done."
6614
+ print("...done.")
6615
6616
return
6617
6618
-print "--------------- JPython test script start ------------"
6619
+print("--------------- JPython test script start ------------")
6620
6621
loaded = load()
6622
extracted = first(loaded)
6623
second(extracted)
6624
6625
-print "---------------- JPython test script end -------------"
6626
+print("---------------- JPython test script end -------------")
6627
--- a/tools/src/perfexplorer/etc/pptest.py (original)
6628
--- b/tools/src/perfexplorer/etc/pptest.py (refactored)
6629
6630
from edu.uoregon.tau.perfexplorer.common import TransformationType
6631
from edu.uoregon.tau.perfexplorer.common import AnalysisType
6632
6633
-print "--------------- JPython test script start ------------"
6634
+print("--------------- JPython test script start ------------")
6635
6636
pe = ScriptFacade("/Users/khuck/.ParaProf/perfdmf.cfg")
6637
pe.doSomething()
6638
6639
6640
pe.exit()
6641
6642
-print "---------------- JPython test script end -------------"
6643
+print("---------------- JPython test script end -------------")
6644
--- a/tools/src/perfexplorer/etc/regressionTest.py (original)
6645
--- b/tools/src/perfexplorer/etc/regressionTest.py (refactored)
6646
6647
inTrial = ""
6648
6649
def load():
6650
- print "loading data..."
6651
+ print("loading data...")
6652
parameterMap = PerfExplorerModel.getModel().getScriptParameters()
6653
keys = parameterMap.keySet()
6654
for key in keys:
6655
- print key, parameterMap.get(key)
6656
+ print(key, parameterMap.get(key))
6657
config = parameterMap.get("config")
6658
# inApp = parameterMap.get("app")
6659
# inExp = parameterMap.get("exp")
6660
Utilities.setSession(config)
6661
trials = Utilities.getTrialsForExperiment(inApp, inExp)
6662
- print "...done."
6663
+ print("...done.")
6664
return trials
6665
6666
def extractMain(inputs):
6667
events = ArrayList()
6668
events.add(inputs.get(0).getMainEvent())
6669
6670
- print "extracting main event..."
6671
+ print("extracting main event...")
6672
extractor = ExtractEventOperation(inputs, events)
6673
extracted = extractor.processData()
6674
- print "...done."
6675
+ print("...done.")
6676
6677
return extracted
6678
6679
def getTop1(inputs):
6680
- print "extracting top events..."
6681
+ print("extracting top events...")
6682
reducer = TopXEvents(inputs, "Time", AbstractResult.EXCLUSIVE, 5)
6683
# reducer = TopXPercentEvents(inputs, "Time", AbstractResult.EXCLUSIVE, 5.0)
6684
reduced = reducer.processData()
6685
6686
return reduced
6687
6688
def drawGraph(results, inclusive):
6689
- print "drawing charts..."
6690
+ print("drawing charts...")
6691
for metric in results.get(0).getMetrics():
6692
grapher = DrawGraph(results)
6693
metrics = HashSet()
6694
6695
grapher.drawChartToFile(outFile1)
6696
else:
6697
grapher.drawChartToFile(outFile2)
6698
- print "...done."
6699
+ print("...done.")
6700
6701
return
6702
6703
-print "--------------- JPython test script start ------------"
6704
+print("--------------- JPython test script start ------------")
6705
6706
trials = load()
6707
results = ArrayList()
6708
6709
pe.exit()
6710
6711
6712
-print "---------------- JPython test script end -------------"
6713
+print("---------------- JPython test script end -------------")
6714
pe = ScriptFacade()
6715
pe.exit()
6716
--- a/tools/src/perfexplorer/etc/rules.py (original)
6717
--- b/tools/src/perfexplorer/etc/rules.py (refactored)
6718
6719
pe.runComparisonRules(baseline, comparison)
6720
6721
6722
-print "--------------- JPython test script start ------------"
6723
+print("--------------- JPython test script start ------------")
6724
6725
pe = ScriptFacade()
6726
gyro(pe)
6727
6728
pe.exit()
6729
6730
-print "---------------- JPython test script end -------------"
6731
+print("---------------- JPython test script end -------------")
6732
--- a/tools/src/perfexplorer/etc/scalability.py (original)
6733
--- b/tools/src/perfexplorer/etc/scalability.py (refactored)
6734
6735
inTrial = ""
6736
6737
def load():
6738
- print "loading data..."
6739
+ print("loading data...")
6740
parameterMap = PerfExplorerModel.getModel().getScriptParameters()
6741
keys = parameterMap.keySet()
6742
for key in keys:
6743
- print key, parameterMap.get(key)
6744
+ print(key, parameterMap.get(key))
6745
config = parameterMap.get("config")
6746
inApp = parameterMap.get("app")
6747
inExp = parameterMap.get("exp")
6748
Utilities.setSession(config)
6749
trials = Utilities.getTrialsForExperiment(inApp, inExp)
6750
- print "...done."
6751
+ print("...done.")
6752
return trials
6753
6754
def extractMain(inputs):
6755
events = ArrayList()
6756
events.add(inputs.get(0).getMainEvent())
6757
6758
- print "extracting main event..."
6759
+ print("extracting main event...")
6760
extractor = ExtractEventOperation(inputs, events)
6761
extracted = extractor.processData()
6762
- print "...done."
6763
+ print("...done.")
6764
6765
return extracted
6766
6767
def getTop8(inputs):
6768
- print "extracting top 8 events..."
6769
+ print("extracting top 8 events...")
6770
reducer = TopXEvents(inputs, "Time", AbstractResult.EXCLUSIVE, 8)
6771
reduced = reducer.processData()
6772
return reduced
6773
6774
def drawGraph(results, inclusive):
6775
- print "drawing charts..."
6776
+ print("drawing charts...")
6777
for metric in results.get(0).getMetrics():
6778
grapher = DrawGraph(results)
6779
metrics = HashSet()
6780
6781
grapher.setValueType(AbstractResult.EXCLUSIVE)
6782
grapher.setYAxisLabel("Exclusive " + metric + " (seconds)")
6783
grapher.processData()
6784
- print "...done."
6785
+ print("...done.")
6786
6787
return
6788
6789
-print "--------------- JPython test script start ------------"
6790
+print("--------------- JPython test script start ------------")
6791
6792
trials = load()
6793
results = ArrayList()
6794
6795
extracted = getTop8(results)
6796
drawGraph(extracted, False)
6797
6798
-print "---------------- JPython test script end -------------"
6799
+print("---------------- JPython test script end -------------")
6800
--- a/tools/src/perfexplorer/etc/selecttrialtest.py (original)
6801
--- b/tools/src/perfexplorer/etc/selecttrialtest.py (refactored)
6802
6803
from edu.uoregon.tau.perfexplorer.common import TransformationType
6804
from edu.uoregon.tau.perfexplorer.common import AnalysisType
6805
6806
-print "--------------- JPython test script start ------------"
6807
+print("--------------- JPython test script start ------------")
6808
6809
pe = ScriptFacade("/home/khuck/.ParaProf/perfdmf.cfg")
6810
pe.doSomething()
6811
6812
criteria = "trial.node_count > 32 and experiment.id = 80"
6813
trials = pe.getTrialList(criteria)
6814
for t in trials:
6815
- print t.getName()," ",t.getExperimentID()
6816
+ print(t.getName()," ",t.getExperimentID())
6817
6818
pe.exit()
6819
6820
-print "---------------- JPython test script end -------------"
6821
+print("---------------- JPython test script end -------------")
6822
--- a/tools/src/perfexplorer/etc/simple.py (original)
6823
--- b/tools/src/perfexplorer/etc/simple.py (refactored)
6824
6825
returnEvents = {}
6826
events = pe.getEventList(trial, metricIndex)
6827
while events.hasNext():
6828
- event = events.next()
6829
+ event = next(events)
6830
# print event.getName()
6831
returnEvents[event.getName()] = event.getMeanSummary()
6832
return returnEvents
6833
6834
def findMain(events, metric):
6835
inclusive = 0.0
6836
main = {}
6837
- for key in events.keys():
6838
+ for key in list(events.keys()):
6839
data = events[key]
6840
if data.getInclusive() > inclusive:
6841
inclusive = data.getInclusive()
6842
6843
def pairwiseEvent(baseEvents, otherEvents, i, j, filter):
6844
faster = {}
6845
slower = {}
6846
- for event in baseEvents.keys():
6847
+ for event in list(baseEvents.keys()):
6848
# print event
6849
baseValues = baseEvents[event]
6850
otherValues = otherEvents[event]
6851
6852
else:
6853
slower[event] = abs(diff)
6854
results = {}
6855
- items = faster.items()
6856
+ items = list(faster.items())
6857
items.sort()
6858
results["faster"] = items
6859
- items = slower.items()
6860
+ items = list(slower.items())
6861
items.sort()
6862
results["slower"] = items
6863
return results
6864
6865
def mainReport(baseMain, otherMain, baseName, otherName):
6866
if baseMain["inclusive"] > otherMain["inclusive"]:
6867
- print "\nBaseline trial is relatively slower than second trial.\n"
6868
+ print("\nBaseline trial is relatively slower than second trial.\n")
6869
percentage = (baseMain["inclusive"] - otherMain["inclusive"]) / otherMain["inclusive"]
6870
fasterSlower = -1
6871
elif baseMain["inclusive"] < otherMain["inclusive"]:
6872
- print "\nBaseline trial is relatively faster than second trial.\n"
6873
+ print("\nBaseline trial is relatively faster than second trial.\n")
6874
percentage = (otherMain["inclusive"] - baseMain["inclusive"]) / baseMain["inclusive"]
6875
fasterSlower = 1
6876
else:
6877
- print "\nBaseline trial and second trial have the same execution time."
6878
+ print("\nBaseline trial and second trial have the same execution time.")
6879
fasterSlower = 0
6880
percentage = 0.0
6881
# print "\t", baseName, baseMain["name"], ":", baseMain["inclusive"], "seconds\n",
6882
# print "\t", otherName, otherMain["name"], ":", otherMain["inclusive"], "seconds\n",
6883
- print "\t", baseName, ":", baseMain["inclusive"]/1000000, "seconds\n",
6884
- print "\t", otherName, ":", otherMain["inclusive"]/1000000, "seconds\n",
6885
+ print("\t", baseName, ":", baseMain["inclusive"]/1000000, "seconds\n", end=' ')
6886
+ print("\t", otherName, ":", otherMain["inclusive"]/1000000, "seconds\n", end=' ')
6887
if fasterSlower < 0:
6888
- print "\t Relative Difference: ", percentage*100, "% slower\n"
6889
+ print("\t Relative Difference: ", percentage*100, "% slower\n")
6890
elif fasterSlower > 0:
6891
- print "\t Relative Difference: ", percentage*100, "% faster\n"
6892
- else:
6893
- print "\t Relative Difference: ", percentage*100, "%\n"
6894
+ print("\t Relative Difference: ", percentage*100, "% faster\n")
6895
+ else:
6896
+ print("\t Relative Difference: ", percentage*100, "%\n")
6897
return fasterSlower
6898
6899
def showSignificantTimeEvents(diffs, type, significant):
6900
6901
break
6902
# don't show insignificant differences
6903
if event[1] > 1000000:
6904
- print "\t", event[0], ":", event[1]/1000000, "seconds", type, "than baseline"
6905
+ print("\t", event[0], ":", event[1]/1000000, "seconds", type, "than baseline")
6906
significant.append(event[0])
6907
shown += 1
6908
x += 1
6909
6910
break
6911
# don't show insignificant differences
6912
if event[1] > 1000000:
6913
- print "\t", event[0], ":", event[1]/1000000, "million", type, "than baseline"
6914
+ print("\t", event[0], ":", event[1]/1000000, "million", type, "than baseline")
6915
shown += 1
6916
x += 1
6917
return shown
6918
6919
if baseMain["name"] == otherMain["name"]:
6920
fasterSlower = mainReport(baseMain, otherMain, baseTrial.getName(), otherTrial.getName())
6921
else:
6922
- print "Main events do not match: ", baseMain["name"], ", ", otherMain["name"]
6923
+ print("Main events do not match: ", baseMain["name"], ", ", otherMain["name"])
6924
return
6925
6926
# compare the events for metric i
6927
6928
6929
# tell the user the significant differences
6930
significant = []
6931
- print "Significant", baseMetrics[baseTime], "differences between trials:\n"
6932
+ print("Significant", baseMetrics[baseTime], "differences between trials:\n")
6933
shown = showSignificantTimeEvents(diffs, "faster", significant)
6934
shown += showSignificantTimeEvents(diffs, "slower", significant)
6935
if shown == 0:
6936
- print "\t None.\n"
6937
+ print("\t None.\n")
6938
6939
# significant is now populated with the names of events that are significant.
6940
# iterate through the metrics to locate possible causes for the time difference.
6941
6942
if x != baseTime:
6943
y = metricMap[x]
6944
diffs = pairwiseEvent(baseEvents, otherEvents, x, y, significant)
6945
- print "\nSignificant", baseMetrics[x], "differences between trials:\n"
6946
+ print("\nSignificant", baseMetrics[x], "differences between trials:\n")
6947
shown = showSignificantEvents(diffs, "faster")
6948
shown += showSignificantEvents(diffs, "slower")
6949
if shown == 0:
6950
- print "\t None.\n"
6951
+ print("\t None.\n")
6952
x += 1
6953
6954
-print "--------------- JPython test script start ------------"
6955
+print("--------------- JPython test script start ------------")
6956
6957
pe = ScriptFacade()
6958
DoAnalysis(pe)
6959
6960
-print "\n"
6961
-print "---------------- JPython test script end -------------"
6962
+print("\n")
6963
+print("---------------- JPython test script end -------------")
6964
6965
pe.exit()
6966
6967
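
Note on the simple.py hunks above: two further 2to3 rewrites appear here, iterator.next() becoming the next() builtin and dict methods being wrapped in list() before in-place use. A small sketch using a plain dict in place of the PerfExplorer event map (the real script iterates Jython-wrapped Java objects):

    faster = {"MPI_Send()": 2.5e6, "MPI_Recv()": 1.1e6}   # hypothetical event -> difference map
    items = list(faster.items())    # .items() is a view in Python 3, so copy it before sorting in place
    items.sort()
    # the same result in one step: items = sorted(faster.items())

    it = iter(items)
    print(next(it))                 # replaces the Python 2 spelling it.next()
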
--- a/tools/src/perfexplorer/etc/simplegraphs.py (original)
6968
--- b/tools/src/perfexplorer/etc/simplegraphs.py (refactored)
6969
6970
pe.setChartYAxisName("avg(interval_mean_summary.exclusive)", metric + " (million)")
6971
pe.doGeneralChart()
6972
6973
-print "--------------- JPython test script start ------------"
6974
+print("--------------- JPython test script start ------------")
6975
6976
pe = ScriptFacade()
6977
TotalExecutionTime1(pe, "P_WALL_CLOCK_TIME")
6978
6979
TotalExecutionTime2(pe, "PAPI_FP_INS")
6980
TotalExecutionTime2(pe, "PAPI_L1_TCM")
6981
6982
-print "---------------- JPython test script end -------------"
6983
+print("---------------- JPython test script end -------------")
6984
6985
6986
--- a/tools/src/perfexplorer/etc/test.py (original)
6987
--- b/tools/src/perfexplorer/etc/test.py (refactored)
6988
6989
from edu.uoregon.tau.perfexplorer.common import TransformationType
6990
from edu.uoregon.tau.perfexplorer.common import AnalysisType
6991
6992
-print "--------------- JPython test script start ------------"
6993
+print("--------------- JPython test script start ------------")
6994
6995
x = 2 + 5
6996
-print x
6997
+print(x)
6998
6999
pe = ScriptFacade()
7000
pe.doSomething()
7001
7002
7003
# pe.exit()
7004
7005
-print "---------------- JPython test script end -------------"
7006
+print("---------------- JPython test script end -------------")
7007
--- a/tools/src/perfexplorer/examples/ApexClassification/elMedico.py (original)
7008
--- b/tools/src/perfexplorer/examples/ApexClassification/elMedico.py (refactored)
7009
7010
parameterMap = PerfExplorerModel.getModel().getScriptParameters()
7011
keys = parameterMap.keySet()
7012
for key in keys:
7013
- print key, parameterMap.get(key)
7014
+ print(key, parameterMap.get(key))
7015
mode = parameterMap.get("mode")
7016
inputData = parameterMap.get("inputData")
7017
classifierFilename = parameterMap.get("classifierFilename")
7018
7019
wrapper.setClassifierType(WekaClassifierWrapper.J48)
7020
wrapper.buildClassifier()
7021
end = time.clock()
7022
- print end - start, " seconds to build classifier"
7023
+ print(end - start, " seconds to build classifier")
7024
start = time.clock()
7025
- print wrapper.crossValidateModel(10);
7026
+ print(wrapper.crossValidateModel(10));
7027
end = time.clock()
7028
- print end - start, " seconds to validate classifier"
7029
+ print(end - start, " seconds to validate classifier")
7030
WekaClassifierWrapper.writeClassifier(classifierFilename, wrapper)
7031
- print classifierFilename, "created."
7032
+ print(classifierFilename, "created.")
7033
7034
def testClassifier():
7035
global inputData
7036
7037
# print className
7038
7039
def main(argv):
7040
- print "--------------- JPython test script start ------------"
7041
+ print("--------------- JPython test script start ------------")
7042
7043
getParameters()
7044
7045
if mode == "build":
7046
- print "building classifier"
7047
+ print("building classifier")
7048
buildClassifier()
7049
else:
7050
- print "using classifier"
7051
+ print("using classifier")
7052
testClassifier()
7053
7054
- print "...done."
7055
+ print("...done.")
7056
7057
- print "---------------- JPython test script end -------------"
7058
+ print("---------------- JPython test script end -------------")
7059
7060
if __name__ == "__main__":
7061
main(sys.argv[1:])
7062
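
Note on elMedico.py above: the time.clock() calls are left as-is because 2to3 only touches syntax. They still run on older Python 3 interpreters but were removed in Python 3.8; if these timing lines are ever revisited, time.perf_counter() is the usual drop-in for measuring an interval. A sketch under that assumption:

    import time

    start = time.perf_counter()
    # ... build or cross-validate the classifier here ...
    end = time.perf_counter()
    print(end - start, " seconds to build classifier")
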
--- a/tools/src/perfexplorer/examples/ClassBreakdown/classAggregation.py (original)
7063
--- b/tools/src/perfexplorer/examples/ClassBreakdown/classAggregation.py (refactored)
7064
7065
tmp = parameterMap.get("tauData")
7066
if tmp != None:
7067
tauData = tmp
7068
- print "Performance data: " + tauData
7069
- else:
7070
- print "TAU profile data path not specified... using current directory of profile.x.x.x files."
7071
+ print("Performance data: " + tauData)
7072
+ else:
7073
+ print("TAU profile data path not specified... using current directory of profile.x.x.x files.")
7074
7075
def loadFile(fileName):
7076
# load the trial
7077
7078
return input
7079
7080
def dumpResult(ebd):
7081
- print ebd.fullName
7082
- print "type: " + ebd.type
7083
- print "returnType: " + ebd.returnType
7084
+ print(ebd.fullName)
7085
+ print("type: " + ebd.type)
7086
+ print("returnType: " + ebd.returnType)
7087
if ebd.reference:
7088
- print "reference: True"
7089
- else:
7090
- print "reference: False"
7091
- print "namespace: " + ebd.nameSpace
7092
- print "className: " + ebd.className
7093
- print "classTemplates: " + ebd.classTemplates
7094
- print "method: " + ebd.method
7095
- print "methodTemplate: " + ebd.methodTemplates
7096
- print "arguments: " + ebd.arguments
7097
+ print("reference: True")
7098
+ else:
7099
+ print("reference: False")
7100
+ print("namespace: " + ebd.nameSpace)
7101
+ print("className: " + ebd.className)
7102
+ print("classTemplates: " + ebd.classTemplates)
7103
+ print("method: " + ebd.method)
7104
+ print("methodTemplate: " + ebd.methodTemplates)
7105
+ print("arguments: " + ebd.arguments)
7106
if ebd.const:
7107
- print "const: True"
7108
- else:
7109
- print "const: False"
7110
- print "file: " + ebd.file
7111
- print "line: " + ebd.line
7112
- print "inclusive: ", ebd.inclusive
7113
- print "exclusive: ", ebd.exclusive
7114
+ print("const: True")
7115
+ else:
7116
+ print("const: False")
7117
+ print("file: " + ebd.file)
7118
+ print("line: " + ebd.line)
7119
+ print("inclusive: ", ebd.inclusive)
7120
+ print("exclusive: ", ebd.exclusive)
7121
7122
def checkParents(ebd, full, ebds):
7123
# iterate over the callpath events
7124
7125
# Also, Multiple methods could have different templated instances.
7126
7127
methods = dict()
7128
- for event,ebd in ebds.items():
7129
+ for event,ebd in list(ebds.items()):
7130
if ebd.className == className:
7131
value = 0.0
7132
if ebd.type == "UNWIND":
7133
7134
showmax=5 # set to 0 to show all methods
7135
for m in sorted(methods, key=methods.get, reverse=True):
7136
if showmax > 0:
7137
- print "\tMethod '%s' : %f" % (m,methods[m]/1000000)
7138
+ print("\tMethod '%s' : %f" % (m,methods[m]/1000000))
7139
else:
7140
othervalue = othervalue + methods[m]
7141
showmax = showmax - 1
7142
if showmax < 0:
7143
- print "\tAll other methods : %f" % (othervalue/1000000)
7144
+ print("\tAll other methods : %f" % (othervalue/1000000))
7145
7146
def main():
7147
global filename
7148
global tauData
7149
global doInclusive
7150
- print "--------------- JPython test script start ------------"
7151
+ print("--------------- JPython test script start ------------")
7152
# get the parameters
7153
getParameters()
7154
# load the data
7155
7156
mainEvent = result.getMainEvent()
7157
7158
# then, extract those events from the actual data
7159
- print "Extracting non-callpath data..."
7160
+ print("Extracting non-callpath data...")
7161
flatten = ExtractNonCallpathEventOperation(result)
7162
flat = flatten.processData().get(0)
7163
7164
- print "Computing statistics..."
7165
+ print("Computing statistics...")
7166
statmaker = BasicStatisticsOperation(flat, False)
7167
statmaker.setIncludeNull(False)
7168
stats = statmaker.processData().get(BasicStatisticsOperation.MEAN)
7169
7170
# get the callpath events
7171
- print "Extracting callpath data..."
7172
+ print("Extracting callpath data...")
7173
fullen = ExtractCallpathEventOperation(result)
7174
full = fullen.processData().get(0)
7175
7176
7177
ebds[event] = ebd
7178
7179
classes = dict()
7180
- for event,ebd in ebds.items():
7181
+ for event,ebd in list(ebds.items()):
7182
value = 0
7183
if ebd.type == "UNWIND":
7184
value = checkParents(ebd,full,ebds)
7185
7186
for c in sorted(classes, key=classes.get, reverse=True):
7187
#if len(c) > 0:
7188
if showmax > 0:
7189
- print "\nClass '%s' : %f" % (c,classes[c]/1000000)
7190
+ print("\nClass '%s' : %f" % (c,classes[c]/1000000))
7191
showmax = showmax - 1
7192
showChildren(ebds,c,full)
7193
else:
7194
othervalue = othervalue + classes[c]
7195
# get the application total from the original profile, thread 0. It is the true application main.
7196
- print "\nAll other classes : %f, application total : %f" % (othervalue/1000000, result.getInclusive(0,mainEvent,metric)/1000000)
7197
- print "(inclusive aggregation of unwound samples and means without NULLs can add up to more than application total)"
7198
- print "\nMetric:", metric, "/ 1,000,000"
7199
-
7200
- print "---------------- JPython test script end -------------"
7201
+ print("\nAll other classes : %f, application total : %f" % (othervalue/1000000, result.getInclusive(0,mainEvent,metric)/1000000))
7202
+ print("(inclusive aggregation of unwound samples and means without NULLs can add up to more than application total)")
7203
+ print("\nMetric:", metric, "/ 1,000,000")
7204
+
7205
+ print("---------------- JPython test script end -------------")
7206
7207
if __name__ == "__main__":
7208
main()
7209
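
Note on classAggregation.py above: the /1000000 scaling is untouched because 2to3 never rewrites division. That is harmless as long as one operand is a float (the inclusive/exclusive totals appear to come back as doubles), but wherever both operands are ints, Python 3 switches from floor division to true division. A two-line illustration:

    print(7 / 2, 7 // 2)        # Python 3: 3.5 3   (Python 2 printed 3 3 for plain /)
    print(7.0 / 2, 7.0 // 2)    # 3.5 3.0 under both versions
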
--- a/tools/src/perfexplorer/examples/ClassLightweights/classAggregation.py (original)
7210
--- b/tools/src/perfexplorer/examples/ClassLightweights/classAggregation.py (refactored)
7211
7212
tmp = parameterMap.get("tauData")
7213
if tmp != None:
7214
tauData = tmp
7215
- print "Performance data: " + tauData
7216
- else:
7217
- print "TAU profile data path not specified... using current directory of profile.x.x.x files."
7218
+ print("Performance data: " + tauData)
7219
+ else:
7220
+ print("TAU profile data path not specified... using current directory of profile.x.x.x files.")
7221
tmp = parameterMap.get("threshold1")
7222
if tmp != None:
7223
threshold = float(tmp)
7224
- print "Max per call threshold: %f" % (threshold)
7225
+ print("Max per call threshold: %f" % (threshold))
7226
tmp = parameterMap.get("threshold2")
7227
if tmp != None:
7228
percentClass = float(tmp)
7229
- print "Min percent of class: %f" % (percentClass)
7230
+ print("Min percent of class: %f" % (percentClass))
7231
7232
def loadFile(fileName):
7233
# load the trial
7234
7235
def dumpResult(ebd):
7236
global stats
7237
global metric
7238
- print ebd.fullName
7239
- print "type: " + ebd.type
7240
- print "returnType: " + ebd.returnType
7241
+ print(ebd.fullName)
7242
+ print("type: " + ebd.type)
7243
+ print("returnType: " + ebd.returnType)
7244
if ebd.reference:
7245
- print "reference: True"
7246
- else:
7247
- print "reference: False"
7248
- print "namespace: " + ebd.nameSpace
7249
- print "className: " + ebd.className
7250
- print "classTemplates: " + ebd.classTemplates
7251
- print "method: " + ebd.method
7252
- print "methodTemplate: " + ebd.methodTemplates
7253
- print "arguments: " + ebd.arguments
7254
+ print("reference: True")
7255
+ else:
7256
+ print("reference: False")
7257
+ print("namespace: " + ebd.nameSpace)
7258
+ print("className: " + ebd.className)
7259
+ print("classTemplates: " + ebd.classTemplates)
7260
+ print("method: " + ebd.method)
7261
+ print("methodTemplate: " + ebd.methodTemplates)
7262
+ print("arguments: " + ebd.arguments)
7263
if ebd.const:
7264
- print "const: True"
7265
- else:
7266
- print "const: False"
7267
- print "file: " + ebd.file
7268
- print "line: " + ebd.line
7269
- print "inclusive: ", stats.getInclusive(0,ebd.fullName,metric)
7270
- print "exclusive: ", stats.getExclusive(0,ebd.fullName,metric)
7271
+ print("const: True")
7272
+ else:
7273
+ print("const: False")
7274
+ print("file: " + ebd.file)
7275
+ print("line: " + ebd.line)
7276
+ print("inclusive: ", stats.getInclusive(0,ebd.fullName,metric))
7277
+ print("exclusive: ", stats.getExclusive(0,ebd.fullName,metric))
7278
7279
def checkParents(ebd, full, ebds):
7280
global stats
7281
7282
7283
methods = dict()
7284
methodcalls = dict()
7285
- for event,ebd in ebds.items():
7286
+ for event,ebd in list(ebds.items()):
7287
if ebd.className == className:
7288
value = 0.0
7289
calls = 0
7290
7291
if classTotal > 0:
7292
perclass = methods[m] / classTotal
7293
if percall < threshold and perclass > percentClass:
7294
- print "\tMethod '%s' : total = %.2e, calls = %.2e, percall = %.2f, %%class = %.2f%%" % (m,methods[m],methodcalls[m],percall,perclass)
7295
+ print("\tMethod '%s' : total = %.2e, calls = %.2e, percall = %.2f, %%class = %.2f%%" % (m,methods[m],methodcalls[m],percall,perclass))
7296
7297
def main():
7298
global filename
7299
7300
global stats
7301
global metric
7302
global threshold
7303
- print "--------------- JPython test script start ------------"
7304
+ print("--------------- JPython test script start ------------")
7305
# get the parameters
7306
getParameters()
7307
# load the data
7308
7309
metrics = result.getMetrics().toArray()
7310
metric = metrics[0]
7311
7312
- print "Using metric:", metric
7313
+ print("Using metric:", metric)
7314
type = result.EXCLUSIVE
7315
mainEvent = result.getMainEvent()
7316
7317
# then, extract those events from the actual data
7318
- print "Extracting non-callpath data...",
7319
+ print("Extracting non-callpath data...", end=' ')
7320
flatten = ExtractNonCallpathEventOperation(result)
7321
flat = flatten.processData().get(0)
7322
- print "done."
7323
-
7324
- print "Computing statistics...",
7325
+ print("done.")
7326
+
7327
+ print("Computing statistics...", end=' ')
7328
statmaker = BasicStatisticsOperation(flat, False)
7329
statmaker.setIncludeNull(False)
7330
stats = statmaker.processData().get(BasicStatisticsOperation.MEAN)
7331
- print "done."
7332
+ print("done.")
7333
7334
# get the callpath events
7335
- print "Extracting callpath data...",
7336
+ print("Extracting callpath data...", end=' ')
7337
fullen = ExtractCallpathEventOperation(result)
7338
full = fullen.processData().get(0)
7339
- print "done."
7340
-
7341
- print "Iterating over methods...",
7342
+ print("done.")
7343
+
7344
+ print("Iterating over methods...", end=' ')
7345
# Iterate over all methods, and parse out the class for each method
7346
ebds = dict()
7347
for event in flat.getEvents():
7348
7349
elif event != ".TAU application":
7350
ebd = EventBreakdown(event)
7351
ebds[event] = ebd
7352
- print "done."
7353
+ print("done.")
7354
7355
# iterate over the parsed events, and aggreate them by class
7356
classes = dict()
7357
progress = 0.0
7358
- print "Aggregating by class...",
7359
- for event,ebd in ebds.items():
7360
+ print("Aggregating by class...", end=' ')
7361
+ for event,ebd in list(ebds.items()):
7362
progress = progress + 1.0
7363
- print "\rAggregating by class... %.2f%% (%d of %d methods)" % ((progress / len(ebds))*100.0,progress,len(ebds)),
7364
+ print("\rAggregating by class... %.2f%% (%d of %d methods)" % ((progress / len(ebds))*100.0,progress,len(ebds)), end=' ')
7365
value = 0
7366
if ebd.type == "UNWIND":
7367
value = checkParents(ebd,full,ebds)
7368
7369
classes[ebd.className] = classes[ebd.className] + value
7370
else:
7371
classes[ebd.className] = value
7372
- print "done. %d classes found." % (len(classes))
7373
+ print("done. %d classes found." % (len(classes)))
7374
7375
appTotal = result.getInclusive(0,mainEvent,metric) / 100.0 # for scaling to percent
7376
othervalue = 0
7377
7378
for c in sorted(classes, key=classes.get, reverse=True):
7379
#if len(c) > 0:
7380
if showmax > 0:
7381
- print "\nClass '%s' : total = %.2e, %% application = %.2f%%" % (c,classes[c],classes[c]/appTotal)
7382
+ print("\nClass '%s' : total = %.2e, %% application = %.2f%%" % (c,classes[c],classes[c]/appTotal))
7383
showmax = showmax - 1
7384
showChildren(ebds,c,full,classes[c]/100.0) # for scaling to percent
7385
else:
7386
othervalue = othervalue + classes[c]
7387
# get the application total from the original profile, thread 0. It is the true application main.
7388
- print "\nAll other classes : %.2e, application total : %.2e" % (othervalue, appTotal * 100.0) # scale it back
7389
- print "(inclusive aggregation of unwound samples and means without NULLs can add up to more than application total)"
7390
- print "\nMetric:", metric
7391
-
7392
- print "---------------- JPython test script end -------------"
7393
+ print("\nAll other classes : %.2e, application total : %.2e" % (othervalue, appTotal * 100.0)) # scale it back
7394
+ print("(inclusive aggregation of unwound samples and means without NULLs can add up to more than application total)")
7395
+ print("\nMetric:", metric)
7396
+
7397
+ print("---------------- JPython test script end -------------")
7398
7399
if __name__ == "__main__":
7400
main()
7401
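
Note on the "\rAggregating by class..." progress line above: the Python 2 trailing comma became end=' ', which keeps the update on one line, but stdout is typically line-buffered, so the carriage-return refresh may not appear until a newline is finally printed. If that matters, Python 3's print() accepts flush=True. A sketch with a hypothetical method count:

    total = 250                                   # stand-in for len(ebds)
    for done in range(1, total + 1):
        print("\rAggregating by class... %.2f%% (%d of %d methods)"
              % ((done / total) * 100.0, done, total), end=' ', flush=True)
    print()                                       # finish the line
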
--- a/tools/src/perfexplorer/examples/ClusterAndDifference/clusterTest.py (original)
7402
--- b/tools/src/perfexplorer/examples/ClusterAndDifference/clusterTest.py (refactored)
7403
7404
tmp = parameterMap.get("tauData")
7405
if tmp != None:
7406
tauData = tmp
7407
- print "Performance data: " + tauData
7408
+ print("Performance data: " + tauData)
7409
else:
7410
- print "TAU profile data path not specified... using current directory of profile.x.x.x files."
7411
+ print("TAU profile data path not specified... using current directory of profile.x.x.x files.")
7412
7413
def loadFile(fileName):
7414
# load the trial
7415
7416
baseline = extracted.get(0)
7417
comparison = extracted.get(1)
7418
7419
- print "Baseline: ", baselineName
7420
- print "Comparison: ", comparisonName
7421
+ print("Baseline: ", baselineName)
7422
+ print("Comparison: ", comparisonName)
7423
7424
# get the stats
7425
statMakerBaseline = BasicStatisticsOperation(baseline)
7426
7427
7428
max = 10
7429
if type == AbstractResult.EXCLUSIVE:
7430
- print "\nExclusive:\n"
7431
+ print("\nExclusive:\n")
7432
elif type == AbstractResult.INCLUSIVE:
7433
- print "\nInclusive:\n"
7434
+ print("\nInclusive:\n")
7435
max = 20
7436
else:
7437
- print "\nNumber of Calls:\n"
7438
+ print("\nNumber of Calls:\n")
7439
7440
# get the top 10?
7441
topXmaker = TopXEvents(diffs, baseline.getTimeMetric(), type, max)
7442
top10 = topXmaker.processData().get(0)
7443
7444
- print "B_Time C_Time D_Time %_Diff Event"
7445
- print "------ ------ ------ ------ ------"
7446
+ print("B_Time C_Time D_Time %_Diff Event")
7447
+ print("------ ------ ------ ------ ------")
7448
for thread in top10.getThreads():
7449
for event in top10.getEvents():
7450
for metric in top10.getMetrics():
7451
7452
diff = diff * 0.000001
7453
if baselineVal > comparisonVal:
7454
diff = diff * -1.0
7455
- print "%.2f\t%.2f\t%.2f\t%.2f\t%s" % (baselineVal, comparisonVal, diff, (diff/baselineVal)*100.0, event)
7456
+ print("%.2f\t%.2f\t%.2f\t%.2f\t%s" % (baselineVal, comparisonVal, diff, (diff/baselineVal)*100.0, event))
7457
7458
return
7459
7460
def main():
7461
global filename
7462
- print "--------------- JPython test script start ------------"
7463
- print "doing cluster test"
7464
+ print("--------------- JPython test script start ------------")
7465
+ print("doing cluster test")
7466
# get the parameters
7467
getParameters()
7468
# load the data
7469
7470
# cluster the data
7471
clusterer = DBSCANOperation(reduced, metric, type, 1.0)
7472
clusterResult = clusterer.processData()
7473
- print "Estimated value for k:", str(clusterResult.get(0).getThreads().size())
7474
+ print("Estimated value for k:", str(clusterResult.get(0).getThreads().size()))
7475
clusterIDs = clusterResult.get(4)
7476
7477
# split the trial into the clusters
7478
7479
7480
7481
7482
- print "---------------- JPython test script end -------------"
7483
+ print("---------------- JPython test script end -------------")
7484
7485
if __name__ == "__main__":
7486
main()
7487
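
Note on the parameter-reading hunks above: the "if tmp != None:" tests are valid Python 3 and 2to3 leaves them alone, but identity comparison is the idiomatic form and avoids surprises with objects that overload ==. A sketch, with a plain dict standing in for the Jython parameter map:

    parameterMap = {"tauData": "profile.ppk"}     # hypothetical stand-in for the script parameters
    tmp = parameterMap.get("tauData")             # returns None when the key is absent
    if tmp is not None:
        tauData = tmp
        print("Performance data: " + tauData)
    else:
        print("TAU profile data path not specified... using current directory of profile.x.x.x files.")
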
--- a/tools/src/perfexplorer/examples/ClusterAndDifference/difference.py (original)
7488
--- b/tools/src/perfexplorer/examples/ClusterAndDifference/difference.py (refactored)
7489
7490
baseline = extracted.get(0)
7491
comparison = extracted.get(1)
7492
7493
- print "Baseline: ", baselineName
7494
- print "Comparison: ", comparisonName
7495
+ print("Baseline: ", baselineName)
7496
+ print("Comparison: ", comparisonName)
7497
7498
# get the stats
7499
statMakerBaseline = BasicStatisticsOperation(baseline)
7500
7501
7502
max = 10
7503
if type == AbstractResult.EXCLUSIVE:
7504
- print "\nExclusive:\n"
7505
+ print("\nExclusive:\n")
7506
elif type == AbstractResult.INCLUSIVE:
7507
- print "\nInclusive:\n"
7508
+ print("\nInclusive:\n")
7509
max = 20
7510
else:
7511
- print "\nNumber of Calls:\n"
7512
+ print("\nNumber of Calls:\n")
7513
7514
# get the top 10?
7515
topXmaker = TopXEvents(diffs, baseline.getTimeMetric(), type, max)
7516
top10 = topXmaker.processData().get(0)
7517
7518
- print "B_Time C_Time D_Time %_Diff Event"
7519
- print "------ ------ ------ ------ ------"
7520
+ print("B_Time C_Time D_Time %_Diff Event")
7521
+ print("------ ------ ------ ------ ------")
7522
for thread in top10.getThreads():
7523
for event in top10.getEvents():
7524
for metric in top10.getMetrics():
7525
7526
diff = diff * 0.000001
7527
if baselineVal > comparisonVal:
7528
diff = diff * -1.0
7529
- print "%.2f\t%.2f\t%.2f\t%.2f\t%s" % (baselineVal, comparisonVal, diff, (diff/baselineVal)*100.0, event)
7530
+ print("%.2f\t%.2f\t%.2f\t%.2f\t%s" % (baselineVal, comparisonVal, diff, (diff/baselineVal)*100.0, event))
7531
7532
return
7533
7534
def main():
7535
- print "--------------- JPython test script start ------------"
7536
+ print("--------------- JPython test script start ------------")
7537
7538
diffs()
7539
7540
- print "\n---------------- JPython test script end -------------"
7541
+ print("\n---------------- JPython test script end -------------")
7542
7543
if __name__ == "__main__":
7544
main()
7545
--- a/tools/src/perfexplorer/examples/ClusterAndLoadBalance/clusterTest.py (original)
7546
--- b/tools/src/perfexplorer/examples/ClusterAndLoadBalance/clusterTest.py (refactored)
7547
7548
tmp = parameterMap.get("tauData")
7549
if tmp != None:
7550
tauData = tmp
7551
- print "Performance data: " + tauData
7552
+ print("Performance data: " + tauData)
7553
else:
7554
- print "TAU profile data path not specified... using current directory of profile.x.x.x files."
7555
+ print("TAU profile data path not specified... using current directory of profile.x.x.x files.")
7556
7557
def loadFile(fileName):
7558
# load the trial
7559
7560
7561
def main():
7562
global filename
7563
- print "--------------- JPython test script start ------------"
7564
- print "doing cluster test"
7565
+ print("--------------- JPython test script start ------------")
7566
+ print("doing cluster test")
7567
# get the parameters
7568
getParameters()
7569
# load the data
7570
7571
clusterResult = clusterer.processData()
7572
k = str(clusterResult.get(0).getThreads().size())
7573
clusters = ArrayList()
7574
- print "Estimated value for k:", k
7575
+ print("Estimated value for k:", k)
7576
if k > 0:
7577
clusterIDs = clusterResult.get(4)
7578
7579
7580
else:
7581
clusters.put(result)
7582
7583
- print "\nCluster\t Procs\t Type\t\t\t AVG\t MAX\t MIN\t STDEV\t AVG/MAX"
7584
+ print("\nCluster\t Procs\t Type\t\t\t AVG\t MAX\t MIN\t STDEV\t AVG/MAX")
7585
clusterID = 0
7586
7587
for trial in clusters:
7588
7589
stddev = stddevs.getExclusive(thread, event, metric)
7590
ratio = ratios.getExclusive(thread, event, metric)
7591
7592
- print "%d\t %d\t %s\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % (clusterID, trial.getThreads().size(), event, mean*100, max*100, min*100, stddev*100, ratio*100)
7593
+ print("%d\t %d\t %s\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % (clusterID, trial.getThreads().size(), event, mean*100, max*100, min*100, stddev*100, ratio*100))
7594
clusterID = clusterID + 1
7595
7596
- print "---------------- JPython test script end -------------"
7597
+ print("---------------- JPython test script end -------------")
7598
7599
if __name__ == "__main__":
7600
main()
7601
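
Note on clusterTest.py above: k is assigned str(clusterResult.get(0).getThreads().size()) and then tested with "if k > 0:". Python 2 tolerated ordering a str against an int, so the test ran silently; Python 3 raises TypeError for that mixed comparison, and 2to3 does not flag it. A minimal sketch of a safer shape, with a hypothetical thread count:

    size = 4                        # stand-in for clusterResult.get(0).getThreads().size()
    k = size                        # keep the numeric value instead of wrapping it in str()
    print("Estimated value for k:", k)
    if k > 0:                       # now a plain int comparison, fine on Python 3
        pass                        # split the trial into k clusters here
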
--- a/tools/src/perfexplorer/examples/ClusterAndMicroLoadImbalance/clusterTest.py (original)
7602
--- b/tools/src/perfexplorer/examples/ClusterAndMicroLoadImbalance/clusterTest.py (refactored)
7603
7604
tmp = parameterMap.get("tauData")
7605
if tmp != None:
7606
tauData = tmp
7607
- print "Performance data: " + tauData
7608
- else:
7609
- print "TAU profile data path not specified... using current directory of profile.x.x.x files."
7610
+ print("Performance data: " + tauData)
7611
+ else:
7612
+ print("TAU profile data path not specified... using current directory of profile.x.x.x files.")
7613
7614
tmp = parameterMap.get("prefix")
7615
if tmp != None:
7616
iterationPrefix = tmp
7617
- print "Iteration Prefix: " + iterationPrefix
7618
- else:
7619
- print "Iteration Prefix not specified... using", iterationPrefix
7620
+ print("Iteration Prefix: " + iterationPrefix)
7621
+ else:
7622
+ print("Iteration Prefix not specified... using", iterationPrefix)
7623
7624
def loadFile(fileName):
7625
# load the trial
7626
7627
stddev = stddevs.getExclusive(thread, event, metric)
7628
ratio = ratios.getExclusive(thread, event, metric)
7629
7630
- print "%d\t %d\t %s\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % (clusterID, trial.getThreads().size(), event, mean*100, max*100, min*100, stddev*100, ratio*100)
7631
+ print("%d\t %d\t %s\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % (clusterID, trial.getThreads().size(), event, mean*100, max*100, min*100, stddev*100, ratio*100))
7632
clusterID = clusterID + 1
7633
return clusterID
7634
7635
7636
#print mean, max, min, stddev, ratio
7637
7638
if callpath:
7639
- print "%s\t %d\t %ls\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % (mainEvent, trial.getThreads().size(), event, mean*100, max*100, min*100, stddev*100, ratio*100)
7640
- else:
7641
- print "%d\t %s\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % (trial.getThreads().size(), event, mean*100, max*100, min*100, stddev*100, ratio*100)
7642
+ print("%s\t %d\t %ls\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % (mainEvent, trial.getThreads().size(), event, mean*100, max*100, min*100, stddev*100, ratio*100))
7643
+ else:
7644
+ print("%d\t %s\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % (trial.getThreads().size(), event, mean*100, max*100, min*100, stddev*100, ratio*100))
7645
7646
return mean, max, min, stddev
7647
7648
7649
def main():
7650
global filename
7651
global iterationPrefix
7652
- print "--------------- JPython test script start ------------"
7653
- print "doing cluster test"
7654
+ print("--------------- JPython test script start ------------")
7655
+ print("doing cluster test")
7656
# get the parameters
7657
getParameters()
7658
# load the data
7659
7660
extracted = extractor.processData().get(0)
7661
7662
# split communication and computation
7663
- print "splitting communication and computation"
7664
+ print("splitting communication and computation")
7665
splitter = SplitCommunicationComputationOperation(extracted)
7666
outputs = splitter.processData()
7667
computation = outputs.get(SplitCommunicationComputationOperation.COMPUTATION)
7668
7669
#computation = result
7670
7671
# do some basic statistics first
7672
- print "doing stats"
7673
+ print("doing stats")
7674
stats = BasicStatisticsOperation(computation)
7675
means = stats.processData().get(BasicStatisticsOperation.MEAN)
7676
7677
# then, using the stats, find the top X event names
7678
- print "getting top X events"
7679
+ print("getting top X events")
7680
reducer = TopXEvents(means, metric, type, 10)
7681
reduced = reducer.processData().get(0)
7682
7683
# then, extract those events from the actual data
7684
- print "extracting events"
7685
+ print("extracting events")
7686
tmpEvents = ArrayList(reduced.getEvents())
7687
reducer = ExtractEventOperation(computation, tmpEvents)
7688
reduced = reducer.processData().get(0)
7689
7690
# cluster the data
7691
- print "clustering data"
7692
+ print("clustering data")
7693
clusterer = DBSCANOperation(reduced, metric, type, 1.0)
7694
clusterResult = clusterer.processData()
7695
k = str(clusterResult.get(0).getThreads().size())
7696
clusters = ArrayList()
7697
- print "Estimated value for k:", k
7698
+ print("Estimated value for k:", k)
7699
if k > 0:
7700
clusterIDs = clusterResult.get(4)
7701
7702
# split the trial into the clusters
7703
- print "splitting clusters into", k, "trials"
7704
+ print("splitting clusters into", k, "trials")
7705
splitter = SplitTrialClusters(result, clusterResult)
7706
splitter.setIncludeNoisePoints(True)
7707
clusters = splitter.processData()
7708
7709
clusters.put(result)
7710
7711
clusterID = -1
7712
- print "\nCluster\t Procs\t Type\t\t\t AVG\t MAX\t MIN\t STDEV\t AVG/MAX"
7713
+ print("\nCluster\t Procs\t Type\t\t\t AVG\t MAX\t MIN\t STDEV\t AVG/MAX")
7714
clusterID = doLoadImbalance(result, clusterID)
7715
7716
for trial in clusters:
7717
7718
7719
clusterID = 0
7720
for trial in clusters:
7721
- print "\n\nSplitting phases in cluster", clusterID
7722
+ print("\n\nSplitting phases in cluster", clusterID)
7723
splitter = SplitTrialPhasesOperation(trial, iterationPrefix)
7724
phases = splitter.processData()
7725
#print phases.size()
7726
7727
totalStddev = 0.0
7728
totalRatio = 0.0
7729
7730
- print "LoopID\t\t Procs\t Type\t\t\t AVG\t MAX\t MIN\t STDEV\t AVG/MAX"
7731
+ print("LoopID\t\t Procs\t Type\t\t\t AVG\t MAX\t MIN\t STDEV\t AVG/MAX")
7732
for phase in phases:
7733
#print "main event:", phase.getMainEvent()
7734
#for event in phase.getEvents():
7735
7736
avgRatio = avgMean / avgMax
7737
7738
event = LoadImbalanceOperation.KERNEL_COMPUTATION
7739
- print "%s\t\t %d\t %ls\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % ("Average", trial.getThreads().size(), event, avgMean*100, avgMax*100, avgMin*100, avgStddev*100, avgRatio*100)
7740
+ print("%s\t\t %d\t %ls\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % ("Average", trial.getThreads().size(), event, avgMean*100, avgMax*100, avgMin*100, avgStddev*100, avgRatio*100))
7741
clusterID = clusterID + 1
7742
7743
- print "---------------- JPython test script end -------------"
7744
+ print("---------------- JPython test script end -------------")
7745
7746
if __name__ == "__main__":
7747
main()
7748
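
Note on the load-balance hunks above: the scripts reuse max, min and type as ordinary variable names (for example "return mean, max, min, stddev" and the TopXEvents type argument). That is legal and 2to3 does not care, but it shadows the builtins for the rest of the scope. A short illustration:

    values = [0.8, 0.97, 0.5]
    max = 0.97                      # shadows the builtin, as the cluster scripts do
    # print(max(values))            # would now fail: 'float' object is not callable
    del max                         # the builtin becomes visible again
    print(max(values))              # 0.97
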
--- a/tools/src/perfexplorer/examples/ClusterAndMicroLoadImbalance/clusterTestActual.py (original)
7749
--- b/tools/src/perfexplorer/examples/ClusterAndMicroLoadImbalance/clusterTestActual.py (refactored)
7750
7751
tmp = parameterMap.get("tauData")
7752
if tmp != None:
7753
tauData = tmp
7754
- print "Performance data: " + tauData
7755
- else:
7756
- print "TAU profile data path not specified... using current directory of profile.x.x.x files."
7757
+ print("Performance data: " + tauData)
7758
+ else:
7759
+ print("TAU profile data path not specified... using current directory of profile.x.x.x files.")
7760
7761
tmp = parameterMap.get("prefix")
7762
if tmp != None:
7763
iterationPrefix = tmp
7764
- print "Iteration Prefix: " + iterationPrefix
7765
- else:
7766
- print "Iteration Prefix not specified... using", iterationPrefix
7767
+ print("Iteration Prefix: " + iterationPrefix)
7768
+ else:
7769
+ print("Iteration Prefix not specified... using", iterationPrefix)
7770
7771
def loadFile(fileName):
7772
# load the trial
7773
7774
ratio = ratios.getExclusive(thread, event, metric)
7775
7776
inclusive = masterMeans.getInclusive(0, mainEventLong, metric) * conversion
7777
- print "%d\t %d\t %.2f\t %s\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f\t" % (clusterID, trial.getThreads().size(), inclusive, event, mean, max, min, stddev, ratio)
7778
+ print("%d\t %d\t %.2f\t %s\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f\t" % (clusterID, trial.getThreads().size(), inclusive, event, mean, max, min, stddev, ratio))
7779
clusterID = clusterID + 1
7780
return clusterID
7781
7782
7783
inclusive = masterMeans.getInclusive(0, mainEventLong, metric) * conversion
7784
7785
if numphases < 100:
7786
- print "%s\t %d\t %.2f\t %ls\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f\t" % (mainEvent, trial.getThreads().size(), inclusive, event, mean, max, min, stddev, ratio)
7787
+ print("%s\t %d\t %.2f\t %ls\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f\t" % (mainEvent, trial.getThreads().size(), inclusive, event, mean, max, min, stddev, ratio))
7788
7789
return mean, max, min, stddev, inclusive
7790
7791
7792
global filename
7793
global iterationPrefix
7794
global masterMeans
7795
- print "--------------- JPython test script start ------------"
7796
- print "doing cluster test"
7797
+ print("--------------- JPython test script start ------------")
7798
+ print("doing cluster test")
7799
# get the parameters
7800
getParameters()
7801
# load the data
7802
7803
extracted = extractor.processData().get(0)
7804
7805
extracted.setIgnoreWarnings(True)
7806
- print "Getting basic statistics..."
7807
+ print("Getting basic statistics...")
7808
statter = BasicStatisticsOperation(extracted)
7809
masterStats = statter.processData()
7810
masterMeans = masterStats.get(BasicStatisticsOperation.MEAN)
7811
7812
# split communication and computation
7813
- print "splitting communication and computation"
7814
+ print("splitting communication and computation")
7815
splitter = SplitCommunicationComputationOperation(extracted)
7816
outputs = splitter.processData()
7817
computation = outputs.get(SplitCommunicationComputationOperation.COMPUTATION)
7818
7819
#computation = result
7820
7821
# do some basic statistics first
7822
- print "doing stats"
7823
+ print("doing stats")
7824
stats = BasicStatisticsOperation(computation)
7825
means = stats.processData().get(BasicStatisticsOperation.MEAN)
7826
7827
# then, using the stats, find the top X event names
7828
- print "getting top X events"
7829
+ print("getting top X events")
7830
reducer = TopXEvents(means, metric, type, 10)
7831
reduced = reducer.processData().get(0)
7832
7833
# then, extract those events from the actual data
7834
- print "extracting events"
7835
+ print("extracting events")
7836
tmpEvents = ArrayList(reduced.getEvents())
7837
reducer = ExtractEventOperation(computation, tmpEvents)
7838
reduced = reducer.processData().get(0)
7839
7840
# cluster the data
7841
- print "clustering data"
7842
+ print("clustering data")
7843
clusterer = DBSCANOperation(reduced, metric, type, 1.0)
7844
clusterResult = clusterer.processData()
7845
k = str(clusterResult.get(0).getThreads().size())
7846
clusters = ArrayList()
7847
- print "Estimated value for k:", k
7848
+ print("Estimated value for k:", k)
7849
if k > 0:
7850
clusterIDs = clusterResult.get(4)
7851
7852
# split the trial into the clusters
7853
- print "splitting clusters into", k, "trials"
7854
+ print("splitting clusters into", k, "trials")
7855
splitter = SplitTrialClusters(result, clusterResult)
7856
splitter.setIncludeNoisePoints(True)
7857
clusters = splitter.processData()
7858
7859
clusters.put(result)
7860
7861
clusterID = -1
7862
- print "\nCluster\t Procs\t Incl.\t Type\t\t AVG\t MAX\t MIN\t STDEV\t AVG/MAX"
7863
+ print("\nCluster\t Procs\t Incl.\t Type\t\t AVG\t MAX\t MIN\t STDEV\t AVG/MAX")
7864
clusterID = doLoadImbalance(result, clusterID)
7865
7866
for trial in clusters:
7867
7868
#masterStats = statter.processData()
7869
#masterMeans = masterStats.get(BasicStatisticsOperation.MEAN)
7870
7871
- print "\n\nSplitting phases in cluster", clusterID
7872
+ print("\n\nSplitting phases in cluster", clusterID)
7873
splitter = SplitTrialPhasesOperation(trial, iterationPrefix)
7874
phases = splitter.processData()
7875
#print phases.size()
7876
7877
totalStddev = 0.0
7878
totalRatio = 0.0
7879
7880
- print "LoopID\t\t Procs\t Incl.\t Type\t\t AVG\t MAX\t MIN\t STDEV\t AVG/MAX"
7881
+ print("LoopID\t\t Procs\t Incl.\t Type\t\t AVG\t MAX\t MIN\t STDEV\t AVG/MAX")
7882
for phase in phases:
7883
#print "main event:", phase.getMainEvent()
7884
#for event in phase.getEvents():
7885
7886
7887
#event = LoadImbalanceOperation.KERNEL_COMPUTATION
7888
event = LoadImbalanceOperation.COMPUTATION
7889
- print "%s\t\t %d\t %.2f\t %s\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f\t" % ("Average", trial.getThreads().size(), avgInclusive, event, avgMean, avgMax, avgMin, avgStddev, avgRatio)
7890
+ print("%s\t\t %d\t %.2f\t %s\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f\t" % ("Average", trial.getThreads().size(), avgInclusive, event, avgMean, avgMax, avgMin, avgStddev, avgRatio))
7891
clusterID = clusterID + 1
7892
7893
- print "---------------- JPython test script end -------------"
7894
+ print("---------------- JPython test script end -------------")
7895
7896
if __name__ == "__main__":
7897
main()
7898
--- a/tools/src/perfexplorer/examples/ClusterAndMicroLoadImbalance/newClusterTestActual.py (original)
7899
--- b/tools/src/perfexplorer/examples/ClusterAndMicroLoadImbalance/newClusterTestActual.py (refactored)
7900
7901
tmp = parameterMap.get("tauData")
7902
if tmp != None:
7903
tauData = tmp
7904
- print "Performance data: " + tauData
7905
- else:
7906
- print "TAU profile data path not specified... using current directory of profile.x.x.x files."
7907
+ print("Performance data: " + tauData)
7908
+ else:
7909
+ print("TAU profile data path not specified... using current directory of profile.x.x.x files.")
7910
7911
tmp = parameterMap.get("prefix")
7912
if tmp != None:
7913
iterationPrefix = tmp
7914
- print "Iteration Prefix: " + iterationPrefix
7915
- else:
7916
- print "Iteration Prefix not specified... using", iterationPrefix
7917
+ print("Iteration Prefix: " + iterationPrefix)
7918
+ else:
7919
+ print("Iteration Prefix not specified... using", iterationPrefix)
7920
7921
def loadFile(fileName):
7922
global gprof
7923
7924
return 0, 0, 0, 0, 0, 0
7925
if callpath:
7926
if numPhases < 100:
7927
- print "%s\t %d\t %.2f\t %s\t %.2f\t %.2f\t %.2f\t %.2f\t %.4f\t %.4f" % (trunc(mainEvent, max_pos=15), threads, inclusive, event, mean, max, min, stddev, max/inclusive, ratio)
7928
+ print("%s\t %d\t %.2f\t %s\t %.2f\t %.2f\t %.2f\t %.2f\t %.4f\t %.4f" % (trunc(mainEvent, max_pos=15), threads, inclusive, event, mean, max, min, stddev, max/inclusive, ratio))
7929
#print "%s\t %d\t %s\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % (mainEvent, trial.getThreads().size(), event, mean*100, 100, 100, 100, 100)
7930
else:
7931
- print "%d\t %.2f\t %s\t %.2f\t %.2f\t %.2f\t %.2f\t %.4f\t %.4f" % (threads, inclusive, event, mean, max, min, stddev, max/inclusive, ratio)
7932
+ print("%d\t %.2f\t %s\t %.2f\t %.2f\t %.2f\t %.2f\t %.4f\t %.4f" % (threads, inclusive, event, mean, max, min, stddev, max/inclusive, ratio))
7933
7934
splits = loadBalance.get(LoadImbalanceOperation.COMPUTATION_SPLITS)
7935
for thread in splits.getThreads():
7936
7937
global vectorT_i
7938
global vectorT
7939
7940
- print "Getting basic statistics..."
7941
+ print("Getting basic statistics...")
7942
trial.setIgnoreWarnings(True)
7943
statter = BasicStatisticsOperation(trial)
7944
masterStats = statter.processData()
7945
7946
#print "Procs\t Incl.\t Type\t\t AVG\t MAX\t MIN\t STDEV\t AVG/MAX"
7947
#computeLoadBalance(trial, False, 1)
7948
7949
- print
7950
+ print()
7951
7952
splitter = SplitTrialPhasesOperation(trial, iterationPrefix)
7953
phases = splitter.processData()
7954
7955
totalStddev = 0.0
7956
totalRatio = 0.0
7957
7958
- print "LoopID\t\t Procs\t Incl.\t Type\t\t AVG\t MAX\t MIN\t STDEV\t CommEff AVG/MAX"
7959
- print "------------------------------------------------------------------------------------------------"
7960
+ print("LoopID\t\t Procs\t Incl.\t Type\t\t AVG\t MAX\t MIN\t STDEV\t CommEff AVG/MAX")
7961
+ print("------------------------------------------------------------------------------------------------")
7962
for phase in phases:
7963
mean, max, min, stddev, inclusive, commEff = computeLoadBalance(phase, True, phases.size())
7964
if mean == max == min == stddev == 0:
7965
7966
7967
event = LoadImbalanceOperation.COMPUTATION
7968
#print "%s\t\t %d\t %ls\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % ("Average", trial.getThreads().size(), event, avgMean*100, avgMax*100, avgMin*100, avgStddev*100, avgRatio*100)
7969
- print "------------------------------------------------------------------------------------------------"
7970
- print "%s\t\t %d\t %.2f\t %s\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f" % ("Totals", trial.getThreads().size(), totalInclusive, event, totalMean, totalMax, totalMin, math.sqrt(totalStddev), totalCommEff, totalMean / totalMax)
7971
- print "%s\t\t %d\t %.2f\t %s\t %.2f\t %.2f\t %.2f\t %.2f\t %.4f\t %.4f" % ("Average", trial.getThreads().size(), avgInclusive, event, avgMean, avgMax, avgMin, avgStddev, avgCommEff, avgRatio)
7972
+ print("------------------------------------------------------------------------------------------------")
7973
+ print("%s\t\t %d\t %.2f\t %s\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f" % ("Totals", trial.getThreads().size(), totalInclusive, event, totalMean, totalMax, totalMin, math.sqrt(totalStddev), totalCommEff, totalMean / totalMax))
7974
+ print("%s\t\t %d\t %.2f\t %s\t %.2f\t %.2f\t %.2f\t %.2f\t %.4f\t %.4f" % ("Average", trial.getThreads().size(), avgInclusive, event, avgMean, avgMax, avgMin, avgStddev, avgCommEff, avgRatio))
7975
7976
# the total time spent in the loop. Essentially, for each
7977
# iteration of the loop, get the total time for each process. Accumulate
7978
# that vector over the whole loop. The process with the longest time spent
7979
# computing (aggregated over all iterations) is the T.
7980
- print "\nT:\t\t", T
7981
+ print("\nT:\t\t", T)
7982
# the total time spent computing, collapsed. Essentially, for each
7983
# iteration of the loop, get the computing time for each process. Accumulate
7984
# that vector over the whole loop. The process with the longest time spent
7985
# computing (aggregated over all iterations) is the max(T_i).
7986
- print "max(T_i):\t", maxT_i
7987
- print "avg(T_i):\t", avgT_i
7988
- print "maxEff:\t\t", maxEff
7989
- print "CommEff:\t", commEff, "(should be same as maxEff)"
7990
+ print("max(T_i):\t", maxT_i)
7991
+ print("avg(T_i):\t", avgT_i)
7992
+ print("maxEff:\t\t", maxEff)
7993
+ print("CommEff:\t", commEff, "(should be same as maxEff)")
7994
# the load balance for the loop. This is the sum of all efficiencies for
7995
# all processes, divided by the number of processes times the maxiumum
7996
# efficiency. This can be (and is) simplified, by summing the mean
7997
# computing times, and dividing by the max computing times.
7998
- print "avgEff:\t\t", avgEff
7999
- print "LB:\t\t", LB
8000
+ print("avgEff:\t\t", avgEff)
8001
+ print("LB:\t\t", LB)
8002
8003
# the total time spent computing in the loop, serialized. Essentially, for each
8004
# iteration of the loop, get the max computing time in that loop. Add
8005
# those together. Because of overlapping iterations, this can be larger
8006
# than the actual time in the loop. If there were
8007
# no time spent in communication, this is how long the loop should take.
8008
- print "T ideal:\t", totalMax
8009
+ print("T ideal:\t", totalMax)
8010
# the micro load balance is the process with the highest computation time
8011
# divided by the ideal total loop execution time.
8012
- print "microLB:\t", maxT_i / totalMax
8013
+ print("microLB:\t", maxT_i / totalMax)
8014
# the transfer term is the total time spent in the ideal loop divided by
8015
# the actual time spent in the loop.
8016
- print "Transfer:\t", totalMax / T
8017
+ print("Transfer:\t", totalMax / T)
8018
# finally, compute the efficiency. == LB * microLB * Transfer * IPC
8019
- print "n:\t\t", LB * (maxT_i / totalMax) * (totalMax / T) * 1.0, "\n"
8020
+ print("n:\t\t", LB * (maxT_i / totalMax) * (totalMax / T) * 1.0, "\n")
8021
8022
8023
def main():
8024
8025
global vectorT_i
8026
global vectorT
8027
8028
- print "--------------- JPython test script start ------------"
8029
- print "--- Looking for load imbalances --- "
8030
+ print("--------------- JPython test script start ------------")
8031
+ print("--- Looking for load imbalances --- ")
8032
8033
# get the parameters
8034
getParameters()
8035
8036
extracted.setIgnoreWarnings(True)
8037
8038
# split communication and computation
8039
- print "splitting communication and computation"
8040
+ print("splitting communication and computation")
8041
splitter = SplitCommunicationComputationOperation(extracted)
8042
outputs = splitter.processData()
8043
computation = outputs.get(SplitCommunicationComputationOperation.COMPUTATION)
8044
8045
# do some basic statistics first
8046
- print "doing stats"
8047
+ print("doing stats")
8048
simplestats = BasicStatisticsOperation(computation)
8049
simplemeans = simplestats.processData().get(BasicStatisticsOperation.MEAN)
8050
8051
# get top 10 events
8052
- print "getting top X events"
8053
+ print("getting top X events")
8054
reducer = TopXEvents(simplemeans, metric, type, 10)
8055
reduced = reducer.processData().get(0)
8056
- print "extracting events"
8057
+ print("extracting events")
8058
tmpEvents = ArrayList(reduced.getEvents())
8059
reducer = ExtractEventOperation(computation, tmpEvents)
8060
reduced = reducer.processData().get(0)
8061
8062
# cluster
8063
- print "clustering data"
8064
+ print("clustering data")
8065
clusterer = DBSCANOperation(reduced, metric, type, 1.0)
8066
clusterResult = clusterer.processData()
8067
k = str(clusterResult.get(0).getThreads().size())
8068
clusters = ArrayList()
8069
- print "Estimated value for k:", k
8070
+ print("Estimated value for k:", k)
8071
if k > 0:
8072
clusterIDs = clusterResult.get(4)
8073
8074
# split the trial into the clusters
8075
- print "splitting clusters into", k, "trials"
8076
+ print("splitting clusters into", k, "trials")
8077
splitter = SplitTrialClusters(result, clusterResult)
8078
splitter.setIncludeNoisePoints(True)
8079
clusters = splitter.processData()
8080
8081
for trial in clusters:
8082
processCluster(trial, result)
8083
8084
- print "---------------- JPython test script end -------------"
8085
+ print("---------------- JPython test script end -------------")
8086
8087
if __name__ == "__main__":
8088
main()
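# A minimal standalone sketch of the efficiency model printed above, using hypothetical
# numbers; the real script derives every quantity from TAU profiles via PerfExplorer
# operations (names below are illustrative only).
comp_times = [                     # computation time per iteration (rows) per process (columns)
    [1.0, 0.8, 0.9],
    [0.9, 1.1, 0.7],
]
loop_times = [2.2, 2.1, 1.9]       # total (computation + communication) time per process

T = max(loop_times)                                  # slowest process over the whole loop
per_proc_comp = [sum(col) for col in zip(*comp_times)]
maxT_i = max(per_proc_comp)                          # max(T_i)
avgT_i = sum(per_proc_comp) / len(per_proc_comp)     # avg(T_i)
LB = avgT_i / maxT_i                                 # load balance
T_ideal = sum(max(row) for row in comp_times)        # serialized ideal loop time
microLB = maxT_i / T_ideal                           # micro load balance
Transfer = T_ideal / T                               # transfer term
CommEff = maxT_i / T                                 # communication efficiency (maxEff)
n = LB * microLB * Transfer * 1.0                    # overall efficiency, IPC taken as 1.0
print("LB: %.3f  microLB: %.3f  Transfer: %.3f  CommEff: %.3f  n: %.3f"
      % (LB, microLB, Transfer, CommEff, n))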
8089
--- a/tools/src/perfexplorer/examples/ClusterAndTopX/clusterTest.py (original)
8090
--- b/tools/src/perfexplorer/examples/ClusterAndTopX/clusterTest.py (refactored)
8091
8092
tmp = parameterMap.get("tauData")
8093
if tmp != None:
8094
tauData = tmp
8095
- print "Performance data: " + tauData
8096
+ print("Performance data: " + tauData)
8097
else:
8098
- print "TAU profile data path not specified... using current directory of profile.x.x.x files."
8099
+ print("TAU profile data path not specified... using current directory of profile.x.x.x files.")
8100
tmp = parameterMap.get("threshold")
8101
if tmp != None:
8102
threshold = float(tmp)
8103
8104
if tmp != None:
8105
callsCutoff = float(tmp)
8106
8107
- print "Max Functions Threshold:\t", threshold
8108
- print "Max Calls Threshold:\t", callsCutoff
8109
+ print("Max Functions Threshold:\t", threshold)
8110
+ print("Max Calls Threshold:\t", callsCutoff)
8111
8112
def loadFile(fileName):
8113
# load the trial
8114
8115
global tauData
8116
global threshold
8117
global callsCutoff
8118
- print "--------------- JPython test script start ------------"
8119
- print "doing cluster test"
8120
+ print("--------------- JPython test script start ------------")
8121
+ print("doing cluster test")
8122
# get the parameters
8123
getParameters()
8124
# load the data
8125
8126
# cluster the data
8127
clusterer = DBSCANOperation(reduced, metric, type, 1.0)
8128
clusterResult = clusterer.processData()
8129
- print "Estimated value for k:", str(clusterResult.get(0).getThreads().size())
8130
+ print("Estimated value for k:", str(clusterResult.get(0).getThreads().size()))
8131
clusterIDs = clusterResult.get(4)
8132
8133
# split the trial into the clusters
8134
8135
for input in clusters:
8136
8137
# extract the non-callpath data
8138
- print "Extracting non-callpath data..."
8139
+ print("Extracting non-callpath data...")
8140
input.setIgnoreWarnings(True)
8141
extractor = ExtractNonCallpathEventOperation(input)
8142
extracted = extractor.processData().get(0)
8143
8144
# extract computation code (remove MPI)
8145
myEvents = ArrayList()
8146
- print "Filtering out MPI calls..."
8147
+ print("Filtering out MPI calls...")
8148
#print "And functions called less than 1000 times..."
8149
for event in extracted.getEvents():
8150
if not event.startswith("MPI_"):
8151
8152
extracted = extractor.processData().get(0)
8153
8154
# generate statistics
8155
- print "Generating stats..."
8156
+ print("Generating stats...")
8157
doStats = BasicStatisticsOperation(extracted, False)
8158
mean = doStats.processData().get(BasicStatisticsOperation.MEAN)
8159
8160
for type in AbstractResult.EXCLUSIVE, AbstractResult.INCLUSIVE:
8161
8162
# get the top X events
8163
- print "Extracting top events..."
8164
+ print("Extracting top events...")
8165
mean.setIgnoreWarnings(True)
8166
topper = TopXEvents(mean, mean.getTimeMetric(), type, threshold)
8167
topped = topper.processData().get(0)
8168
8169
calls = topped.getCalls(0,event)
8170
if calls < callsCutoff:
8171
if calls == 0.0:
8172
- print "%00.2f%%\t %d\t %0.5f%%\t %s" % (percentage, calls, 0.0, shortEvent)
8173
+ print("%00.2f%%\t %d\t %0.5f%%\t %s" % (percentage, calls, 0.0, shortEvent))
8174
else:
8175
- print "%00.2f%%\t %d\t %0.5f%%\t %s" % (percentage, calls, percentage / float(calls), shortEvent)
8176
+ print("%00.2f%%\t %d\t %0.5f%%\t %s" % (percentage, calls, percentage / float(calls), shortEvent))
8177
8178
functionSet.add(shortEvent)
8179
myFile = open(functions, 'w')
8180
8181
myFile.write(shortEvent + "\n")
8182
myFile.close()
8183
8184
- print "---------------- JPython test script end -------------"
8185
+ print("---------------- JPython test script end -------------")
8186
8187
if __name__ == "__main__":
8188
main()
8189
--- a/tools/src/perfexplorer/examples/Correlation/autotuning.py (original)
8190
--- b/tools/src/perfexplorer/examples/Correlation/autotuning.py (refactored)
8191
8192
grapher.processData()
8193
8194
def main():
8195
- print "--------------- JPython test script start ------------"
8196
+ print("--------------- JPython test script start ------------")
8197
# load the data
8198
global tauData
8199
global prefix
8200
8201
8202
# draw the graph
8203
drawGraph(extracted)
8204
- print "---------------- JPython test script end -------------"
8205
+ print("---------------- JPython test script end -------------")
8206
8207
if __name__ == "__main__":
8208
main()
8209
--- a/tools/src/perfexplorer/examples/Correlation/phases.py (original)
8210
--- b/tools/src/perfexplorer/examples/Correlation/phases.py (refactored)
8211
8212
tmp = parameterMap.get("tauData")
8213
if tmp != None:
8214
tauData = tmp
8215
- print "Performance data: " + tauData
8216
+ print("Performance data: " + tauData)
8217
else:
8218
- print "TAU profile data path not specified... using current directory of profile.x.x.x files."
8219
+ print("TAU profile data path not specified... using current directory of profile.x.x.x files.")
8220
8221
tmp = parameterMap.get("prefix")
8222
if tmp != None:
8223
prefix = tmp
8224
else:
8225
- print "Prefix not specified. Using default."
8226
- print "Prefix: " + prefix
8227
+ print("Prefix not specified. Using default.")
8228
+ print("Prefix: " + prefix)
8229
8230
8231
def loadFile(fileName):
8232
8233
return input
8234
8235
def main():
8236
- print "--------------- JPython test script start ------------"
8237
+ print("--------------- JPython test script start ------------")
8238
# load the data
8239
global tauData
8240
global prefix
8241
8242
8243
corr = CorrelationOperation(extracted)
8244
mycorr = corr.processData().get(0)
8245
- print mycorr.getCorrelation()
8246
+ print(mycorr.getCorrelation())
8247
8248
- print "---------------- JPython test script end -------------"
8249
+ print("---------------- JPython test script end -------------")
8250
8251
if __name__ == "__main__":
8252
main()
8253
--- a/tools/src/perfexplorer/examples/DBSCANOperation/clusterTest.py (original)
8254
--- b/tools/src/perfexplorer/examples/DBSCANOperation/clusterTest.py (refactored)
8255
8256
tmp = parameterMap.get("tauData")
8257
if tmp != None:
8258
tauData = tmp
8259
- print "Performance data: " + tauData
8260
+ print("Performance data: " + tauData)
8261
else:
8262
- print "TAU profile data path not specified... using current directory of profile.x.x.x files."
8263
+ print("TAU profile data path not specified... using current directory of profile.x.x.x files.")
8264
8265
def loadFile(fileName):
8266
# load the trial
8267
8268
8269
def main():
8270
global filename
8271
- print "--------------- JPython test script start ------------"
8272
- print "doing cluster test"
8273
+ print("--------------- JPython test script start ------------")
8274
+ print("doing cluster test")
8275
# get the parameters
8276
getParameters()
8277
# load the data
8278
8279
# cluster the data - on calls!
8280
clusterer = DBSCANOperation(reduced, metric, type, 1.0)
8281
clusterResult = clusterer.processData()
8282
- print "Estimated value for k:", str(clusterResult.get(0).getThreads().size())
8283
- print "---------------- JPython test script end -------------"
8284
+ print("Estimated value for k:", str(clusterResult.get(0).getThreads().size()))
8285
+ print("---------------- JPython test script end -------------")
8286
8287
if __name__ == "__main__":
8288
main()
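# The clustering step above estimates k with PerfExplorer's DBSCANOperation (epsilon 1.0).
# As a standalone illustration only, the same idea with scikit-learn's DBSCAN over
# hypothetical per-thread measurements (the script itself does not use scikit-learn):
import numpy as np
from sklearn.cluster import DBSCAN

rng = np.random.default_rng(0)
# two loose groups of threads, 16 threads each, measured on two events
X = np.vstack([rng.normal(0.0, 0.2, size=(16, 2)),
               rng.normal(5.0, 0.2, size=(16, 2))])
labels = DBSCAN(eps=1.0, min_samples=4).fit_predict(X)
k = len(set(labels) - {-1})        # clusters found, excluding noise points (label -1)
print("Estimated value for k:", k)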
8289
--- a/tools/src/perfexplorer/examples/DeleteTrials/super.py (original)
8290
--- b/tools/src/perfexplorer/examples/DeleteTrials/super.py (refactored)
8291
8292
continue
8293
8294
def main():
8295
- print "--------------- JPython test script start ------------"
8296
+ print("--------------- JPython test script start ------------")
8297
# load the data
8298
inputs = deleteFromDB()
8299
- print "---------------- JPython test script end -------------"
8300
+ print("---------------- JPython test script end -------------")
8301
8302
if __name__ == "__main__":
8303
main()
8304
--- a/tools/src/perfexplorer/examples/DrawGraph/graph.py (original)
8305
--- b/tools/src/perfexplorer/examples/DrawGraph/graph.py (refactored)
8306
8307
grapher.processData()
8308
8309
def main():
8310
- print "--------------- JPython test script start ------------"
8311
+ print("--------------- JPython test script start ------------")
8312
# load the data
8313
inputs = loadFromFiles()
8314
8315
8316
extracted = extractor.processData()
8317
8318
drawGraph(extracted)
8319
- print "---------------- JPython test script end -------------"
8320
+ print("---------------- JPython test script end -------------")
8321
8322
if __name__ == "__main__":
8323
main()
8324
--- a/tools/src/perfexplorer/examples/ExportData/export.py (original)
8325
--- b/tools/src/perfexplorer/examples/ExportData/export.py (refactored)
8326
8327
f.write(str(means.getExclusive(0,event,metric)))
8328
f.write('\"\n')
8329
f.close()
8330
- print "Data written to export.csv"
8331
+ print("Data written to export.csv")
8332
8333
def main():
8334
- print "--------------- JPython test script start ------------"
8335
+ print("--------------- JPython test script start ------------")
8336
# load the data
8337
inputs = loadFromFiles()
8338
8339
8340
extracted = extract(inputs)
8341
8342
export(extracted)
8343
- print "---------------- JPython test script end -------------"
8344
+ print("---------------- JPython test script end -------------")
8345
8346
if __name__ == "__main__":
8347
main()
8348
--- a/tools/src/perfexplorer/examples/ExtractChildrenOperation/topXexclusive.py (original)
8349
--- b/tools/src/perfexplorer/examples/ExtractChildrenOperation/topXexclusive.py (refactored)
8350
8351
tmp = parameterMap.get("tauData")
8352
if tmp != None:
8353
tauData = tmp
8354
- print "Performance data: " + tauData
8355
+ print("Performance data: " + tauData)
8356
else:
8357
- print "TAU profile data path not specified... using current directory of profile.x.x.x files."
8358
+ print("TAU profile data path not specified... using current directory of profile.x.x.x files.")
8359
tmp = parameterMap.get("fileName")
8360
if tmp != None:
8361
functions = tmp
8362
- print "Output filename: " + functions
8363
+ print("Output filename: " + functions)
8364
else:
8365
- print "Output filename not specified... using " + functions
8366
+ print("Output filename not specified... using " + functions)
8367
tmp = parameterMap.get("threshold")
8368
if tmp != None:
8369
threshold = int(tmp)
8370
- print "Threshold: " + str(threshold)
8371
+ print("Threshold: " + str(threshold))
8372
else:
8373
- print "Threshold not specified... using " + str(threshold)
8374
+ print("Threshold not specified... using " + str(threshold))
8375
8376
def loadFile(fileName):
8377
global gprof
8378
8379
global threshold
8380
global gprof
8381
8382
- print "--------------- JPython test script start ------------"
8383
+ print("--------------- JPython test script start ------------")
8384
8385
# get the parameters
8386
getParameters()
8387
8388
input = loadFile(tauData)
8389
8390
# extract the non-callpath data
8391
- print "Extracting non-callpath data..."
8392
+ print("Extracting non-callpath data...")
8393
input.setIgnoreWarnings(True)
8394
extractor = ExtractNonCallpathEventOperation(input)
8395
extracted = extractor.processData().get(0)
8396
8397
# extract computation code (remove MPI)
8398
myEvents = ArrayList()
8399
- print "Filtering out MPI calls..."
8400
+ print("Filtering out MPI calls...")
8401
for event in extracted.getEvents():
8402
if not event.startswith("MPI_"):
8403
myEvents.add(event)
8404
8405
extracted = extractor.processData().get(0)
8406
8407
# generate statistics
8408
- print "Generating stats..."
8409
+ print("Generating stats...")
8410
doStats = BasicStatisticsOperation(extracted, False)
8411
mean = doStats.processData().get(BasicStatisticsOperation.MEAN)
8412
8413
8414
8415
for type in [AbstractResult.EXCLUSIVE, AbstractResult.INCLUSIVE]:
8416
# get the top X events
8417
- print "Extracting top events..."
8418
+ print("Extracting top events...")
8419
mean.setIgnoreWarnings(True)
8420
topper = TopXEvents(mean, mean.getTimeMetric(), type, threshold)
8421
topped = topper.processData().get(0)
8422
8423
# fix TAU names
8424
else:
8425
shortEvent = Utilities.shortenEventName(event)
8426
- print "%00.2f%%\t %d\t %s" % (topped.getDataPoint(0,event,topped.getTimeMetric(), type) / mean.getInclusive(0,mean.getMainEvent(),mean.getTimeMetric()) * 100.0, mean.getCalls(0,event), shortEvent)
8427
+ print("%00.2f%%\t %d\t %s" % (topped.getDataPoint(0,event,topped.getTimeMetric(), type) / mean.getInclusive(0,mean.getMainEvent(),mean.getTimeMetric()) * 100.0, mean.getCalls(0,event), shortEvent))
8428
myFile.write(shortEvent + "\n")
8429
8430
myFile.close()
8431
8432
- print "---------------- JPython test script end -------------"
8433
+ print("---------------- JPython test script end -------------")
8434
8435
if __name__ == "__main__":
8436
main()
8437
--- a/tools/src/perfexplorer/examples/ExtractChildrenOperation/topXofParent.py (original)
8438
--- b/tools/src/perfexplorer/examples/ExtractChildrenOperation/topXofParent.py (refactored)
8439
8440
tmp = parameterMap.get("tauData")
8441
if tmp != None:
8442
tauData = tmp
8443
- print "Performance data: " + tauData
8444
+ print("Performance data: " + tauData)
8445
else:
8446
- print "TAU profile data path not specified... using current directory of profile.x.x.x files."
8447
+ print("TAU profile data path not specified... using current directory of profile.x.x.x files.")
8448
tmp = parameterMap.get("fileName")
8449
if tmp != None:
8450
functions = tmp
8451
- print "Output filename: " + functions
8452
+ print("Output filename: " + functions)
8453
else:
8454
- print "Output filename not specified... using " + functions
8455
+ print("Output filename not specified... using " + functions)
8456
tmp = parameterMap.get("threshold")
8457
if tmp != None:
8458
threshold = int(tmp)
8459
- print "Threshold: " + str(threshold)
8460
+ print("Threshold: " + str(threshold))
8461
else:
8462
- print "Threshold not specified... using " + str(threshold)
8463
+ print("Threshold not specified... using " + str(threshold))
8464
tmp = parameterMap.get("parent")
8465
if tmp != None:
8466
parent = tmp
8467
- print "Parent function: " + functions
8468
+ print("Parent function: " + functions)
8469
else:
8470
- print "Parent not specified... exiting. "
8471
+ print("Parent not specified... exiting. ")
8472
System.exit(1)
8473
8474
def loadFile(fileName):
8475
8476
global threshold
8477
global gprof
8478
8479
- print "--------------- JPython test script start ------------"
8480
+ print("--------------- JPython test script start ------------")
8481
8482
# get the parameters
8483
getParameters()
8484
8485
input = loadFile(tauData)
8486
8487
# extract the non-callpath data
8488
- print "Extracting children of", parent, "..."
8489
+ print("Extracting children of", parent, "...")
8490
input.setIgnoreWarnings(True)
8491
extractor = ExtractChildrenOperation(input, parent)
8492
extracted = extractor.processData().get(0)
8493
8494
# extract computation code (remove MPI)
8495
myEvents = ArrayList()
8496
- print "Filtering out MPI calls..."
8497
+ print("Filtering out MPI calls...")
8498
for event in extracted.getEvents():
8499
if not event.startswith("MPI_"):
8500
myEvents.add(event)
8501
8502
extracted = extractor.processData().get(0)
8503
8504
# generate statistics
8505
- print "Generating stats..."
8506
+ print("Generating stats...")
8507
doStats = BasicStatisticsOperation(extracted, False)
8508
mean = doStats.processData().get(BasicStatisticsOperation.MEAN)
8509
doStats = BasicStatisticsOperation(input, False)
8510
8511
meanTotal = fullMean.getInclusive(0,fullMean.getMainEvent(),fullMean.getTimeMetric())
8512
8513
# get the top X events
8514
- print "Extracting top events by INCLUSIVE value..."
8515
+ print("Extracting top events by INCLUSIVE value...")
8516
mean.setIgnoreWarnings(True)
8517
topper = TopXEvents(mean, mean.getTimeMetric(), AbstractResult.INCLUSIVE, threshold)
8518
topped = topper.processData().get(0)
8519
8520
# fix TAU names
8521
else:
8522
shortEvent = Utilities.shortenEventName(event)
8523
- print "%00.2f%%\t %s" % (topped.getInclusive(0,event,topped.getTimeMetric()) / meanTotal * 100.0, event)
8524
+ print("%00.2f%%\t %s" % (topped.getInclusive(0,event,topped.getTimeMetric()) / meanTotal * 100.0, event))
8525
myFile.write(shortEvent + "\n")
8526
myFile.close()
8527
8528
- print "---------------- JPython test script end -------------"
8529
+ print("---------------- JPython test script end -------------")
8530
8531
if __name__ == "__main__":
8532
main()
8533
--- a/tools/src/perfexplorer/examples/GetTrialsFromDatabase/graph.py (original)
8534
--- b/tools/src/perfexplorer/examples/GetTrialsFromDatabase/graph.py (refactored)
8535
8536
grapher.processData()
8537
8538
def main():
8539
- print "--------------- JPython test script start ------------"
8540
+ print("--------------- JPython test script start ------------")
8541
# load the data
8542
inputs = loadFromDB()
8543
8544
drawGraph(inputs)
8545
- print "---------------- JPython test script end -------------"
8546
+ print("---------------- JPython test script end -------------")
8547
8548
if __name__ == "__main__":
8549
main()
8550
--- a/tools/src/perfexplorer/examples/GetTrialsFromDatabase/graph2.py (original)
8551
--- b/tools/src/perfexplorer/examples/GetTrialsFromDatabase/graph2.py (refactored)
8552
8553
grapher.processData()
8554
8555
def main():
8556
- print "--------------- JPython test script start ------------"
8557
+ print("--------------- JPython test script start ------------")
8558
# load the data
8559
inputs = loadFromDB()
8560
8561
drawGraph(inputs)
8562
- print "---------------- JPython test script end -------------"
8563
+ print("---------------- JPython test script end -------------")
8564
8565
if __name__ == "__main__":
8566
main()
8567
--- a/tools/src/perfexplorer/examples/GetTrialsFromDatabase/super.py (original)
8568
--- b/tools/src/perfexplorer/examples/GetTrialsFromDatabase/super.py (refactored)
8569
8570
grapher.processData()
8571
8572
def main():
8573
- print "--------------- JPython test script start ------------"
8574
+ print("--------------- JPython test script start ------------")
8575
# load the data
8576
inputs = loadFromDB()
8577
8578
8579
"""
8580
8581
drawGraph(inputs)
8582
- print "---------------- JPython test script end -------------"
8583
+ print("---------------- JPython test script end -------------")
8584
8585
if __name__ == "__main__":
8586
main()
8587
--- a/tools/src/perfexplorer/examples/GetTrialsFromDatabase/super2.py (original)
8588
--- b/tools/src/perfexplorer/examples/GetTrialsFromDatabase/super2.py (refactored)
8589
8590
metrics = ArrayList()
8591
metrics.add("WALL_CLOCK_TIME")
8592
baseline = TrialMeanResult(trials.get(0), None, None, False)
8593
- print baseline.getOriginalThreads()
8594
+ print(baseline.getOriginalThreads())
8595
extractor = TopXEvents(baseline, metrics.get(0), AbstractResult.INCLUSIVE, 1)
8596
topx = extractor.processData().get(0)
8597
inputs = ArrayList()
8598
8599
grapher.processData()
8600
8601
def main():
8602
- print "--------------- JPython test script start ------------"
8603
+ print("--------------- JPython test script start ------------")
8604
# load the data
8605
inputs = loadFromDB()
8606
8607
drawGraph(inputs)
8608
- print "---------------- JPython test script end -------------"
8609
+ print("---------------- JPython test script end -------------")
8610
8611
if __name__ == "__main__":
8612
main()
8613
--- a/tools/src/perfexplorer/examples/HardwareCounterBasicAnalysis/getTopX.py (original)
8614
--- b/tools/src/perfexplorer/examples/HardwareCounterBasicAnalysis/getTopX.py (refactored)
8615
8616
# extract computation code (remove MPI)
8617
if filterMPI:
8618
myEvents = ArrayList()
8619
- print "Filtering out MPI calls..."
8620
+ print("Filtering out MPI calls...")
8621
for event in inTrial.getEvents():
8622
if not event.startswith("MPI_"):
8623
myEvents.add(event)
8624
8625
myEvents = ArrayList()
8626
8627
# get the top X events
8628
- print "Extracting top events..."
8629
+ print("Extracting top events...")
8630
extracted.setIgnoreWarnings(True)
8631
if metric is None:
8632
metric = extracted.getTimeMetric()
8633
8634
if tmp > 0:
8635
exclusivePercent = topped.getDataPoint(0,event,metric, timerType) / tmp * 100.0
8636
if (exclusivePercent > 1.0):
8637
- print "%00.2f%%\t %d\t %s" % (exclusivePercent, extracted.getCalls(0,event), shortEvent)
8638
+ print("%00.2f%%\t %d\t %s" % (exclusivePercent, extracted.getCalls(0,event), shortEvent))
8639
myEvents.add(event)
8640
return myEvents
8641
--- a/tools/src/perfexplorer/examples/HardwareCounterBasicAnalysis/parsetrial.py (original)
8642
--- b/tools/src/perfexplorer/examples/HardwareCounterBasicAnalysis/parsetrial.py (refactored)
8643
8644
Utilities.setSession("local")
8645
files = []
8646
files.append(inputData)
8647
- print "Parsing files:", files, "..."
8648
+ print("Parsing files:", files, "...")
8649
trial = DataSourceResult(DataSourceResult.PPK, files, False)
8650
trial.setIgnoreWarnings(True)
8651
- print "Computing stats..."
8652
+ print("Computing stats...")
8653
stats = BasicStatisticsOperation(trial)
8654
mean = stats.processData().get(BasicStatisticsOperation.MEAN)
8655
extractor = ExtractNonCallpathEventOperation(mean)
8656
--- a/tools/src/perfexplorer/examples/HardwareCounterBasicAnalysis/process.py (original)
8657
--- b/tools/src/perfexplorer/examples/HardwareCounterBasicAnalysis/process.py (refactored)
8658
8659
inTrial.setIgnoreWarnings(True)
8660
8661
mainEvent = inTrial.getMainEvent()
8662
- print "Main Event:", mainEvent
8663
+ print("Main Event:", mainEvent)
8664
8665
#metadata = TrialThreadMetadata(inTrial)
8666
#for name in metadata.getFields():
8667
# print name, ":", metadata.getNameValue(0,name)
8668
8669
# cpuMHz = metadata.getNameValue(0,"CPU MHz")
8670
- print "Using CPU MHz", cpuMHz
8671
+ print("Using CPU MHz", cpuMHz)
8672
8673
for metric in inTrial.getMetrics():
8674
- print "Found Metric:", metric
8675
+ print("Found Metric:", metric)
8676
8677
return inTrial
8678
8679
8680
global cpuMHz
8681
global mainEvent
8682
# derive the Cycles
8683
- print "Computing Cycles from TIME..."
8684
+ print("Computing Cycles from TIME...")
8685
deriver = ScaleMetricOperation(inTrial, cpuMHz, "TIME", DeriveMetricOperation.MULTIPLY)
8686
deriver.setNewName("PAPI_TOT_CYC")
8687
tmp = deriver.processData();
8688
8689
merger = MergeTrialsOperation(tmp)
8690
merged = merger.processData().get(0)
8691
8692
- print "Computing Cycles per Instruction..."
8693
+ print("Computing Cycles per Instruction...")
8694
deriver = DeriveMetricOperation(merged, "PAPI_TOT_CYC", "PAPI_TOT_INS", DeriveMetricOperation.DIVIDE)
8695
deriver.setNewName("TOT_CYC/TOT_INS")
8696
tmp = deriver.processData()
8697
8698
- print "Computing FLOPs per Instruction..."
8699
+ print("Computing FLOPs per Instruction...")
8700
deriver = DeriveMetricOperation(merged, "PAPI_FP_INS", "PAPI_TOT_INS", DeriveMetricOperation.DIVIDE)
8701
deriver.setNewName("FP_INS/TOT_INS")
8702
tmp2 = deriver.processData().get(0)
8703
8704
deriver.setNewName("% FP_INS ")
8705
tmp.add(deriver.processData().get(0))
8706
8707
- print "Computing Branch Mispredictions per Instruction..."
8708
+ print("Computing Branch Mispredictions per Instruction...")
8709
deriver = DeriveMetricOperation(merged, "PAPI_BR_MSP", "PAPI_TOT_INS", DeriveMetricOperation.DIVIDE)
8710
deriver.setNewName("BR_MSP/TOT_INS")
8711
tmp.add(deriver.processData().get(0))
8712
8713
- print "Computing Cache Misses per Instruction..."
8714
+ print("Computing Cache Misses per Instruction...")
8715
deriver = DeriveMetricOperation(merged, "PAPI_" + cacheMissMetric, "PAPI_TOT_INS", DeriveMetricOperation.DIVIDE)
8716
deriver.setNewName(cacheMissMetric + "/TOT_INS")
8717
tmp.add(deriver.processData().get(0))
8718
8719
merged = merger.processData().get(0)
8720
8721
for metric in merged.getMetrics():
8722
- print "Found Metric:", metric, merged.getInclusive(0,mainEvent,metric)
8723
+ print("Found Metric:", metric, merged.getInclusive(0,mainEvent,metric))
8724
8725
return merged
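# A minimal plain-Python restatement of the derivations above, with hypothetical counter
# values.  Assumptions: TIME is in microseconds (so cycles ~= microseconds * MHz, matching
# the ScaleMetricOperation MULTIPLY step) and the cache-miss metric is PAPI_L1_TCM.
cpu_mhz = 2400.0                   # clock rate; the script reads this from trial metadata
counters = {
    "TIME": 2.5e6,                 # microseconds spent in one event
    "PAPI_TOT_INS": 4.0e9,         # instructions retired
    "PAPI_FP_INS": 1.2e9,          # floating-point instructions
    "PAPI_L1_TCM": 3.0e7,          # L1 total cache misses
}
counters["PAPI_TOT_CYC"] = counters["TIME"] * cpu_mhz          # derive cycles from TIME

cpi = counters["PAPI_TOT_CYC"] / counters["PAPI_TOT_INS"]      # TOT_CYC/TOT_INS
fp_per_ins = counters["PAPI_FP_INS"] / counters["PAPI_TOT_INS"]
tcm_per_ins = counters["PAPI_L1_TCM"] / counters["PAPI_TOT_INS"]
print("TOT_CYC/TOT_INS: %.2f  FP_INS/TOT_INS: %.3f  L1_TCM/TOT_INS: %.2e"
      % (cpi, fp_per_ins, tcm_per_ins))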
8726
8727
8728
global mainEvent
8729
for metric in derived.getMetrics():
8730
mainValue = derived.getInclusive(0,mainEvent,metric)
8731
- print "\n===", metric, "( ", mainValue, ") ==="
8732
+ print("\n===", metric, "( ", mainValue, ") ===")
8733
for event in events:
8734
shortName = Utilities.shortenEventName(event)
8735
eventValue = derived.getExclusive(0,event,metric)
8736
tmp = 1.0
8737
if mainValue > 0:
8738
tmp = eventValue/mainValue
8739
- print "%s\t%0.3f\t%00.2f%%" % (shortName, eventValue, (tmp * 100.0))
8740
+ print("%s\t%0.3f\t%00.2f%%" % (shortName, eventValue, (tmp * 100.0)))
8741
8742
#########################################################################################
8743
8744
8745
fact = FactWrapper("Overall", "CPI Stack", cpiStack)
8746
handle = ruleHarness.assertObject(fact)
8747
fact.setFactHandle(handle)
8748
- print verbose
8749
+ print(verbose)
8750
if verbose == "yes":
8751
factDebug = FactWrapper("Dump CPI", "CPI Stack", cpiStack)
8752
handleDebug = ruleHarness.assertObject(factDebug)
8753
8754
global mainEvent
8755
index = 1
8756
for event in events:
8757
- print index, Utilities.shortenEventName(event)
8758
+ print(index, Utilities.shortenEventName(event))
8759
index = index + 1
8760
- print ""
8761
+ print("")
8762
8763
index = 1
8764
- print "Timer",
8765
+ print("Timer", end=' ')
8766
for metric in raw.getMetrics():
8767
- print "\t", metric,
8768
- print ""
8769
+ print("\t", metric, end=' ')
8770
+ print("")
8771
for event in events:
8772
- print index, "\t",
8773
+ print(index, "\t", end=' ')
8774
for metric in raw.getMetrics():
8775
value = raw.getExclusive(0,event,metric) / raw.getInclusive(0,mainEvent,metric)
8776
if value < 0.1:
8777
- print " %0.2f %%\t\t" % ((value * 100.0)),
8778
+ print(" %0.2f %%\t\t" % ((value * 100.0)), end=' ')
8779
else:
8780
- print "%0.2f %%\t\t" % ((value * 100.0)),
8781
+ print("%0.2f %%\t\t" % ((value * 100.0)), end=' ')
8782
index = index + 1
8783
- print ""
8784
- print ""
8785
-
8786
- print "Timer",
8787
+ print("")
8788
+ print("")
8789
+
8790
+ print("Timer", end=' ')
8791
for metric in derived.getMetrics():
8792
- print "\t", metric,
8793
- print ""
8794
+ print("\t", metric, end=' ')
8795
+ print("")
8796
8797
index = 1
8798
for event in events:
8799
- print index, "\t",
8800
+ print(index, "\t", end=' ')
8801
for metric in derived.getMetrics():
8802
value = derived.getExclusive(0,event,metric)
8803
if "%" in metric:
8804
if value < 10.0:
8805
- print " %0.2f %%\t\t" % (value),
8806
+ print(" %0.2f %%\t\t" % (value), end=' ')
8807
else:
8808
- print "%0.2f %%\t\t" % (value),
8809
+ print("%0.2f %%\t\t" % (value), end=' ')
8810
else:
8811
- print "%0.5f\t\t" % (value),
8812
+ print("%0.5f\t\t" % (value), end=' ')
8813
index = index + 1
8814
- print ""
8815
-
8816
- print "Avg.\t",
8817
+ print("")
8818
+
8819
+ print("Avg.\t", end=' ')
8820
for metric in derived.getMetrics():
8821
value = derived.getInclusive(0,mainEvent,metric)
8822
if "%" in metric:
8823
if value < 10.0:
8824
- print " %0.2f %%\t\t" % (value),
8825
+ print(" %0.2f %%\t\t" % (value), end=' ')
8826
else:
8827
- print "%0.2f %%\t\t" % (value),
8828
+ print("%0.2f %%\t\t" % (value), end=' ')
8829
else:
8830
- print "%0.5f\t\t" % (value),
8831
- print "\n"
8832
+ print("%0.5f\t\t" % (value), end=' ')
8833
+ print("\n")
8834
8835
#########################################################################################
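# The conversions above map Python 2's trailing-comma print statements (which suppress the
# newline) to print(..., end=' ').  A minimal sketch of the Python 3 form used for the
# table rows, with hypothetical metric names:
metrics = ["TIME", "PAPI_TOT_INS", "PAPI_L1_TCM"]
print("Timer", end=' ')
for metric in metrics:
    print("\t", metric, end=' ')
print("")                          # terminate the header row with a newline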
8836
8837
def main(argv):
8838
- print "--------------- JPython test script start ------------"
8839
+ print("--------------- JPython test script start ------------")
8840
getParameters()
8841
global fractionThreshold
8842
inTrial = initialize(inputData)
8843
8844
derived = computeDerivedMetrics(inTrial)
8845
dump(inTrial, derived, topXevents)
8846
checkRatios(derived, topXevents)
8847
- print "\n--- Examining Time Top Events ---"
8848
+ print("\n--- Examining Time Top Events ---")
8849
topXevents = getTopX.getTopX(inTrial, 10, AbstractResult.EXCLUSIVE, "TIME", False)
8850
- print "\n--- Examining", cacheMissMetric, "Top Events ---"
8851
+ print("\n--- Examining", cacheMissMetric, "Top Events ---")
8852
topXevents = getTopX.getTopX(inTrial, 10, AbstractResult.EXCLUSIVE, "PAPI_" + cacheMissMetric)
8853
- print "\n--- Examining BR MSP Top Events ---"
8854
+ print("\n--- Examining BR MSP Top Events ---")
8855
topXevents = getTopX.getTopX(inTrial, 10, AbstractResult.EXCLUSIVE, "PAPI_BR_MSP")
8856
- print "\n--- Examining FP INS Top Events ---"
8857
+ print("\n--- Examining FP INS Top Events ---")
8858
topXevents = getTopX.getTopX(inTrial, 10, AbstractResult.EXCLUSIVE, "PAPI_FP_INS")
8859
- print "\n---------------- JPython test script end -------------"
8860
+ print("\n---------------- JPython test script end -------------")
8861
8862
#########################################################################################
8863
8864
--- a/tools/src/perfexplorer/examples/HardwareCounterBasicAnalysis/samples.py (original)
8865
--- b/tools/src/perfexplorer/examples/HardwareCounterBasicAnalysis/samples.py (refactored)
8866
8867
def preProcessSamples(inTrial):
8868
# keep the main timer
8869
mainEvent = inTrial.getMainEvent()
8870
- print "Pre-processing Samples..."
8871
+ print("Pre-processing Samples...")
8872
haveSamples = False
8873
aggregators = ArrayList()
8874
newEvents = DefaultResult(inTrial)
8875
--- a/tools/src/perfexplorer/examples/HeatMapSamples/heatmap.py (original)
8876
--- b/tools/src/perfexplorer/examples/HeatMapSamples/heatmap.py (refactored)
8877
8878
Utilities.setSession("local")
8879
files = []
8880
files.append(inputData)
8881
- print "Parsing files:", files, "..."
8882
+ print("Parsing files:", files, "...")
8883
trial = DataSourceResult(DataSourceResult.PPK, files, False)
8884
mainEvent = trial.getMainEvent()
8885
trial.setIgnoreWarnings(True)
8886
8887
# extract computation code (remove MPI)
8888
if filterMPI:
8889
myEvents = ArrayList()
8890
- print "Filtering out MPI calls..."
8891
+ print("Filtering out MPI calls...")
8892
for event in inTrial.getEvents():
8893
if not event.startswith("MPI_"):
8894
myEvents.add(event)
8895
8896
myEvents = ArrayList()
8897
8898
# get the top X events
8899
- print "Extracting top events..."
8900
+ print("Extracting top events...")
8901
extracted.setIgnoreWarnings(True)
8902
if metric is None:
8903
metric = extracted.getTimeMetric()
8904
8905
shortEvent = Utilities.shortenEventName(event)
8906
exclusivePercent = topped.getDataPoint(0,event,metric, timerType) / extracted.getInclusive(0,extracted.getMainEvent(),metric) * 100.0
8907
if (exclusivePercent > 1.0):
8908
- print "%00.2f%%\t %d\t %s" % (exclusivePercent, extracted.getCalls(0,event), shortEvent)
8909
+ print("%00.2f%%\t %d\t %s" % (exclusivePercent, extracted.getCalls(0,event), shortEvent))
8910
myEvents.add(event)
8911
return myEvents
8912
#########################################################################################
8913
8914
parameterMap = PerfExplorerModel.getModel().getScriptParameters()
8915
keys = parameterMap.keySet()
8916
for key in keys:
8917
- print key, parameterMap.get(key)
8918
+ print(key, parameterMap.get(key))
8919
inputData = parameterMap.get("inputData")
8920
8921
#########################################################################################
8922
8923
def initialize(inputData):
8924
global cpuMHz
8925
global mainEvent
8926
- print "Parsing: ", inputData
8927
+ print("Parsing: ", inputData)
8928
inTrial = parsetrial(inputData)
8929
inTrial.setIgnoreWarnings(True)
8930
8931
#mainEvent = inTrial.getMainEvent()
8932
- print "Main Event:", mainEvent
8933
+ print("Main Event:", mainEvent)
8934
8935
for metric in inTrial.getMetrics():
8936
- print "Found Metric:", metric
8937
+ print("Found Metric:", metric)
8938
8939
return inTrial
8940
8941
8942
global cpuMHz
8943
global mainEvent
8944
# derive the Cycles
8945
- print "Computing Cycles from TIME..."
8946
+ print("Computing Cycles from TIME...")
8947
deriver = ScaleMetricOperation(inTrial, cpuMHz, "TIME", DeriveMetricOperation.MULTIPLY)
8948
deriver.setNewName("PAPI_TOT_CYC")
8949
tmp = deriver.processData();
8950
8951
merger = MergeTrialsOperation(tmp)
8952
merged = merger.processData().get(0)
8953
8954
- print "Computing Cycles per Instruction..."
8955
+ print("Computing Cycles per Instruction...")
8956
deriver = DeriveMetricOperation(merged, "PAPI_TOT_CYC", "PAPI_TOT_INS", DeriveMetricOperation.DIVIDE)
8957
deriver.setNewName("TOT_CYC/TOT_INS")
8958
tmp = deriver.processData()
8959
8960
- print "Computing FLOPs per Instruction..."
8961
+ print("Computing FLOPs per Instruction...")
8962
deriver = DeriveMetricOperation(merged, "PAPI_FP_INS", "PAPI_TOT_INS", DeriveMetricOperation.DIVIDE)
8963
deriver.setNewName("FP_INS/TOT_INS")
8964
tmp2 = deriver.processData().get(0)
8965
8966
#deriver.setNewName("BR_MSP/TOT_INS")
8967
#tmp.add(deriver.processData().get(0))
8968
8969
- print "Computing Cache Misses per Instruction..."
8970
+ print("Computing Cache Misses per Instruction...")
8971
deriver = DeriveMetricOperation(merged, "PAPI_L1_TCM", "PAPI_TOT_INS", DeriveMetricOperation.DIVIDE)
8972
deriver.setNewName("L1_TCM/TOT_INS")
8973
tmp.add(deriver.processData().get(0))
8974
8975
merged = merger.processData().get(0)
8976
8977
for metric in merged.getMetrics():
8978
- print "Found Metric:", metric, merged.getInclusive(0,mainEvent,metric)
8979
+ print("Found Metric:", metric, merged.getInclusive(0,mainEvent,metric))
8980
8981
return merged
8982
8983
8984
global mainEvent
8985
for metric in derived.getMetrics():
8986
mainValue = derived.getInclusive(0,mainEvent,metric)
8987
- print "===", metric, "( ", mainValue, ") ==="
8988
+ print("===", metric, "( ", mainValue, ") ===")
8989
for event in events:
8990
shortName = Utilities.shortenEventName(event)
8991
eventValue = derived.getExclusive(0,event,metric)
8992
- print "%s\t%0.3f\t%00.2f%%" % (shortName, eventValue, ((eventValue / mainValue) * 100.0))
8993
+ print("%s\t%0.3f\t%00.2f%%" % (shortName, eventValue, ((eventValue / mainValue) * 100.0)))
8994
8995
#########################################################################################
8996
8997
8998
global mainEvent
8999
index = 1
9000
for event in events:
9001
- print index, Utilities.shortenEventName(event)
9002
+ print(index, Utilities.shortenEventName(event))
9003
index = index + 1
9004
- print ""
9005
+ print("")
9006
9007
index = 1
9008
- print "Timer",
9009
+ print("Timer", end=' ')
9010
for metric in raw.getMetrics():
9011
- print "\t", metric,
9012
- print ""
9013
+ print("\t", metric, end=' ')
9014
+ print("")
9015
for event in events:
9016
- print index, "\t",
9017
+ print(index, "\t", end=' ')
9018
for metric in raw.getMetrics():
9019
value = raw.getExclusive(0,event,metric) / raw.getInclusive(0,mainEvent,metric)
9020
if value < 0.1:
9021
- print " %0.2f %%\t\t" % ((value * 100.0)),
9022
+ print(" %0.2f %%\t\t" % ((value * 100.0)), end=' ')
9023
else:
9024
- print "%0.2f %%\t\t" % ((value * 100.0)),
9025
+ print("%0.2f %%\t\t" % ((value * 100.0)), end=' ')
9026
index = index + 1
9027
- print ""
9028
- print ""
9029
-
9030
- print "Timer",
9031
+ print("")
9032
+ print("")
9033
+
9034
+ print("Timer", end=' ')
9035
for metric in derived.getMetrics():
9036
- print "\t", metric,
9037
- print ""
9038
+ print("\t", metric, end=' ')
9039
+ print("")
9040
9041
index = 1
9042
for event in events:
9043
- print index, "\t",
9044
+ print(index, "\t", end=' ')
9045
for metric in derived.getMetrics():
9046
value = derived.getExclusive(0,event,metric)
9047
if "%" in metric:
9048
if value < 10.0:
9049
- print " %0.2f %%\t\t" % (value),
9050
+ print(" %0.2f %%\t\t" % (value), end=' ')
9051
else:
9052
- print "%0.2f %%\t\t" % (value),
9053
+ print("%0.2f %%\t\t" % (value), end=' ')
9054
else:
9055
- print "%0.5f\t\t" % (value),
9056
+ print("%0.5f\t\t" % (value), end=' ')
9057
index = index + 1
9058
- print ""
9059
-
9060
- print "Avg.\t",
9061
+ print("")
9062
+
9063
+ print("Avg.\t", end=' ')
9064
for metric in derived.getMetrics():
9065
value = derived.getInclusive(0,mainEvent,metric)
9066
if "%" in metric:
9067
if value < 10.0:
9068
- print " %0.2f %%\t\t" % (value),
9069
+ print(" %0.2f %%\t\t" % (value), end=' ')
9070
else:
9071
- print "%0.2f %%\t\t" % (value),
9072
+ print("%0.2f %%\t\t" % (value), end=' ')
9073
else:
9074
- print "%0.5f\t\t" % (value),
9075
- print "\n"
9076
+ print("%0.5f\t\t" % (value), end=' ')
9077
+ print("\n")
9078
9079
#########################################################################################
9080
9081
def main(argv):
9082
- print "--------------- JPython test script start ------------"
9083
+ print("--------------- JPython test script start ------------")
9084
getParameters()
9085
global fractionThreshold
9086
global inputData
9087
inTrial = initialize(inputData)
9088
- print "Making basic stats..."
9089
+ print("Making basic stats...")
9090
statmaker = BasicStatisticsOperation(inTrial)
9091
stats = statmaker.processData().get(BasicStatisticsOperation.MEAN)
9092
- print "Extracting Flat Profile..."
9093
+ print("Extracting Flat Profile...")
9094
extractor = ExtractNonCallpathEventOperation(stats)
9095
flat = extractor.processData().get(0)
9096
- print "Extracting Callpath Profile..."
9097
+ print("Extracting Callpath Profile...")
9098
extractor = ExtractCallpathEventOperation(stats)
9099
callpath = extractor.processData().get(0)
9100
- print "Finding CONTEXT events..."
9101
+ print("Finding CONTEXT events...")
9102
contexts = ArrayList()
9103
samples = ArrayList()
9104
for event in flat.getEvents():
9105
9106
gp.write('set ylabel "SAMPLES"\n')
9107
gp.write('set tic scale 0\n')
9108
gp.write('set palette rgbformulae 22,13,10\n')
9109
- print "contexts:", contexts.size()
9110
+ print("contexts:", contexts.size())
9111
gp.write("set xtics (")
9112
for i in range(contexts.size()):
9113
if i > 0:
9114
9115
a = Utilities.shortenEventName(contexts[i]).replace("[CONTEXT] ","")
9116
gp.write("\"%s\" %d" % (a, i))
9117
gp.write(") rotate by 45 right\n\n")
9118
- print "samples:", samples.size()
9119
+ print("samples:", samples.size())
9120
gp.write("set ytics (")
9121
for i in range(samples.size()):
9122
if i > 0:
9123
--- a/tools/src/perfexplorer/examples/LoadImbalance/loadImbalanceCauses.py (original)
9124
--- b/tools/src/perfexplorer/examples/LoadImbalance/loadImbalanceCauses.py (refactored)
9125
9126
tmp = parameterMap.get("tauData")
9127
if tmp != None:
9128
tauData = tmp
9129
- print "Performance data: " + tauData
9130
+ print("Performance data: " + tauData)
9131
else:
9132
- print "TAU profile data path not specified... using current directory of profile.x.x.x files."
9133
+ print("TAU profile data path not specified... using current directory of profile.x.x.x files.")
9134
9135
tmp = parameterMap.get("rules")
9136
if tmp != None:
9137
ruleFile = tmp
9138
else:
9139
- print "Rule file not specified. Using default."
9140
- print "Rules: " + ruleFile
9141
+ print("Rule file not specified. Using default.")
9142
+ print("Rules: " + ruleFile)
9143
9144
9145
def loadFile(fileName):
9146
9147
def main():
9148
global tauData
9149
global ruleFile
9150
- print "--------------- JPython test script start ------------"
9151
- print "--- Looking for load imbalances --- "
9152
+ print("--------------- JPython test script start ------------")
9153
+ print("--- Looking for load imbalances --- ")
9154
9155
# get the parameters
9156
getParameters()
9157
9158
# create a rulebase for processing
9159
- print "Loading Rules..."
9160
+ print("Loading Rules...")
9161
ruleHarness = RuleHarness.useGlobalRules(ruleFile)
9162
9163
# load the trial
9164
- print "loading the data..."
9165
+ print("loading the data...")
9166
9167
# load the data
9168
trial = loadFile(tauData)
9169
9170
maxs = stats.get(BasicStatisticsOperation.MAX)
9171
totals = stats.get(BasicStatisticsOperation.TOTAL)
9172
mainEvent = means.getMainEvent()
9173
- print "Main Event: ", mainEvent
9174
+ print("Main Event: ", mainEvent)
9175
9176
# get the ratio between average and max
9177
ratioMaker = RatioOperation(means, maxs)
9178
9179
for event in ratios.getEvents():
9180
for metric in ratios.getMetrics():
9181
MeanEventFact.evaluateLoadBalance(means, ratios, event, metric)
9182
- print
9183
+ print()
9184
9185
# add the callpath event names to the facts in the rulebase.
9186
9187
9188
# process the rules
9189
RuleHarness.getInstance().processRules()
9190
9191
- print "---------------- JPython test script end -------------"
9192
+ print("---------------- JPython test script end -------------")
9193
9194
9195
if __name__ == "__main__":
9196
--- a/tools/src/perfexplorer/examples/LoadImbalance/loadimbalance.py (original)
9197
--- b/tools/src/perfexplorer/examples/LoadImbalance/loadimbalance.py (refactored)
9198
9199
9200
###################################################################
9201
9202
-print "--------------- JPython test script start ------------"
9203
-print "--- Looking for load imbalances --- "
9204
+print("--------------- JPython test script start ------------")
9205
+print("--- Looking for load imbalances --- ")
9206
9207
# load the trial
9208
-print "loading the data..."
9209
+print("loading the data...")
9210
9211
Utilities.setSession("local")
9212
trials = ArrayList()
9213
9214
files.append(str(i) + ".ppk")
9215
trials.add(DataSourceResult(DataSourceResult.PPK, files, False))
9216
9217
-print "Procs\t Type\t\t AVG\t MAX\t MIN\t STDEV\t AVG/MAX"
9218
+print("Procs\t Type\t\t AVG\t MAX\t MIN\t STDEV\t AVG/MAX")
9219
9220
ratioList = ArrayList()
9221
meanList = ArrayList()
9222
9223
stddev = stddevs.getExclusive(thread, event, metric)
9224
ratio = ratios.getExclusive(thread, event, metric)
9225
9226
- print "%d\t %s\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % (trial.getThreads().size(), event, mean*100, max*100, min*100, stddev*100, ratio*100)
9227
+ print("%d\t %s\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % (trial.getThreads().size(), event, mean*100, max*100, min*100, stddev*100, ratio*100))
9228
9229
ratioList.add(ratios)
9230
meanList.add(means)
9231
9232
minList.add(mins)
9233
stddevList.add(stddevs)
9234
9235
-print
9236
+print()
9237
9238
# graph the ratios, showing the inefficiency as the app scales
9239
9240
9241
drawit(meanList, "Mean (AVG), 1.0 is better", "Mean")
9242
drawit(ratioList, "Load Balance (AVG/MAX), 1.0 is better", "Load Balance")
9243
9244
-print "---------------- JPython test script end -------------"
9245
+print("---------------- JPython test script end -------------")
9246
--- a/tools/src/perfexplorer/examples/MergedExperimentsChart/graph.py (original)
9247
--- b/tools/src/perfexplorer/examples/MergedExperimentsChart/graph.py (refactored)
9248
9249
grapher.processData()
9250
9251
def main():
9252
- print "--------------- JPython test script start ------------"
9253
+ print("--------------- JPython test script start ------------")
9254
# load the data
9255
#inputs = loadFromFiles()
9256
inputs = loadFromDB()
9257
9258
before.putInclusive(thread, event + " : blocksize 10", metric, after.getInclusive(thread, event, metric))
9259
9260
drawGraph(extracted)
9261
- print "---------------- JPython test script end -------------"
9262
+ print("---------------- JPython test script end -------------")
9263
9264
if __name__ == "__main__":
9265
main()
9266
--- a/tools/src/perfexplorer/examples/MetadataCorrelation/correlate.py (original)
9267
--- b/tools/src/perfexplorer/examples/MetadataCorrelation/correlate.py (refactored)
9268
9269
tmp = parameterMap.get("tauData")
9270
if tmp != None:
9271
tauData = tmp
9272
- print "Performance data: " + tauData
9273
+ print("Performance data: " + tauData)
9274
else:
9275
- print "TAU profile data path not specified... using current directory of profile.x.x.x files."
9276
+ print("TAU profile data path not specified... using current directory of profile.x.x.x files.")
9277
9278
def loadFile(fileName):
9279
# load the trial
9280
9281
neighbors.close()
9282
9283
def main():
9284
- print "--------------- JPython test script start ------------"
9285
+ print("--------------- JPython test script start ------------")
9286
# load the data
9287
global tauData
9288
global path
9289
9290
9291
dumpData(inputData, metadata)
9292
9293
- print "---------------- JPython test script end -------------"
9294
+ print("---------------- JPython test script end -------------")
9295
9296
if __name__ == "__main__":
9297
main()
9298
--- a/tools/src/perfexplorer/examples/MicroLoadImbalance/loadimbalanceTime.py (original)
9299
--- b/tools/src/perfexplorer/examples/MicroLoadImbalance/loadimbalanceTime.py (refactored)
9300
9301
9302
###################################################################
9303
9304
-print "--------------- JPython test script start ------------"
9305
-print "--- Looking for load imbalances --- "
9306
+print("--------------- JPython test script start ------------")
9307
+print("--- Looking for load imbalances --- ")
9308
9309
# load the trial
9310
-print "loading the data..."
9311
+print("loading the data...")
9312
9313
Utilities.setSession("local")
9314
files = []
9315
files.append("justtime.ppk")
9316
trial = DataSourceResult(DataSourceResult.PPK, files, False)
9317
9318
-print "\nProcs\t Type\t\t\t AVG\t MAX\t MIN\t STDEV\t AVG/MAX"
9319
+print("\nProcs\t Type\t\t\t AVG\t MAX\t MIN\t STDEV\t AVG/MAX")
9320
9321
trial.setIgnoreWarnings(True)
9322
9323
9324
min = mins.getExclusive(thread, event, metric)
9325
stddev = stddevs.getExclusive(thread, event, metric)
9326
ratio = ratios.getExclusive(thread, event, metric)
9327
-print "%d\t %s\t\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%" % (trial.getThreads().size(), event, mean*100, max*100, min*100, stddev*100, ratio*100)
9328
+print("%d\t %s\t\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%" % (trial.getThreads().size(), event, mean*100, max*100, min*100, stddev*100, ratio*100))
9329
event = kernNonMPI
9330
mean = means.getExclusive(thread, event, metric)
9331
max = maxs.getExclusive(thread, event, metric)
9332
min = mins.getExclusive(thread, event, metric)
9333
stddev = stddevs.getExclusive(thread, event, metric)
9334
ratio = ratios.getExclusive(thread, event, metric)
9335
-print "%d\t %s\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\n" % (trial.getThreads().size(), event, mean*100, max*100, min*100, stddev*100, ratio*100)
9336
-print "Communication Efficiency (kernel only):\t%.3f" % max
9337
-print "Load Balance (kernel only):\t\t%.3f" % ratio
9338
+print("%d\t %s\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\n" % (trial.getThreads().size(), event, mean*100, max*100, min*100, stddev*100, ratio*100))
9339
+print("Communication Efficiency (kernel only):\t%.3f" % max)
9340
+print("Load Balance (kernel only):\t\t%.3f" % ratio)
9341
#print "Average IPC (kernel only):\t\t%.10f" % kernAvgIPC
9342
-print "Total Instructions(kernel only):\t%.3f" % kernAvgInstructions
9343
-
9344
-print "\nNext Step: Computing Micro Load Imbalance.\n"
9345
-
9346
-
9347
-print "Searching for loop events..."
9348
+print("Total Instructions(kernel only):\t%.3f" % kernAvgInstructions)
9349
+
9350
+print("\nNext Step: Computing Micro Load Imbalance.\n")
9351
+
9352
+
9353
+print("Searching for loop events...")
9354
# get a list of the loop names
9355
metric = "TIME"
9356
loopPrefix = "loop ["
9357
9358
if event.find(loopPrefix) > -1:
9359
loopNames.add(event)
9360
9361
-print "Extracting callpath events..."
9362
+print("Extracting callpath events...")
9363
# extract the callpath events
9364
extractor = ExtractCallpathEventOperation(trial)
9365
extracted = extractor.processData().get(0)
9366
9367
-print "Generating Statistics..."
9368
+print("Generating Statistics...")
9369
statMaker = BasicStatisticsOperation(extracted, False)
9370
stats = statMaker.processData()
9371
stddevs = stats.get(BasicStatisticsOperation.STDDEV)
9372
9373
maxs = stats.get(BasicStatisticsOperation.MAX)
9374
mins = stats.get(BasicStatisticsOperation.MIN)
9375
9376
-print "Iterating over main loop..."
9377
-print "Loop ID:\t RealCommEff\t uLB\t\t CommEff"
9378
+print("Iterating over main loop...")
9379
+print("Loop ID:\t RealCommEff\t uLB\t\t CommEff")
9380
# iterate over the iterations
9381
totalLoopTimeIdeal = 0
9382
totalLoopTime = 0
9383
9384
realCommEff = loopTimeIdeal / loopTime
9385
uLB = loopTimeIdeal / maxTi
9386
commEff = realCommEff * uLB
9387
- print "%s:\t %.5f\t %.5f\t %.5f" % (loopName, realCommEff, uLB, commEff)
9388
+ print("%s:\t %.5f\t %.5f\t %.5f" % (loopName, realCommEff, uLB, commEff))
9389
totalLoopTimeIdeal = totalLoopTimeIdeal + loopTimeIdeal
9390
totalLoopTime = totalLoopTime + loopTime
9391
totalMaxTi = totalMaxTi + maxTi
9392
9393
uLB = totalLoopTimeIdeal / totalMaxTi
9394
#uLB = totalMaxTi / totalLoopTimeIdeal
9395
commEff = realCommEff * uLB
9396
-print "\n\nLoop ID:\t RealCommEff\t uLB\t\t CommEff"
9397
-print "Total: \t\t %.5f\t %.5f\t %.5f\n" % (realCommEff, uLB, commEff)
9398
+print("\n\nLoop ID:\t RealCommEff\t uLB\t\t CommEff")
9399
+print("Total: \t\t %.5f\t %.5f\t %.5f\n" % (realCommEff, uLB, commEff))
9400
9401
-print "---------------- JPython test script end -------------"
9402
+print("---------------- JPython test script end -------------")
9403
--- a/tools/src/perfexplorer/examples/MicroLoadImbalance/microLoadImbalance.py (original)
9404
--- b/tools/src/perfexplorer/examples/MicroLoadImbalance/microLoadImbalance.py (refactored)
9405
9406
tmp = parameterMap.get("tauData")
9407
if tmp != None:
9408
tauData = tmp
9409
- print "Performance data: " + tauData
9410
+ print("Performance data: " + tauData)
9411
else:
9412
- print "TAU profile data path not specified... using current directory of profile.x.x.x files."
9413
+ print("TAU profile data path not specified... using current directory of profile.x.x.x files.")
9414
9415
def loadFile(fileName):
9416
global gprof
9417
9418
#print mean, max, min, stddev, ratio
9419
9420
if callpath:
9421
- print "%s\t %d\t %ls\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % (mainEvent, trial.getThreads().size(), event, mean*100, max*100, min*100, stddev*100, ratio*100)
9422
+ print("%s\t %d\t %ls\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % (mainEvent, trial.getThreads().size(), event, mean*100, max*100, min*100, stddev*100, ratio*100))
9423
#print "%s\t %d\t %s\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % (mainEvent, trial.getThreads().size(), event, mean*100, 100, 100, 100, 100)
9424
else:
9425
- print "%d\t %s\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % (trial.getThreads().size(), event, mean*100, max*100, min*100, stddev*100, ratio*100)
9426
+ print("%d\t %s\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % (trial.getThreads().size(), event, mean*100, max*100, min*100, stddev*100, ratio*100))
9427
9428
return mean, max, min, stddev
9429
9430
9431
def main():
9432
global tauData
9433
9434
- print "--------------- JPython test script start ------------"
9435
- print "--- Looking for load imbalances --- "
9436
+ print("--------------- JPython test script start ------------")
9437
+ print("--- Looking for load imbalances --- ")
9438
9439
# get the parameters
9440
getParameters()
9441
9442
# load the data
9443
trial = loadFile(tauData)
9444
9445
- print "Procs\t Type\t\t AVG\t MAX\t MIN\t STDEV\t AVG/MAX"
9446
+ print("Procs\t Type\t\t AVG\t MAX\t MIN\t STDEV\t AVG/MAX")
9447
computeLoadBalance(trial, False)
9448
9449
- print
9450
+ print()
9451
9452
#for phaseName in ["int main", "Iteration"]:
9453
for phaseName in ["Iteration"]:
9454
9455
totalStddev = 0.0
9456
totalRatio = 0.0
9457
9458
- print "LoopID\t\t Procs\t Type\t\t AVG\t MAX\t MIN\t STDEV\t AVG/MAX"
9459
+ print("LoopID\t\t Procs\t Type\t\t AVG\t MAX\t MIN\t STDEV\t AVG/MAX")
9460
for phase in phases:
9461
mean, max, min, stddev = computeLoadBalance(phase, True)
9462
if mean == max == min == stddev == 0:
9463
9464
9465
#event = LoadImbalanceOperation.KERNEL_COMPUTATION
9466
event = LoadImbalanceOperation.COMPUTATION
9467
- print "%s\t\t %d\t %ls\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % ("Average", trial.getThreads().size(), event, avgMean*100, avgMax*100, avgMin*100, avgStddev*100, avgRatio*100)
9468
+ print("%s\t\t %d\t %ls\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % ("Average", trial.getThreads().size(), event, avgMean*100, avgMax*100, avgMin*100, avgStddev*100, avgRatio*100))
9469
9470
- print "---------------- JPython test script end -------------"
9471
+ print("---------------- JPython test script end -------------")
9472
9473
if __name__ == "__main__":
9474
main()
9475
--- a/tools/src/perfexplorer/examples/MicroLoadImbalance/microLoadImbalanceActual.py (original)
9476
--- b/tools/src/perfexplorer/examples/MicroLoadImbalance/microLoadImbalanceActual.py (refactored)
9477
9478
tmp = parameterMap.get("tauData")
9479
if tmp != None:
9480
tauData = tmp
9481
- print "Performance data: " + tauData
9482
+ print("Performance data: " + tauData)
9483
else:
9484
- print "TAU profile data path not specified... using current directory of profile.x.x.x files."
9485
+ print("TAU profile data path not specified... using current directory of profile.x.x.x files.")
9486
9487
tmp = parameterMap.get("prefix")
9488
if tmp != None:
9489
iterationPrefix = tmp
9490
- print "Iteration Prefix: " + iterationPrefix
9491
+ print("Iteration Prefix: " + iterationPrefix)
9492
else:
9493
- print "Iteration Prefix not specified... using", iterationPrefix
9494
+ print("Iteration Prefix not specified... using", iterationPrefix)
9495
9496
def loadFile(fileName):
9497
global gprof
9498
9499
threads = trial.getThreads().size()
9500
if callpath:
9501
if numPhases < 100:
9502
- print "%s\t %d\t %.2f\t %s\t %.2f\t %.2f\t %.2f\t %.2f\t %.4f\t %.4f" % (trunc(mainEvent, max_pos=10), threads, inclusive, event, mean, max, min, stddev, max/inclusive, ratio)
9503
+ print("%s\t %d\t %.2f\t %s\t %.2f\t %.2f\t %.2f\t %.2f\t %.4f\t %.4f" % (trunc(mainEvent, max_pos=10), threads, inclusive, event, mean, max, min, stddev, max/inclusive, ratio))
9504
#print "%s\t %d\t %s\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % (mainEvent, trial.getThreads().size(), event, mean*100, 100, 100, 100, 100)
9505
else:
9506
- print "%d\t %.2f\t %s\t %.2f\t %.2f\t %.2f\t %.2f\t %.4f\t %.4f" % (threads, inclusive, event, mean, max, min, stddev, max/inclusive, ratio)
9507
+ print("%d\t %.2f\t %s\t %.2f\t %.2f\t %.2f\t %.2f\t %.4f\t %.4f" % (threads, inclusive, event, mean, max, min, stddev, max/inclusive, ratio))
9508
9509
splits = loadBalance.get(LoadImbalanceOperation.COMPUTATION_SPLITS)
9510
for thread in splits.getThreads():
9511
9512
global vectorT_i
9513
global vectorT
9514
9515
- print "--------------- JPython test script start ------------"
9516
- print "--- Looking for load imbalances --- "
9517
+ print("--------------- JPython test script start ------------")
9518
+ print("--- Looking for load imbalances --- ")
9519
9520
# get the parameters
9521
getParameters()
9522
9523
# load the data
9524
trial = loadFile(tauData)
9525
trial.setIgnoreWarnings(True)
9526
- print "Getting basic statistics..."
9527
+ print("Getting basic statistics...")
9528
statter = BasicStatisticsOperation(trial)
9529
masterStats = statter.processData()
9530
masterMeans = masterStats.get(BasicStatisticsOperation.MEAN)
9531
9532
#print "Procs\t Incl.\t Type\t\t AVG\t MAX\t MIN\t STDEV\t AVG/MAX"
9533
#computeLoadBalance(trial, False, 1)
9534
9535
- print
9536
+ print()
9537
9538
splitter = SplitTrialPhasesOperation(trial, iterationPrefix)
9539
phases = splitter.processData()
9540
9541
totalStddev = 0.0
9542
totalRatio = 0.0
9543
9544
- print "LoopID\t\t Procs\t Incl.\t Type\t\t AVG\t MAX\t MIN\t STDEV\t CommEff AVG/MAX"
9545
- print "------------------------------------------------------------------------------------------------"
9546
+ print("LoopID\t\t Procs\t Incl.\t Type\t\t AVG\t MAX\t MIN\t STDEV\t CommEff AVG/MAX")
9547
+ print("------------------------------------------------------------------------------------------------")
9548
for phase in phases:
9549
mean, max, min, stddev, inclusive, commEff = computeLoadBalance(phase, True, phases.size())
9550
if mean == max == min == stddev == 0:
9551
9552
9553
event = LoadImbalanceOperation.COMPUTATION
9554
#print "%s\t\t %d\t %ls\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t %.2f%%\t" % ("Average", trial.getThreads().size(), event, avgMean*100, avgMax*100, avgMin*100, avgStddev*100, avgRatio*100)
9555
- print "------------------------------------------------------------------------------------------------"
9556
- print "%s\t\t %d\t %.2f\t %s\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f" % ("Totals", trial.getThreads().size(), totalInclusive, event, totalMean, totalMax, totalMin, math.sqrt(totalStddev), totalCommEff, totalMean / totalMax)
9557
- print "%s\t\t %d\t %.2f\t %s\t %.2f\t %.2f\t %.2f\t %.2f\t %.4f\t %.4f" % ("Average", trial.getThreads().size(), avgInclusive, event, avgMean, avgMax, avgMin, avgStddev, avgCommEff, avgRatio)
9558
+ print("------------------------------------------------------------------------------------------------")
9559
+ print("%s\t\t %d\t %.2f\t %s\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f" % ("Totals", trial.getThreads().size(), totalInclusive, event, totalMean, totalMax, totalMin, math.sqrt(totalStddev), totalCommEff, totalMean / totalMax))
9560
+ print("%s\t\t %d\t %.2f\t %s\t %.2f\t %.2f\t %.2f\t %.2f\t %.4f\t %.4f" % ("Average", trial.getThreads().size(), avgInclusive, event, avgMean, avgMax, avgMin, avgStddev, avgCommEff, avgRatio))
9561
9562
# the total time spent in the loop. Essentially, for each
9563
# iteration of the loop, get the total time for each process. Accumulate
9564
# that vector over the whole loop. The process with the longest time spent
9565
# computing (aggregated over all iterations) is the T.
9566
- print "\nT:\t\t", T
9567
+ print("\nT:\t\t", T)
9568
# the total time spent computing, collapsed. Essentially, for each
9569
# iteration of the loop, get the computing time for each process. Accumulate
9570
# that vector over the whole loop. The process with the longest time spent
9571
# computing (aggregated over all iterations) is the max(T_i).
9572
- print "max(T_i):\t", maxT_i
9573
- print "avg(T_i):\t", avgT_i
9574
- print "maxEff:\t\t", maxEff
9575
- print "CommEff:\t", commEff, "(should be same as maxEff)"
9576
+ print("max(T_i):\t", maxT_i)
9577
+ print("avg(T_i):\t", avgT_i)
9578
+ print("maxEff:\t\t", maxEff)
9579
+ print("CommEff:\t", commEff, "(should be same as maxEff)")
9580
# the load balance for the loop. This is the sum of all efficiencies for
9581
# all processes, divided by the number of processes times the maxiumum
9582
# efficiency. This can be (and is) simplified, by summing the mean
9583
# computing times, and dividing by the max computing times.
9584
- print "avgEff:\t\t", avgEff
9585
- print "LB:\t\t", LB
9586
+ print("avgEff:\t\t", avgEff)
9587
+ print("LB:\t\t", LB)
9588
9589
# the total time spent computing in the loop, serialized. Essentially, for each
9590
# iteration of the loop, get the max computing time in that loop. Add
9591
# those together. Because of overlapping iterations, this can be larger
9592
# than the actual time in the loop. If there were
9593
# no time spent in communication, this is how long the loop should take.
9594
- print "T ideal:\t", totalMax
9595
+ print("T ideal:\t", totalMax)
9596
# the micro load balance is the process with the highest computation time
9597
# divided by the ideal total loop execution time.
9598
- print "microLB:\t", maxT_i / totalMax
9599
+ print("microLB:\t", maxT_i / totalMax)
9600
# the transfer term is the total time spent in the ideal loop divided by
9601
# the actual time spent in the loop.
9602
- print "Transfer:\t", totalMax / T
9603
+ print("Transfer:\t", totalMax / T)
9604
# finally, compute the efficiency. == LB * microLB * Transfer * IPC
9605
- print "n:\t\t", LB * (maxT_i / totalMax) * (totalMax / T) * 1.0, "\n"
9606
+ print("n:\t\t", LB * (maxT_i / totalMax) * (totalMax / T) * 1.0, "\n")
9607
9608
- print "---------------- JPython test script end -------------"
9609
+ print("---------------- JPython test script end -------------")
9610
9611
if __name__ == "__main__":
9612
main()
9613
--- a/tools/src/perfexplorer/examples/PhaseComparison/autotuning.py (original)
9614
--- b/tools/src/perfexplorer/examples/PhaseComparison/autotuning.py (refactored)
9615
9616
grapher.processData()
9617
9618
def main():
9619
- print "--------------- JPython test script start ------------"
9620
+ print("--------------- JPython test script start ------------")
9621
# load the data
9622
global tauData
9623
global prefix
9624
9625
9626
# draw the graph
9627
drawGraph(extracted)
9628
- print "---------------- JPython test script end -------------"
9629
+ print("---------------- JPython test script end -------------")
9630
9631
if __name__ == "__main__":
9632
main()
9633
--- a/tools/src/perfexplorer/examples/PhaseComparison/phases.py (original)
9634
--- b/tools/src/perfexplorer/examples/PhaseComparison/phases.py (refactored)
9635
9636
tmp = parameterMap.get("tauData")
9637
if tmp != None:
9638
tauData = tmp
9639
- print "Performance data: " + tauData
9640
+ print("Performance data: " + tauData)
9641
else:
9642
- print "TAU profile data path not specified... using current directory of profile.x.x.x files."
9643
+ print("TAU profile data path not specified... using current directory of profile.x.x.x files.")
9644
9645
tmp = parameterMap.get("prefix")
9646
if tmp != None:
9647
prefix = tmp
9648
else:
9649
- print "Prefix not specified. Using default."
9650
- print "Prefix: " + prefix
9651
+ print("Prefix not specified. Using default.")
9652
+ print("Prefix: " + prefix)
9653
9654
9655
def loadFile(fileName):
9656
9657
grapher.processData()
9658
9659
def main():
9660
- print "--------------- JPython test script start ------------"
9661
+ print("--------------- JPython test script start ------------")
9662
# load the data
9663
global tauData
9664
global prefix
9665
9666
9667
# draw the graph
9668
drawGraph(extracted)
9669
- print "---------------- JPython test script end -------------"
9670
+ print("---------------- JPython test script end -------------")
9671
9672
if __name__ == "__main__":
9673
main()
9674
--- a/tools/src/perfexplorer/examples/Power5CPIStack/processCPI.py (original)
9675
--- b/tools/src/perfexplorer/examples/Power5CPIStack/processCPI.py (refactored)
9676
9677
numFunctions=0
9678
numClusters=1
9679
9680
- print "Parsing cluster info from: ", info
9681
+ print("Parsing cluster info from: ", info)
9682
i = open(info, 'r')
9683
9684
names=[]
9685
9686
for j in range(1,len(tokens)):
9687
if tokens[j] == "nan" or tokens[j] == "-nan":
9688
if percentDurations[j-1] >= localThreshold:
9689
- print "! WARNING ! 'nan' encountered for ", tokens[0].strip("\""), "- results are not trustworthy for cluster", j-1
9690
+ print("! WARNING ! 'nan' encountered for ", tokens[0].strip("\""), "- results are not trustworthy for cluster", j-1)
9691
counts.append(0)
9692
else:
9693
counts.append(int(tokens[j]))
9694
9695
if percentDurations[p] < tmpVal:
9696
maxClusters=p
9697
break
9698
- print "threshold = " + str(threshold) + "% maxClusters = " + str(maxClusters-1)
9699
+ print("threshold = " + str(threshold) + "% maxClusters = " + str(maxClusters-1))
9700
9701
return names, densities, totalDurations, averageDurations, percentDurations, counters, maxClusters
9702
9703
9704
numFunctions=0
9705
numClusters=1
9706
9707
- print "Parsing benchmark info from: ", info
9708
+ print("Parsing benchmark info from: ", info)
9709
i = open(info, 'r')
9710
9711
names=["NOISE"]
9712
9713
maxClusters = maxClusters + 1
9714
i.close()
9715
9716
- print "threshold = " + str(threshold) + "% maxClusters = " + str(maxClusters)
9717
+ print("threshold = " + str(threshold) + "% maxClusters = " + str(maxClusters))
9718
9719
return names, densities, totalDurations, averageDurations, percentDurations, counters, maxClusters
9720
9721
#########################################################################################
9722
9723
def handleNone(dictionary, name, index):
9724
- if name in dictionary.keys():
9725
+ if name in list(dictionary.keys()):
9726
return dictionary[name][index]
9727
else:
9728
return 0
9729
9730
def handleNone1(dictionary, name, index):
9731
- if name in dictionary.keys():
9732
+ if name in list(dictionary.keys()):
9733
if dictionary[name][index] > 0.0:
9734
return dictionary[name][index]
9735
else:
9736
9737
fact = FactWrapper("Overall", "CPI Stack", cpiStack)
9738
handle = ruleHarness.assertObject(fact)
9739
fact.setFactHandle(handle)
9740
- print verbose
9741
+ print(verbose)
9742
if verbose == "yes":
9743
factDebug = FactWrapper("Dump CPI", "CPI Stack", cpiStack)
9744
handleDebug = ruleHarness.assertObject(factDebug)
9745
9746
#########################################################################################
9747
9748
def main(argv):
9749
- print "--------------- JPython test script start ------------"
9750
+ print("--------------- JPython test script start ------------")
9751
getParameters()
9752
global fractionThreshold
9753
names, densities, totalDurations, averageDurations, percentDurations, counters, maxClusters = parseCounters(inputData, fractionThreshold)
9754
# cpiStack = makeTestStack()
9755
for i in range(1,maxClusters):
9756
- print "\n>>>>>>>>>>>>>>>> Analyzing Stalls for Cluster", i, "<<<<<<<<<<<<<<<<\n"
9757
+ print("\n>>>>>>>>>>>>>>>> Analyzing Stalls for Cluster", i, "<<<<<<<<<<<<<<<<\n")
9758
cpiStack = computeCPIStats(names, percentDurations, counters, i)
9759
processRules(cpiStack)
9760
- print "---------------- JPython test script end -------------"
9761
+ print("---------------- JPython test script end -------------")
9762
9763
#########################################################################################
9764
9765
--- a/tools/src/perfexplorer/examples/PowerCapEffect/cap.py (original)
9766
--- b/tools/src/perfexplorer/examples/PowerCapEffect/cap.py (refactored)
9767
9768
return inputs
9769
9770
def main():
9771
- print "--------------- JPython test script start ------------"
9772
+ print("--------------- JPython test script start ------------")
9773
# load the data
9774
for app in ["amg2013","CoMD-mpi","lulesh"]:
9775
inputs = loadFromDB(app)
9776
inputs = aggregateMPI(inputs)
9777
drawGraph(inputs,app)
9778
- print "---------------- JPython test script end -------------"
9779
+ print("---------------- JPython test script end -------------")
9780
9781
if __name__ == "__main__":
9782
main()
9783
--- a/tools/src/perfexplorer/examples/SaveNewMetric/savemetric.py (original)
9784
--- b/tools/src/perfexplorer/examples/SaveNewMetric/savemetric.py (refactored)
9785
9786
def loadFromDB():
9787
Utilities.setSession("postgres-test")
9788
trial = Utilities.getTrialByName("threads").get(0)
9789
- print trial.getName()
9790
+ print(trial.getName())
9791
return trial
9792
9793
def derive(trial):
9794
9795
saver.processData()
9796
9797
def main():
9798
- print "--------------- JPython test script start ------------"
9799
+ print("--------------- JPython test script start ------------")
9800
# load the data
9801
trial = loadFromDB()
9802
9803
# create a derived metric
9804
derived = derive(TrialResult(trial))
9805
- print derived.get(0).getMetrics()
9806
+ print(derived.get(0).getMetrics())
9807
# Utilities.saveMetric(trial,derived.get(0).getMetrics())
9808
save(derived)
9809
- print "---------------- JPython test script end -------------"
9810
+ print("---------------- JPython test script end -------------")
9811
9812
if __name__ == "__main__":
9813
main()
9814
--- a/tools/src/perfexplorer/examples/StackedBarChart/graph.py (original)
9815
--- b/tools/src/perfexplorer/examples/StackedBarChart/graph.py (refactored)
9816
9817
grapher.drawChartToFile("./testing.eps")
9818
9819
def main():
9820
- print "--------------- JPython test script start ------------"
9821
+ print("--------------- JPython test script start ------------")
9822
# load the data
9823
inputs = loadFromFiles()
9824
9825
9826
extracted = extractor.processData()
9827
9828
drawGraph(extracted)
9829
- print "---------------- JPython test script end -------------"
9830
+ print("---------------- JPython test script end -------------")
9831
9832
if __name__ == "__main__":
9833
main()
9834
--- a/tools/src/perfexplorer/examples/StackedBarChart/graph_stacked_chart.py (original)
9835
--- b/tools/src/perfexplorer/examples/StackedBarChart/graph_stacked_chart.py (refactored)
9836
9837
"""
9838
9839
def main(inputs,title):
9840
- print "--------------- JPython test script start ------------"
9841
+ print("--------------- JPython test script start ------------")
9842
# load the data
9843
# inputs = loadFromFiles()
9844
# inputs = loadFromDB2()
9845
9846
9847
# drawAreaGraph(extracted, title)
9848
drawBarGraph(extracted, title, True)
9849
- print "---------------- JPython test script end -------------"
9850
+ print("---------------- JPython test script end -------------")
9851
9852
if __name__ == "__main__":
9853
inputs = loadFromFiles()
9854
--- a/tools/src/perfexplorer/examples/StackedBarChart/kevin.py (original)
9855
--- b/tools/src/perfexplorer/examples/StackedBarChart/kevin.py (refactored)
9856
9857
"""
9858
9859
def main(inputs,title):
9860
- print "--------------- JPython test script start ------------"
9861
+ print("--------------- JPython test script start ------------")
9862
# load the data
9863
# inputs = loadFromFiles()
9864
# inputs = loadFromDB2()
9865
9866
9867
# drawAreaGraph(extracted, title)
9868
drawBarGraph(extracted, title, True)
9869
- print "---------------- JPython test script end -------------"
9870
+ print("---------------- JPython test script end -------------")
9871
9872
if __name__ == "__main__":
9873
inputs = loadFromFiles()
9874
--- a/tools/src/perfexplorer/examples/Sunburst/dump.py (original)
9875
--- b/tools/src/perfexplorer/examples/Sunburst/dump.py (refactored)
9876
9877
9878
def dumpNode(myfile,node,parent,parentPath,result,metric):
9879
comma = False
9880
- for key, value in node.iteritems():
9881
+ for key, value in node.items():
9882
currentPath = key
9883
if parentPath != "":
9884
currentPath = parentPath + " => " + key
9885
9886
9887
def dumpIcicleNode(myfile,node,parent,parentPath,result,metric):
9888
comma = False
9889
- for key, value in node.iteritems():
9890
+ for key, value in node.items():
9891
currentPath = key
9892
if parentPath != "":
9893
currentPath = parentPath + " => " + key
9894
9895
comma = True
9896
9897
def main():
9898
- print "--------------- JPython test script start ------------"
9899
+ print("--------------- JPython test script start ------------")
9900
# load the data
9901
t = 0
9902
raw = loadFromFiles()
9903
9904
dumpIcicleNode(mydata2,tree,"","",result,metric)
9905
mydata2.close()
9906
9907
- print "---------------- JPython test script end -------------"
9908
+ print("---------------- JPython test script end -------------")
9909
9910
if __name__ == "__main__":
9911
main()
9912
--- a/tools/src/perfexplorer/examples/TimerBreakdown/graph.py (original)
9913
--- b/tools/src/perfexplorer/examples/TimerBreakdown/graph.py (refactored)
9914
9915
"""
9916
9917
def main():
9918
- print "--------------- JPython test script start ------------"
9919
+ print("--------------- JPython test script start ------------")
9920
# load the data
9921
#inputs = loadFromFiles()
9922
inputs = loadFromDB()
9923
9924
9925
drawAreaGraph(extracted)
9926
drawBarGraph(extracted, True)
9927
- print "---------------- JPython test script end -------------"
9928
+ print("---------------- JPython test script end -------------")
9929
9930
if __name__ == "__main__":
9931
main()
9932
--- a/tools/src/perfexplorer/examples/TimerBreakdown/graph2ndLevel.py (original)
9933
--- b/tools/src/perfexplorer/examples/TimerBreakdown/graph2ndLevel.py (refactored)
9934
9935
"""
9936
9937
def main():
9938
- print "--------------- JPython test script start ------------"
9939
+ print("--------------- JPython test script start ------------")
9940
# load the data
9941
#inputs = loadFromFiles()
9942
inputs = loadFromDB()
9943
9944
index = index + 1
9945
drawAreaGraph(extracted)
9946
drawBarGraph(extracted, True)
9947
- print "---------------- JPython test script end -------------"
9948
+ print("---------------- JPython test script end -------------")
9949
9950
if __name__ == "__main__":
9951
main()
9952
--- a/tools/src/perfexplorer/openuh/ClustorGraph.py (original)
9953
--- b/tools/src/perfexplorer/openuh/ClustorGraph.py (refactored)
9954
9955
9956
9957
def main():
9958
- print "--------------- JPython test script start ------------"
9959
+ print("--------------- JPython test script start ------------")
9960
inputs = load("Application","Experiment","Trial")
9961
9962
grapher = DrawMetadataGraph(inputs)
9963
9964
#grapher.setYAxisLabel("")
9965
grapher.processData()
9966
9967
- print "---------------- JPython test script end -------------"
9968
+ print("---------------- JPython test script end -------------")
9969
9970
if __name__ == "__main__":
9971
main()
9972
--- a/tools/src/perfexplorer/openuh/commandLine.py (original)
9973
--- b/tools/src/perfexplorer/openuh/commandLine.py (refactored)
9974
9975
9976
###################################################################
9977
9978
-print "---------------- JPython test script begin -----------"
9979
+print("---------------- JPython test script begin -----------")
9980
9981
# create a rulebase for processing
9982
#ruleHarness = RuleHarness.useGlobalRules("openuh/OpenUHRules.drl")
9983
9984
top10er = TopXEvents(derived, newMetric, AbstractResult.EXCLUSIVE, 10)
9985
top10 = top10er.processData().get(0)
9986
9987
-print "Top 10 events with high stall/cycle ratios:"
9988
+print("Top 10 events with high stall/cycle ratios:")
9989
for event in top10er.getSortedEventNames():
9990
- print "\t", event, derived.getInclusive(0, event, newMetric)
9991
+ print("\t", event, derived.getInclusive(0, event, newMetric))
9992
#MeanEventFact.compareEventToMain(derived, mainEvent, derived, event)
9993
9994
# process the rules
9995
#RuleHarness.getInstance().processRules()
9996
9997
-print "---------------- JPython test script end -------------"
9998
+print("---------------- JPython test script end -------------")
9999
--- a/tools/src/perfexplorer/openuh/compareMemoryStalls.py (original)
10000
--- b/tools/src/perfexplorer/openuh/compareMemoryStalls.py (refactored)
10001
10002
10003
def processTrial(trial):
10004
# extract the non-callpath events from the trial
10005
- print "extracting non-callpath..."
10006
+ print("extracting non-callpath...")
10007
extractor = ExtractNonCallpathEventOperation(trial)
10008
extracted = extractor.processData().get(0)
10009
10010
10011
# extracted = extractor.processData().get(0)
10012
10013
# get basic statistics
10014
- print "computing mean..."
10015
+ print("computing mean...")
10016
statMaker = BasicStatisticsOperation(extracted, True)
10017
stats = statMaker.processData()
10018
means = stats.get(BasicStatisticsOperation.MEAN)
10019
10020
# get main event
10021
mainEvent = means.getMainEvent()
10022
- print "Main Event: ", mainEvent
10023
+ print("Main Event: ", mainEvent)
10024
10025
# calculate all derived metrics
10026
- print "Deriving memory stall metrics..."
10027
+ print("Deriving memory stall metrics...")
10028
derived, PowerPerProc = getMemoryModel(means)
10029
10030
return derived
10031
10032
global Total
10033
global TotalRatio
10034
10035
- print "--------------- JPython test script start ------------"
10036
- print "--- Calculating Memory Stall Causes --- "
10037
+ print("--------------- JPython test script start ------------")
10038
+ print("--- Calculating Memory Stall Causes --- ")
10039
10040
# load the trial
10041
- print "loading the data..."
10042
+ print("loading the data...")
10043
Utilities.setSession("openuh")
10044
# load just the average values across all threads, input: app_name, exp_name, trial_name
10045
trial = TrialResult(Utilities.getTrial("Fluid Dynamic - Unoptimized OpenMP", "rib 90", "1_16"))
10046
10047
top10er = TopXEvents(diff, diff.getTimeMetric(), AbstractResult.EXCLUSIVE, 10)
10048
top10 = top10er.processData().get(0);
10049
for event in top10.getEvents():
10050
- print
10051
- print event, "L1 hits: ", L1Hits, diff.getInclusive(0, event, L1Hits)
10052
- print event, "L2 hits: ", L2Hits, diff.getInclusive(0, event, L2Hits)
10053
- print event, "L3 hits: ", L3Hits, diff.getInclusive(0, event, L3Hits)
10054
- print event, "TLB Penalty: ", TLBPenalty, diff.getInclusive(0, event, TLBPenalty)
10055
- print event, "Local Memory Hits: ", LocalMemoryHits, diff.getInclusive(0, event, LocalMemoryHits)
10056
- print event, "Remote Memory Hits: ", RemoteMemoryHits, diff.getInclusive(0, event, RemoteMemoryHits)
10057
- print event, "Total: ", Total, diff.getInclusive(0, event, Total)
10058
- print event, "Total Ratio: ", TotalRatio, diff.getInclusive(0, event, TotalRatio)
10059
- print event, "local/remote ratio: ", RatioMemoryAccesses, diff.getInclusive(0, event, RatioMemoryAccesses)
10060
- print
10061
-
10062
-
10063
- print "---------------- JPython test script end -------------"
10064
+ print()
10065
+ print(event, "L1 hits: ", L1Hits, diff.getInclusive(0, event, L1Hits))
10066
+ print(event, "L2 hits: ", L2Hits, diff.getInclusive(0, event, L2Hits))
10067
+ print(event, "L3 hits: ", L3Hits, diff.getInclusive(0, event, L3Hits))
10068
+ print(event, "TLB Penalty: ", TLBPenalty, diff.getInclusive(0, event, TLBPenalty))
10069
+ print(event, "Local Memory Hits: ", LocalMemoryHits, diff.getInclusive(0, event, LocalMemoryHits))
10070
+ print(event, "Remote Memory Hits: ", RemoteMemoryHits, diff.getInclusive(0, event, RemoteMemoryHits))
10071
+ print(event, "Total: ", Total, diff.getInclusive(0, event, Total))
10072
+ print(event, "Total Ratio: ", TotalRatio, diff.getInclusive(0, event, TotalRatio))
10073
+ print(event, "local/remote ratio: ", RatioMemoryAccesses, diff.getInclusive(0, event, RatioMemoryAccesses))
10074
+ print()
10075
+
10076
+
10077
+ print("---------------- JPython test script end -------------")
10078
10079
10080
if __name__ == "__main__":
10081
--- a/tools/src/perfexplorer/openuh/comparePower.py (original)
10082
--- b/tools/src/perfexplorer/openuh/comparePower.py (refactored)
10083
10084
global Joules
10085
global PPJ
10086
10087
- print "--------------- JPython test script start ------------"
10088
- print "--- Calculating Power Models --- "
10089
+ print("--------------- JPython test script start ------------")
10090
+ print("--- Calculating Power Models --- ")
10091
10092
# create a rulebase for processing
10093
#print "Loading Rules..."
10094
#ruleHarness = RuleHarness.useGlobalRules("openuh/OpenUHRules.drl")
10095
10096
# load the trial
10097
- print "loading the data..."
10098
+ print("loading the data...")
10099
10100
# check to see if the user has selected a trial
10101
Utilities.setSession("openuh")
10102
trial = TrialResult(Utilities.getTrial("Fluid Dynamic Energy/Power", trialName, "1_1"))
10103
10104
# extract the non-callpath events from the trial
10105
- print "extracting non-callpath..."
10106
+ print("extracting non-callpath...")
10107
extractor = ExtractNonCallpathEventOperation(trial)
10108
extracted = extractor.processData().get(0)
10109
10110
# get basic statistics
10111
- print "computing mean..."
10112
+ print("computing mean...")
10113
statMaker = BasicStatisticsOperation(extracted, False)
10114
stats = statMaker.processData()
10115
means = stats.get(BasicStatisticsOperation.MEAN)
10116
10117
# get main event
10118
mainEvent = means.getMainEvent()
10119
- print "Main Event: ", mainEvent
10120
+ print("Main Event: ", mainEvent)
10121
10122
# calculate all derived metrics
10123
- print
10124
- print "Deriving power metric..."
10125
+ print()
10126
+ print("Deriving power metric...")
10127
derived, PowerPerProc = getPowerModel(means)
10128
10129
# get the top 10 events
10130
10131
thread = 0
10132
10133
# iterate over events, output inefficiency derived metric
10134
- print
10135
+ print()
10136
#print "Top 10 Average", PowerPerProc, "values per thread for this trial:"
10137
#for event in top10er.getSortedEventNames():
10138
#print event, derived.getExclusive(thread, event, PowerPerProc)
10139
- print
10140
+ print()
10141
#print mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, PowerPerProc)
10142
- print
10143
+ print()
10144
10145
# compute the energy consumed by each event
10146
- print "Computing joules consumed..."
10147
+ print("Computing joules consumed...")
10148
derived, EnergyPerProc = getEnergy(derived, PowerPerProc)
10149
Joules = EnergyPerProc
10150
10151
# iterate over events, output inefficiency derived metric
10152
- print
10153
+ print()
10154
#print "Top 10 Average", EnergyPerProc, "values per thread for this trial:"
10155
#for event in top10er.getSortedEventNames():
10156
#print event, derived.getExclusive(thread, event, EnergyPerProc)
10157
- print
10158
+ print()
10159
#print mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, EnergyPerProc)
10160
- print
10161
+ print()
10162
10163
# compute the floating point operations per joule per event
10164
- print "Computing FP_OPS/joule..."
10165
+ print("Computing FP_OPS/joule...")
10166
derived, FlopsPerJoule = getFlopsPerJoule(derived, EnergyPerProc)
10167
PPJ = FlopsPerJoule
10168
10169
# iterate over events, output inefficiency derived metric
10170
- print
10171
+ print()
10172
#print "Top 10 Average", FlopsPerJoule, "values per thread for this trial:"
10173
#for event in top10er.getSortedEventNames():
10174
#print event, derived.getExclusive(thread, event, FlopsPerJoule)
10175
- print
10176
+ print()
10177
#print mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, FlopsPerJoule)
10178
- print
10179
+ print()
10180
10181
# compute the floating point operations per joule per event
10182
- print "Computing Instructions Per Cycle..."
10183
+ print("Computing Instructions Per Cycle...")
10184
derived, IPC = getIPC(derived)
10185
10186
# iterate over events, output inefficiency derived metric
10187
- print
10188
+ print()
10189
#print "Top 10 Average", IPC, "values per thread for this trial:"
10190
#for event in top10er.getSortedEventNames():
10191
#print event, derived.getExclusive(thread, event, IPC)
10192
- print
10193
+ print()
10194
#print mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, IPC)
10195
- print
10196
+ print()
10197
10198
# compute the floating point operations per joule per event
10199
- print "Computing Issued Per Cycle..."
10200
+ print("Computing Issued Per Cycle...")
10201
derived, issuedPerCycle = getIssuedPerCycle(derived)
10202
10203
# iterate over events, output inefficiency derived metric
10204
- print
10205
+ print()
10206
#print "Top 10 Average", issuedPerCycle, "values per thread for this trial:"
10207
#for event in top10er.getSortedEventNames():
10208
#print event, derived.getExclusive(thread, event, issuedPerCycle)
10209
- print
10210
+ print()
10211
10212
#print mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, issuedPerCycle)
10213
- print
10214
+ print()
10215
10216
#print "Time to completion..."
10217
- print
10218
+ print()
10219
#for event in top10er.getSortedEventNames():
10220
#print event, derived.getExclusive(thread, event, derived.getTimeMetric())/1000000
10221
- print
10222
+ print()
10223
10224
#print mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, derived.getTimeMetric())/1000000
10225
10226
# process the rules
10227
#RuleHarness.getInstance().processRules()
10228
10229
- print "---------------- JPython test script end -------------"
10230
+ print("---------------- JPython test script end -------------")
10231
return derived
10232
10233
if __name__ == "__main__":
10234
10235
top20er = TopXEvents(first, first.getTimeMetric(), AbstractResult.EXCLUSIVE, 20)
10236
top20 = top20er.processData().get(0)
10237
10238
- print "Joules"
10239
+ print("Joules")
10240
for event in top20.getEvents():
10241
if difference.getExclusive(0, event, Joules) < 0.0:
10242
- print event, difference.getExclusive(0, event, Joules), first.getExclusive(0, event, Joules), second.getExclusive(0, event, Joules)
10243
-
10244
- print
10245
- print "Performance Per Joule"
10246
+ print(event, difference.getExclusive(0, event, Joules), first.getExclusive(0, event, Joules), second.getExclusive(0, event, Joules))
10247
+
10248
+ print()
10249
+ print("Performance Per Joule")
10250
for event in top20.getEvents():
10251
if difference.getExclusive(0, event, PPJ) > 0.0:
10252
- print event, difference.getExclusive(0, event, PPJ) , first.getExclusive(0, event, PPJ) , second.getExclusive(0, event, PPJ)
10253
+ print(event, difference.getExclusive(0, event, PPJ) , first.getExclusive(0, event, PPJ) , second.getExclusive(0, event, PPJ))
10254
--- a/tools/src/perfexplorer/openuh/demoScript.py (original)
10255
--- b/tools/src/perfexplorer/openuh/demoScript.py (refactored)
10256
10257
10258
###################################################################
10259
10260
-print "---------------- JPython test script begin -----------"
10261
+print("---------------- JPython test script begin -----------")
10262
10263
# create a rulebase for processing
10264
ruleHarness = RuleHarness.useGlobalRules("openuh/OpenUHRules.drl")
10265
10266
10267
for event in derived.getEvents():
10268
MeanEventFact.compareEventToMain(derived, mainEvent, derived, event)
10269
-print
10270
+print()
10271
10272
# process the rules
10273
RuleHarness.getInstance().processRules()
10274
10275
-print "---------------- JPython test script end -------------"
10276
+print("---------------- JPython test script end -------------")
10277
--- a/tools/src/perfexplorer/openuh/inefficiency.py (original)
10278
--- b/tools/src/perfexplorer/openuh/inefficiency.py (refactored)
10279
10280
merged = None
10281
newName = None
10282
if (derivor.exists()):
10283
- print "Exists: ", newName
10284
+ print("Exists: ", newName)
10285
merged = input
10286
newName = derivor.getNewName()
10287
else:
10288
10289
# saver = SaveResultOperation(derived)
10290
# saver.setForceOverwrite(False)
10291
# saver.processData()
10292
- print "new metric: ", newName
10293
+ print("new metric: ", newName)
10294
return merged, newName
10295
10296
###################################################################
10297
10298
10299
###################################################################
10300
10301
-print "--------------- JPython test script start ------------"
10302
-print "--- Calculating inefficiency --- "
10303
+print("--------------- JPython test script start ------------")
10304
+print("--- Calculating inefficiency --- ")
10305
10306
# create a rulebase for processing
10307
#print "Loading Rules..."
10308
ruleHarness = RuleHarness.useGlobalRules("openuh/OpenUHRules.drl")
10309
10310
# load the trial
10311
-print "loading the data..."
10312
+print("loading the data...")
10313
10314
# choose the right database configuration - a string which matches the end of the jdbc connection,
10315
# such as "perfdmf" to match "jdbc:derby:/Users/khuck/src/tau2/apple/lib/perfdmf"
10316
10317
trial = TrialResult(Utilities.getTrial("Fluid Dynamic - Unoptimized OpenMP", "rib 90", "Original OpenMP 1_16"))
10318
10319
# extract the non-callpath events from the trial
10320
-print "extracting"
10321
+print("extracting")
10322
extractor = ExtractNonCallpathEventOperation(trial)
10323
extracted = extractor.processData().get(0)
10324
10325
# get basic statistics
10326
-print "stats"
10327
+print("stats")
10328
statMaker = BasicStatisticsOperation(extracted, False)
10329
stats = statMaker.processData()
10330
means = stats.get(BasicStatisticsOperation.MEAN)
10331
10332
# get main event
10333
mainEvent = means.getMainEvent()
10334
-print "Main Event: ", mainEvent
10335
+print("Main Event: ", mainEvent)
10336
10337
# calculate all derived metrics
10338
derived, inefficiency1, inefficiency2 = getInefficiency(means)
10339
10340
for event in derived.getEvents():
10341
#print event, derived.getExclusive(thread, event, inefficiency1), derived.getInclusive(thread, event, inefficiency1)
10342
MeanEventFact.compareEventToMain(derived, mainEvent, derived, event)
10343
-print
10344
+print()
10345
10346
# process the rules
10347
RuleHarness.getInstance().processRules()
10348
10349
-print "---------------- JPython test script end -------------"
10350
+print("---------------- JPython test script end -------------")
10351
--- a/tools/src/perfexplorer/openuh/loadimbalance.bsc.py (original)
10352
--- b/tools/src/perfexplorer/openuh/loadimbalance.bsc.py (refactored)
10353
10354
10355
###################################################################
10356
10357
-print "--------------- JPython test script start ------------"
10358
-print "--- Looking for load imbalances --- "
10359
+print("--------------- JPython test script start ------------")
10360
+print("--- Looking for load imbalances --- ")
10361
10362
# create a rulebase for processing
10363
-print "Loading Rules..."
10364
+print("Loading Rules...")
10365
ruleHarness = RuleHarness.useGlobalRules("/home/khuck/tau2/tools/src/perfexplorer/openuh/BSCRules.drl")
10366
10367
# load the trial
10368
-print "loading the data..."
10369
+print("loading the data...")
10370
10371
# choose the right database configuration -
10372
# a string which matches the end of the jdbc connection,
10373
10374
means = stats.get(BasicStatisticsOperation.MEAN)
10375
totals = stats.get(BasicStatisticsOperation.TOTAL)
10376
mainEvent = means.getMainEvent()
10377
-print "Main Event: ", mainEvent
10378
+print("Main Event: ", mainEvent)
10379
10380
# get the ratio between stddev and total
10381
ratioMaker = RatioOperation(stddev, means)
10382
10383
for event in ratios.getEvents():
10384
for metric in ratios.getMetrics():
10385
MeanEventFact.evaluateLoadBalance(means, ratios, event, metric)
10386
-print
10387
+print()
10388
10389
# add the callpath event names to the facts in the rulebase.
10390
10391
10392
# process the rules
10393
RuleHarness.getInstance().processRules()
10394
10395
-print "---------------- JPython test script end -------------"
10396
+print("---------------- JPython test script end -------------")
10397
--- a/tools/src/perfexplorer/openuh/loadimbalance.py (original)
10398
--- b/tools/src/perfexplorer/openuh/loadimbalance.py (refactored)
10399
10400
10401
###################################################################
10402
10403
-print "--------------- JPython test script start ------------"
10404
-print "--- Looking for load imbalances --- "
10405
+print("--------------- JPython test script start ------------")
10406
+print("--- Looking for load imbalances --- ")
10407
10408
# create a rulebase for processing
10409
-print "Loading Rules..."
10410
+print("Loading Rules...")
10411
ruleHarness = RuleHarness.useGlobalRules("openuh/OpenUHRules.drl")
10412
10413
# load the trial
10414
-print "loading the data..."
10415
+print("loading the data...")
10416
10417
# choose the right database configuration - a string which matches the end of the jdbc connection,
10418
# such as "perfdmf" to match "jdbc:derby:/Users/khuck/src/tau2/apple/lib/perfdmf"
10419
10420
means = stats.get(BasicStatisticsOperation.MEAN)
10421
totals = stats.get(BasicStatisticsOperation.TOTAL)
10422
mainEvent = means.getMainEvent()
10423
-print "Main Event: ", mainEvent
10424
+print("Main Event: ", mainEvent)
10425
10426
# get the ratio between stddev and total
10427
ratioMaker = RatioOperation(stddev, means)
10428
10429
#for metric in ratios.getMetrics():
10430
#print event, totals.getInclusive(thread, event, metric), means.getInclusive(thread, event, metric), stddev.getInclusive(thread, event, metric), ratios.getInclusive(thread, event, metric)
10431
MeanEventFact.evaluateLoadBalance(means, ratios, event, metric)
10432
-print
10433
+print()
10434
10435
# add the callpath event names to the facts in the rulebase.
10436
10437
10438
# process the rules
10439
RuleHarness.getInstance().processRules()
10440
10441
-print "---------------- JPython test script end -------------"
10442
+print("---------------- JPython test script end -------------")
10443
--- a/tools/src/perfexplorer/openuh/memoryStalls.py (original)
10444
--- b/tools/src/perfexplorer/openuh/memoryStalls.py (refactored)
10445
10446
global Total
10447
global TotalRatio
10448
10449
- print "--------------- JPython test script start ------------"
10450
- print "--- Calculating Memory Stall Causes --- "
10451
+ print("--------------- JPython test script start ------------")
10452
+ print("--- Calculating Memory Stall Causes --- ")
10453
10454
# create a rulebase for processing
10455
- print "Loading Rules..."
10456
+ print("Loading Rules...")
10457
ruleHarness = RuleHarness.useGlobalRules("openuh/OpenUHRules.drl")
10458
10459
# load the trial
10460
- print "loading the data..."
10461
+ print("loading the data...")
10462
10463
# check to see if the user has selected a trial
10464
tmp = Utilities.getCurrentTrial()
10465
if tmp != None:
10466
trial = TrialResult(tmp)
10467
- print
10468
+ print()
10469
else:
10470
# remove these two lines to bypass this and use the default trial
10471
- print "No trial selected - script exiting"
10472
+ print("No trial selected - script exiting")
10473
return
10474
10475
# choose the right database configuration - a string which matches the end of the jdbc connection,
10476
10477
#trial = TrialResult(Utilities.getTrial("Fluid Dynamic - Unoptimized", "rib 45", "1_8"))
10478
10479
# extract the non-callpath events from the trial
10480
- print "extracting non-callpath..."
10481
+ print("extracting non-callpath...")
10482
extractor = ExtractNonCallpathEventOperation(trial)
10483
extracted = extractor.processData().get(0)
10484
10485
# get basic statistics
10486
- print "computing mean..."
10487
+ print("computing mean...")
10488
statMaker = BasicStatisticsOperation(extracted, True)
10489
stats = statMaker.processData()
10490
means = stats.get(BasicStatisticsOperation.MEAN)
10491
10492
# get main event
10493
mainEvent = means.getMainEvent()
10494
- print "Main Event: ", mainEvent
10495
+ print("Main Event: ", mainEvent)
10496
10497
# calculate all derived metrics
10498
- print "Deriving memory stall metrics..."
10499
+ print("Deriving memory stall metrics...")
10500
derived, PowerPerProc = getMemoryModel(means)
10501
10502
# iterate over events, output inefficiency derived metric
10503
for event in derived.getEvents():
10504
MeanEventFact.compareEventToMain(derived, mainEvent, derived, event)
10505
- print
10506
- print
10507
+ print()
10508
+ print()
10509
10510
# output the top 10
10511
top10er = TopXEvents(derived, derived.getTimeMetric(), AbstractResult.EXCLUSIVE, 10)
10512
top10 = top10er.processData().get(0);
10513
for event in top10.getEvents():
10514
- print
10515
+ print()
10516
if event == mainEvent:
10517
- print event, "L1 hits: ", L1Hits, derived.getInclusive(0, event, L1Hits)
10518
- print event, "L2 hits: ", L2Hits, derived.getInclusive(0, event, L2Hits)
10519
- print event, "L3 hits: ", L3Hits, derived.getInclusive(0, event, L3Hits)
10520
- print event, "TLB Penalty: ", TLBPenalty, derived.getInclusive(0, event, TLBPenalty)
10521
- print event, "Local Memory Hits: ", LocalMemoryHits, derived.getInclusive(0, event, LocalMemoryHits)
10522
- print event, "Remote Memory Hits: ", RemoteMemoryHits, derived.getInclusive(0, event, RemoteMemoryHits)
10523
- print event, "Total: ", Total, derived.getInclusive(0, event, Total)
10524
- print event, "Total Ratio: ", TotalRatio, derived.getInclusive(0, event, TotalRatio)
10525
- print event, "local/remote ratio: ", RatioMemoryAccesses, derived.getInclusive(0, event, RatioMemoryAccesses)
10526
+ print(event, "L1 hits: ", L1Hits, derived.getInclusive(0, event, L1Hits))
10527
+ print(event, "L2 hits: ", L2Hits, derived.getInclusive(0, event, L2Hits))
10528
+ print(event, "L3 hits: ", L3Hits, derived.getInclusive(0, event, L3Hits))
10529
+ print(event, "TLB Penalty: ", TLBPenalty, derived.getInclusive(0, event, TLBPenalty))
10530
+ print(event, "Local Memory Hits: ", LocalMemoryHits, derived.getInclusive(0, event, LocalMemoryHits))
10531
+ print(event, "Remote Memory Hits: ", RemoteMemoryHits, derived.getInclusive(0, event, RemoteMemoryHits))
10532
+ print(event, "Total: ", Total, derived.getInclusive(0, event, Total))
10533
+ print(event, "Total Ratio: ", TotalRatio, derived.getInclusive(0, event, TotalRatio))
10534
+ print(event, "local/remote ratio: ", RatioMemoryAccesses, derived.getInclusive(0, event, RatioMemoryAccesses))
10535
else:
10536
- print event, "L1 hits: ", L1Hits, derived.getExclusive(0, event, L1Hits)
10537
- print event, "L2 hits: ", L2Hits, derived.getExclusive(0, event, L2Hits)
10538
- print event, "L3 hits: ", L3Hits, derived.getExclusive(0, event, L3Hits)
10539
- print event, "TLB Penalty: ", TLBPenalty, derived.getExclusive(0, event, TLBPenalty)
10540
- print event, "Local Memory Hits: ", LocalMemoryHits, derived.getExclusive(0, event, LocalMemoryHits)
10541
- print event, "Remote Memory Hits: ", RemoteMemoryHits, derived.getExclusive(0, event, RemoteMemoryHits)
10542
- print event, "Total: ", Total, derived.getExclusive(0, event, Total)
10543
- print event, "Total Ratio: ", TotalRatio, derived.getExclusive(0, event, TotalRatio)
10544
- print event, "local/remote ratio: ", RatioMemoryAccesses, derived.getExclusive(0, event, RatioMemoryAccesses)
10545
- print
10546
+ print(event, "L1 hits: ", L1Hits, derived.getExclusive(0, event, L1Hits))
10547
+ print(event, "L2 hits: ", L2Hits, derived.getExclusive(0, event, L2Hits))
10548
+ print(event, "L3 hits: ", L3Hits, derived.getExclusive(0, event, L3Hits))
10549
+ print(event, "TLB Penalty: ", TLBPenalty, derived.getExclusive(0, event, TLBPenalty))
10550
+ print(event, "Local Memory Hits: ", LocalMemoryHits, derived.getExclusive(0, event, LocalMemoryHits))
10551
+ print(event, "Remote Memory Hits: ", RemoteMemoryHits, derived.getExclusive(0, event, RemoteMemoryHits))
10552
+ print(event, "Total: ", Total, derived.getExclusive(0, event, Total))
10553
+ print(event, "Total Ratio: ", TotalRatio, derived.getExclusive(0, event, TotalRatio))
10554
+ print(event, "local/remote ratio: ", RatioMemoryAccesses, derived.getExclusive(0, event, RatioMemoryAccesses))
10555
+ print()
10556
10557
# process the rules
10558
RuleHarness.getInstance().processRules()
10559
10560
- print "---------------- JPython test script end -------------"
10561
+ print("---------------- JPython test script end -------------")
10562
10563
if __name__ == "__main__":
10564
main()
10565
--- a/tools/src/perfexplorer/openuh/powerModels.py (original)
10566
--- b/tools/src/perfexplorer/openuh/powerModels.py (refactored)
10567
10568
global True
10569
global False
10570
10571
- print "--------------- JPython test script start ------------"
10572
- print "--- Calculating Power Models --- "
10573
+ print("--------------- JPython test script start ------------")
10574
+ print("--- Calculating Power Models --- ")
10575
10576
# create a rulebase for processing
10577
#print "Loading Rules..."
10578
#ruleHarness = RuleHarness.useGlobalRules("openuh/OpenUHRules.drl")
10579
10580
# load the trial
10581
- print "loading the data..."
10582
+ print("loading the data...")
10583
10584
# check to see if the user has selected a trial
10585
tmp = Utilities.getCurrentTrial()
10586
if tmp != None:
10587
trial = TrialResult(tmp)
10588
- print
10589
+ print()
10590
else:
10591
# remove these two lines to bypass this and use the default trial
10592
- print "No trial selected - script exiting"
10593
+ print("No trial selected - script exiting")
10594
return
10595
10596
Utilities.setSession("openuh")
10597
10598
#trial = TrialResult(Utilities.getTrial("Fluid Dynamic - Unoptimized", "rib 45", "1_8"))
10599
10600
# extract the non-callpath events from the trial
10601
- print "extracting non-callpath..."
10602
+ print("extracting non-callpath...")
10603
extractor = ExtractNonCallpathEventOperation(trial)
10604
extracted = extractor.processData().get(0)
10605
10606
10607
# extracted = extractor.processData().get(0)
10608
10609
# get basic statistics
10610
- print "computing mean..."
10611
+ print("computing mean...")
10612
statMaker = BasicStatisticsOperation(extracted, True)
10613
stats = statMaker.processData()
10614
means = stats.get(BasicStatisticsOperation.MEAN)
10615
10616
# get main event
10617
mainEvent = means.getMainEvent()
10618
- print "Main Event: ", mainEvent
10619
+ print("Main Event: ", mainEvent)
10620
10621
# calculate all derived metrics
10622
- print "Deriving power metrics..."
10623
+ print("Deriving power metrics...")
10624
derived, PowerPerProc = getPowerModel(means)
10625
10626
# get the top 10 "power dense" events
10627
10628
thread = 0
10629
10630
# iterate over events, output inefficiency derived metric
10631
- print "Top 10 Average", PowerPerProc, "values per thread for this trial:"
10632
+ print("Top 10 Average", PowerPerProc, "values per thread for this trial:")
10633
for event in top10er.getSortedEventNames():
10634
- print event, top10.getExclusive(thread, event, PowerPerProc)
10635
- print
10636
- print mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, PowerPerProc)
10637
- print
10638
+ print(event, top10.getExclusive(thread, event, PowerPerProc))
10639
+ print()
10640
+ print(mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, PowerPerProc))
10641
+ print()
10642
10643
# compute the energy consumed by each event
10644
- print "Computing joules consumed..."
10645
+ print("Computing joules consumed...")
10646
derived, EnergyPerProc = getEnergy(derived, PowerPerProc)
10647
10648
# get the top 10 "power dense" events
10649
10650
top10 = top10er.processData().get(0)
10651
10652
# iterate over events, output inefficiency derived metric
10653
- print "Top 10 Average", EnergyPerProc, "values per thread for this trial:"
10654
+ print("Top 10 Average", EnergyPerProc, "values per thread for this trial:")
10655
for event in top10er.getSortedEventNames():
10656
- print event, top10.getExclusive(thread, event, EnergyPerProc)
10657
+ print(event, top10.getExclusive(thread, event, EnergyPerProc))
10658
#print event, top10.getExclusive(thread, event, "LINUX_TIMERS")
10659
#print event, top10.getExclusive(thread, event, "(LINUX_TIMERS*1.0E-6)")
10660
- print
10661
- print mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, EnergyPerProc)
10662
+ print()
10663
+ print(mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, EnergyPerProc))
10664
#print mainEvent, derived.getExclusive(thread, event, "LINUX_TIMERS")
10665
#print mainEvent, derived.getExclusive(thread, event, "(LINUX_TIMERS*1.0E-6)")
10666
- print
10667
+ print()
10668
10669
# compute the floating point operations per joule per event
10670
- print "Computing FP_OPS/joule..."
10671
+ print("Computing FP_OPS/joule...")
10672
derived, FlopsPerJoule = getFlopsPerJoule(derived, EnergyPerProc)
10673
10674
# get the top 10 "power dense" events
10675
10676
top10 = top10er.processData().get(0)
10677
10678
# iterate over events, output inefficiency derived metric
10679
- print "Top 10 Average", FlopsPerJoule, "values per thread for this trial:"
10680
+ print("Top 10 Average", FlopsPerJoule, "values per thread for this trial:")
10681
for event in top10er.getSortedEventNames():
10682
- print event, top10.getExclusive(thread, event, FlopsPerJoule)
10683
- print
10684
- print mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, FlopsPerJoule)
10685
- print
10686
+ print(event, top10.getExclusive(thread, event, FlopsPerJoule))
10687
+ print()
10688
+ print(mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, FlopsPerJoule))
10689
+ print()
10690
10691
# process the rules
10692
#RuleHarness.getInstance().processRules()
10693
10694
- print "---------------- JPython test script end -------------"
10695
+ print("---------------- JPython test script end -------------")
10696
10697
if __name__ == "__main__":
10698
main()
--- a/tools/src/perfexplorer/openuh/powerOpt.py (original)
--- b/tools/src/perfexplorer/openuh/powerOpt.py (refactored)

global True
global False

- print "--------------- JPython test script start ------------"
- print "--- Calculating Power Models --- "
+ print("--------------- JPython test script start ------------")
+ print("--- Calculating Power Models --- ")

# create a rulebase for processing
#print "Loading Rules..."
#ruleHarness = RuleHarness.useGlobalRules("openuh/OpenUHRules.drl")

# load the trial
- print "loading the data..."
+ print("loading the data...")

# check to see if the user has selected a trial
tmp = Utilities.getCurrentTrial()
if tmp != None:
trial = TrialResult(tmp)
- print
+ print()
else:
# remove these two lines to bypass this and use the default trial
- print "No trial selected - script exiting"
+ print("No trial selected - script exiting")
return

# extract the non-callpath events from the trial
- print "extracting non-callpath..."
+ print("extracting non-callpath...")
extractor = ExtractNonCallpathEventOperation(trial)
extracted = extractor.processData().get(0)

# get basic statistics
- print "computing mean..."
+ print("computing mean...")
statMaker = BasicStatisticsOperation(extracted, False)
stats = statMaker.processData()
means = stats.get(BasicStatisticsOperation.MEAN)

# get main event
mainEvent = means.getMainEvent()
- print "Main Event: ", mainEvent
+ print("Main Event: ", mainEvent)

# calculate all derived metrics
- print
- print "Deriving power metric..."
+ print()
+ print("Deriving power metric...")
derived, PowerPerProc = getPowerModel(means)

# get the top 10 events

thread = 0

# iterate over events, output inefficiency derived metric
- print
- print "Top 10 Average", PowerPerProc, "values per thread for this trial:"
- for event in top10er.getSortedEventNames():
- print event, derived.getExclusive(thread, event, PowerPerProc)
- print
- print mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, PowerPerProc)
- print
+ print()
+ print("Top 10 Average", PowerPerProc, "values per thread for this trial:")
+ for event in top10er.getSortedEventNames():
+ print(event, derived.getExclusive(thread, event, PowerPerProc))
+ print()
+ print(mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, PowerPerProc))
+ print()

# compute the energy consumed by each event
- print "Computing joules consumed..."
+ print("Computing joules consumed...")
derived, EnergyPerProc = getEnergy(derived, PowerPerProc)

# iterate over events, output inefficiency derived metric
- print
- print "Top 10 Average", EnergyPerProc, "values per thread for this trial:"
- for event in top10er.getSortedEventNames():
- print event, derived.getExclusive(thread, event, EnergyPerProc)
- print
- print mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, EnergyPerProc)
- print
+ print()
+ print("Top 10 Average", EnergyPerProc, "values per thread for this trial:")
+ for event in top10er.getSortedEventNames():
+ print(event, derived.getExclusive(thread, event, EnergyPerProc))
+ print()
+ print(mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, EnergyPerProc))
+ print()

# compute the floating point operations per joule per event
- print "Computing FP_OPS/joule..."
+ print("Computing FP_OPS/joule...")
derived, FlopsPerJoule = getFlopsPerJoule(derived, EnergyPerProc)

# iterate over events, output inefficiency derived metric
- print
- print "Top 10 Average", FlopsPerJoule, "values per thread for this trial:"
- for event in top10er.getSortedEventNames():
- print event, derived.getExclusive(thread, event, FlopsPerJoule)
- print
- print mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, FlopsPerJoule)
- print
+ print()
+ print("Top 10 Average", FlopsPerJoule, "values per thread for this trial:")
+ for event in top10er.getSortedEventNames():
+ print(event, derived.getExclusive(thread, event, FlopsPerJoule))
+ print()
+ print(mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, FlopsPerJoule))
+ print()

# compute the floating point operations per joule per event
- print "Computing Instructions Per Cycle..."
+ print("Computing Instructions Per Cycle...")
derived, IPC = getIPC(derived)

# iterate over events, output inefficiency derived metric
- print
- print "Top 10 Average", IPC, "values per thread for this trial:"
- for event in top10er.getSortedEventNames():
- print event, derived.getExclusive(thread, event, IPC)
- print
- print mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, IPC)
- print mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, "PAPI_TOT_INS")
- print
+ print()
+ print("Top 10 Average", IPC, "values per thread for this trial:")
+ for event in top10er.getSortedEventNames():
+ print(event, derived.getExclusive(thread, event, IPC))
+ print()
+ print(mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, IPC))
+ print(mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, "PAPI_TOT_INS"))
+ print()

# compute the floating point operations per joule per event
- print "Computing Issued Per Cycle..."
+ print("Computing Issued Per Cycle...")
derived, issuedPerCycle = getIssuedPerCycle(derived)

# iterate over events, output inefficiency derived metric
- print
- print "Top 10 Average", issuedPerCycle, "values per thread for this trial:"
- for event in top10er.getSortedEventNames():
- print event, derived.getExclusive(thread, event, issuedPerCycle)
- print
-
- print mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, issuedPerCycle)
- print mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, "PAPI_TOT_IIS")
- print
-
- print "Time to completion..."
- print
- for event in top10er.getSortedEventNames():
- print event, derived.getExclusive(thread, event, derived.getTimeMetric())/1000000
- print
-
- print mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, derived.getTimeMetric())/1000000
+ print()
+ print("Top 10 Average", issuedPerCycle, "values per thread for this trial:")
+ for event in top10er.getSortedEventNames():
+ print(event, derived.getExclusive(thread, event, issuedPerCycle))
+ print()
+
+ print(mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, issuedPerCycle))
+ print(mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, "PAPI_TOT_IIS"))
+ print()
+
+ print("Time to completion...")
+ print()
+ for event in top10er.getSortedEventNames():
+ print(event, derived.getExclusive(thread, event, derived.getTimeMetric())/1000000)
+ print()
+
+ print(mainEvent, "INCLUSIVE: ", derived.getInclusive(thread, mainEvent, derived.getTimeMetric())/1000000)

# process the rules
#RuleHarness.getInstance().processRules()

- print "---------------- JPython test script end -------------"
+ print("---------------- JPython test script end -------------")

if __name__ == "__main__":
main()
--- a/tools/src/perfexplorer/openuh/script.py (original)
--- b/tools/src/perfexplorer/openuh/script.py (refactored)


def loadRules():
global ruleHarness
- print "Loading Rules..."
+ print("Loading Rules...")
ruleHarness = RuleHarness.useGlobalRules("rules/GeneralRules.drl")
ruleHarness.addRules("rules/ApplicationRules.drl")
ruleHarness.addRules("rules/MachineRules.drl")
return

def loaddata():
- print "loading the data..."
+ print("loading the data...")
Utilities.setSession("openuh")
#Utilities.setSession("perfdmf")
#trial = TrialMeanResult(Utilities.getTrial("fortran", "test", "O3-2048-real.8-bounds"))

return input, newMetrics


-print "--------------- JPython test script start ------------"
-print "doing single trial analysis for mm on gomez"
+print("--------------- JPython test script start ------------")
+print("doing single trial analysis for mm on gomez")

# create a rulebase for processing
#loadRules()

derived, newMetrics = deriveMetrics(extracted)
for thread in derived.getThreads():
for metric in newMetrics:
- print event, metric, derived.getInclusive(thread, event, metric)
-
-print
+ print(event, metric, derived.getInclusive(thread, event, metric))
+
+print()

# get the HP stall percentages
percentages, newMetrics = stallHPPercentages(derived)
for thread in percentages.getThreads():
for metric in newMetrics:
- print event, metric, "%.2f%%" % (percentages.getInclusive(thread, event, metric)*100.0)
-
-print
+ print(event, metric, "%.2f%%" % (percentages.getInclusive(thread, event, metric)*100.0))
+
+print()

# get the stall percentages
percentages, newMetrics = stallPercentages(derived)
for thread in percentages.getThreads():
for metric in newMetrics:
- print event, metric, "%.2f%%" % (percentages.getInclusive(thread, event, metric)*100.0)
-
-print
+ print(event, metric, "%.2f%%" % (percentages.getInclusive(thread, event, metric)*100.0))
+
+print()

# get the HP stall percentages, breakdown of fpstalls
percentages, newMetrics = stallL1BreakdownPercentages(derived)
for thread in percentages.getThreads():
for metric in newMetrics:
- print event, metric, "%.2f%%" % (percentages.getInclusive(thread, event, metric)*100.0)
-
-print
+ print(event, metric, "%.2f%%" % (percentages.getInclusive(thread, event, metric)*100.0))
+
+print()

#RuleHarness.getInstance().processRules()

-print "---------------- JPython test script end -------------"
+print("---------------- JPython test script end -------------")
--- a/tools/src/perfexplorer/openuh/totalStalls.py (original)
--- b/tools/src/perfexplorer/openuh/totalStalls.py (refactored)


def loadRules():
global ruleHarness
- print "Loading Rules..."
+ print("Loading Rules...")
ruleHarness = RuleHarness.useGlobalRules("openuh/OpenUHRules.drl")
return

def loaddata():
- print "loading the data..."
+ print("loading the data...")
Utilities.setSession("openuh")
#Utilities.setSession("perfdmf")
#trial = TrialMeanResult(Utilities.getTrial("fortran", "test", "O3-2048-real.8-bounds"))

return input, newMetrics


-print "--------------- JPython test script start ------------"
-print "doing single trial analysis for mm on gomez"
+print("--------------- JPython test script start ------------")
+print("doing single trial analysis for mm on gomez")

# create a rulebase for processing
loadRules()

trial = loaddata()
#event = "LOOP #2 [file:/mnt/netapp/home1/khuck/openuh/src/fpga/msap.c <65, 158>]"
#event = "diff_coeff__"
-print "main event: ", trial.getMainEvent()
-print "time metric: ", trial.getTimeMetric()
+print("main event: ", trial.getMainEvent())
+print("time metric: ", trial.getTimeMetric())

# extract the non-callpath events
extracted = extractNonCallpath(trial)

# get the stall percentages
-print "Breakdown of Total Stalls:"
+print("Breakdown of Total Stalls:")
derived, newMetrics = deriveMetrics(extracted)
percentages, newMetrics = stallPercentages(derived)
for event in percentages.getEvents():

for metric in newMetrics:
#print event, metric, "%.2f%%" % (percentages.getInclusive(thread, event, metric)*100.0)
MeanEventFact.evaluateMetric(percentages, event, metric)
-print
+print()

# get the HP stall percentages, breakdown of fpstalls
# print "Breakdown of FP Stalls:"


RuleHarness.getInstance().processRules()

-print "---------------- JPython test script end -------------"
+print("---------------- JPython test script end -------------")
--- a/tools/src/perfexplorer/rules/cluster.py (original)
--- b/tools/src/perfexplorer/rules/cluster.py (refactored)


def loadRules():
global ruleHarness
- print "Loading Rules..."
+ print("Loading Rules...")
ruleHarness = RuleHarness.useGlobalRules("rules/GeneralRules.drl")
ruleHarness.addRules("rules/ApplicationRules.drl")
ruleHarness.addRules("rules/MachineRules.drl")
return

def loaddata():
- print "loading the data..."
+ print("loading the data...")
#from home
Utilities.setSession("localhost:5432/perfdmf")
trial = TrialResult(Utilities.getTrial("sweep3d", "jaguar", "16"))

return extractor.processData().get(0)

def getTop5(input):
- print "Getting top 5 events (sorted by exclusive time)..."
+ print("Getting top 5 events (sorted by exclusive time)...")
getTop5 = TopXEvents(input, input.getTimeMetric(), AbstractResult.EXCLUSIVE, 5)
top5 = getTop5.processData().get(0)





-print "--------------- JPython test script start ------------"
+print("--------------- JPython test script start ------------")

-print "doing single trial analysis for gtc on jaguar"
+print("doing single trial analysis for gtc on jaguar")

# create a rulebase for processing
# loadRules()


# RuleHarness.getInstance().processRules()

-print "---------------- JPython test script end -------------"
+print("---------------- JPython test script end -------------")
--- a/tools/src/perfexplorer/rules/miranda.py (original)
--- b/tools/src/perfexplorer/rules/miranda.py (refactored)

False = 0

def loadRules():
- print "Loading Rules..."
+ print("Loading Rules...")
ruleHarness = RuleHarness.useGlobalRules("rules/GeneralRules.drl")
ruleHarness.addRules("rules/ApplicationRules.drl")
ruleHarness.addRules("rules/MachineRules.drl")
return

def loaddata():
- print "loading the data..."
+ print("loading the data...")
Utilities.setSession("spaceghost")
trial = Utilities.getTrial("Miranda", "BlueGeneL", "8K.old")


return trialResult, trialMetadata

def getTop10(input):
- print "Getting top 10 events (sorted by exclusive time)..."
+ print("Getting top 10 events (sorted by exclusive time)...")
getTop10 = TopXEvents(input, input.getTimeMetric(), AbstractResult.EXCLUSIVE, 10)
top10 = getTop10.processData().get(0)
return top10

RuleHarness.getInstance().assertObject(outputs.get(0));
return outputs

-print "--------------- JPython test script start ------------"
-print "doing single trial correlation analysis for Miranda on BGL"
+print("--------------- JPython test script start ------------")
+print("doing single trial correlation analysis for Miranda on BGL")
# create a rulebase for processing
loadRules()
# load the trial

top10 = getTop10(trialResult)
correlateMetadata(top10, trialMetadata)
RuleHarness.getInstance().processRules()
-print "---------------- JPython test script end -------------"
+print("---------------- JPython test script end -------------")
--- a/tools/src/perfexplorer/rules/rules.py (original)
--- b/tools/src/perfexplorer/rules/rules.py (refactored)

False = 0

def rules():
- print "doing phase test for gtc on jaguar"
+ print("doing phase test for gtc on jaguar")
# load the trial
Utilities.setSession("perfdmf.test")
baseline = Utilities.getTrial("gtc_bench", "superscaling.jaguar", "64")

diff.addInput(comparison)
diff.processData()
metaDiff = DifferenceMetadataOperation(baseline, comparison)
- print metaDiff.differencesAsString()
- print "****** Processing Super Duper Rules! ******"
+ print(metaDiff.differencesAsString())
+ print("****** Processing Super Duper Rules! ******")
ruleHarness = RuleHarness("rules/GeneralRules.drl")
ruleHarness.addRules("rules/ApplicationRules.drl")
ruleHarness.addRules("rules/MachineRules.drl")

ruleHarness.assertObject(diff)
ruleHarness.processRules()

- print "got the data"
+ print("got the data")

return

-print "--------------- JPython test script start ------------"
+print("--------------- JPython test script start ------------")

rules()

# pe.exit()

-print "---------------- JPython test script end -------------"
+print("---------------- JPython test script end -------------")
--- a/tools/src/perfexplorer/rules/scdemo1.py (original)
--- b/tools/src/perfexplorer/rules/scdemo1.py (refactored)


def loadRules():
global ruleHarness
- print "Loading Rules..."
+ print("Loading Rules...")
ruleHarness = RuleHarness.useGlobalRules("rules/GeneralRules.drl")
ruleHarness.addRules("rules/ApplicationRules.drl")
ruleHarness.addRules("rules/MachineRules.drl")
return

def loaddata():
- print "loading the data..."
+ print("loading the data...")
#from home
# Utilities.setSession("localhost:5432/perfdmf")
# trial = TrialMeanResult(Utilities.getTrial("gtc_bench", "jaguar", "64"))

return extractor.processData().get(0)

def getTop10andMain(input):
- print "Getting top 10 events (sorted by exclusive time)..."
+ print("Getting top 10 events (sorted by exclusive time)...")
getTop10 = TopXEvents(input, input.getTimeMetric(), AbstractResult.EXCLUSIVE, 10)
top10 = getTop10.processData().get(0)
sorted = top10.getEvents()

else:
metric8Value = 0.0;

- print event, value, metric1Value, metric2Value, metric4Value, metric5Value, metric7Value, metric8Value
+ print(event, value, metric1Value, metric2Value, metric4Value, metric5Value, metric7Value, metric8Value)

-print "--------------- JPython test script start ------------"
+print("--------------- JPython test script start ------------")

-print "doing single trial analysis for gtc on jaguar"
+print("doing single trial analysis for gtc on jaguar")

# create a rulebase for processing
loadRules()


RuleHarness.getInstance().processRules()

-print "---------------- JPython test script end -------------"
+print("---------------- JPython test script end -------------")
--- a/tools/src/perfexplorer/rules/scdemo2.py (original)
--- b/tools/src/perfexplorer/rules/scdemo2.py (refactored)

False = 0

def loadRules():
- print "Loading Rules..."
+ print("Loading Rules...")
ruleHarness = RuleHarness.useGlobalRules("rules/GeneralRules.drl")
ruleHarness.addRules("rules/ApplicationRules.drl")
ruleHarness.addRules("rules/MachineRules.drl")
return

def loaddata():
- print "loading the data..."
+ print("loading the data...")
#from home
Utilities.setSession("localhost:5432/perfdmf")
# from office

return extractor.processData().get(0)

def getTop5(input):
- print "Getting top 5 events (sorted by exclusive time)..."
+ print("Getting top 5 events (sorted by exclusive time)...")
getTop5 = TopXEvents(input, input.getTimeMetric(), AbstractResult.EXCLUSIVE, 5)
top5 = getTop5.processData().get(0)





-print "--------------- JPython test script start ------------"
+print("--------------- JPython test script start ------------")

-print "doing single trial analysis for Sweep3D on jaguar"
+print("doing single trial analysis for Sweep3D on jaguar")

# create a rulebase for processing
loadRules()


RuleHarness.getInstance().processRules()

-print "---------------- JPython test script end -------------"
+print("---------------- JPython test script end -------------")
--- a/tools/src/perfexplorer/rules/scdemo3.py (original)
--- b/tools/src/perfexplorer/rules/scdemo3.py (refactored)


def loadRules():
global ruleHarness
- print "Loading Rules..."
+ print("Loading Rules...")
ruleHarness = RuleHarness.useGlobalRules("rules/GeneralRules.drl")
ruleHarness.addRules("rules/ApplicationRules.drl")
ruleHarness.addRules("rules/MachineRules.drl")
return

def loaddata():
- print "loading the data..."
+ print("loading the data...")
#from home
Utilities.setSession("localhost:5432/perfdmf")
baseline = TrialMeanResult(Utilities.getTrial("gtc_bench", "jaguar", "64"))

return extractor.processData().get(0)

def getTop5(baseline, comparison):
- print "Getting top 5 events (sorted by exclusive time)..."
+ print("Getting top 5 events (sorted by exclusive time)...")
# get the top 5 events for the baseline
getTop5 = TopXEvents(baseline, baseline.getTimeMetric(), AbstractResult.EXCLUSIVE, 5)
baseEvents = getTop5.processData().get(0).getEvents()

diff.processData();
RuleHarness.getInstance().assertObject(diff);

-print "--------------- JPython test script start ------------"
+print("--------------- JPython test script start ------------")

-print "doing single trial analysis for gtc on jaguar"
+print("doing single trial analysis for gtc on jaguar")

# create a rulebase for processing
loadRules()


RuleHarness.getInstance().processRules()

-print "---------------- JPython test script end -------------"
+print("---------------- JPython test script end -------------")
--- a/tools/src/pycoolr/src/pycoolrgui/pycoolr-plot/clr_utils.py (original)
--- b/tools/src/pycoolr/src/pycoolrgui/pycoolr-plot/clr_utils.py (refactored)

for jt in jtext:
try:
j = json.loads(jt)
- except ValueError, e:
+ except ValueError as e:
continue
ret.append(j)

--- a/tools/src/pycoolr/src/pycoolrgui/pycoolr-plot/coolr-back.py (original)
--- b/tools/src/pycoolr/src/pycoolrgui/pycoolr-plot/coolr-back.py (refactored)

#!/usr/bin/env python

#import sys, os, re, _thread, signal
-import sys, os, re, thread, signal
+import sys, os, re, _thread, signal
#from cStringIO import StringIO
from io import StringIO
import subprocess

with open(cfgfn) as f:
cfgtmp = json.load(f)
# override if cfg defines any
- for k in cfgtmp.keys():
+ for k in list(cfgtmp.keys()):
cfg[k] = cfgtmp[k]
# override if specifed as cmd option
- for k in ocfg.keys():
+ for k in list(ocfg.keys()):
cfg[k] = ocfg[k]



targetnode = os.environ['PYCOOLR_NODE']
#targetnode = cfg['masternode']
if len(enclaves) == 0:
- if cfg.has_key('enclaves'):
+ if 'enclaves' in cfg:
enclaves = cfg['enclaves']

#print 'masternode:', cfg['masternode']
-print('targetnode:', targetnode)
-print('enclaves:', enclaves)
+print(('targetnode:', targetnode))
+print(('enclaves:', enclaves))

if len(appcfgfn) > 0:
with open(appcfgfn) as f:
appcfg = json.load(f)
- for k in appcfg.keys():
+ for k in list(appcfg.keys()):
cfg[k] = appcfg[k]

- if not (cfg.has_key('appname') and cfg.has_key('appsamples')):
- print("Please double check %s: appname or appsamples tags" % appcfgfn)
+ if not ('appname' in cfg and 'appsamples' in cfg):
+ print(("Please double check %s: appname or appsamples tags" % appcfgfn))
sys.exit(1)



try:
logf = open(cfg["outputfn"], 'w', 0) # unbuffered write
except:
- print('unable to open', cfg["outputfn"])
+ print(('unable to open', cfg["outputfn"]))


#if not fakemode:

params['enclaves'] = enclaves

if sys.version_info[0] < 3:
- import Tkinter
+ import tkinter
#from Tkinter import *
- import tkFileDialog
- import tkFont
- from tkFont import Font
+ import tkinter.filedialog
+ import tkinter.font
+ from tkinter.font import Font
#from Tkinter.FileDialog import askopenfilename
else:
import tkinter

def __init__(self, master, row=2, col=3):

# Create a container
- self.frame = Tkinter.Frame(master,width=200,height=100)
+ self.frame = tkinter.Frame(master,width=200,height=100)
# Create 2 buttons
#self.button_left = Tkinter.Button(frame,text="< Decrease Slope",
# command=self.decrease)

self.nbsamples = params['cfg']['nbsamples']
#self.nbcvars = params['cfg']['nbcvars']
self.listmetrics = params['cfg']['metrics']
- print("self.listmetrics",self.listmetrics)
+ print(("self.listmetrics",self.listmetrics))
#self.listsamples = params['cfg']['appsamples']
self.nbGraphs = params['cfg']['nbgraphs']


self.btncvarsupdate = None

self.metrics = params['cfg']['metrics']
- print("self.metrics", self.metrics)
+ print(("self.metrics", self.metrics))
#self.ranks = params['cfg']['ranks']
self.ranks = [None] * self.nbsamples
self.procs = [None] * self.nbsamples

self.listFontFamily = ['serif', 'sans-serif', 'cursive', 'fantasy', 'monospace']
self.listFontWeight = ['ultralight','light','normal','regular','book','medium','roman','semibold','demibold','demi','bold','heavy','extra bold','black']

- self.list_fonts = list( tkFont.families() )
+ self.list_fonts = list( tkinter.font.families() )

self.selectedFontPolicy = None
self.selectedFontSize = None
self.selectedFontWeight = None

# create a custom font
- self.customFont = tkFont.Font(family="Helvetica", size=12)
+ self.customFont = tkinter.font.Font(family="Helvetica", size=12)

for idx in range(params['cfg']['nbgraphs']):
self.listUsedGraphs.append(-1)

try:
root.config(menu=menubar)
except AttributeError as attErr:
- print('menu Exception: ', type(attErr), attErr)
+ print(('menu Exception: ', type(attErr), attErr))

#self.winPvars()
#self.winCvars()

self.subSpawn()

def try_execute(self, c, statement, parameters=None):
- print ("statement: ", statement)
- print ("parameters: ", parameters)
+ print(("statement: ", statement))
+ print(("parameters: ", parameters))
try:
if parameters:
c.execute(statement,parameters);
else:
c.execute(statement);
except sqlite3.Error as e:
- print("database error...", e.args[0])
+ print(("database error...", e.args[0]))

def open_connection(self):
global conn
# check for file to exist
#print ("Checking for file: ", sqlite_file)
- print("Checking for file: ", self.sosdbfile)
+ print(("Checking for file: ", self.sosdbfile))
while not os.path.exists(self.sosdbfile):
- print("Waiting on file: ", self.sosdbfile)
+ print(("Waiting on file: ", self.sosdbfile))
time.sleep(1)

#print("Connecting to: ", sqlite_file)
- print("Connecting to: ", self.sosdbfile)
+ print(("Connecting to: ", self.sosdbfile))
# Connecting to the database file
#conn = sqlite3.connect(sqlite_file)
#fd = os.open(sqlite_file, os.O_RDONLY)


def browsefontpolicy(self):
print('browsefontpolicy')
- fontpolicydiag = tkFileDialog.askopenfilename(filetypes=[("Text files","*.fft")])
+ fontpolicydiag = tkinter.filedialog.askopenfilename(filetypes=[("Text files","*.fft")])

def onselectFontPolicy(self,evt):
w = evt.widget
selection = w.curselection()
value = w.get(selection[0])
self.selectedFontPolicy = value
- print('select font: ', value)
+ print(('select font: ', value))

def onselectFontSize(self, evt):
print('select font size')

selection = w.curselection()
value = w.get(selection[0])
self.selectedFontSize = value
- print('select font: ', value)
+ print(('select font: ', value))

def onselectFontWeight(self, evt):
print('select font weight')

selection = w.curselection()
value = w.get(selection[0])
self.selectedFontWeight = value
- print('select font: ', value)
+ print(('select font: ', value))

def loadFontPolicy(self):
fontpolicydiag = askopenfilename(filetypes=(("*.fft")))

#f1 = Tk.Frame(pvarswin,width=150,height=100)
s1 = Tk.Scrollbar(self.f1)
#l1 = Tk.Listbox(f1,selectmode='multiple',width=80,height=40)
- print("self.listmetrics",self.listmetrics)
- print("len(self.listmetrics)",len(self.listmetrics))
- print("self.nbsamples",self.nbsamples)
+ print(("self.listmetrics",self.listmetrics))
+ print(("len(self.listmetrics)",len(self.listmetrics)))
+ print(("self.nbsamples",self.nbsamples))
for i in range(self.nbsamples): self.l1.insert(i, self.listmetrics[i])
s1.config(command = self.l1.yview)
self.l1.config(yscrollcommand = s1.set)


def clearplot(self,idxGraph):

- print('clearplot: idxGraph=', idxGraph)
+ print(('clearplot: idxGraph=', idxGraph))
ax = self.ax[idxGraph]
ax.cla()
#ax.clf()

total_val=total_val+ref4
num_vals=num_vals+1
mean_val=total_val/num_vals
- print('display record ref4='+str(ref4))
+ print(('display record ref4='+str(ref4)))
self.data_lr[i].add(t,ref4)
#self.data_lr[i].add(t,mean_val)
goodrecord=1

try:
ax.cla()
except Exception as errCla:
- print('update_gui: Error cla(): ', type(errCla), errCla)
+ print(('update_gui: Error cla(): ', type(errCla), errCla))

ax.set_xlim([t-gxsec, t])
#print 'get x and y'


#print 'parse graphs'
metric_value = max(sample[1],0)
- print("metric_value",metric_value)
+ print(("metric_value",metric_value))
numeric = re.search(r'\d+', metric_value)
metric_value_num = numeric.group()
metric_value_float = float(metric_value_num)

try:
ax.cla()
except Exception as errCla:
- print('update_gui: Error cla(): ', type(errCla), errCla)
+ print(('update_gui: Error cla(): ', type(errCla), errCla))

ax.set_xlim([pack_time-gxsec, pack_time])
#print 'get x and y'

try:
ax.cla()
except Exception as errCla:
- print('update_gui: Error cla(): ', type(errCla), errCla)
+ print(('update_gui: Error cla(): ', type(errCla), errCla))

ax.set_xlim([pack_time-gxsec, pack_time])
#print 'get x and y'

try:
j = json.loads(payload)
except ValueError as e:
- print('Failed to load json data: %s' %e)
+ print(('Failed to load json data: %s' %e))
continue
#return False


if 'node' not in e and\
'sample' not in e and\
'time' not in e:
- print('Ignore this invalid sample:', json.dumps(e))
+ print(('Ignore this invalid sample:', json.dumps(e)))
continue

#print 'set timestamp'

try:
self.canvas.draw()
except Exception as errDraw:
- print('Error drawing canvas: ', type(errDraw), errDraw)
+ print(('Error drawing canvas: ', type(errDraw), errDraw))
#plt.draw()

profile_t7 = time.time()

all_rows = c.fetchall()
ts = np.array([x[0] for x in all_rows])
min_timestamp = ts[0]
- print("min timestamp: ", min_timestamp)
+ print(("min timestamp: ", min_timestamp))


def get_min_timestamp(self):

sql_statement = ("SELECT min(time_pack) FROM viewCombined;")
print("get_min_timestamp Executing query")

- print("sql statement: ", sql_statement)
+ print(("sql statement: ", sql_statement))
#self.try_execute(c, sql_statement)
os.environ['SOS_SQL'] = sql_statement
sos_bin_path = os.environ.get('SOS_BIN_DIR')
- print('SOS BIN path: ', sos_bin_path)
+ print(('SOS BIN path: ', sos_bin_path))
os.system('cd '+ sos_bin_path)
- print('current dir: ', os.getcwd())
+ print(('current dir: ', os.getcwd()))
# Redirect stdout of passed command into a string

soscmd = sos_bin_path + "/demo_app_silent --sql SOS_SQL"
- print('soscmd: ', soscmd)
+ print(('soscmd: ', soscmd))
tmp_res_min_ts_sql = subprocess.check_output(soscmd, shell=True)

#self.res_min_ts_sql = tmp_res_min_ts_sql.splitlines()
- print('get min ts: tmp res sql=', tmp_res_min_ts_sql)
+ print(('get min ts: tmp res sql=', tmp_res_min_ts_sql))
res_min_ts_sql = tmp_res_min_ts_sql.splitlines()
- print("List of result SQL MIN TS: ", res_min_ts_sql)
+ print(("List of result SQL MIN TS: ", res_min_ts_sql))
min_ts_rows = res_min_ts_sql[1].split(",")
- print("List of result SQL MIN TS values: ", min_ts_rows)
+ print(("List of result SQL MIN TS values: ", min_ts_rows))
# Remove first element of SQL result
#ts = np.array([x[0] for x in min_ts_rows])
str_min_timestamp = min_ts_rows[0].replace('\"', '')

sql_statement = ("SELECT value_name, value, time_pack FROM viewCombined WHERE value_name LIKE '" + metric+ "'")
#sql_statement = ("SELECT * FROM viewCombined WHERE value_name LIKE '" + metric+ "'")

- print("sql statement: ", sql_statement )
+ print(("sql statement: ", sql_statement ))
#self.try_execute(c, sql_statement)
os.environ['SOS_SQL'] = sql_statement
sos_bin_path = os.environ.get('SOS_BIN_DIR')
- print('SOS BIN path: ', sos_bin_path)
+ print(('SOS BIN path: ', sos_bin_path))
os.system('cd '+ sos_bin_path)
- print('current dir: ', os.getcwd())
+ print(('current dir: ', os.getcwd()))
# Redirect stdout of passed command into a string

soscmd = sos_bin_path + "/demo_app_silent --sql SOS_SQL"
- print('soscmd: ', soscmd)
+ print(('soscmd: ', soscmd))
tmp_res_sql = subprocess.check_output(soscmd, shell=True)

self.try_execute(c, sql_statement)

#print 'stdout of SOS demo: ', sys.stdout
#self.res_sql = resultstdout.getvalue()
- print('tmp res_sql: ', tmp_res_sql)
+ print(('tmp res_sql: ', tmp_res_sql))

self.res_sql = tmp_res_sql.splitlines()
# REmove first element of SQL result
self.res_sql.pop(0)

for item_sql in self.res_sql:
- print('res sql: ', item_sql)
+ print(('res sql: ', item_sql))


# Call demo with SQL statement given as argument and store standard output

while self.ranks.size == 0:
time.sleep(1)
self.ranks,self.procs = self.get_ranks(self.conn)
- print ("ranks: ", self.ranks)
+ print(("ranks: ", self.ranks))

# get the number of nodes
self.nodes,self.noderanks = self.get_nodes(self.conn)
while self.nodes.size == 0:
time.sleep(1)
nodes,self.noderanks = self.get_nodes(self.conn)
- print ("nodes: ", self.nodes)
+ print(("nodes: ", self.nodes))

self.get_min_timestamp_db(self.conn)
#resize the figure

print('SOS: Execute demo app')
sos_path = os.environ.get('SOS_BUILD_DIR')
self.sos_bin_path = sos_path+"/bin"
- print('SOS BIN PATH: ', self.sos_bin_path)
+ print(('SOS BIN PATH: ', self.sos_bin_path))
os.system("cd "+ self.sos_bin_path)




self.opendb()

- print("metrics: ", self.metrics)
+ print(("metrics: ", self.metrics))
#self.get_min_timestamp()

while True:

countsamples = 0
for sample in self.rows[j]:
params['ts'] = 0
- print 'PYCOOLR sample: ', sample
+ print('PYCOOLR sample: ', sample)
#self.req_sql(self.conn, self.ranks, self.rows)
profile_t2 = time.time()
self.lock.acquire()

if self.listRecordSample[i] != -1:
j = self.listRecordSample[i]

- print('readsosmetrics: i=%d, j=%d' %(i,j))
+ print(('readsosmetrics: i=%d, j=%d' %(i,j)))

#rank = self.ranks[j]
#rank2 = self.ranks2[j]

self.rows[j] = self.conn.fetchall()
#print 'rows: ', self.rows[j]
if len(self.rows[j]) <= 0:
- print("Error: query returned no rows.",)
+ print(("Error: query returned no rows.",))
else:
goodrecord = 1




payload.strip()
- print('payload =',payload)
+ print(('payload =',payload))
try:
j = json.loads(payload)
except ValueError as e:
- print('Failed to load json data: %s' %e)
+ print(('Failed to load json data: %s' %e))
continue
#return False


if 'node' not in e and\
'sample' not in e and\
'time' not in e:
- print('Ignore this invalid sample:', json.dumps(e))
+ print(('Ignore this invalid sample:', json.dumps(e)))
continue

#print 'set timestamp'

print('subSpawn: load beacon subscriber library')
envlibpath = os.environ['PYCOOLR_LIBPATH']
libarbjsonbeep = cdll.LoadLibrary(envlibpath+'/libarbitraryjsonbeepmulsub.so')
- thread.start_new_thread(self.subscribe,(libarbjsonbeep,))
- thread.start_new_thread(self.readEvents,(libarbjsonbeep,))
+ _thread.start_new_thread(self.subscribe,(libarbjsonbeep,))
+ _thread.start_new_thread(self.readEvents,(libarbjsonbeep,))
#thread.start_new_thread(self.readEvents,(libarbjsonbeep,))
except Exception as errThread:
- print("Error: unable to start thread: ", errThread)
+ print(("Error: unable to start thread: ", errThread))

elif self.tool == "sos":
try:
#thread.start_new_thread(self.subscribe,(libarbjsonbeep,))
- thread.start_new_thread(self.readsosmetrics,())
+ _thread.start_new_thread(self.readsosmetrics,())
#thread.start_new_thread(self.readsosmetrics_db,())

except Exception as errThread:
- print('Error: unable to start thread: ', errThread)
+ print(('Error: unable to start thread: ', errThread))


self.refresh_plot()


self.selectedcvarsvalues[i] = self.listcvarsentry[i].get()
strcvarsvalues += self.selectedcvarsvalues[i]
- print('numselectedcvars=%d, index=%d' % (self.numselectedcvars, i))
+ print(('numselectedcvars=%d, index=%d' % (self.numselectedcvars, i)))
if i+1 < self.numselectedcvars:
strcvarsmetrics += ","
strcvarsvalues += ","

#self.strcvars += "="
#self.strcvars += self.selectedcvarsvalues[i]
#strcvars += ","
- print('numselectedcvars=%d, index=%d' % (self.numselectedcvars, i))
+ print(('numselectedcvars=%d, index=%d' % (self.numselectedcvars, i)))
if i+1 < self.numselectedcvars:
strcvarsmetrics += ","
strcvarsvalues += ","

#self.strcvars += ":"
self.strcvars += strcvarsvalues

- print("strcvarsmetrics: ", strcvarsmetrics)
- print("strcvarsvalues: ", strcvarsvalues)
- print("strcvars: ", self.strcvars)
+ print(("strcvarsmetrics: ", strcvarsmetrics))
+ print(("strcvarsvalues: ", strcvarsvalues))
+ print(("strcvars: ", self.strcvars))

# Test if we have to communicate MPI_T CVARS in a Publish/Subscribe mode
if cvars_comm_mode == "pub":


for i in range(len(selection)):
value = w.get(selection[i])
- print("selection:", selection, ": '%s'" % value)
+ print(("selection:", selection, ": '%s'" % value))
self.selectedcvarsmetrics[i] = value

if self.listlabelcvarsmetric:

self.listcvarsarrayindexentry = [None] * len(selection)
self.listcvarsarrayindex = [None] * len(selection)

- print('selection: ', selection)
- print('range selection: ', range(len(selection)))
-
- for cvaritem, cvarindex in zip(selection, range(len(selection))):
+ print(('selection: ', selection))
+ print(('range selection: ', list(range(len(selection)))))
+
+ for cvaritem, cvarindex in zip(selection, list(range(len(selection)))):

value = w.get(selection[cvarindex])
- print('len selection: ', len(selection))
- print('value of item %d: %s ' % (cvarindex, value))
- print('cvaritem: ', cvaritem)
- print('cvarindex= ', cvarindex)
- print('cvarsindexrow= ', self.cvarsindexrow)
-
- print('cfg cvars array:', self.listcfgcvarsarray[0])
+ print(('len selection: ', len(selection)))
+ print(('value of item %d: %s ' % (cvarindex, value)))
+ print(('cvaritem: ', cvaritem))
+ print(('cvarindex= ', cvarindex))
+ print(('cvarsindexrow= ', self.cvarsindexrow))
+
+ print(('cfg cvars array:', self.listcfgcvarsarray[0]))
if value == self.listcfgcvarsarray[0]:

self.listlabelcvarsmetric[cvarindex]=Tk.Label(self.stepCvarsUpdate, text=value)

#print "selection:", selection, ": '%s'" % value

listintselection = [int (i) for i in selection]
- print('listintselection: ', listintselection)
+ print(('listintselection: ', listintselection))

for i in range(self.nbsamples):
if (self.listSamplesAllocated[i] > -1) and (i not in listintselection):

if self.listSamplesAllocated[j] == -1:
#index = int(j)
self.listUsedGraphs[i] = j
- print('graph %d allocated to sample %d' % (i, j))
+ print(('graph %d allocated to sample %d' % (i, j)))
self.listRecordSample[i] = j
self.listSamplesAllocated[j] = i
break

# Mark current graph as used
self.listUsedGraphs[j] = 1
# Record the current graph as plotting the current sample
- print('Record Sample %d for graph %d' %(index,j))
+ print(('Record Sample %d for graph %d' %(index,j)))
self.listRecordSample[j] = index

# Mark current sample as allocated to the current graph

self.canvas.draw()
#self.frame.update()
except Exception as errDraw:
- print('refresh_plot: Error drawing canvas: ', type(errDraw), errDraw)
+ print(('refresh_plot: Error drawing canvas: ', type(errDraw), errDraw))
self.lock.release()

self.root.after(1000,self.refresh_plot)

try:
self.canvas.draw()
except Exception as errDraw:
- print('Error drawing canvas: ', type(errDraw), errDraw)
+ print(('Error drawing canvas: ', type(errDraw), errDraw))

def checkfn(self, idx, text):
print('checkfn')
- print('Check index=%d text=%s' % (idx,text))
+ print(('Check index=%d text=%s' % (idx,text)))
#print 'Size of listbtnchecked[]= ', len(self.listbtnchecked)
#self.listbtnchecked[idx] = 1


# print 'nothing'


-root = Tkinter.Tk()
+root = tkinter.Tk()

app = Coolrsub(root,2,3)
root.mainloop()
--- a/tools/src/pycoolr/src/pycoolrgui/pycoolr-plot/coolr-launch.py (original)
--- b/tools/src/pycoolr/src/pycoolrgui/pycoolr-plot/coolr-launch.py (refactored)

try:
opts, args = getopt.getopt(sys.argv[1:],
shortopt, longopt)
-except getopt.GetoptError, err:
- print err
+except getopt.GetoptError as err:
+ print(err)
usage()
sys.exit(1)


elif o in ("--ncols"):
ocfg["ncols"]=int(a)
elif o in ("--list"):
- print ''
- print '[available graph modules]'
- print ''
+ print('')
+ print('[available graph modules]')
+ print('')
for i in cfg["modnames"]:
- print i
- print ''
- print ''
+ print(i)
+ print('')
+ print('')
sys.exit(0)
elif o in ("--mods"):
ocfg["modnames"] = a.split(",")

if len(args) < 1:
- print ''
- print 'No config file is specified. Enabled the fake mode.'
- print ''
+ print('')
+ print('No config file is specified. Enabled the fake mode.')
+ print('')
cfg["masternode"] = "frontend"
cfg["drawexternal"] = "no"
cfg["drawacpipwr"] = "no"

with open(cfgfn) as f:
cfgtmp = json.load(f)
# override if cfg defines any
- for k in cfgtmp.keys():
+ for k in list(cfgtmp.keys()):
cfg[k] = cfgtmp[k]
# override if specifed as cmd option
- for k in ocfg.keys():
+ for k in list(ocfg.keys()):
cfg[k] = ocfg[k]

if len(targetnode) == 0 :
targetnode = cfg['masternode']
if len(enclaves) == 0:
- if cfg.has_key('enclaves'):
+ if 'enclaves' in cfg:
enclaves = cfg['enclaves']

-print 'masternode:', cfg['masternode']
-print 'targetnode:', targetnode
-print 'enclaves:', enclaves
+print('masternode:', cfg['masternode'])
+print('targetnode:', targetnode)
+print('enclaves:', enclaves)

if len(appcfgfn) > 0:
with open(appcfgfn) as f:
appcfg = json.load(f)
- for k in appcfg.keys():
+ for k in list(appcfg.keys()):
cfg[k] = appcfg[k]

- if not (cfg.has_key('appname') and cfg.has_key('appsamples')):
- print "Please double check %s: appname or appsamples tags" % appcfgfn
+ if not ('appname' in cfg and 'appsamples' in cfg):
+ print("Please double check %s: appname or appsamples tags" % appcfgfn)
sys.exit(1)



try:
logf = open(cfg["outputfn"], 'w', 0) # unbuffered write
except:
- print 'unable to open', cfg["outputfn"]
-
-print >>logf, json.dumps(info)
+ print('unable to open', cfg["outputfn"])
+
+print(json.dumps(info), file=logf)

#if not fakemode:
# querycmds = cfg['querycmds']



if sys.version_info[0] < 3:
- import Tkinter as Tk
+ import tkinter as Tk
else:
import tkinter as Tk


#root.after(2000,mainLoop)

ngraphs = len(params['cfg']['appsamples'])
-print 'samples ', params['cfg']['appsamples']
+print('samples ', params['cfg']['appsamples'])
data_lr = [listrotate2D(length=params['lrlen']) for i in range(ngraphs)]

#fig = plt.figure( figsize=(cfg["figwidth"],cfg["figheight"]) )
--- a/tools/src/pycoolr/src/pycoolrgui/pycoolr-plot/coolr-sos-db.py (original)
--- b/tools/src/pycoolr/src/pycoolrgui/pycoolr-plot/coolr-sos-db.py (refactored)

#!/usr/bin/env python

-import sys, os, re, thread, signal
+import sys, os, re, _thread, signal
import multiprocessing
import json
import sqlite3

try:
opts, args = getopt.getopt(sys.argv[1:],
shortopt, longopt)
-except getopt.GetoptError, err:
- print err
+except getopt.GetoptError as err:
+ print(err)
usage()
sys.exit(1)


elif o in ("--ncols"):
ocfg["ncols"]=int(a)
elif o in ("--list"):
- print ''
- print '[available graph modules]'
- print ''
+ print('')
+ print('[available graph modules]')
+ print('')
for i in cfg["modnames"]:
- print i
- print ''
- print ''
+ print(i)
+ print('')
+ print('')
sys.exit(0)
elif o in ("--mods"):
ocfg["modnames"] = a.split(",")


if len(args) < 1:
- print ''
- print 'No config file is specified. Enabled the fake mode.'
- print ''
+ print('')
+ print('No config file is specified. Enabled the fake mode.')
+ print('')
cfg["masternode"] = "frontend"
cfg["drawexternal"] = "no"
cfg["drawacpipwr"] = "no"

with open(cfgfn) as f:
cfgtmp = json.load(f)
# override if cfg defines any
- for k in cfgtmp.keys():
+ for k in list(cfgtmp.keys()):
cfg[k] = cfgtmp[k]
# override if specifed as cmd option
- for k in ocfg.keys():
+ for k in list(ocfg.keys()):
cfg[k] = ocfg[k]



targetnode = os.environ['PYCOOLR_NODE']
#targetnode = cfg['masternode']
if len(enclaves) == 0:
- if cfg.has_key('enclaves'):
+ if 'enclaves' in cfg:
enclaves = cfg['enclaves']

#print 'masternode:', cfg['masternode']
-print 'targetnode:', targetnode
-print 'enclaves:', enclaves
+print('targetnode:', targetnode)
+print('enclaves:', enclaves)

if len(appcfgfn) > 0:
with open(appcfgfn) as f:
appcfg = json.load(f)
- for k in appcfg.keys():
+ for k in list(appcfg.keys()):
cfg[k] = appcfg[k]

- if not (cfg.has_key('appname') and cfg.has_key('appsamples')):
- print "Please double check %s: appname or appsamples tags" % appcfgfn
+ if not ('appname' in cfg and 'appsamples' in cfg):
+ print("Please double check %s: appname or appsamples tags" % appcfgfn)
sys.exit(1)



try:
logf = open(cfg["outputfn"], 'w', 0) # unbuffered write
except:
- print 'unable to open', cfg["outputfn"]
+ print('unable to open', cfg["outputfn"])


#if not fakemode:

params['enclaves'] = enclaves

if sys.version_info[0] < 3:
- import Tkinter
+ import tkinter
#from Tkinter import *
- import tkFileDialog
- import tkFont
- from tkFont import Font
+ import tkinter.filedialog
+ import tkinter.font
+ from tkinter.font import Font
#from Tkinter.FileDialog import askopenfilename
else:
import tkinter

def __init__(self, master, row=2, col=3):

# Create a container
- self.frame = Tkinter.Frame(master,width=200,height=100)
+ self.frame = tkinter.Frame(master,width=200,height=100)
# Create 2 buttons
#self.button_left = Tkinter.Button(frame,text="< Decrease Slope",
# command=self.decrease)

self.listFontFamily = ['serif', 'sans-serif', 'cursive', 'fantasy', 'monospace']
self.listFontWeight = ['ultralight','light','normal','regular','book','medium','roman','semibold','demibold','demi','bold','heavy','extra bold','black']

- self.list_fonts = list( tkFont.families() )
+ self.list_fonts = list( tkinter.font.families() )

self.selectedFontPolicy = None
self.selectedFontSize = None
self.selectedFontWeight = None

# create a custom font
- self.customFont = tkFont.Font(family="Helvetica", size=12)
+ self.customFont = tkinter.font.Font(family="Helvetica", size=12)

for idx in range(params['cfg']['nbgraphs']):
self.listUsedGraphs.append(-1)

try:
root.config(menu=menubar)
except AttributeError as attErr:
- print 'menu Exception: ', type(attErr), attErr
+ print('menu Exception: ', type(attErr), attErr)

#self.winPvars()
#self.winCvars()

self.subSpawn(sos_db_file)

def try_execute(self, c, statement, parameters=None):
- print ("statement: ", statement)
- print ("parameters: ", parameters)
+ print(("statement: ", statement))
+ print(("parameters: ", parameters))
try:
if parameters:
c.execute(statement,parameters);
else:
c.execute(statement);
except sqlite3.Error as e:
- print("database error.....", e.args[0])
+ print(("database error.....", e.args[0]))

def open_connection(self,sqlite_file):
global conn
# check for file to exist
#print ("Checking for file: ", sqlite_file)
- print ("Checking for file: ", "sosd.00000.db")
+ print(("Checking for file: ", "sosd.00000.db"))
while not os.path.exists("sosd.00000.db"):
- print ("Waiting on file: ", sqlite_file)
+ print(("Waiting on file: ", sqlite_file))
time.sleep(1)

#print("Connecting to: ", sqlite_file)
- print("Connecting to: ", "sosd.00000.db")
+ print(("Connecting to: ", "sosd.00000.db"))
# Connecting to the database file
#conn = sqlite3.connect(sqlite_file)
#fd = os.open(sqlite_file, os.O_RDONLY)

return c

def btnfontsupdate(self):
- print 'Update font'
+ print('Update font')
if self.selectedFontPolicy or self.selectedFontSize or self.selectedFontWeight:
matplotlib.rcParams.update({'font.size': self.selectedFontSize, 'font.family': self.selectedFontPolicy})
#self.customFont.configure(family=self.selectedFontPolicy)

matplotlib.rc('font', **font)

def ckbtnFontBold(self):
- print 'Bold selected'
+ print('Bold selected')

def ckbtnFontItalic(self):
- print 'Italic selected'
+ print('Italic selected')

def ckbtnFontUnderline(self):
- print 'Underline selected'
+ print('Underline selected')

def browsefontpolicy(self):
- print 'browsefontpolicy'
- fontpolicydiag = tkFileDialog.askopenfilename(filetypes=[("Text files","*.fft")])
+ print('browsefontpolicy')
+ fontpolicydiag = tkinter.filedialog.askopenfilename(filetypes=[("Text files","*.fft")])

def onselectFontPolicy(self,evt):
w = evt.widget
selection = w.curselection()
value = w.get(selection[0])
self.selectedFontPolicy = value
- print 'select font: ', value
+ print('select font: ', value)

def onselectFontSize(self, evt):
- print 'select font size'
+ print('select font size')
w = evt.widget
selection = w.curselection()
value = w.get(selection[0])
self.selectedFontSize = value
- print 'select font: ', value
+ print('select font: ', value)

def onselectFontWeight(self, evt):
- print 'select font weight'
+ print('select font weight')
w = evt.widget
selection = w.curselection()
value = w.get(selection[0])
self.selectedFontWeight = value
- print 'select font: ', value
+ print('select font: ', value)

def loadFontPolicy(self):
fontpolicydiag = askopenfilename(filetypes=(("*.fft")))

#def cvarsmenu(self):

def fontmenu(self):
- print 'nothing'
+ print('nothing')

self.paramswin = Tk.Tk()
self.paramswin.title("Fonts: family, size and weight")


def clearplot(self,idxGraph):

- print 'clearplot: idxGraph=', idxGraph
+ print('clearplot: idxGraph=', idxGraph)
ax = self.ax[idxGraph]
ax.cla()
#ax.clf()

total_val=total_val+ref4
num_vals=num_vals+1
mean_val=total_val/num_vals
- print 'display record ref4='+str(ref4)
+ print('display record ref4='+str(ref4))
self.data_lr[i].add(t,ref4)
#self.data_lr[i].add(t,mean_val)
goodrecord=1

if goodrecord==0:
- print 'bad record'
+ print('bad record')
return

def updategui3(self, params, rows):

total_val=0
num_vals=0

- print '[PYCOOLR] Starting update gui'
+ print('[PYCOOLR] Starting update gui')
#if sample['node'] == params['targetnode'] and sample['sample'] == 'tau':
#
# data handling


#print("Fetching rows.")
rows[j] = self.conn.fetchall()
- print 'row: ', rows[j]
+ print('row: ', rows[j])
if len(rows[j]) <= 0:
- print("Error: query returned no rows.",)
+ print(("Error: query returned no rows.",))
else:
goodrecord = 1


try:
ax.cla()
except Exception as errCla:
- print 'update_gui: Error cla(): ', type(errCla), errCla
+ print('update_gui: Error cla(): ', type(errCla), errCla)
12349
12350
#ax.set_xlim([t-gxsec, t])
12351
#print 'get x and y'
12352
12353
try:
12354
ax.cla()
12355
except Exception as errCla:
12356
- print 'update_gui: Error cla(): ', type(errCla), errCla
12357
+ print('update_gui: Error cla(): ', type(errCla), errCla)
12358
12359
ax.set_xlim([pack_time-gxsec, pack_time])
12360
#print 'get x and y'
12361
12362
graphs = [None, None, None, None, None, None]
12363
axises = [None, None, None, None, None, None]
12364
12365
- print '[PYCOOLR] Starting update gui'
12366
+ print('[PYCOOLR] Starting update gui')
12367
#if sample['node'] == params['targetnode'] and sample['sample'] == 'tau':
12368
#
12369
# data handling
12370
12371
12372
#print("Fetching rows.")
12373
rows[j] = self.conn.fetchall()
12374
- print 'rows: ', rows[j]
12375
+ print('rows: ', rows[j])
12376
if len(rows[j]) <= 0:
12377
- print("Error: query returned no rows.",)
12378
+ print(("Error: query returned no rows.",))
12379
else:
12380
goodrecord = 1
12381
12382
12383
graph = {}
12384
12385
if newplot:
12386
- print 'newplot True'
12387
+ print('newplot True')
12388
axis = pl.subplot(1)
12389
axis.set_title("Time per iteration");
12390
graph[r] = (pl.plot(pack_time, metric_values, marker='*', linestyle='-', label=str(r))[0])
12391
12392
pl.ylabel("Seconds")
12393
pl.xlabel("Timestamp")
12394
else:
12395
- print 'newplot False'
12396
+ print('newplot False')
12397
#axis = pl.subplot(321)
12398
graph[r].set_data(pack_time, metric_values)
12399
axis.relim() # Recalculate limits
12400
12401
try:
12402
ax.cla()
12403
except Exception as errCla:
12404
- print 'update_gui: Error cla(): ', type(errCla), errCla
12405
+ print('update_gui: Error cla(): ', type(errCla), errCla)
12406
12407
ax.set_xlim([t-gxsec, t])
12408
#print 'get x and y'
12409
12410
12411
def subscribe(self,libarbjsonbeep):
12412
12413
- print 'start thread with Subscribe'
12414
+ print('start thread with Subscribe')
12415
12416
listargs = ['MEMORY','NODE_POWER_WATTS','MPI_T_PVAR']
12417
12418
12419
12420
def publish(self,libarbpubcvars):
12421
12422
- print 'start thread with Publish'
12423
+ print('start thread with Publish')
12424
12425
#listargs = ['MEMORY','NODE_POWER_WATTS','MPI_T_PVAR']
12426
12427
12428
payload += resultPayload[j]
12429
12430
payload.strip()
12431
- print 'payload =',payload
12432
+ print('payload =',payload)
12433
try:
12434
j = json.loads(payload)
12435
except ValueError as e:
12436
- print 'Failed to load json data: %s' %e
12437
+ print('Failed to load json data: %s' %e)
12438
continue
12439
#return False
12440
12441
12442
if 'node' not in e and\
12443
'sample' not in e and\
12444
'time' not in e:
12445
- print 'Ignore this invalid sample:', json.dumps(e)
12446
+ print('Ignore this invalid sample:', json.dumps(e))
12447
continue
12448
12449
#print 'set timestamp'
12450
12451
#print 'finished parsing listEvents'
12452
#draw to refresh plotting
12453
#layout.canvas.draw()
12454
- print 'draw canvas'
12455
+ print('draw canvas')
12456
try:
12457
self.canvas.draw()
12458
except Exception as errDraw:
12459
- print 'Error drawing canvas: ', type(errDraw), errDraw
12460
+ print('Error drawing canvas: ', type(errDraw), errDraw)
12461
#plt.draw()
12462
12463
profile_t7 = time.time()
12464
12465
all_rows = c.fetchall()
12466
ts = np.array([x[0] for x in all_rows])
12467
min_timestamp = ts[0]
12468
- print("min timestamp: ", min_timestamp)
12469
+ print(("min timestamp: ", min_timestamp))
12470
12471
def req_sql(self, c, ranks, ranks2, group_column, metric):
12472
- print 'req_sql entering'
12473
+ print('req_sql entering')
12474
for r in ranks:
12475
sql_statement = ("SELECT distinct tbldata.name, tblvals.val, tblvals.time_pack, tblpubs.comm_rank FROM tblvals INNER JOIN tbldata ON tblvals.guid = tbldata.guid INNER JOIN tblpubs ON tblpubs.guid = tbldata.pub_guid WHERE tblvals.guid IN (SELECT guid FROM tbldata WHERE tbldata.name LIKE '" + metric + "') AND tblpubs." + group_column)
12476
"""
12477
12478
sql_statement = (sql_statement + " like '" + r + "' and tblvals.val > 0 order by tblvals.time_pack;")
12479
12480
#params = [metric,r]
12481
- print "Executing query: ", sql_statement,
12482
+ print("Executing query: ", sql_statement, end=' ')
12483
self.try_execute(c, sql_statement)
12484
- print "Done. "
12485
+ print("Done. ")
12486
12487
#print("Fetching rows.")
12488
all_rows = c.fetchall()
12489
if len(all_rows) <= 0:
12490
- print("Error: query returned no rows.",)
12491
- print(sql_statement, params)
12492
+ print(("Error: query returned no rows.",))
12493
+ print((sql_statement, params))
12494
12495
#print("Making numpy array of: metric_values")
12496
metric_values = np.array([max(x[1],0) for x in all_rows])
12497
12498
while self.ranks.size == 0:
12499
time.sleep(1)
12500
self.ranks,self.procs = self.get_ranks(self.conn)
12501
- print ("ranks: ", self.ranks)
12502
+ print(("ranks: ", self.ranks))
12503
12504
# get the number of nodes
12505
self.nodes,self.noderanks = self.get_nodes(self.conn)
12506
while self.nodes.size == 0:
12507
time.sleep(1)
12508
nodes,self.noderanks = self.get_nodes(self.conn)
12509
- print ("nodes: ", self.nodes)
12510
+ print(("nodes: ", self.nodes))
12511
12512
self.get_min_timestamp(self.conn)
12513
#resize the figure
12514
12515
12516
def readsosmetrics(self,arguments):
12517
12518
- print 'readsosmetrics'
12519
+ print('readsosmetrics')
12520
profile_t1 = time.time()
12521
self.opendb(arguments)
12522
12523
- print 'after opening db, read db and plot ....'
12524
+ print('after opening db, read db and plot ....')
12525
12526
while True:
12527
12528
12529
if self.listRecordSample[i] != -1:
12530
j = self.listRecordSample[i]
12531
12532
- print 'readsosmetrics: i=%d, j=%d' %(i,j)
12533
+ print('readsosmetrics: i=%d, j=%d' %(i,j))
12534
12535
#rank = self.ranks[j]
12536
#rank2 = self.ranks2[j]
12537
12538
self.rows[j] = self.conn.fetchall()
12539
#print 'rows: ', self.rows[j]
12540
if len(self.rows[j]) <= 0:
12541
- print("Error: query returned no rows.",)
12542
+ print(("Error: query returned no rows.",))
12543
else:
12544
goodrecord = 1
12545
12546
12547
12548
def readEvents(self,libarbjsonbeep):
12549
12550
- print '[PYCOOLR] readEvents begin'
12551
+ print('[PYCOOLR] readEvents begin')
12552
12553
low_index = 0
12554
high_index = 0
12555
12556
try:
12557
j = json.loads(payload)
12558
except ValueError as e:
12559
- print 'Failed to load json data: %s' %e
12560
+ print('Failed to load json data: %s' %e)
12561
continue
12562
#return False
12563
12564
12565
if 'node' not in e and\
12566
'sample' not in e and\
12567
'time' not in e:
12568
- print 'Ignore this invalid sample:', json.dumps(e)
12569
+ print('Ignore this invalid sample:', json.dumps(e))
12570
continue
12571
12572
#print 'set timestamp'
12573
12574
12575
def subSpawn(self,arguments):
12576
12577
- print 'subSpawn: load beacon subscriber library'
12578
+ print('subSpawn: load beacon subscriber library')
12579
envlibpath = os.environ['PYCOOLR_LIBPATH']
12580
libarbjsonbeep = cdll.LoadLibrary(envlibpath+'/libarbitraryjsonbeepmulsub.so')
12581
12582
12583
12584
try:
12585
#thread.start_new_thread(self.subscribe,(libarbjsonbeep,))
12586
- thread.start_new_thread(self.readsosmetrics,(arguments,))
12587
+ _thread.start_new_thread(self.readsosmetrics,(arguments,))
12588
12589
except Exception as errThread:
12590
- print 'Error: unable to start thread: ', errThread
12591
+ print('Error: unable to start thread: ', errThread)
12592
12593
self.refresh_plot()
12594
#self.readEvents(libarbjsonbeep)
12595
12596
#print "selection:", selection, ": '%s'" % value
12597
12598
listintselection = [int (i) for i in selection]
12599
- print 'listintselection: ', listintselection
12600
+ print('listintselection: ', listintselection)
12601
12602
for i in range(self.nbsamples):
12603
if (self.listSamplesAllocated[i] > -1) and (i not in listintselection):
12604
12605
if self.listSamplesAllocated[j] == -1:
12606
#index = int(j)
12607
self.listUsedGraphs[i] = j
12608
- print 'graph %d allocated to sample %d' % (i, j)
12609
+ print('graph %d allocated to sample %d' % (i, j))
12610
self.listRecordSample[i] = j
12611
self.listSamplesAllocated[j] = i
12612
break
12613
12614
# Mark current graph as used
12615
self.listUsedGraphs[j] = 1
12616
# Record the current graph as plotting the current sample
12617
- print 'Record Sample %d for graph %d' %(index,j)
12618
+ print('Record Sample %d for graph %d' %(index,j))
12619
self.listRecordSample[j] = index
12620
12621
# Mark current sample as allocated to the current graph
12622
12623
self.canvas.draw()
12624
#self.frame.update()
12625
except Exception as errDraw:
12626
- print 'refresh_plot: Error drawing canvas: ', type(errDraw), errDraw
12627
+ print('refresh_plot: Error drawing canvas: ', type(errDraw), errDraw)
12628
self.lock.release()
12629
12630
self.root.after(1000,self.refresh_plot)
12631
12632
def updatebtn(self):
12633
- print 'update buttonupdate button'
12634
+ print('update buttonupdate button')
12635
try:
12636
self.canvas.draw()
12637
except Exception as errDraw:
12638
- print 'Error drawing canvas: ', type(errDraw), errDraw
12639
+ print('Error drawing canvas: ', type(errDraw), errDraw)
12640
12641
def checkfn(self, idx, text):
12642
- print 'checkfn'
12643
- print 'Check index=%d text=%s' % (idx,text)
12644
+ print('checkfn')
12645
+ print('Check index=%d text=%s' % (idx,text))
12646
#print 'Size of listbtnchecked[]= ', len(self.listbtnchecked)
12647
#self.listbtnchecked[idx] = 1
12648
12649
12650
# print 'nothing'
12651
12652
12653
-root = Tkinter.Tk()
12654
+root = tkinter.Tk()
12655
12656
app = Coolrsub(root,2,3)
12657
root.mainloop()
12658
--- a/tools/src/pycoolr/src/pycoolrgui/pycoolr-plot/coolr.py (original)
12659
--- b/tools/src/pycoolr/src/pycoolrgui/pycoolr-plot/coolr.py (refactored)
12660
12661
#!/usr/bin/env python
12662
12663
#import sys, os, re, _thread, signal
12664
-import sys, os, re, thread, signal
12665
+import sys, os, re, _thread, signal
12666
#from cStringIO import StringIO
12667
from io import StringIO
12668
import subprocess
12669
12670
with open(cfgfn) as f:
12671
cfgtmp = json.load(f)
12672
# override if cfg defines any
12673
- for k in cfgtmp.keys():
12674
+ for k in list(cfgtmp.keys()):
12675
cfg[k] = cfgtmp[k]
12676
# override if specifed as cmd option
12677
- for k in ocfg.keys():
12678
+ for k in list(ocfg.keys()):
12679
cfg[k] = ocfg[k]
12680
12681
12682
12683
targetnode = os.environ['PYCOOLR_NODE']
12684
#targetnode = cfg['masternode']
12685
if len(enclaves) == 0:
12686
- if cfg.has_key('enclaves'):
12687
+ if 'enclaves' in cfg:
12688
enclaves = cfg['enclaves']
12689
12690
#print 'masternode:', cfg['masternode']
12691
-print('targetnode:', targetnode)
12692
-print('enclaves:', enclaves)
12693
+print(('targetnode:', targetnode))
12694
+print(('enclaves:', enclaves))
12695
12696
if len(appcfgfn) > 0:
12697
with open(appcfgfn) as f:
12698
appcfg = json.load(f)
12699
- for k in appcfg.keys():
12700
+ for k in list(appcfg.keys()):
12701
cfg[k] = appcfg[k]
12702
12703
- if not (cfg.has_key('appname') and cfg.has_key('appsamples')):
12704
- print("Please double check %s: appname or appsamples tags" % appcfgfn)
12705
+ if not ('appname' in cfg and 'appsamples' in cfg):
12706
+ print(("Please double check %s: appname or appsamples tags" % appcfgfn))
12707
sys.exit(1)
12708
12709
12710
12711
try:
12712
logf = open(cfg["outputfn"], 'w', 0) # unbuffered write
12713
except:
12714
- print('unable to open', cfg["outputfn"])
12715
+ print(('unable to open', cfg["outputfn"]))
12716
12717
12718
#if not fakemode:
12719
12720
params['enclaves'] = enclaves
12721
12722
if sys.version_info[0] < 3:
12723
- import Tkinter
12724
+ import tkinter
12725
#from Tkinter import *
12726
- import tkFileDialog
12727
- import tkFont
12728
- from tkFont import Font
12729
+ import tkinter.filedialog
12730
+ import tkinter.font
12731
+ from tkinter.font import Font
12732
#from Tkinter.FileDialog import askopenfilename
12733
else:
12734
import tkinter
12735
12736
def __init__(self, master, row=2, col=3):
12737
12738
# Create a container
12739
- self.frame = Tkinter.Frame(master,width=200,height=100)
12740
+ self.frame = tkinter.Frame(master,width=200,height=100)
12741
# Create 2 buttons
12742
#self.button_left = Tkinter.Button(frame,text="< Decrease Slope",
12743
# command=self.decrease)
12744
12745
sql_statement = ("SELECT MAX(frame) FROM tblVals;")
12746
self.try_execute(self.conn, sql_statement)
12747
query_result = self.conn.fetchall()
12748
- print("query_result", query_result[0][0])
12749
+ print(("query_result", query_result[0][0]))
12750
if type(query_result[0][0]) == int:
12751
frame = int(query_result[0][0])
12752
- print("frame", frame)
12753
+ print(("frame", frame))
12754
self.metricsDB = ""
12755
#Get the metric's names
12756
#sql_statement = ("SELECT distinct(value_name), comm_rank FROM viewCombined where frame > 0 ORDER BY value_name, comm_rank;")
12757
12758
params['cfg']['units'] = ["KB" if (metric[0].find("KB") > -1) else "counts" for metric in self.metrics]
12759
params['cfg']['units'] = ["#Events" if (metric[0].find("NumEvents") > -1) else units for metric,units in zip(self.metrics,params['cfg']['units'])]
12760
print("---------------------------------------------------------------------------------------------------------")
12761
- print("self.nbsamples",self.nbsamples)
12762
+ print(("self.nbsamples",self.nbsamples))
12763
12764
if self.tool == "beacon":
12765
self.nbcvars = params['cfg']['nbcvars']
12766
12767
self.listFontFamily = ['serif', 'sans-serif', 'cursive', 'fantasy', 'monospace']
12768
self.listFontWeight = ['ultralight','light','normal','regular','book','medium','roman','semibold','demibold','demi','bold','heavy','extra bold','black']
12769
12770
- self.list_fonts = list( tkFont.families() )
12771
+ self.list_fonts = list( tkinter.font.families() )
12772
12773
self.selectedFontPolicy = None
12774
self.selectedFontSize = None
12775
self.selectedFontWeight = None
12776
12777
# create a custom font
12778
- self.customFont = tkFont.Font(family="Helvetica", size=12)
12779
+ self.customFont = tkinter.font.Font(family="Helvetica", size=12)
12780
12781
for idx in range(params['cfg']['nbgraphs']):
12782
self.listUsedGraphs.append(-1)
12783
12784
# selected_list.append(i)
12785
#print selected_list
12786
self.selected_list= [-1] * self.nbGraphs
12787
- print self.selected_list
12788
+ print(self.selected_list)
12789
12790
12791
12792
12793
try:
12794
root.config(menu=menubar)
12795
except AttributeError as attErr:
12796
- print('menu Exception: ', type(attErr), attErr)
12797
+ print(('menu Exception: ', type(attErr), attErr))
12798
12799
#self.winPvars()
12800
#self.winCvars()
12801
12802
else:
12803
c.execute(statement);
12804
except sqlite3.Error as e:
12805
- print("database error...", e.args[0])
12806
+ print(("database error...", e.args[0]))
12807
12808
def open_connection(self):
12809
global conn
12810
# check for file to exist
12811
#print ("Checking for file: ", sqlite_file)
12812
- print("Checking for file: ", self.sosdbfile)
12813
+ print(("Checking for file: ", self.sosdbfile))
12814
while not os.path.exists(self.sosdbfile):
12815
- print("Waiting on file: ", self.sosdbfile)
12816
+ print(("Waiting on file: ", self.sosdbfile))
12817
time.sleep(1)
12818
12819
#print("Connecting to: ", sqlite_file)
12820
- print("Connecting to: ", self.sosdbfile)
12821
+ print(("Connecting to: ", self.sosdbfile))
12822
# Connecting to the database file
12823
#conn = sqlite3.connect(sqlite_file)
12824
#fd = os.open(sqlite_file, os.O_RDONLY)
12825
12826
12827
def browsefontpolicy(self):
12828
print('browsefontpolicy')
12829
- fontpolicydiag = tkFileDialog.askopenfilename(filetypes=[("Text files","*.fft")])
12830
+ fontpolicydiag = tkinter.filedialog.askopenfilename(filetypes=[("Text files","*.fft")])
12831
12832
def onselectFontPolicy(self,evt):
12833
w = evt.widget
12834
selection = w.curselection()
12835
value = w.get(selection[0])
12836
self.selectedFontPolicy = value
12837
- print('select font: ', value)
12838
+ print(('select font: ', value))
12839
12840
def onselectFontSize(self, evt):
12841
print('select font size')
12842
12843
selection = w.curselection()
12844
value = w.get(selection[0])
12845
self.selectedFontSize = value
12846
- print('select font: ', value)
12847
+ print(('select font: ', value))
12848
12849
def onselectFontWeight(self, evt):
12850
print('select font weight')
12851
12852
selection = w.curselection()
12853
value = w.get(selection[0])
12854
self.selectedFontWeight = value
12855
- print('select font: ', value)
12856
+ print(('select font: ', value))
12857
12858
def loadFontPolicy(self):
12859
fontpolicydiag = askopenfilename(filetypes=(("*.fft")))
12860
12861
12862
#def winPvars(self):
12863
def select_metric(self, fig_num):
12864
- print "Selected metric for figure: " + str(fig_num)
12865
+ print("Selected metric for figure: " + str(fig_num))
12866
# this is the child window
12867
self.pvarswin = Tk.Tk()
12868
str_metric_title = "Select Metric: "+ str(fig_num)
12869
12870
12871
def clearplot(self,idxGraph):
12872
12873
- print('clearplot: idxGraph=', idxGraph)
12874
+ print(('clearplot: idxGraph=', idxGraph))
12875
ax = self.ax[idxGraph]
12876
ax.cla()
12877
#ax.clf()
12878
12879
total_val=total_val+ref4
12880
num_vals=num_vals+1
12881
mean_val=total_val/num_vals
12882
- print('display record ref4='+str(ref4))
12883
+ print(('display record ref4='+str(ref4)))
12884
self.data_lr[i].add(t,ref4)
12885
#self.data_lr[i].add(t,mean_val)
12886
goodrecord=1
12887
12888
try:
12889
ax.cla()
12890
except Exception as errCla:
12891
- print('update_gui: Error cla(): ', type(errCla), errCla)
12892
+ print(('update_gui: Error cla(): ', type(errCla), errCla))
12893
12894
ax.set_xlim([t-gxsec, t])
12895
#print 'get x and y'
12896
12897
try:
12898
ax.cla()
12899
except Exception as errCla:
12900
- print('update_gui: Error cla(): ', type(errCla), errCla)
12901
+ print(('update_gui: Error cla(): ', type(errCla), errCla))
12902
12903
ax.set_xlim([pack_time-gxsec, pack_time])
12904
#print 'get x and y'
12905
12906
try:
12907
ax.cla()
12908
except Exception as errCla:
12909
- print('update_gui: Error cla(): ', type(errCla), errCla)
12910
+ print(('update_gui: Error cla(): ', type(errCla), errCla))
12911
12912
ax.set_xlim([pack_time-gxsec, pack_time])
12913
#print 'get x and y'
12914
12915
try:
12916
j = json.loads(payload)
12917
except ValueError as e:
12918
- print('Failed to load json data: %s' %e)
12919
+ print(('Failed to load json data: %s' %e))
12920
continue
12921
#return False
12922
12923
12924
if 'node' not in e and\
12925
'sample' not in e and\
12926
'time' not in e:
12927
- print('Ignore this invalid sample:', json.dumps(e))
12928
+ print(('Ignore this invalid sample:', json.dumps(e)))
12929
continue
12930
12931
#print 'set timestamp'
12932
12933
try:
12934
self.canvas.draw()
12935
except Exception as errDraw:
12936
- print('Error drawing canvas: ', type(errDraw), errDraw)
12937
+ print(('Error drawing canvas: ', type(errDraw), errDraw))
12938
#plt.draw()
12939
12940
profile_t7 = time.time()
12941
12942
all_rows = c.fetchall()
12943
ts = np.array([x[0] for x in all_rows])
12944
min_timestamp = ts[0]
12945
- print("min timestamp: ", min_timestamp)
12946
+ print(("min timestamp: ", min_timestamp))
12947
12948
12949
def get_min_timestamp(self):
12950
12951
sql_statement = ("SELECT min(time_pack) FROM viewCombined;")
12952
print("get_min_timestamp Executing query")
12953
12954
- print("sql statement: ", sql_statement)
12955
+ print(("sql statement: ", sql_statement))
12956
#self.try_execute(c, sql_statement)
12957
os.environ['SOS_SQL'] = sql_statement
12958
sos_bin_path = os.environ.get('SOS_BIN_DIR')
12959
- print('SOS BIN path: ', sos_bin_path)
12960
+ print(('SOS BIN path: ', sos_bin_path))
12961
os.system('cd '+ sos_bin_path)
12962
- print('current dir: ', os.getcwd())
12963
+ print(('current dir: ', os.getcwd()))
12964
# Redirect stdout of passed command into a string
12965
12966
soscmd = sos_bin_path + "/demo_app_silent --sql SOS_SQL"
12967
- print('soscmd: ', soscmd)
12968
+ print(('soscmd: ', soscmd))
12969
tmp_res_min_ts_sql = subprocess.check_output(soscmd, shell=True)
12970
12971
#self.res_min_ts_sql = tmp_res_min_ts_sql.splitlines()
12972
- print('get min ts: tmp res sql=', tmp_res_min_ts_sql)
12973
+ print(('get min ts: tmp res sql=', tmp_res_min_ts_sql))
12974
res_min_ts_sql = tmp_res_min_ts_sql.splitlines()
12975
- print("List of result SQL MIN TS: ", res_min_ts_sql)
12976
+ print(("List of result SQL MIN TS: ", res_min_ts_sql))
12977
min_ts_rows = res_min_ts_sql[1].split(",")
12978
- print("List of result SQL MIN TS values: ", min_ts_rows)
12979
+ print(("List of result SQL MIN TS values: ", min_ts_rows))
12980
# Remove first element of SQL result
12981
#ts = np.array([x[0] for x in min_ts_rows])
12982
str_min_timestamp = min_ts_rows[0].replace('\"', '')
12983
12984
sql_statement = ("SELECT value_name, value, time_pack FROM viewCombined WHERE value_name LIKE '" + metric+ "'")
12985
#sql_statement = ("SELECT * FROM viewCombined WHERE value_name LIKE '" + metric+ "'")
12986
12987
- print("sql statement: ", sql_statement )
12988
+ print(("sql statement: ", sql_statement ))
12989
#self.try_execute(c, sql_statement)
12990
os.environ['SOS_SQL'] = sql_statement
12991
sos_bin_path = os.environ.get('SOS_BIN_DIR')
12992
- print('SOS BIN path: ', sos_bin_path)
12993
+ print(('SOS BIN path: ', sos_bin_path))
12994
os.system('cd '+ sos_bin_path)
12995
- print('current dir: ', os.getcwd())
12996
+ print(('current dir: ', os.getcwd()))
12997
# Redirect stdout of passed command into a string
12998
12999
soscmd = sos_bin_path + "/demo_app_silent --sql SOS_SQL"
13000
- print('soscmd: ', soscmd)
13001
+ print(('soscmd: ', soscmd))
13002
tmp_res_sql = subprocess.check_output(soscmd, shell=True)
13003
13004
self.try_execute(c, sql_statement)
13005
13006
#print 'stdout of SOS demo: ', sys.stdout
13007
#self.res_sql = resultstdout.getvalue()
13008
- print('tmp res_sql: ', tmp_res_sql)
13009
+ print(('tmp res_sql: ', tmp_res_sql))
13010
13011
self.res_sql = tmp_res_sql.splitlines()
13012
# REmove first element of SQL result
13013
self.res_sql.pop(0)
13014
13015
for item_sql in self.res_sql:
13016
- print('res sql: ', item_sql)
13017
+ print(('res sql: ', item_sql))
13018
13019
13020
# Call demo with SQL statement given as argument and store standard output
13021
13022
print('SOS: Execute demo app')
13023
sos_path = os.environ.get('SOS_BUILD_DIR')
13024
self.sos_bin_path = sos_path+"/bin"
13025
- print('SOS BIN PATH: ', self.sos_bin_path)
13026
+ print(('SOS BIN PATH: ', self.sos_bin_path))
13027
os.system("cd "+ self.sos_bin_path)
13028
13029
13030
13031
if self.listRecordSample[i] != -1:
13032
j = self.listRecordSample[i]
13033
13034
- print('readsosmetrics: i=%d, j=%d' %(i,j))
13035
+ print(('readsosmetrics: i=%d, j=%d' %(i,j)))
13036
13037
#rank = self.ranks[j]
13038
#rank2 = self.ranks2[j]
13039
13040
self.rows[j] = self.conn.fetchall()
13041
#print 'rows: ', self.rows[j]
13042
if len(self.rows[j]) <= 0:
13043
- print("Error: query returned no rows.",)
13044
+ print(("Error: query returned no rows.",))
13045
else:
13046
goodrecord = 1
13047
13048
13049
13050
13051
payload.strip()
13052
- print('payload =',payload)
13053
+ print(('payload =',payload))
13054
try:
13055
j = json.loads(payload)
13056
except ValueError as e:
13057
- print('Failed to load json data: %s' %e)
13058
+ print(('Failed to load json data: %s' %e))
13059
continue
13060
#return False
13061
13062
13063
if 'node' not in e and\
13064
'sample' not in e and\
13065
'time' not in e:
13066
- print('Ignore this invalid sample:', json.dumps(e))
13067
+ print(('Ignore this invalid sample:', json.dumps(e)))
13068
continue
13069
13070
#print 'set timestamp'
13071
13072
print('subSpawn: load beacon subscriber library')
13073
envlibpath = os.environ['PYCOOLR_LIBPATH']
13074
libarbjsonbeep = cdll.LoadLibrary(envlibpath+'/libarbitraryjsonbeepmulsub.so')
13075
- thread.start_new_thread(self.subscribe,(libarbjsonbeep,))
13076
- thread.start_new_thread(self.readEvents,(libarbjsonbeep,))
13077
+ _thread.start_new_thread(self.subscribe,(libarbjsonbeep,))
13078
+ _thread.start_new_thread(self.readEvents,(libarbjsonbeep,))
13079
#thread.start_new_thread(self.readEvents,(libarbjsonbeep,))
13080
except Exception as errThread:
13081
- print("Error: unable to start thread: ", errThread)
13082
+ print(("Error: unable to start thread: ", errThread))
13083
13084
elif self.tool == "sos":
13085
try:
13086
#thread.start_new_thread(self.subscribe,(libarbjsonbeep,))
13087
- thread.start_new_thread(self.readsosmetrics,())
13088
+ _thread.start_new_thread(self.readsosmetrics,())
13089
#thread.start_new_thread(self.readsosmetrics_db,())
13090
13091
except Exception as errThread:
13092
- print('Error: unable to start thread: ', errThread)
13093
+ print(('Error: unable to start thread: ', errThread))
13094
13095
13096
self.refresh_plot()
13097
13098
13099
self.selectedcvarsvalues[i] = self.listcvarsentry[i].get()
13100
strcvarsvalues += self.selectedcvarsvalues[i]
13101
- print('numselectedcvars=%d, index=%d' % (self.numselectedcvars, i))
13102
+ print(('numselectedcvars=%d, index=%d' % (self.numselectedcvars, i)))
13103
if i+1 < self.numselectedcvars:
13104
strcvarsmetrics += ","
13105
strcvarsvalues += ","
13106
13107
#self.strcvars += "="
13108
#self.strcvars += self.selectedcvarsvalues[i]
13109
#strcvars += ","
13110
- print('numselectedcvars=%d, index=%d' % (self.numselectedcvars, i))
13111
+ print(('numselectedcvars=%d, index=%d' % (self.numselectedcvars, i)))
13112
if i+1 < self.numselectedcvars:
13113
strcvarsmetrics += ","
13114
strcvarsvalues += ","
13115
13116
#self.strcvars += ":"
13117
self.strcvars += strcvarsvalues
13118
13119
- print("strcvarsmetrics: ", strcvarsmetrics)
13120
- print("strcvarsvalues: ", strcvarsvalues)
13121
- print("strcvars: ", self.strcvars)
13122
+ print(("strcvarsmetrics: ", strcvarsmetrics))
13123
+ print(("strcvarsvalues: ", strcvarsvalues))
13124
+ print(("strcvars: ", self.strcvars))
13125
13126
# Test if we have to communicate MPI_T CVARS in a Publish/Subscribe mode
13127
if cvars_comm_mode == "pub":
13128
13129
13130
for i in range(len(selection)):
13131
value = w.get(selection[i])
13132
- print("selection:", selection, ": '%s'" % value)
13133
+ print(("selection:", selection, ": '%s'" % value))
13134
self.selectedcvarsmetrics[i] = value
13135
13136
if self.listlabelcvarsmetric:
13137
13138
self.listcvarsarrayindexentry = [None] * len(selection)
13139
self.listcvarsarrayindex = [None] * len(selection)
13140
13141
- print('selection: ', selection)
13142
- print('range selection: ', range(len(selection)))
13143
-
13144
- for cvaritem, cvarindex in zip(selection, range(len(selection))):
13145
+ print(('selection: ', selection))
13146
+ print(('range selection: ', list(range(len(selection)))))
13147
+
13148
+ for cvaritem, cvarindex in zip(selection, list(range(len(selection)))):
13149
13150
value = w.get(selection[cvarindex])
13151
- print('len selection: ', len(selection))
13152
- print('value of item %d: %s ' % (cvarindex, value))
13153
- print('cvaritem: ', cvaritem)
13154
- print('cvarindex= ', cvarindex)
13155
- print('cvarsindexrow= ', self.cvarsindexrow)
13156
-
13157
- print('cfg cvars array:', self.listcfgcvarsarray[0])
13158
+ print(('len selection: ', len(selection)))
13159
+ print(('value of item %d: %s ' % (cvarindex, value)))
13160
+ print(('cvaritem: ', cvaritem))
13161
+ print(('cvarindex= ', cvarindex))
13162
+ print(('cvarsindexrow= ', self.cvarsindexrow))
13163
+
13164
+ print(('cfg cvars array:', self.listcfgcvarsarray[0]))
13165
if value == self.listcfgcvarsarray[0]:
13166
13167
self.listlabelcvarsmetric[cvarindex]=Tk.Label(self.stepCvarsUpdate, text=value)
13168
13169
#print "selection:", selection, ": '%s'" % value
13170
13171
listintselection = [int (i) for i in selection]
13172
- print('listintselection: ', listintselection)
13173
+ print(('listintselection: ', listintselection))
13174
#print('listSamplesAllocated: ', self.listSamplesAllocated)
13175
#print('nbsamples', self.nbsamples)
13176
#print('len(self.listSamplesAllocated)', len(self.listSamplesAllocated))
13177
13178
if self.listSamplesAllocated[fig_num] == -1:
13179
#index = int(j)
13180
self.listUsedGraphs[fig_num] = listintselection[fig_num]
13181
- print('graph %d allocated to sample %d' % (fig_num, listintselection[fig_num]))
13182
+ print(('graph %d allocated to sample %d' % (fig_num, listintselection[fig_num])))
13183
self.listRecordSample[fig_num] = listintselection[fig_num]
13184
self.listSamplesAllocated[listintselection[fig_num]] = fig_num
13185
13186
13187
# Mark current graph as used
13188
self.listUsedGraphs[j] = 1
13189
# Record the current graph as plotting the current sample
13190
- print('Record Sample %d for graph %d' %(index,j))
13191
+ print(('Record Sample %d for graph %d' %(index,j)))
13192
self.listRecordSample[j] = index
13193
13194
# Mark current sample as allocated to the current graph
13195
13196
self.canvas.draw()
13197
#self.frame.update()
13198
except Exception as errDraw:
13199
- print('refresh_plot: Error drawing canvas: ', type(errDraw), errDraw)
13200
+ print(('refresh_plot: Error drawing canvas: ', type(errDraw), errDraw))
13201
self.lock.release()
13202
13203
self.root.after(1000,self.refresh_plot)
13204
13205
try:
13206
self.canvas.draw()
13207
except Exception as errDraw:
13208
- print('Error drawing canvas: ', type(errDraw), errDraw)
13209
+ print(('Error drawing canvas: ', type(errDraw), errDraw))
13210
13211
def checkfn(self, idx, text):
13212
print('checkfn')
13213
- print('Check index=%d text=%s' % (idx,text))
13214
+ print(('Check index=%d text=%s' % (idx,text)))
13215
#print 'Size of listbtnchecked[]= ', len(self.listbtnchecked)
13216
#self.listbtnchecked[idx] = 1
13217
13218
13219
# print 'nothing'
13220
13221
13222
-root = Tkinter.Tk()
13223
+root = tkinter.Tk()
13224
13225
app = Coolrsub(root,2,3)
13226
root.mainloop()
13227
--- a/tools/src/pycoolr/src/pycoolrgui/pycoolr-plot/init_coolr.py (original)
13228
--- b/tools/src/pycoolr/src/pycoolrgui/pycoolr-plot/init_coolr.py (refactored)
13229
13230
try:
13231
opts, args = getopt.getopt(sys.argv[1:],
13232
shortopt, longopt)
13233
-except getopt.GetoptError, err:
13234
- print err
13235
+except getopt.GetoptError as err:
13236
+ print(err)
13237
usage()
13238
sys.exit(1)
13239
13240
13241
elif o in ("--ncols"):
13242
ocfg["ncols"]=int(a)
13243
elif o in ("--list"):
13244
- print ''
13245
- print '[available graph modules]'
13246
- print ''
13247
+ print('')
13248
+ print('[available graph modules]')
13249
+ print('')
13250
for i in cfg["modnames"]:
13251
- print i
13252
- print ''
13253
- print ''
13254
+ print(i)
13255
+ print('')
13256
+ print('')
13257
sys.exit(0)
13258
elif o in ("--mods"):
13259
ocfg["modnames"] = a.split(",")
13260
13261
if len(args) < 1:
13262
- print ''
13263
- print 'No config file is specified. Enabled the fake mode.'
13264
- print ''
13265
+ print('')
13266
+ print('No config file is specified. Enabled the fake mode.')
13267
+ print('')
13268
cfg["masternode"] = "frontend"
13269
cfg["drawexternal"] = "no"
13270
cfg["drawacpipwr"] = "no"
13271
13272
with open(cfgfn) as f:
13273
cfgtmp = json.load(f)
13274
# override if cfg defines any
13275
- for k in cfgtmp.keys():
13276
+ for k in list(cfgtmp.keys()):
13277
cfg[k] = cfgtmp[k]
13278
# override if specifed as cmd option
13279
- for k in ocfg.keys():
13280
+ for k in list(ocfg.keys()):
13281
cfg[k] = ocfg[k]
13282
13283
if len(targetnode) == 0 :
13284
targetnode = cfg['masternode']
13285
if len(enclaves) == 0:
13286
- if cfg.has_key('enclaves'):
13287
+ if 'enclaves' in cfg:
13288
enclaves = cfg['enclaves']
13289
13290
-print 'masternode:', cfg['masternode']
13291
-print 'targetnode:', targetnode
13292
-print 'enclaves:', enclaves
13293
+print('masternode:', cfg['masternode'])
13294
+print('targetnode:', targetnode)
13295
+print('enclaves:', enclaves)
13296
13297
if len(appcfgfn) > 0:
13298
with open(appcfgfn) as f:
13299
appcfg = json.load(f)
13300
- for k in appcfg.keys():
13301
+ for k in list(appcfg.keys()):
13302
cfg[k] = appcfg[k]
13303
13304
- if not (cfg.has_key('appname') and cfg.has_key('appsamples')):
13305
- print "Please double check %s: appname or appsamples tags" % appcfgfn
13306
+ if not ('appname' in cfg and 'appsamples' in cfg):
13307
+ print("Please double check %s: appname or appsamples tags" % appcfgfn)
13308
sys.exit(1)
13309
13310
13311
13312
try:
13313
logf = open(cfg["outputfn"], 'w', 0) # unbuffered write
13314
except:
13315
- print 'unable to open', cfg["outputfn"]
13316
+ print('unable to open', cfg["outputfn"])
13317
13318
-print >>logf, json.dumps(info)
13319
+print(json.dumps(info), file=logf)
13320
13321
#if not fakemode:
13322
# querycmds = cfg['querycmds']
13323
--- a/tools/src/pycoolr/src/pycoolrgui/pycoolr-plot/layout.py (original)
13324
--- b/tools/src/pycoolr/src/pycoolrgui/pycoolr-plot/layout.py (refactored)
13325
13326
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
13327
13328
if sys.version_info[0] < 3:
13329
- import Tkinter as Tk
13330
+ import tkinter as Tk
13331
else:
13332
import tkinter as Tk
13333
13334
--- a/tools/src/pycoolr/src/pycoolrgui/pycoolr-plot/listrotate.py (original)
13335
--- b/tools/src/pycoolr/src/pycoolrgui/pycoolr-plot/listrotate.py (refactored)
13336
13337
lr = listrotate2D(5)
13338
for i in range(8):
13339
lr.add(i,i)
13340
- print lr.getlistx()
13341
- print lr.getlisty()
13342
- print lr.getlistr()
13343
- print
13344
+ print(lr.getlistx())
13345
+ print(lr.getlisty())
13346
+ print(lr.getlistr())
13347
+ print()
13348
13349
- print '------------'
13350
+ print('------------')
13351
lr = listrotate1D(5)
13352
for i in range(8):
13353
lr.add(i)
13354
- print lr.getlist()
13355
- print lr.getlistr()
13356
- print
13357
+ print(lr.getlist())
13358
+ print(lr.getlistr())
13359
+ print()
13360
--- a/tools/src/tau_portal/bin/tau_portal.py (original)
13361
--- b/tools/src/tau_portal/bin/tau_portal.py (refactored)
13362
13363
#!/usr/bin/env python
13364
-import httplib, urllib, fileinput, sys, getpass, platform
13365
+import http.client, urllib.request, urllib.parse, urllib.error, fileinput, sys, getpass, platform
13366
from optparse import OptionParser
13367
#import perfdmf and portal modules
13368
import portal
13369
13370
# sys.exit(2)
13371
13372
if len(sys.argv[1:]) < 1:
13373
- print usage()
13374
+ print(usage())
13375
sys.exit(-1)
13376
13377
13378
13379
parser.parse_args(sys.argv[1:])
13380
13381
if (options.username == ""):
13382
- print "TAU Portal Username: ",
13383
+ print("TAU Portal Username: ", end=' ')
13384
options.username = sys.stdin.readline().strip()
13385
if (options.password == ""):
13386
options.password = getpass.getpass("TAU Portal Password: ")
13387
if (options.workspace == "" and not sys.argv[1] in ["list_workspaces", "work"]):
13388
- print "TAU Portal Workspace: ",
13389
+ print("TAU Portal Workspace: ", end=' ')
13390
options.workspace = sys.stdin.readline().strip()
13391
13392
#print options, args
13393
13394
trial_list = []
13395
for trial in args:
13396
trial_list.append(open(trial, 'r'))
13397
- print portal.upload(options.username, options.password, options.workspace,
13398
- options.experiment, trial_list, options.host, options.summary)
13399
+ print(portal.upload(options.username, options.password, options.workspace,
13400
+ options.experiment, trial_list, options.host, options.summary))
13401
#print "upload"
13402
elif (sys.argv[1] in ["download", "down"]):
13403
file = portal.download(options.username, options.password,
13404
options.workspace, options.experiment, args[0], options.host)
13405
if (file.startswith("TAU Portal")):
13406
- print file
13407
+ print(file)
13408
else:
13409
- print "TAU PORTAL download is a success.\n"
13410
+ print("TAU PORTAL download is a success.\n")
13411
name = args[0] + ".ppk"
13412
filewriter = open(name, 'w')
13413
filewriter.write(file)
13414
13415
#print "sync"
13416
elif (sys.argv[1] in ["list_workspaces", "work"]):
13417
for workspace in portal.get_workspaces(options.username, options.password):
13418
- print workspace + ",",
13419
+ print(workspace + ",", end=' ')
13420
elif (sys.argv[1] in ["list_trials", "trial"]):
13421
trials = portal.get_trials(options.username, options.password,
13422
options.workspace, options.experiment, options.host)
13423
if (not trials is None):
13424
- print ", ".join(trials)
13425
+ print(", ".join(trials))
13426
#print portal.get_trials(options.username, options.password,
13427
#options.workspace, options.host)
13428
else:
13429
- print "Command : " + sys.argv[1] + " unknown."
13430
+ print("Command : " + sys.argv[1] + " unknown.")
13431
13432
main()
13433