diff scripts/overhead_result_calc.py @ 4:ef2b8d975a99

exec time vs task size: data generation and calculation split into two scripts
author Merten Sach <msach@mailbox.tu-berlin.de>
date Mon, 12 Dec 2011 20:26:28 +0100
parents
children
line diff
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/scripts/overhead_result_calc.py	Mon Dec 12 20:26:28 2011 +0100
     1.3 @@ -0,0 +1,111 @@
     1.4 +#! /usr/bin/env python
     1.5 +# -*- coding: utf-8 -*-
     1.6 +
     1.7 +import sys
     1.8 +from re import search
     1.9 +import datetime
    1.10 +from os.path import basename
    1.11 +
    1.12 +"""
    1.13 +This script generates a graph that represents the overhead
    1.14 +involved in synchronisation operations
    1.15 +"""
    1.16 +
    1.17 +usage="""
    1.18 +	This generates an output file for each value of number-of-threads that give the number of all runs.
    1.19 +	As an input it expects a result file which is generated by the overhead_result_calc.py script.
    1.20 +	The file extension of the result file has to be ".meas".
    1.21 +	It is expected that the output directory's path is meaningful, such as machine-name, date, and so on
    1.22 +	Usage:
    1.23 +		%s [result files] [path to output dir]
    1.24 +""" % sys.argv[0]
    1.25 +
    1.26 +if len(sys.argv) < 3:
    1.27 +	print usage
    1.28 +	sys.exit(0)
    1.29 +
    1.30 +result_filenames = sys.argv[1:-1]
    1.31 +output_dir_path = sys.argv[-1]
    1.32 +
    1.33 +for result_filename in result_filenames:
    1.34 +	#open input file
    1.35 +	try:
    1.36 +		result_file = open(result_filename,"r")
    1.37 +	except:
    1.38 +		print "Cannot open result file %s" % result_filename
    1.39 +		sys.exit(1)
    1.40 +	
    1.41 +	#parse(evaluate) result file
    1.42 +	try:
    1.43 +		exec(result_file.read())
    1.44 +	except:
    1.45 +		print "Cannot parse result file: %s" % result_filename
    1.46 +		result_file.close()
    1.47 +		sys.exit(1)
    1.48 +	result_file.close()
    1.49 +	
    1.50 +	#check for file extension
    1.51 +	result_filename = basename(result_filename)
    1.52 +	if search("\.meas$",result_filename) == None:
    1.53 +		print "Wrong file extension! Has to be '.meas'"
    1.54 +		sys.exit(1)
    1.55 +	
    1.56 +	output = output_dir_path + "/" + result_filename.replace(".meas",".result")
    1.57 +	#open gnuplot output
    1.58 +	try:
    1.59 +		gnuplot_output = open(output,"w")
    1.60 +	except IOError:
    1.61 +		print "Cannot open output file %s" % output
    1.62 +		result_file.close()
    1.63 +		sys.exit(1)
    1.64 +
    1.65 +	table_header = "# %20s\t%20s\t%20s\t%20s\t%20s\t%20s\t%20s\t%20s\n" % (
    1.66 +							 "<iters per task>",
    1.67 +							 "<total exe cycles>",
    1.68 +							 "<total work cyc>",
    1.69 +							 "<one task cyc>",
    1.70 +							 "<total overhead cyc>",
    1.71 +							 "<num syncs>",
    1.72 +							 "<overhead per Sync cyc>",
    1.73 +							 "<Exe/Work ratio>")
    1.74 +
    1.75 +	#write header to file
    1.76 +	gnuplot_output.writelines(["# Output file name: %s\n" % data_filename,
    1.77 +							"# Date of Run: %s\n" % date_of_run,
    1.78 +							"# Number of Cores: %d\n" % NUM_CORES,
    1.79 +							"# Number of Threads: %f per Core, %d total\n" % (threads_per_core, totalThreads),
    1.80 +							table_header,
    1.81 +							"# " + (len(table_header)-3)*"-" + "\n"])
    1.82 +
    1.83 +	#Now print the results out
    1.84 +	for workload_iterations_in_task in ITERS_PER_TASK_TABLE:
    1.85 +		results = array_of_results[workload_iterations_in_task]
    1.86 +
    1.87 +		#take shortest run
    1.88 +		results.sort(lambda x,y: cmp(x["total_exe_cycles"],y["total_exe_cycles"]))
    1.89 +		total_workcycles = results[0]["total_workcycles"]
    1.90 +		total_exe_cycles  = results[0]["total_exe_cycles"]
    1.91 +		#exeCycles_workCycles_ratio = results[0]["exeCycles_workCycles_ratio"]
    1.92 +
    1.93 +		#Calculate numbers
    1.94 +		overhead             = total_exe_cycles - total_workcycles
    1.95 +		total_syncs          = totalThreads * TASKS_PER_THREAD * 2
    1.96 +		overhead_per_sync    = float(overhead) / float(total_syncs)
    1.97 +		cycles_of_task       = float(total_workcycles) / float(TASKS_PER_THREAD * totalThreads)
    1.98 +		overhead_per_core    = float(overhead) / NUM_CORES
    1.99 +		workcycles_per_core  = total_workcycles / NUM_CORES
   1.100 +	
   1.101 +		# The 2 is in there because we have two sync operations in one per outer iteration
   1.102 +		exeCycles_workCycles_ratio = float(total_workcycles+float(overhead)/2)/float(total_workcycles)
   1.103 +
   1.104 +		gnuplot_output.write("%20d\t%20d\t%20d\t%20f\t%20d\t%20d\t%20f\t%20f\n" % (
   1.105 +						  workload_iterations_in_task,
   1.106 +						  total_exe_cycles,
   1.107 +						  total_workcycles,
   1.108 +						  cycles_of_task,
   1.109 +						  overhead,
   1.110 +						  total_syncs,
   1.111 +						  overhead_per_sync,
   1.112 +						  exeCycles_workCycles_ratio))
   1.113 +
   1.114 +	gnuplot_output.close();