#! /usr/bin/env python
# -*- coding: utf-8 -*-

import sys
from re import match, search
from pprint import pformat
from datetime import datetime
from subprocess import call,Popen,PIPE

"""
This script generates a graph that represents the overhead

involved in synchronisation operations
This script generates results for 5 runs per threads and iteration in
TOTAL_THREADS_TABLE and ITER_PER_TASK_TABLE
"""

usage="""
	This runs the exec time vs task size in three levels of loop nest.  The outer most iterates through 
	a selection of numbers-of-thread.  For each of those, the next lever iterates over a number of work-loops-per-task
	values.  The innermost repeats several times and chooses the best.
	Finally, it generates an output file for each value of number-of-threads that give the number of all runs.
	It is expected that the output directory's path is meaningful, such as machine-name, date, and so on
	Usage:
		%s [executable binary] [path to output dir]
""" % sys.argv[0]

NUM_CORES = 4 #Number of Cores the code was compiled for
ITERS_PER_TASK_TABLE = [2, 5, 10, 20, 40, 80, 160, 320, 640] #Number of iterations of inner loop
TASKS_PER_THREAD = 30000 #Number of iterations of outer loop
TOTAL_THREADS_TABLE = [8, 32, 128, 512] #Thread counts tried by the outermost run loop below

def getNumber(line):
	"""Return (as a string) the first integer or decimal number found in line.

	Raises ValueError if the line contains no number.  Callers convert the
	returned string with int() or float() as appropriate.
	"""
	# Raw string so '\d' is a regex escape, not a (deprecated) string escape.
	match_obj = search(r"(\d+\.?\d*)", line)
	if match_obj is not None:
		return match_obj.group(1)
	else:
		raise ValueError("no number found in line: %r" % line)

if len(sys.argv) != 3:
	print usage
	sys.exit(0)
    
cmd=sys.argv[1]
try:
	f = open(cmd)
except IOError:
	print "Please provide a valid executable."
	f.close()
	sys.exit(1)
finally:
	f.close()

output_dir_path = sys.argv[2]

#===================================================================
#  Done with parsing cmd line inputs, start doing the runs 
#

for totalThreads in TOTAL_THREADS_TABLE:
	print "\nDoing run with %d threads" % totalThreads
	output = "%s/%d_thds__o%d__perfCtrs.meas" % (output_dir_path, totalThreads, TASKS_PER_THREAD)
	print "output file: %s" % output
	threadsPerCore = totalThreads/NUM_CORES
	array_of_results = {}
	for workload_iterations_in_task in ITERS_PER_TASK_TABLE:
		print "Run for %s workload iterations in a task" % workload_iterations_in_task
		results = []
		for run in range(5):
			print "Run %d" % run,
			program_output = Popen("%s -t %d -i %d -o %d" % (cmd,
												totalThreads,
												workload_iterations_in_task,
												TASKS_PER_THREAD),
								stdout=PIPE, stderr=None, shell=True).stdout.read()
			#parse arguments for
			for line in program_output.split("\n"):
				if match("^Sum across threads of work cycles:", line) != None:
					total_workcycles = int(getNumber(line))
				if match("^Total Execution Cycles:", line) != None:
					total_exe_cycles = int(getNumber(line))
				if match("^ExeCycles/WorkCycles Ratio", line) != None:
					exeCycles_workCycles_ratio = float(getNumber(line))
			results.append({"total_workcycles"            : total_workcycles,
						"total_exe_cycles"            : total_exe_cycles,
						"exeCycles_workCycles_ratio" : exeCycles_workCycles_ratio})
			print "ratio %f" % exeCycles_workCycles_ratio
		array_of_results[workload_iterations_in_task] = results
		
	#open file to output data
	try:
		data_output = open(output,"w")
	except IOError:
		print "Cannot open output file %s" % output
		sys.exit(1)
	
	#output relevant data to file
	data_output.write(";\n".join(["# This is a output of the overhead_data_generation.py script, run the overhead_result_calc.py script to get the calculated results",
					  "data_filename = " + pformat(output),
					  "NUM_CORES = " + pformat(NUM_CORES),					  
					  "ITERS_PER_TASK_TABLE = " + pformat(ITERS_PER_TASK_TABLE),
					  "TASKS_PER_THREAD = " + pformat(TASKS_PER_THREAD),
					  "date_of_run = " + pformat(datetime.now()),
					  "threads_per_core = " + pformat(threadsPerCore),
					  "totalThreads = " + pformat(totalThreads),
					  "# array_of_results: hash key is the number of iterations per task(inner iterations)",
					  "array_of_results = " + pformat(array_of_results)]))
	
							
	data_output.close()

