diff --git a/src/entrypoints/ASPIS/Experiments/Experiment.py b/src/entrypoints/ASPIS/Experiments/Experiment.py index 03c1eae286fb2b6d2e38ad8e9518f350a8f2bc70..070fdfbe748a50000207aee347ed0f2b73d9f492 100644 --- a/src/entrypoints/ASPIS/Experiments/Experiment.py +++ b/src/entrypoints/ASPIS/Experiments/Experiment.py @@ -9,10 +9,10 @@ import os import subprocess as sp nMaxIterations = 10 -maxInjectionTime = 1 +maxInjectionTime = 0.2 maxTestTime = 10 nSeeds = 5000 -nExperiments = 33333 +nExperiments = 50000 main_stack_start = 0x20000000 main_stack_size = 512 stack_size = 1*2**10 @@ -296,6 +296,7 @@ def makeExperiments(nExperiments, seeds): injTime = rn.random() * maxInjectionTime sleep(injTime) ip, mem, value = inject(s) + # ip, mem, value = [0, 0, 1] # Here we check where we end up (endExperiment, fault handlers, start of main due to reboot, stuck in an infinite loop) msg = s.recvuntil(b"(gdb)", timeout=maxTestTime) @@ -359,7 +360,6 @@ def makeExperiments(nExperiments, seeds): - 1: Folder for openocd.cfg, main-entry.elf and main-entry.bin """ if __name__ == "__main__": - print(sys.argv) # Extract paths for all needed folderName = sys.argv[1] needToFlash = False @@ -406,7 +406,7 @@ if __name__ == "__main__": sleep(0.5) s.clean() - ### GOLDEN + ### Create GOLDEN runs seeds = createGoldenRuns() @@ -455,8 +455,7 @@ if __name__ == "__main__": print(range(start_addr, start_addr + size_databss)) injectable_addresses.addi(start_addr, start_addr + size_databss) - print("before: ", injectable_addresses) - # nMaxIterations optimized out + # Removing non injectable ranges injectable_addresses.chop(z1_start - 1, z1_start + 4 + 1) injectable_addresses.chop(z2_start - 1, z2_start + 4 + 1) injectable_addresses.chop(z3_start - 1, z3_start + 4 + 1) @@ -475,11 +474,11 @@ if __name__ == "__main__": injectable_addresses.chop(crc_experiments_jpeg_compress_start - 1, crc_experiments_jpeg_compress_start + ExperimentData_size + 1) injectable_addresses.chop(crc_experiments_latnav_start - 1, crc_experiments_latnav_start + ExperimentData_size + 1) injectable_addresses.chop(crcTable_start - 1, crcTable_start + 256*2 + 1) - injectable_addresses.merge_overlaps() print("after: ", injectable_addresses) + # Get a list of ranges to simplify injection injectable_ranges = [range(r[0], r[1]) for r in injectable_addresses.items()] makeExperiments(nExperiments, seeds) diff --git a/src/entrypoints/ASPIS/Experiments/Postprocess.py b/src/entrypoints/ASPIS/Experiments/Postprocess.py index 49222554031811ecf59fb5fa94bf4f70777c5dac..8aedaa184354822ed32e10062d9a2360f1ba7b27 100644 --- a/src/entrypoints/ASPIS/Experiments/Postprocess.py +++ b/src/entrypoints/ASPIS/Experiments/Postprocess.py @@ -11,7 +11,14 @@ import os import subprocess as sp import matplotlib.pyplot as plt -baseFolder = "." + +createPlots = False + + +# baseFolder = "./ExperimentsBenchmark/Experiments_Time_2025_02_08" +baseFolder = "./ExperimentsBenchmark/Experiments_CRC_2025_02_19" +# baseFolder = "." 
+ # Log IP, address/register injected and value injected @@ -22,33 +29,34 @@ class FailureMode(Enum): > 2 = silent data corruption (undetected and crc correct) > 3 = data corruption handler > 4 = signature mismatch handler - > 5 = hardware fault (reboot) + > 5 = HW fault (reboot) > 6 = infinite loop > 7 = other reason """ GOLDEN = 0, SDC = 1, - NO_EFFECT = 2, - DATA_HANDLER = 3, - SIG_HANDLER = 4, - HARDWARE = 5, + NE = 2, + DH = 3, + SH = 4, + HW = 5, LOOP = 6, OTHER = 7 + def fromFailurModeEnumToStr(FM): if FM == FailureMode.GOLDEN: return "GOLDEN" - elif FM == FailureMode.NO_EFFECT: - return "NO_EFFECT" elif FM == FailureMode.SDC: return "SDC" - elif FM == FailureMode.DATA_HANDLER: - return "DATA_HANDLER" - elif FM == FailureMode.SIG_HANDLER: - return "SIG_HANDLER" - elif FM == FailureMode.HARDWARE: - return "HARDWARE" + elif FM == FailureMode.NE: + return "NE" + elif FM == FailureMode.DH: + return "DH" + elif FM == FailureMode.SH: + return "SH" + elif FM == FailureMode.HW: + return "HW" elif FM == FailureMode.LOOP: return "LOOP" elif FM == FailureMode.OTHER: @@ -88,7 +96,8 @@ def calculate_bounds(data, z_thresh=100): return (None, median + const) -def outlier_aware_hist(data, ax, name, lower=None, upper=None): +def outlier_aware_hist(data, ax, name, lower=None, upper=None, color=None): + name = name.split('/')[-1] if not lower or lower < min(data): lower = min(data) lower_outliers = False @@ -101,32 +110,38 @@ def outlier_aware_hist(data, ax, name, lower=None, upper=None): else: upper_outliers = True - n, bins, patches = ax.hist(data, range=(lower, upper), bins='auto') + n, bins, patches = ax.hist(data, range=(lower, upper), bins='auto', label=name, color=color) + + if lower_outliers: + n_lower_outliers = (data < lower).sum() + patches[0].set_height(patches[0].get_height() + n_lower_outliers) + patches[0].set_facecolor('c') + patches[0].set_label('{} lower outliers: ({}, {})'.format(name, n_lower_outliers, max(patches[0]))) + patches[1].set_label('{}'.format(name)) + else: + patches[0].set_label('{}'.format(name)) - # if lower_outliers: - # n_lower_outliers = (data < lower).sum() - # patches[0].set_height(patches[0].get_height() + n_lower_outliers) - # patches[0].set_facecolor('c') - # patches[0].set_label('{} lower outliers: ({}, {})'.format(name, n_lower_outliers, max(patches[0]))) - # patches[1].set_label('{}'.format(name)) - # else: - # patches[0].set_label('{}'.format(name)) + p = 0.5 + if name.find("noProt") != -1: + p = 0.25 + elif name.find("reddi") != -1: + p = 0.75 - n_upper_outliers = (data > upper).sum() - patches[-1].set_label('{} ({} outliers, {:.2f} s)'.format(name, n_upper_outliers, sum([d for d in data if d > upper]) / 1e9)) + ax.axvline(mean(data), linestyle='dashed', linewidth=1, color=color) + min_ylim, max_ylim = ax.get_ylim() + ax.text(mean(data), max_ylim*p, 'Mean: {:.2f}'.format(mean(data)), size='small') + # n_upper_outliers = (data > upper).sum() + # patches[-1].set_label('{} ({} outliers, {:.2f} s)'.format(name, n_upper_outliers, sum([d for d in data if d > upper]) / 1e9)) # if upper_outliers: # patches[-1].set_height(patches[-1].get_height() + n_upper_outliers) # patches[-1].set_facecolor('m') # # patches[-1].set_label('{} upper outliers: ({}, {})'.format(name, n_upper_outliers, sum([d for d in data if d > upper]))) - - if lower_outliers or upper_outliers: - ax.legend() - + # ax.legend(loc="upper center", bbox_to_anchor=(1, 0.5), fancybox=True) -def postprocess_runs(folder, axs): +def postprocess_runs(folder, axsList, axTotList, createPlots, color): """ 
Loads in a structure the previous run with given seed and name: - name: the name of the file @@ -140,23 +155,24 @@ def postprocess_runs(folder, axs): dataFile = [f for f in files if f.endswith(".pkl")] outcomes = { - "NO_EFFECT":0, - "DATA_HANDLER" : 0, - "SIG_HANDLER" : 0, - "HARDWARE" : 0, - "LOOP" : 0, - "OTHER" : 0, + "GOLDEN":0, "SDC" : 0, + "NE" : 0, + "DH" : 0, + "SH" : 0, + "HW" : 0, + "LOOP" : 0, + "OTHER":0, "CRITICAL_CORRUPTION" : 0, "NON_CRITICAL_CORRUPTION" : 0, # Critical "acas_corruption":0, + "ann_corruption":0, "jpeg_compress_corruption":0, - "latnav_corruption":0, # Non critical - "ann_corruption":0, "canny_corruption":0, "img_scaling_corruption":0, + "latnav_corruption":0, "times_acas":[], "times_jpeg_compress":[], "times_latnav":[], @@ -169,6 +185,7 @@ def postprocess_runs(folder, axs): for file in dataFile: nFile += 1 crc_corruption = 0 + double_fail = False print(f"{nFile}/{len(dataFile)}", end='\r') with open(folder + "/" + file, 'rb') as f: @@ -184,37 +201,65 @@ def postprocess_runs(folder, axs): # critical if sum([golden_acas[i][1] != data["run_acas"][i][1] for i in range(min(len(golden_acas), len(data["run_acas"])))]) > 0: outcomes["acas_corruption"] += 1 - critical_corruption = True x = [golden_acas[i][1] ^ data["run_acas"][i][1] for i in range(min(len(golden_acas), len(data["run_acas"])))] - # print(sum([(el & (el-1) == 0) and el != 0 for el in x]), end='\t') + if(critical_corruption or non_critical_corruption): + double_fail = True + critical_corruption = True + if sum([golden_ann[i][1] != data["run_ann"][i][1] for i in range(min(len(golden_ann), len(data["run_ann"])))]) > 0: outcomes["ann_corruption"] += 1 - critical_corruption = True x = [golden_ann[i][1] ^ data["run_ann"][i][1] for i in range(min(len(golden_ann), len(data["run_ann"])))] - # print(sum([(el & (el-1) == 0) and el != 0 for el in x]), end='\t') + if(critical_corruption or non_critical_corruption): + double_fail = True + critical_corruption = True + if sum([golden_jpeg_compress[i][1] != data["run_jpeg_compress"][i][1] for i in range(min(len(golden_jpeg_compress), len(data["run_jpeg_compress"])))]) > 0: outcomes["jpeg_compress_corruption"] += 1 - critical_corruption = True x = [golden_jpeg_compress[i][1] ^ data["run_jpeg_compress"][i][1] for i in range(min(len(golden_jpeg_compress), len(data["run_jpeg_compress"])))] - # print(sum([(el & (el-1) == 0) and el != 0 for el in x]), end='\t') + if(critical_corruption or non_critical_corruption): + double_fail = True + critical_corruption = True + # non critical if sum([golden_canny[i][1] != data["run_canny"][i][1] for i in range(min(len(golden_canny), len(data["run_canny"])))]) > 0: outcomes["canny_corruption"] += 1 - non_critical_corruption = True x = [golden_canny[i][1] ^ data["run_canny"][i][1] for i in range(min(len(golden_canny), len(data["run_canny"])))] - # print(sum([(el & (el-1) == 0) and el != 0 for el in x]), end='\t') + if(critical_corruption or non_critical_corruption): + double_fail = True + non_critical_corruption = True + if sum([golden_img_scaling[i][1] != data["run_img_scaling"][i][1] for i in range(min(len(golden_img_scaling), len(data["run_img_scaling"])))]) > 0: outcomes["img_scaling_corruption"] += 1 - non_critical_corruption = True x = [golden_img_scaling[i][1] ^ data["run_img_scaling"][i][1] for i in range(min(len(golden_img_scaling), len(data["run_img_scaling"])))] - # print(sum([(el & (el-1) == 0) and el != 0 for el in x]), end='\t') + if(critical_corruption or non_critical_corruption): + double_fail = True + 
non_critical_corruption = True + if sum([golden_latnav[i][1] != data["run_latnav"][i][1] for i in range(min(len(golden_latnav), len(data["run_latnav"])))]) > 0: outcomes["latnav_corruption"] += 1 - non_critical_corruption = True x = [golden_latnav[i][1] ^ data["run_latnav"][i][1] for i in range(min(len(golden_latnav), len(data["run_latnav"])))] - # print(sum([(el & (el-1) == 0) and el != 0 for el in x]), end='\t') + if(critical_corruption or non_critical_corruption): + double_fail = True + non_critical_corruption = True + + if(double_fail): + print( + nFile, + seed, + sum([golden_acas[i][1] != data["run_acas"][i][1] for i in range(min(len(golden_acas), len(data["run_acas"])))]), + sum([golden_ann[i][1] != data["run_ann"][i][1] for i in range(min(len(golden_ann), len(data["run_ann"])))]), + sum([golden_canny[i][1] != data["run_canny"][i][1] for i in range(min(len(golden_canny), len(data["run_canny"])))]), + sum([golden_img_scaling[i][1] != data["run_img_scaling"][i][1] for i in range(min(len(golden_img_scaling), len(data["run_img_scaling"])))]), + sum([golden_jpeg_compress[i][1] != data["run_jpeg_compress"][i][1] for i in range(min(len(golden_jpeg_compress), len(data["run_jpeg_compress"])))]), + sum([golden_latnav[i][1] != data["run_latnav"][i][1] for i in range(min(len(golden_latnav), len(data["run_latnav"])))]), + data["ip"], + data["injMem"], + data["value"], + ) + + # print() if(critical_corruption or non_critical_corruption): outcomes[fromFailurModeEnumToStr(FailureMode.SDC)] += 1 @@ -224,114 +269,163 @@ def postprocess_runs(folder, axs): elif non_critical_corruption: outcomes["NON_CRITICAL_CORRUPTION"] += 1 - else: outcomes[fromFailurModeEnumToStr(data["result"])] += 1 - if folder.find("noProt") != -1 and data["result"] == FailureMode.DATA_HANDLER: + if folder.find("noProt") != -1 and data["result"] == FailureMode.DH: print(data) pass - if data["result"] in [FailureMode.SDC, FailureMode.NO_EFFECT]: - outcomes["times_acas"].extend([run[0] for run in data["run_acas"]]) - outcomes["times_jpeg_compress"].extend([run[0] for run in data["run_jpeg_compress"]]) - outcomes["times_latnav"].extend([run[0] for run in data["run_latnav"]]) - outcomes["times_ann"].extend([run[0] for run in data["run_ann"]]) - outcomes["times_canny"].extend([run[0] for run in data["run_canny"]]) - outcomes["times_img_scaling"].extend([run[0] for run in data["run_img_scaling"]]) - - - print(mean(outcomes["times_acas"]), stdev(outcomes["times_acas"])) - - # outlier_aware_hist(outcomes["times_acas"], axs[0, 0], folder, *calculate_bounds(outcomes["times_acas"])) - # outlier_aware_hist(outcomes["times_jpeg_compress"], axs[0, 1], folder, *calculate_bounds(outcomes["times_jpeg_compress"])) - # outlier_aware_hist(outcomes["times_latnav"], axs[0, 2], folder, *calculate_bounds(outcomes["times_latnav"])) - - # outlier_aware_hist(outcomes["times_ann"], axs[1, 0], folder, *calculate_bounds(outcomes["times_ann"])) - # outlier_aware_hist(outcomes["times_canny"], axs[1, 1], folder, *calculate_bounds(outcomes["times_canny"])) - # outlier_aware_hist(outcomes["times_img_scaling"], axs[1, 2], folder, *calculate_bounds(outcomes["times_img_scaling"])) + if data["result"] in [FailureMode.SDC, FailureMode.NE, FailureMode.GOLDEN]: + outcomes["times_acas"].append(sum([run[0] / 1e6 for run in data["run_acas"]])) + outcomes["times_ann"].append(sum([run[0] / 1e6 for run in data["run_ann"]])) + outcomes["times_jpeg_compress"].append(sum([run[0] / 1e6 for run in data["run_jpeg_compress"]])) + outcomes["times_canny"].append(sum([run[0] / 1e6 for 
run in data["run_canny"]])) + outcomes["times_img_scaling"].append(sum([run[0] / 1e6 for run in data["run_img_scaling"]])) + outcomes["times_latnav"].append(sum([run[0] / 1e6 for run in data["run_latnav"]])) + + # outcomes["times_acas"].extend([run[0] / 1e6 for run in data["run_acas"]]) + # outcomes["times_ann"].extend([run[0] / 1e6 for run in data["run_ann"]]) + # outcomes["times_jpeg_compress"].extend([run[0] / 1e6 for run in data["run_jpeg_compress"]]) + # outcomes["times_canny"].extend([run[0] / 1e6 for run in data["run_canny"]]) + # outcomes["times_img_scaling"].extend([run[0] / 1e6 for run in data["run_img_scaling"]]) + # outcomes["times_latnav"].extend([run[0] / 1e6 for run in data["run_latnav"]]) + + outcomesTot = [ + outcomes["times_acas"][i] + outcomes["times_ann"][i] + outcomes["times_jpeg_compress"][i] + outcomes["times_canny"][i] + outcomes["times_img_scaling"][i] + outcomes["times_latnav"][i] + for i in range(len(outcomes["times_acas"])) + ] + + print(mean(outcomes["times_acas"]), stdev(outcomes["times_acas"]), end="\t") + print(mean(outcomes["times_ann"]), stdev(outcomes["times_ann"]), end="\t") + print(mean(outcomes["times_jpeg_compress"]), stdev(outcomes["times_jpeg_compress"]), end="\t") + print(mean(outcomes["times_canny"]), stdev(outcomes["times_canny"]), end="\t") + print(mean(outcomes["times_img_scaling"]), stdev(outcomes["times_img_scaling"]), end="\t") + print(mean(outcomes["times_latnav"]), stdev(outcomes["times_latnav"]), end="\t") + print() + + print("hyperperiod: ", mean(outcomesTot), stdev(outcomesTot)) + + if createPlots: + for axs in axsList: + outlier_aware_hist(outcomes["times_acas"], axs[0, 0], folder, *calculate_bounds(outcomes["times_acas"]), color=color) + outlier_aware_hist(outcomes["times_ann"], axs[1, 0], folder, *calculate_bounds(outcomes["times_ann"]), color=color) + outlier_aware_hist(outcomes["times_jpeg_compress"], axs[2, 0], folder, *calculate_bounds(outcomes["times_jpeg_compress"]), color=color) + + outlier_aware_hist(outcomes["times_canny"], axs[0, 1], folder, *calculate_bounds(outcomes["times_canny"]), color=color) + outlier_aware_hist(outcomes["times_img_scaling"], axs[1, 1], folder, *calculate_bounds(outcomes["times_img_scaling"]), color=color) + outlier_aware_hist(outcomes["times_latnav"], axs[2, 1], folder, *calculate_bounds(outcomes["times_latnav"]), color=color) + + axs[0, 0].set_xlabel('Time [ms]') + axs[1, 0].set_xlabel('Time [ms]') + axs[2, 0].set_xlabel('Time [ms]') + axs[0, 1].set_xlabel('Time [ms]') + axs[1, 1].set_xlabel('Time [ms]') + axs[2, 1].set_xlabel('Time [ms]') + + + + for axTot in axTotList: + outlier_aware_hist(outcomesTot, axTot, folder, *calculate_bounds(outcomesTot), color=color) + axTot.set_xlabel('Time [ms]') outcomes.pop("times_acas") - outcomes.pop("times_jpeg_compress") - outcomes.pop("times_latnav") outcomes.pop("times_ann") + outcomes.pop("times_jpeg_compress") outcomes.pop("times_canny") outcomes.pop("times_img_scaling") + outcomes.pop("times_latnav") print(folder) print(outcomes) + if __name__ == "__main__": - fig, axs = plt.subplots(2, 3, tight_layout=True) - - axs[0, 0].title.set_text('acas') - axs[0, 1].title.set_text('jpeg_compress') - axs[0, 2].title.set_text('latnav') - axs[1, 0].title.set_text('ann') - axs[1, 1].title.set_text('canny') - axs[1, 2].title.set_text('img_scaling') - - postprocess_runs(f"{baseFolder}/noProt", axs) - postprocess_runs(f"{baseFolder}/reddi", axs) - postprocess_runs(f"{baseFolder}/eddi", axs) - - plt.show() - -""" -noProtection -{ - 'NO_EFFECT': 29372, - 'DATA_HANDLER': 0, - 
'SIG_HANDLER': 0, - 'HARDWARE': 105, - 'LOOP': 0, - 'OTHER': 0, - 'SDC': 3856, - 'CRITICAL_CORRUPTION': 1536, - 'NON_CRITICAL_CORRUPTION': 2320, - 'acas_corruption': 635, - 'jpeg_compress_corruption': 779, - 'latnav_corruption': 656, - 'ann_corruption': 1703, - 'canny_corruption': 680, - 'img_scaling_corruption': 765 -} - -reddi -{ - 'NO_EFFECT': 28918, - 'DATA_HANDLER': 765, - 'SIG_HANDLER': 0, - 'HARDWARE': 78, - 'LOOP': 0, - 'OTHER': 0, - 'SDC': 3572, - 'CRITICAL_CORRUPTION': 1293, - 'NON_CRITICAL_CORRUPTION': 2279, - 'acas_corruption': 616, - 'jpeg_compress_corruption': 610, - 'latnav_corruption': 626, - 'ann_corruption': 1754, - 'canny_corruption': 654, - 'img_scaling_corruption': 724 -} - -eddi -{ - 'NO_EFFECT': 26176, - 'DATA_HANDLER': 4733, - 'SIG_HANDLER': 0, - 'HARDWARE': 79, - 'LOOP': 0, - 'OTHER': 0, - 'SDC': 2345, - 'CRITICAL_CORRUPTION': 1297, - 'NON_CRITICAL_CORRUPTION': 1048, - 'acas_corruption': 605, - 'jpeg_compress_corruption': 587, - 'latnav_corruption': 631, - 'ann_corruption': 597, - 'canny_corruption': 600, - 'img_scaling_corruption': 646 -} -""" + plt.rcParams.update({'font.size': 20}) + plt.rc('legend',fontsize=16) + + ## TODO: Add a plot with the hyperperiods + ## TODO: Compare REDDI with EDDI + ## TODO: Compare REDDIFunc with EDDI + + figReddi, axsReddi = plt.subplots(3, 2, tight_layout=True) + figReddiFunc, axsReddiFunc = plt.subplots(3, 2, tight_layout=True) + figEddi, axsEddi = plt.subplots(3, 2, tight_layout=True) + + figReddiEddi, axsReddiEddi = plt.subplots(3, 2, tight_layout=True) + figReddiFEddi, axsReddiFEddi = plt.subplots(3, 2, tight_layout=True) + + figTot, axsTot = plt.subplots(1, 1, tight_layout=True) + + figReddi.suptitle("REDDI on Global Variables") + figReddi.set_size_inches(10, 10) + axsReddi[0, 0].title.set_text('acas') + axsReddi[1, 0].title.set_text('ann') + axsReddi[2, 0].title.set_text('jpeg_compress') + axsReddi[0, 1].title.set_text('canny') + axsReddi[1, 1].title.set_text('img_scaling') + axsReddi[2, 1].title.set_text('latnav') + + figReddiFunc.suptitle("REDDI on Functions") + figReddiFunc.set_size_inches(10, 10) + axsReddiFunc[0, 0].title.set_text('acas') + axsReddiFunc[1, 0].title.set_text('ann') + axsReddiFunc[2, 0].title.set_text('jpeg_compress') + axsReddiFunc[0, 1].title.set_text('canny') + axsReddiFunc[1, 1].title.set_text('img_scaling') + axsReddiFunc[2, 1].title.set_text('latnav') + + figEddi.suptitle("EDDI") + figEddi.set_size_inches(10, 10) + axsEddi[0, 0].title.set_text('acas') + axsEddi[1, 0].title.set_text('ann') + axsEddi[2, 0].title.set_text('jpeg_compress') + axsEddi[0, 1].title.set_text('canny') + axsEddi[1, 1].title.set_text('img_scaling') + axsEddi[2, 1].title.set_text('latnav') + + figReddiEddi.suptitle("REDDI on GV vs EDDI") + figReddiEddi.set_size_inches(10, 10) + axsReddiEddi[0, 0].title.set_text('acas') + axsReddiEddi[1, 0].title.set_text('ann') + axsReddiEddi[2, 0].title.set_text('jpeg_compress') + axsReddiEddi[0, 1].title.set_text('canny') + axsReddiEddi[1, 1].title.set_text('img_scaling') + axsReddiEddi[2, 1].title.set_text('latnav') + + figReddiFEddi.suptitle("REDDI on Functions vs EDDI") + figReddiFEddi.set_size_inches(10, 10) + axsReddiFEddi[0, 0].title.set_text('acas') + axsReddiFEddi[1, 0].title.set_text('ann') + axsReddiFEddi[2, 0].title.set_text('jpeg_compress') + axsReddiFEddi[0, 1].title.set_text('canny') + axsReddiFEddi[1, 1].title.set_text('img_scaling') + axsReddiFEddi[2, 1].title.set_text('latnav') + + figTot.suptitle("Hyperperiods") + figTot.set_size_inches(10, 10) + + + 
postprocess_runs(f"{baseFolder}/noProt", [axsReddi, axsReddiFunc, axsEddi], [axsTot], createPlots, "orange") + + postprocess_runs(f"{baseFolder}/reddi", [axsReddi, axsReddiEddi], [axsTot], createPlots, "r") + postprocess_runs(f"{baseFolder}/reddiFunc", [axsReddiFunc, axsReddiFEddi], [axsTot], createPlots, "g") + + postprocess_runs(f"{baseFolder}/eddi", [axsEddi, axsReddiEddi, axsReddiFEddi], [axsTot], createPlots, "b") + + if createPlots: + figReddi.legend(["no protection", "_", "REDDI"], loc="lower center") + figReddiFunc.legend(["no protection", "_", "REDDI"], loc="lower center") + figEddi.legend(["no protection", "_", "EDDI"], loc="lower center") + figReddiEddi.legend(["REDDI on GV", "_", "EDDI"], loc="lower center") + figReddiFEddi.legend(["REDDI on Fn", "_", "EDDI"], loc="lower center") + + figTot.legend(["no protection", "_", "REDDI on GV", "_", "REDDI on Fn", "_", "EDDI"], loc="lower right") + + figReddi.savefig("Reddi.pdf", format="pdf", bbox_inches="tight") + figReddiFunc.savefig("ReddiFunc.pdf", format="pdf", bbox_inches="tight") + figEddi.savefig("Eddi.pdf", format="pdf", bbox_inches="tight") + figReddiEddi.savefig("ReddiEddi.pdf", format="pdf", bbox_inches="tight") + figReddiFEddi.savefig("ReddiFEddi.pdf", format="pdf", bbox_inches="tight") + figTot.savefig("Tot.pdf", format="pdf", bbox_inches="tight") + \ No newline at end of file diff --git a/src/entrypoints/Main/ExperimentHIL.py b/src/entrypoints/Main/ExperimentHIL.py index adeb734810be57c0a942e9f0c96bae40646d3d36..8d0ef0a473508fc4a06fe7619127bea4923d703a 100644 --- a/src/entrypoints/Main/ExperimentHIL.py +++ b/src/entrypoints/Main/ExperimentHIL.py @@ -9,9 +9,11 @@ import os import subprocess as sp import threading +InjectFaults = False + maxInjectionTime = 15 maxTestTime = 90 -nExperiments = 500 +nExperiments = 5000 cpu = [] # stores the cpu mean busy time and stdDev criticalFailure = '' @@ -69,8 +71,11 @@ def fromFailurModeEnumToStr(FM): def setup(s): s.sendline(b"delete") # delete all breakpoints - s.sendline(b"b waitActuatorData") # breakpoint for injection s.recvuntil(b"(gdb)") + + if InjectFaults: + s.sendline(b"b waitActuatorData") # breakpoint for injection + s.recvuntil(b"(gdb)") s.sendline(b"b simulationEnd") # breakpoint for end of experiment (cutters actuated) s.sendline(b"monitor reset halt") s.recvuntil(b"(gdb)") @@ -159,15 +164,14 @@ def simulatorThread(): liftoff = True elif line.find(b"CRITICAL FAILURE:") != -1: criticalFailure = line - print(line) - break - elif line.find(b"apogee: ") != -1: + print("Critical failure: ", line) + elif line.find(b"Expulsion: ") != -1: apogee = float(line.split()[1]) - break - # else: - # print(line) + vz = line.split()[2] + print("apogee: ", apogee, vz) line = simu.recvline(timeout=maxTestTime) except Exception: + print("Exception") pass os.system(f"kill -2 {GDB_PID}") @@ -186,10 +190,11 @@ def makeExperiments(nExperiments, folderName, needToFlash): global cpu # Extract paths for all needed - - elfPath = folderName + "/" + "main-entry.elf" - openOCDPath = folderName + "/" + "openocd.cfg" - experimentBase = folderName + "/" + folderName + # base_folder = "./HILInjections" + "/" + folderName # data + base_folder = folderName # utilization + elfPath = base_folder + "/" + "main-entry.elf" + openOCDPath = base_folder + "/" + "openocd.cfg" + experimentBase = base_folder + "/" + folderName BOARD_SERIAL = "" with open(openOCDPath, 'r') as f: @@ -210,7 +215,7 @@ def makeExperiments(nExperiments, folderName, needToFlash): print("Board serial not found!") exit(-1) - files = 
next(os.walk(folderName))[2] + files = next(os.walk(base_folder))[2] numbers = [int(f[len(folderName):-4].split('_')[0]) for f in files if f.startswith(folderName)] starting = 0 @@ -222,7 +227,7 @@ def makeExperiments(nExperiments, folderName, needToFlash): print(f"Flashing {folderName} ({BOARD_SERIAL})") os.system("pkill -SIGKILL openocd") os.system("pkill -SIGKILL gdb") - if os.system(f"st-flash --serial {BOARD_SERIAL} write {folderName}/main-entry.bin 0x8000000 && st-flash --serial {BOARD_SERIAL} reset") != 0: + if os.system(f"st-flash --serial {BOARD_SERIAL} write {base_folder}/main-entry.bin 0x8000000 && st-flash --serial {BOARD_SERIAL} reset") != 0: print("Couldn't flash!") exit() @@ -238,10 +243,10 @@ def makeExperiments(nExperiments, folderName, needToFlash): while(result == FailureMode.OTHER): recover = False - os.system("pkill -SIGKILL openocd") - os.system("pkill -SIGKILL gdb") + os.system("pkill -SIGKILL openocd > /dev/null ") + os.system("pkill -SIGKILL gdb > /dev/null ") - os.system(f"st-flash --serial {BOARD_SERIAL} reset") + os.system(f"st-flash --serial {BOARD_SERIAL} reset > /dev/null ") os.system(f"openocd -f {openOCDPath} --debug=0 > /dev/null 2> /dev/null &") s = process(["gdb", elfPath]) @@ -267,34 +272,37 @@ def makeExperiments(nExperiments, folderName, needToFlash): simulator = threading.Thread(target=simulatorThread) simulator.start() - - while not liftoff: - s.sendline(b"continue") - if s.recvuntil(b"Continuing.", timeout=maxTestTime) == b'': - print("RECOVERING") - recover = True - break - continue - - if(recover): - continue - - # Injecting after random amount of time (between 0 and 1.5 seconds) - injTime = rn.random() * maxInjectionTime - - # injection - injTs = time() + 5 + injTime - while time() < injTs: - s.sendline(b"continue") - if s.recvuntil(b"Continuing.", timeout=maxTestTime) == b'': - print("RECOVERING") - recover = True - break - if(recover): - continue - - injection = inject(s, ada_start, ada_size) + if InjectFaults: + while not liftoff: + s.sendline(b"continue") + if s.recvuntil(b"Continuing.", timeout=maxTestTime) == b'': + print("RECOVERING") + recover = True + break + continue + + if(recover): + continue + + # Injecting after random amount of time (between 0 and 1.5 seconds) + injTime = rn.random() * maxInjectionTime + + # injection + injTs = time() + 5 + injTime + while time() < injTs: + s.sendline(b"continue") + if s.recvuntil(b"Continuing.", timeout=maxTestTime) == b'': + print("RECOVERING") + recover = True + break + + if(recover): + continue + + injection = inject(s, ada_start, ada_size) + else: + injection = 0 # Here we check where we end up (endExperiment, fault handlers, start of main due to reboot, stuck in an infinite loop) msg = s.recvuntil(b"(gdb)", timeout=maxTestTime) @@ -325,12 +333,14 @@ def makeExperiments(nExperiments, folderName, needToFlash): # print(check_results(), end="\t") print(fromFailurModeEnumToStr(result), end="\t") print(injection) + if(len(cpu) > 0): + print(cpu[-1]) if(result != FailureMode.OTHER): save_run(experimentFile, result, injection, cpu) + s.close() - # server.close() """ argv: diff --git a/src/entrypoints/Main/PostprocessHIL.py b/src/entrypoints/Main/PostprocessHIL.py index f689f51b5411037c403c0cf1be2fdaa77137f041..443a5fa3f5a8b728d1e88f759bf3656362b43acc 100644 --- a/src/entrypoints/Main/PostprocessHIL.py +++ b/src/entrypoints/Main/PostprocessHIL.py @@ -56,7 +56,7 @@ def fromFailurModeEnumToStr(FM): return "Invalid FailurMode enum!" 
-def postprocess_runs(folder): +def postprocess_runs(folder, ax): """ Loads in a structure the previous run with given seed and name: - name: the name of the file @@ -66,9 +66,6 @@ def postprocess_runs(folder): - list of the runs of different benchmarks (time, crc) - result of the experiment as a FailureMode enum """ - files = next(os.walk(folder))[2] - dataFile = [f for f in files if f.endswith(".pkl")] - outcomes = { "GOLDEN" : 0, "DATA_CORRUPTED" : 0, @@ -84,27 +81,60 @@ def postprocess_runs(folder): cpuM = [] cpuV = [] - for file in dataFile: - nFile += 1 - - print(f"{nFile}/{len(dataFile)}", end='\r') - with open(folder + "/" + file, 'rb') as f: - data = pickle.load(f) - - outcomes[fromFailurModeEnumToStr(data["result"])] += 1 - if(data["result"] == FailureMode.SILENT_CORRUPTION): - cpuM.append(mean([float(c[0]) for c in data["cpu"] ])) - cpuV.append(mean([float(c[1]) for c in data["cpu"] ])) - - print(folder) + + for foldN in [""]: + files = next(os.walk(folder+foldN))[2] + dataFile = [f for f in files if f.endswith(".pkl")] + + print(folder+foldN) + for file in dataFile: + nFile += 1 + + print(f"{nFile}/{len(dataFile)}", end='\r') + with open(folder+foldN + "/" + file, 'rb') as f: + data = pickle.load(f) + + outcomes[fromFailurModeEnumToStr(data["result"])] += 1 + if(data["result"] == FailureMode.SILENT_CORRUPTION): + if(len(data["cpu"]) > 0): + if(float(data["cpu"][-1][0]) > 14): + print(nFile, data["cpu"]) + # cpuM.append(mean([float(c[0]) for c in data["cpu"] ])) + # cpuV.append(mean([float(c[1]) for c in data["cpu"] ])) + cpuM.append(float(data["cpu"][-1][0])) + cpuV.append(float(data["cpu"][-1][1])) + print() + print(outcomes) print(mean(cpuM), mean(cpuV)) - plt.hist(cpuM, bins='auto', label=folder) + print(nFile) + ax.hist(cpuM, bins='auto', label=folder+f" ({nFile})") + p = 0.5 + color = "k" + if folder.find("noProt") != -1: + p = 0.25 + color = "blue" + elif folder.find("reddi") != -1: + p = 0.75 + color = "orange" + + ax.axvline(mean(cpuM), linestyle='dashed', linewidth=1, color=color) + min_ylim, max_ylim = ax.get_ylim() + ax.text(mean(cpuM), max_ylim*p, 'Mean: {:.2f}'.format(mean(cpuM)), size='small') + if __name__ == "__main__": - postprocess_runs(f"{baseFolder}/noProtection") - postprocess_runs(f"{baseFolder}/reddi") - plt.legend(loc='upper right') + plt.rcParams.update({'font.size': 20}) + plt.rc('legend',fontsize=16) + + fig, ax = plt.subplots(1, 1, tight_layout=True) + fig.set_size_inches(10, 10) + + postprocess_runs(f"{baseFolder}/noProtection", ax) + postprocess_runs(f"{baseFolder}/reddi", ax) + fig.legend(["no protection", "_", "REDDI"], loc="center") + plt.xlabel('CPU Utilization [%]') + plt.savefig("HIL.pdf", format="pdf", bbox_inches="tight") plt.show()
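
# --- Reference sketch (not part of the patch) -------------------------------
# The injectable-address bookkeeping in Experiment.py (addi/chop/merge_overlaps/
# items) looks like the `intervaltree` package API. Below is a minimal,
# self-contained sketch of that pattern under that assumption; the addresses
# and sizes are hypothetical placeholders, not values from the experiment.
from intervaltree import IntervalTree

RAM_START = 0x20000000        # hypothetical start of injectable .data/.bss
RAM_SIZE = 96 * 1024          # hypothetical size of the injectable region
PROTECTED = 0x20001000        # hypothetical address of a region to exclude (e.g. a CRC table)

injectable = IntervalTree()
injectable.addi(RAM_START, RAM_START + RAM_SIZE)

# chop() removes the protected sub-range and splits the surrounding interval,
# so random injections never land on the excluded structure
injectable.chop(PROTECTED - 1, PROTECTED + 256 * 2 + 1)

# flatten the tree into plain ranges, as Experiment.py does before picking
# a random injection address
injectable_ranges = [range(iv.begin, iv.end) for iv in injectable]
print(sum(len(r) for r in injectable_ranges), "injectable bytes")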
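
# --- Reference sketch (not part of the patch) -------------------------------
# A minimal illustration of the mean-annotated histogram that
# outlier_aware_hist() in Postprocess.py draws for each benchmark/protection
# variant; the timing data below is synthetic and the label/color are
# arbitrary examples.
import random
from statistics import mean

import matplotlib.pyplot as plt

times_ms = [random.gauss(12.0, 0.5) for _ in range(1000)]  # synthetic run times [ms]

fig, ax = plt.subplots(tight_layout=True)
ax.hist(times_ms, bins='auto', label="noProt", color="orange")

# dashed vertical line at the mean, with a small text annotation placed
# at a fraction of the y-axis (Postprocess.py varies this fraction per variant
# so overlapping labels stay readable)
ax.axvline(mean(times_ms), linestyle='dashed', linewidth=1, color="orange")
_, max_ylim = ax.get_ylim()
ax.text(mean(times_ms), max_ylim * 0.5, 'Mean: {:.2f}'.format(mean(times_ms)), size='small')

ax.set_xlabel('Time [ms]')
ax.legend()
plt.show()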