nsdk_report.py 27 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659
  1. #!/usr/bin/env python3
  2. import os
  3. import sys
  4. import time
  5. import copy
  6. import glob
  7. import tempfile
  8. import argparse
  9. SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
  10. requirement_file = os.path.abspath(os.path.join(SCRIPT_DIR, "..", "requirements.txt"))
  11. MARKDOWN_PLUGIN=True
  12. try:
  13. import serial
  14. import json
  15. import markdown
  16. import pyexcel as pe
  17. from prettytable import *
  18. except Exception as exc:
  19. MARKDOWN_PLUGIN=False
  20. print("Import Error: %s" % (exc))
  21. print("Please install requried packages using: pip3 install -r %s" % (requirement_file))
  22. sys.exit(1)
  23. from nsdk_utils import *
  24. def get_expected(config, app, cfg_name):
  25. if isinstance(config, dict) == False:
  26. return None
  27. found_app_expected = find_local_appconfig(app, config.get("expected", dict()))
  28. found_app_expecteds = find_local_appconfig(app, config.get("expecteds", dict()))
  29. if found_app_expected:
  30. app_expected = copy.deepcopy(config.get("expected", dict()).get(found_app_expected))
  31. else:
  32. app_expected = dict()
  33. if found_app_expecteds:
  34. app_cfgexpected = config.get("expecteds", dict()).get(found_app_expecteds).get(cfg_name, dict())
  35. else:
  36. app_cfgexpected = dict()
  37. app_expected = merge_two_config(app_expected, app_cfgexpected)
  38. return app_expected
  39. def check_expected(build_status, config, run=False):
  40. if isinstance(build_status, dict) == False or isinstance(config, dict) == False:
  41. return False
  42. ret = True
  43. for app in build_status:
  44. app_allstatus = build_status[app]
  45. for cfgname in app_allstatus:
  46. app_status = app_allstatus[cfgname]["status"]
  47. build_ret = app_status.get("build", False)
  48. run_ret = app_status.get("run", False)
  49. app_cfg_expected = get_expected(config, app, cfgname)
  50. if isinstance(app_cfg_expected, dict):
  51. expected_build_ret = app_cfg_expected.get("build", True)
  52. expected_run_ret = app_cfg_expected.get("run", True)
  53. if build_ret == False and expected_build_ret != build_ret:
  54. ret = False
  55. if run:
  56. if run_ret == False and expected_run_ret != run_ret:
  57. ret = False
  58. else:
  59. if build_ret == False:
  60. ret = False
  61. if run:
  62. if run_ret == False:
  63. ret = False
  64. return ret
  65. def save_results(appcfg, hwcfg, mergedcfg, result, savedir):
  66. if not (isinstance(savedir, str) and os.path.isdir(savedir)):
  67. return
  68. if isinstance(appcfg, dict):
  69. sfn = os.path.join(savedir, "appcfg.json")
  70. save_json(sfn, appcfg)
  71. if isinstance(hwcfg, dict):
  72. sfn = os.path.join(savedir, "hwcfg.json")
  73. save_json(sfn, hwcfg)
  74. if isinstance(mergedcfg, dict):
  75. sfn = os.path.join(savedir, "mergedcfg.json")
  76. save_json(sfn, mergedcfg)
  77. if isinstance(result, dict):
  78. sfn = os.path.join(savedir, "result.json")
  79. save_json(sfn, result)
  80. pass
def analyze_report(config, result, runapp=False):
    """Compare a bench result against configured expectations and summarize it.

    :param config: merged configuration dict; "build_config"/"build_configs",
        "appconfig" and "expected"/"expecteds" entries are consulted
    :param result: result dict keyed app -> cfgname -> case data
    :param runapp: when True, run status participates in the pass/fail verdict
    :return: dict with "passed"/"failed"/"status" summaries per app, the
        "configs" (make options per case name) and a "percase" breakdown
    """
    apps_status = dict()
    passed_apps = dict()
    failed_apps = dict()
    build_cfgs = dict()
    glb_buildcfg = config.get("build_config", dict())
    # TODO currently this feature only cover cases that the application build_configs
    # just extend the global build_configs
    # get build configs used per cfgname
    if "build_configs" not in config:
        build_cfgs["default"] = config.get("build_config", dict())
    else:
        sub_configs = config["build_configs"]
        for cfgname in sub_configs:
            # each case config is the global build_config overridden by the case entry
            bcfg = copy.deepcopy(glb_buildcfg)
            bcfg.update(sub_configs[cfgname])
            build_cfgs[cfgname] = bcfg
    if "appconfig" in config:
        appcfgs = config.get("appconfig", dict())
        for app in appcfgs:
            if "build_configs" in appcfgs[app]:
                appsub_configs = appcfgs[app]["build_configs"]
                for cfgname in appsub_configs:
                    bcfg = copy.deepcopy(glb_buildcfg)
                    bcfg.update(appsub_configs[cfgname])
                    build_cfgs[cfgname] = bcfg
    # Per-app worker: closes over `config` and over the loop variable `app`
    # from the for-loop further below, so it is only valid inside that loop.
    # NOTE(review): the `expected` parameter is unused — expectations are
    # fetched via get_expected() instead; confirm whether it can be dropped.
    def check_app_status(status, expected, runapp=False):
        app_sts = {"expected": True, "exp_build": True, "exp_run": True, "build": True, "run": True}
        percase_sts = {"expected": True, "exp_build": True, "exp_run": True, "build": True, "run": True}
        app_percase_sts = dict()
        for cfgname in status:
            app_percase_sts[cfgname] = copy.deepcopy(percase_sts)
            app_cfg_expected = get_expected(config, app, cfgname)
            expected_build = app_cfg_expected.get("build", True)
            expected_run = app_cfg_expected.get("run", True)
            real_build = status[cfgname]["status"].get("build", False)
            real_run = status[cfgname]["status"].get("run", False)
            # a False status only hurts when it was not expected to be False
            if real_build == False and expected_build != real_build:
                app_sts["exp_build"] = False
                app_percase_sts[cfgname]["exp_build"] = False
            if real_run == False and expected_run != real_run:
                app_sts["exp_run"] = False
                app_percase_sts[cfgname]["exp_run"] = False
            if real_build == False:
                app_sts["build"] = False
            if real_run == False:
                app_sts["run"] = False
            # get per case expected
            # NOTE(review): in build-only mode this copies the app-level
            # accumulated exp_build rather than this case's own exp_build —
            # confirm whether that is intended
            app_percase_sts[cfgname]["expected"] = app_sts["exp_build"]
            if runapp:
                app_percase_sts[cfgname]["expected"] = app_percase_sts[cfgname]["exp_build"] and app_percase_sts[cfgname]["exp_run"]
        if runapp:
            app_sts["expected"] = app_sts["exp_build"] and app_sts["exp_run"]
        else:
            app_sts["expected"] = app_sts["exp_build"]
        analayzed_app_status = {"summary": app_sts, "percase": app_percase_sts}
        return analayzed_app_status
    apps_expected = config.get("expected", dict())
    apps_percase_status = dict()
    apps_percase_failed = dict()
    apps_percase_passed = dict()
    # Get app status compared with expected
    for app in result:
        app_expected = apps_expected.get(app, dict())
        app_status = result[app]
        analayzed_app_status = check_app_status(app_status, app_expected, runapp)
        apps_status[app] = analayzed_app_status["summary"]
        apps_percase_status[app] = analayzed_app_status["percase"]
        apps_percase_failed[app] = list()
        apps_percase_passed[app] = list()
        # per case for 1 app
        for case in analayzed_app_status["percase"]:
            if analayzed_app_status["percase"][case]["expected"] == False:
                apps_percase_failed[app].append(case)
            else:
                apps_percase_passed[app].append(case)
        # per app
        if apps_status[app]["expected"] == True:
            passed_apps[app] = copy.deepcopy(apps_status[app])
        else:
            failed_apps[app] = copy.deepcopy(apps_status[app])
    # Create report_dict
    report_dict = {"passed": passed_apps, "failed": failed_apps, "status": apps_status, "configs": build_cfgs, \
        "percase": {"status": apps_percase_status, "failed": apps_percase_failed, "passed": apps_percase_passed} }
    return report_dict
  166. def generate_build_run_status_md(appresult, logdir, bold_false=True):
  167. if isinstance(appresult, dict) == False:
  168. if bold_false:
  169. return "**False**", "**False**", "-", "-"
  170. else:
  171. return "False", "False", "-", "-"
  172. else:
  173. appblog = appresult["logs"].get("build", None)
  174. apprlog = appresult["logs"].get("run", None)
  175. appbsts = appresult["status"].get("build", False)
  176. apprsts = appresult["status"].get("run", False)
  177. appbtm = appresult["time"].get("build", "-")
  178. apprtm = appresult["time"].get("run", "-")
  179. def gen_sts_md(sts, log, bold=True):
  180. if log:
  181. log = os.path.relpath(log, logdir)
  182. if bool(bold) ^ bool((not sts)):
  183. sts_md = "[%s](%s)" % (sts, log)
  184. else:
  185. sts_md = "[**%s**](%s)" % (sts, log)
  186. else:
  187. if bool(bold) ^ bool((not sts)):
  188. sts_md = "%s" % (sts)
  189. else:
  190. sts_md = "**%s**" % (sts)
  191. return sts_md
  192. bsts_md = gen_sts_md(appbsts, appblog, bold_false)
  193. rsts_md = gen_sts_md(apprsts, apprlog, bold_false)
  194. return bsts_md, rsts_md, appbtm, apprtm
  195. def md2html(mdfile, htmlfile):
  196. if MARKDOWN_PLUGIN == False or os.path.isfile(mdfile) == False:
  197. return
  198. css_style = \
  199. """
  200. <style>
  201. table, th, td {
  202. border: 1px solid #1132e791 !important;
  203. border-collapse: collapse;
  204. padding: 3px;
  205. text-align: center;
  206. }
  207. td:first-child {
  208. text-align: left;
  209. }
  210. </style>
  211. """
  212. with open(mdfile) as mdf:
  213. mdtxt = mdf.read()
  214. mdhtml = markdown.markdown(mdtxt, extensions=["extra"])
  215. mdhtml = css_style + mdhtml
  216. with open(htmlfile, 'w') as htf:
  217. htf.write(mdhtml)
def generate_report(config, result, rptfile, rpthtml, logdir, runapp=False):
    """Generate the markdown and html report for a bench session.

    Writes the markdown report to *rptfile*, converts it to html at *rpthtml*,
    and emits per-app passed/failed case lists as app_passed.txt and
    app_failed.txt next to *rptfile*.  Log links are made relative to *logdir*.

    :param config: merged configuration dict (expectations, build configs)
    :param result: bench result dict app -> cfgname -> case data
    :param rptfile: markdown report output path
    :param rpthtml: html report output path
    :param logdir: root log directory used to relativize log links
    :param runapp: True when this report covers a run (not only build) session
    :return: False on invalid inputs, otherwise None
    """
    if not(isinstance(config, dict) and isinstance(result, dict) and isinstance(rptfile, str)):
        return False
    report = analyze_report(config, result, runapp)
    rpt_passtxt = os.path.join(os.path.dirname(rptfile), "app_passed.txt")
    rpt_failtxt = os.path.join(os.path.dirname(rptfile), "app_failed.txt")
    # generate markdown file
    with open(rptfile, "w") as rf:
        # generate overall status
        rf.write("# Overall Status\n\n")
        passcnt = len(report["passed"])
        failcnt = len(report["failed"])
        totalcnt = passcnt + failcnt
        # NOTE(review): raises ZeroDivisionError when result is empty
        # (totalcnt == 0) — confirm callers never pass an empty result
        passrate = str(round(((passcnt / totalcnt) * 100), 2)) + "%"
        x = PrettyTable()
        x.set_style(MARKDOWN)
        x.field_names = ["Total", "Pass", "Fail", "PassRate"]
        x.add_row([totalcnt, passcnt, failcnt, passrate])
        rf.write(str(x))
        rf.write("\n\n")
        passed_appsstr = "* **Passed**: %s\n" % (",".join(report["passed"].keys()))
        failed_appsstr = "* **Failed**: %s\n" % (",".join(report["failed"].keys()))
        #rf.write(passed_appsstr)
        # only the failed app list is shown in the summary section
        if failcnt > 0:
            rf.write(failed_appsstr)
        rf.write("\n\n")
        # generate detailed status
        rf.write("# Tested Nuclei SDK Applications/Test Cases\n\n")
        if len(report["passed"]) > 0:
            rf.write("\n## Passed\n\n")
            x = PrettyTable()
            x.set_style(MARKDOWN)
            x.field_names = ["App/Test Case", "All as Expected", "Build As Expected", "Run As Expected", "Build Status", "Run Status"]
            for app in report["passed"]:
                app_sts = report["passed"][app]
                x.add_row([app, app_sts["expected"], app_sts["exp_build"], app_sts["exp_run"], \
                    app_sts["build"], app_sts["run"]])
            rf.write(str(x))
            rf.write("\n")
        if len(report["failed"]) > 0:
            rf.write("\n## Failed\n\n")
            x = PrettyTable()
            x.set_style(MARKDOWN)
            x.field_names = ["App/Test Case", "All as Expected", "Build As Expected", "Run As Expected", "Build Status", "Run Status"]
            for app in report["failed"]:
                app_sts = report["failed"][app]
                x.add_row([app, app_sts["expected"], app_sts["exp_build"], app_sts["exp_run"], \
                    app_sts["build"], app_sts["run"]])
            rf.write(str(x))
            rf.write("\n")
        # Build configurations
        rf.write("\n# Build configurations\n\n")
        x = PrettyTable()
        x.set_style(MARKDOWN)
        x.field_names = ["Case Name", "Make Options"]
        for cfgname in report["configs"]:
            # render each case's build config as KEY=VALUE make options
            make_options = " ".join([ "%s=%s"%(key, value) for key, value in report["configs"][cfgname].items() ])
            x.add_row([cfgname, make_options])
        rf.write(str(x))
        rf.write("\n")
        # Build and run status
        rf.write("\n# Build and run status\n\n")
        x = PrettyTable()
        x.set_style(MARKDOWN)
        x.field_names = ["App/Test Case", "Case Name", "Build Status", "Run Status", "Build Time", \
            "Run Time", "Type", "Value", "Total", "Text", "Data", "Bss"]
        apps_buildsts = result
        for app in apps_buildsts:
            app_sts = apps_buildsts[app]
            for cfgname in app_sts:
                size = app_sts[cfgname]["size"]
                apprsttype, apprstval = get_app_runresult(app_sts[cfgname].get("result", dict()))
                bsts_md, rsts_md, appbtm, apprtm = generate_build_run_status_md(app_sts[cfgname], logdir, True)
                x.add_row([app, cfgname, bsts_md, rsts_md, appbtm, apprtm, apprsttype, apprstval, \
                    size["total"], size["text"], size["data"], size["bss"]])
        rf.write(str(x))
        rf.write("\n")
        # Real expected pass or fail cases
        percase_status = report["percase"]
        rf.write("\n# Passed Cases(as expected) Per Applications\n\n")
        x = PrettyTable()
        x.set_style(MARKDOWN)
        x.field_names = ["App", "Passed Cases"]
        with open(rpt_passtxt, "w") as rpt_pf:
            for app in percase_status["passed"]:
                tmptxt = ", ".join(percase_status["passed"][app])
                if (len(tmptxt) > 0):
                    rpt_pf.write("- %s : %s\n" % (app, tmptxt))
                x.add_row([app, tmptxt])
        rf.write(str(x))
        rf.write("\n")
        rf.write("\n# Failed Cases(as expected) Per Applications\n\n")
        x = PrettyTable()
        x.set_style(MARKDOWN)
        x.field_names = ["App", "Failed Cases"]
        with open(rpt_failtxt, "w") as rpt_ff:
            for app in percase_status["failed"]:
                tmptxt = ", ".join(percase_status["failed"][app])
                if (len(tmptxt) > 0):
                    rpt_ff.write("- %s : %s\n" % (app, tmptxt))
                x.add_row([app, tmptxt])
        rf.write(str(x))
        rf.write("\n")
        # expected build or run failed cases
        x = PrettyTable()
        x.set_style(MARKDOWN)
        x.field_names = ["App/Test Case", "Case Name", "Expected Build", "Expected Run"]
        apps_buildsts = result
        with_expect = False
        for app in apps_buildsts:
            app_sts = apps_buildsts[app]
            for cfgname in app_sts:
                app_cfg_expected = get_expected(config, app, cfgname)
                expected_build = app_cfg_expected.get("build", True)
                expected_run = app_cfg_expected.get("run", True)
                if expected_build == False or expected_run == False:
                    with_expect = True
                    x.add_row([app, cfgname, expected_build, expected_run])
        # only emit this section when at least one expectation allows failure
        if with_expect:
            rf.write("\n# Expected Build or Run Failed Cases\n\n")
            rf.write(str(x))
            rf.write("\n")
    # generate html from markdown
    md2html(rptfile, rpthtml)
    pass
  343. # check whether the result json is generate by nsdk_bench.py
  344. def is_bench_result(result):
  345. if isinstance(result, dict) == False:
  346. return False
  347. check = True
  348. try:
  349. for app in result:
  350. for cfgname in result[app]:
  351. if "app" not in result[app][cfgname]:
  352. check = False
  353. break
  354. if check == False:
  355. break
  356. except:
  357. check = False
  358. return check
  359. def update_list_items(list1, list2):
  360. for i in range(0, len(list2)):
  361. if list2[i] not in list1:
  362. list1.append(list2[i])
  363. return list1
def merge_runconfig(all_mergedcfg, config, reldir):
    """Merge one split run's configuration into the accumulated merged config.

    Top-level entries are taken from the first config seen; build configs are
    renamed with the *reldir* prefix so split runs stay distinguishable;
    appdirs lists are unioned and per-app configs merged in.

    :param all_mergedcfg: accumulator dict, updated in place
    :param config: one run's merged configuration dict
    :param reldir: result directory path relative to the top log directory
    """
    # NOTE(review): direct indexing raises KeyError when config lacks
    # "run_config" or "build_target" — confirm inputs always provide them
    if "run_config" not in all_mergedcfg:
        all_mergedcfg["run_config"] = config["run_config"]
    if "build_target" not in all_mergedcfg:
        all_mergedcfg["build_target"] = config["build_target"]
    if "parallel" not in all_mergedcfg:
        all_mergedcfg["parallel"] = config.get("parallel", "-j")
    if "build_config" not in all_mergedcfg:
        all_mergedcfg["build_config"] = dict()
    if "build_configs" not in all_mergedcfg:
        all_mergedcfg["build_configs"] = dict()
    if "checks" not in all_mergedcfg:
        all_mergedcfg["checks"] = config.get("checks", dict())
    if "appdirs" not in all_mergedcfg:
        all_mergedcfg["appdirs"] = config.get("appdirs", [])
    if "appdirs_ignore" not in all_mergedcfg:
        all_mergedcfg["appdirs_ignore"] = config.get("appdirs_ignore", [])
    if "appconfig" not in all_mergedcfg:
        all_mergedcfg["appconfig"] = config.get("appconfig", dict())
    # TODO handle expecteds and expected
    def merge_buildcfgs(dstcfg, srccfg, rel):
        # Copy srccfg's build_configs into dstcfg, prefixing each case name
        # with the relative directory (posix-style separators).
        # NOTE(review): assumes srccfg carries "build_config" whenever it has
        # "build_configs" — confirm for per-app configs
        if "build_configs" in srccfg:
            for bcfg in srccfg["build_configs"]:
                new_bcfg = os.path.join(rel, bcfg).replace("\\", "/")
                dstcfg["build_configs"][new_bcfg] = merge_two_config(srccfg["build_config"], srccfg["build_configs"][bcfg])
    merge_buildcfgs(all_mergedcfg, config, reldir)
    all_mergedcfg["appdirs"] = update_list_items(all_mergedcfg.get("appdirs", []), config.get("appdirs", []))
    all_mergedcfg["appdirs_ignore"] = update_list_items(all_mergedcfg.get("appdirs_ignore", []), config.get("appdirs_ignore", []))
    appconfigs = config.get("appconfig", dict())
    for app in appconfigs:
        if app not in all_mergedcfg["appconfig"]:
            all_mergedcfg["appconfig"][app] = {"build_config": {}, "build_configs": {}, "checks": appconfigs[app].get("checks", dict())}
        merge_buildcfgs(all_mergedcfg["appconfig"][app], appconfigs[app], reldir)
    return
  398. def merge_split_config_and_result(logdir):
  399. mergedcfg_files = find_files(logdir, "**/mergedcfg.json", True)
  400. all_mergedcfg = dict()
  401. all_result = dict()
  402. print("Start to merge config and result json files in %s" % (logdir))
  403. for mergedcfg_file in mergedcfg_files:
  404. configfile = mergedcfg_file
  405. resultdir = os.path.dirname(mergedcfg_file)
  406. reldir = os.path.relpath(resultdir, logdir)
  407. resultfile = os.path.join(resultdir, "result.json")
  408. if os.path.isfile(resultfile) == True:
  409. _, config = load_json(configfile)
  410. _, result = load_json(resultfile)
  411. if is_bench_result(result):
  412. for app in result:
  413. for cfg in result[app]:
  414. app_cfg = os.path.join(reldir, cfg).replace("\\", "/")
  415. if app not in all_result:
  416. all_result[app] = dict()
  417. all_result[app][app_cfg] = result[app][cfg]
  418. merge_runconfig(all_mergedcfg, config, reldir)
  419. else:
  420. print("result json file %s is not generated by nsdk_bench.py" % (resultfile))
  421. return all_mergedcfg, all_result
  422. def merge_all_config_and_result(logdir):
  423. mergedcfg_files = find_files(logdir, "**/mergedcfg.json", True)
  424. all_mergedcfg = dict()
  425. all_result = dict()
  426. print("Start to merge split config and result json files in %s" % (logdir))
  427. for mergedcfg_file in mergedcfg_files:
  428. configfile = mergedcfg_file
  429. resultdir = os.path.dirname(mergedcfg_file)
  430. resultfile = os.path.join(resultdir, "result.json")
  431. if os.path.isfile(resultfile) == True:
  432. _, config = load_json(configfile)
  433. _, result = load_json(resultfile)
  434. if is_bench_result(result):
  435. print("Merging config json file %s, result json file %s" %(configfile, resultfile))
  436. all_mergedcfg = merge_two_config(all_mergedcfg, config)
  437. all_result = merge_two_config(all_result, result)
  438. else:
  439. print("result json file %s is not generated by nsdk_bench.py" % (resultfile))
  440. return all_mergedcfg, all_result
  441. def parse_result2dict(result):
  442. if not(isinstance(result, dict)):
  443. return None
  444. csvdict = dict()
  445. for app in result:
  446. appresult = result[app]
  447. for cfg in appresult:
  448. if cfg not in csvdict:
  449. csvdict[cfg] = dict()
  450. runsts = appresult[cfg]["status"].get("run", False)
  451. if runsts == False:
  452. continue
  453. apptype = appresult[cfg]["result"]["type"]
  454. appsubtype = appresult[cfg]["result"].get("subtype", "")
  455. if appsubtype == "":
  456. appsubtype = "default"
  457. if apptype == "unknown":
  458. continue
  459. if apptype not in csvdict[cfg]:
  460. csvdict[cfg][apptype] = dict()
  461. if appsubtype not in csvdict[cfg][apptype]:
  462. csvdict[cfg][apptype][appsubtype] = dict()
  463. csvdict[cfg][apptype][appsubtype]["meta"] = appresult[cfg].get("toolver", dict())
  464. if csvdict[cfg][apptype][appsubtype]["meta"] == None:
  465. csvdict[cfg][apptype][appsubtype]["meta"] = dict()
  466. csvdict[cfg][apptype][appsubtype]["meta"].update(appresult[cfg].get("flags", dict()))
  467. if "value" not in csvdict[cfg][apptype][appsubtype]:
  468. csvdict[cfg][apptype][appsubtype]["value"] = dict()
  469. csvdict[cfg][apptype][appsubtype]["value"].update(appresult[cfg]["result"]["value"])
  470. csvdict[cfg][apptype][appsubtype]["size"] = appresult[cfg]["size"]
  471. return csvdict
  472. def show_failed_apps(logdir):
  473. rpt_failtxt = os.path.join(logdir, "app_failed.txt")
  474. if os.path.isfile(rpt_failtxt) == False:
  475. return
  476. with open(rpt_failtxt, "r") as rpt_ff:
  477. failed_lines = rpt_ff.readlines()
  478. if len(failed_lines) > 0:
  479. print("Here are the failed applications list below")
  480. for line in failed_lines:
  481. print(line)
  482. return
  483. def save_report_files(logdir, config, result, run=False):
  484. if os.path.isdir(logdir) == False:
  485. os.makedirs(logdir)
  486. rptfile = os.path.join(logdir, "report.md")
  487. rpthtml = os.path.join(logdir, "report.html")
  488. generate_report(config, result, rptfile, rpthtml, logdir, run)
  489. csvfile = os.path.join(logdir, "result.csv")
  490. save_bench_csv(result, csvfile)
  491. print("Generate report csv file to %s" % (csvfile))
  492. print("Generate report markdown file to %s" % (rptfile))
  493. if run:
  494. csvdata = parse_result2dict(result)
  495. csvdatafile = os.path.join(logdir, "runresult.json")
  496. save_json(csvdatafile, csvdata)
  497. runresultexcel = os.path.join(logdir, "runresult.xlsx")
  498. save_runresult(csvdata, runresultexcel)
  499. # show failed apps
  500. show_failed_apps(logdir)
  501. pass
  502. def save_runresult(runresult, excelfile):
  503. if not(isinstance(runresult, dict)):
  504. return False
  505. # Get csv header for each header and column 1 & column 2
  506. csv_headers = dict()
  507. csv_col1 = dict()
  508. csv_col2 = dict()
  509. for cfg in runresult:
  510. splitcfgs = cfg.split('/')
  511. pretype = ""
  512. if len(splitcfgs) > 1:
  513. pretype = '-'.join(splitcfgs[:-1])
  514. runcfg = splitcfgs[-1]
  515. for apptype in runresult[cfg]:
  516. for subtype in runresult[cfg][apptype]:
  517. if pretype != "":
  518. final_apptype = pretype + "_" + apptype
  519. else:
  520. final_apptype = apptype
  521. if final_apptype not in csv_headers:
  522. csv_headers[final_apptype] = ["RUNCONFIG", "SUBTYPE"]
  523. csv_col1[final_apptype] = []
  524. csv_col2[final_apptype] = []
  525. # fill header and col1 / col2
  526. if runcfg not in csv_headers[final_apptype]:
  527. csv_headers[final_apptype].append(runcfg)
  528. rstvalues = runresult[cfg][apptype][subtype]["value"]
  529. for key in rstvalues:
  530. if key not in csv_col1[final_apptype]:
  531. csv_col1[final_apptype].append(key)
  532. csv_col2[final_apptype].append(subtype)
  533. # Fill the csvtable with -
  534. csvtable = dict()
  535. for cfg in csv_headers:
  536. csvtable[cfg] = [csv_headers[cfg]]
  537. for i in range(0, len(csv_col1[cfg])):
  538. rowlist = [csv_col1[cfg][i], csv_col2[cfg][i]]
  539. for j in range(0, len(csv_headers[cfg]) - 2):
  540. rowlist.append('-')
  541. csvtable[cfg].append(rowlist)
  542. # Fill the csvtable with real value if key found
  543. for cfg in runresult:
  544. splitcfgs = cfg.split('/')
  545. pretype = ""
  546. if len(splitcfgs) > 1:
  547. pretype = '-'.join(splitcfgs[:-1])
  548. runcfg = splitcfgs[-1]
  549. for apptype in runresult[cfg]:
  550. for subtype in runresult[cfg][apptype]:
  551. if pretype != "":
  552. final_apptype = pretype + "_" + apptype
  553. else:
  554. final_apptype = apptype
  555. rstvalues = runresult[cfg][apptype][subtype]["value"]
  556. header = csvtable[final_apptype][0]
  557. index = header.index(runcfg)
  558. for key in rstvalues:
  559. for i in range(0, len(csvtable[final_apptype])):
  560. if key == csvtable[final_apptype][i][0]:
  561. csvtable[final_apptype][i][index] = rstvalues[key]
  562. break
  563. # Fill csvdict using csvtable
  564. csvdict = dict()
  565. for cfg in csvtable:
  566. csvdict[cfg] = dict()
  567. for csvlist in csvtable[cfg]:
  568. csvdict[cfg][csvlist[0]] = csvlist[1:]
  569. # Save to excel
  570. try:
  571. csvtable_jf = excelfile + ".csvtable.json"
  572. csvdict_jf = excelfile + ".csvdict.json"
  573. save_json(csvtable_jf, csvtable)
  574. save_json(csvdict_jf, csvdict)
  575. pe.isave_book_as(bookdict=csvtable, dest_file_name=excelfile)
  576. print("Generate run result excel file to %s" % (excelfile))
  577. except:
  578. print("pyexcel package is not installed.!")
  579. return False
  580. return True
  581. def generate_report_for_logs(logdir, run=False, split=False):
  582. if logdir and os.path.isdir(logdir):
  583. if split == False:
  584. all_mergedcfg, all_result = merge_all_config_and_result(logdir)
  585. else:
  586. all_mergedcfg, all_result = merge_split_config_and_result(logdir)
  587. if all_mergedcfg and all_result:
  588. config_file = os.path.join(logdir, "allcfg.json")
  589. result_file = os.path.join(logdir, "allrst.json")
  590. print("Save all merged config file to %s" % (config_file))
  591. print("Save all result file to %s" % (result_file))
  592. save_json(config_file, all_mergedcfg)
  593. save_json(result_file, all_result)
  594. save_report_files(logdir, all_mergedcfg, all_result, run)
  595. else:
  596. print("Can't find any valid reports in %s generated by nsdk_bench.py" % (logdir))
  597. pass
if __name__ == '__main__':
    # Command-line entry point: merge bench logs under --logdir and emit the reports.
    parser = argparse.ArgumentParser(description="Nuclei SDK Bench report Generate Tools")
    parser.add_argument('--logdir', required=True, help="logs directory where saved the report json files")
    parser.add_argument('--split', action='store_true', help="Split for different configurations")
    parser.add_argument('--run', action='store_true', help="If specified, it means this is a runner report")
    args = parser.parse_args()
    # bail out before doing any work when the directory is missing
    if os.path.isdir(args.logdir) == False:
        print("The log directory doesn't exist, please check!")
        sys.exit(1)
    generate_report_for_logs(args.logdir, args.run, args.split)