nsdk_report.py

#!/usr/bin/env python3
import os
import sys
import time
import copy
import glob
import tempfile
import argparse

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
requirement_file = os.path.abspath(os.path.join(SCRIPT_DIR, "..", "requirements.txt"))

MARKDOWN_PLUGIN = True
try:
    import serial
    import json
    import markdown
    import pyexcel as pe
    from prettytable import *
except Exception as exc:
    MARKDOWN_PLUGIN = False
    print("Import Error: %s" % (exc))
    print("Please install required packages using: pip3 install -r %s" % (requirement_file))
    sys.exit(1)

from nsdk_utils import *

def get_expected_build(expcfg):
    if isinstance(expcfg, dict) == False:
        return False
    return expcfg.get("build", True)

def get_expected_run(expcfg, build=None):
    if isinstance(expcfg, dict) == False:
        return False
    if build is None:
        build = get_expected_build(expcfg)
    # if expected build is false, expected run should be false
    if build == False:
        return False
    return expcfg.get("run", True)

def get_expected(config, app, cfg_name):
    if isinstance(config, dict) == False:
        return None
    found_app_expected = find_local_appconfig(app, config.get("expected", dict()))
    found_app_expecteds = find_local_appconfig(app, config.get("expecteds", dict()))
    if found_app_expected:
        app_expected = copy.deepcopy(config.get("expected", dict()).get(found_app_expected))
    else:
        app_expected = dict()
    if found_app_expecteds:
        allcfgs_expected = config.get("expecteds", dict()).get(found_app_expecteds)
        # find the expecteds entry whose key is a prefix of cfg_name (startswith match)
        if allcfgs_expected is not None and len(allcfgs_expected) > 0:
            if cfg_name not in allcfgs_expected:
                app_cfgexpected = dict()
                for key in allcfgs_expected:
                    if cfg_name.startswith(key):
                        app_cfgexpected = allcfgs_expected[key]
                        break
            else:
                app_cfgexpected = allcfgs_expected.get(cfg_name, dict())
        else:
            app_cfgexpected = dict()
    else:
        app_cfgexpected = dict()
    app_expected = merge_two_config(app_expected, app_cfgexpected)
    return app_expected
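# A minimal sketch (assumed layout, not copied from any shipped configuration) of the
# "expected" / "expecteds" sections that get_expected() consumes: "expected" holds one
# expectation per application, while "expecteds" maps an application to per-case
# expectations keyed by case name, where a key may also be a prefix of cfg_name:
#
#   {
#       "expected":  {"baremetal/demo_timer": {"build": true, "run": false}},
#       "expecteds": {"baremetal/demo_timer": {"barebench/dhrystone": {"run": false}}}
#   }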

def check_expected(build_status, config, run=False):
    if isinstance(build_status, dict) == False or isinstance(config, dict) == False:
        return False
    ret = True
    for app in build_status:
        app_allstatus = build_status[app]
        for cfgname in app_allstatus:
            app_status = app_allstatus[cfgname]["status"]
            build_ret = app_status.get("build", False)
            run_ret = app_status.get("run", False)
            app_cfg_expected = get_expected(config, app, cfgname)
            if isinstance(app_cfg_expected, dict):
                expected_build_ret = get_expected_build(app_cfg_expected)
                expected_run_ret = get_expected_run(app_cfg_expected)
                if build_ret == False and expected_build_ret != build_ret:
                    ret = False
                if run:
                    if run_ret == False and expected_run_ret != run_ret:
                        ret = False
            else:
                if build_ret == False:
                    ret = False
                if run:
                    if run_ret == False:
                        ret = False
    return ret

def save_results(appcfg, hwcfg, mergedcfg, result, savedir):
    if not (isinstance(savedir, str) and os.path.isdir(savedir)):
        return
    if isinstance(appcfg, dict):
        sfn = os.path.join(savedir, "appcfg.json")
        save_json(sfn, appcfg)
    if isinstance(hwcfg, dict):
        sfn = os.path.join(savedir, "hwcfg.json")
        save_json(sfn, hwcfg)
    if isinstance(mergedcfg, dict):
        sfn = os.path.join(savedir, "mergedcfg.json")
        save_json(sfn, mergedcfg)
    if isinstance(result, dict):
        sfn = os.path.join(savedir, "result.json")
        save_json(sfn, result)
    pass

def analyze_report(config, result, runapp=False):
    apps_status = dict()
    passed_apps = dict()
    failed_apps = dict()
    build_cfgs = dict()
    glb_buildcfg = config.get("build_config", dict())
    # TODO: currently this feature only covers the case where the application
    # build_configs just extend the global build_configs
    # get build configs used per cfgname
    if "build_configs" not in config:
        build_cfgs["default"] = config.get("build_config", dict())
    else:
        sub_configs = config["build_configs"]
        for cfgname in sub_configs:
            bcfg = copy.deepcopy(glb_buildcfg)
            bcfg.update(sub_configs[cfgname])
            build_cfgs[cfgname] = bcfg
    if "appconfig" in config:
        appcfgs = config.get("appconfig", dict())
        for app in appcfgs:
            if "build_configs" in appcfgs[app]:
                appsub_configs = appcfgs[app]["build_configs"]
                for cfgname in appsub_configs:
                    bcfg = copy.deepcopy(glb_buildcfg)
                    bcfg.update(appsub_configs[cfgname])
                    build_cfgs[cfgname] = bcfg

    def check_app_status(status, expected, runapp=False):
        app_sts = {"expected": True, "exp_build": True, "exp_run": True, "build": True, "run": True}
        percase_sts = {"expected": True, "exp_build": True, "exp_run": True, "build": True, "run": True}
        app_percase_sts = dict()
        for cfgname in status:
            app_percase_sts[cfgname] = copy.deepcopy(percase_sts)
            app_cfg_expected = get_expected(config, app, cfgname)
            expected_build = get_expected_build(app_cfg_expected)
            expected_run = get_expected_run(app_cfg_expected)
            real_build = status[cfgname]["status"].get("build", False)
            real_run = status[cfgname]["status"].get("run", False)
            if real_build == False and expected_build != real_build:
                app_sts["exp_build"] = False
                app_percase_sts[cfgname]["exp_build"] = False
            if real_run == False and expected_run != real_run:
                app_sts["exp_run"] = False
                app_percase_sts[cfgname]["exp_run"] = False
            if real_build == False:
                app_sts["build"] = False
            if real_run == False:
                app_sts["run"] = False
            # get per case expected
            app_percase_sts[cfgname]["expected"] = app_sts["exp_build"]
            if runapp:
                app_percase_sts[cfgname]["expected"] = app_percase_sts[cfgname]["exp_build"] and app_percase_sts[cfgname]["exp_run"]
        if runapp:
            app_sts["expected"] = app_sts["exp_build"] and app_sts["exp_run"]
        else:
            app_sts["expected"] = app_sts["exp_build"]
        analyzed_app_status = {"summary": app_sts, "percase": app_percase_sts}
        return analyzed_app_status

    apps_expected = config.get("expected", dict())
    apps_percase_status = dict()
    apps_percase_failed = dict()
    apps_percase_passed = dict()
    # Get app status compared with expected
    for app in result:
        app_expected = apps_expected.get(app, dict())
        app_status = result[app]
        analyzed_app_status = check_app_status(app_status, app_expected, runapp)
        apps_status[app] = analyzed_app_status["summary"]
        apps_percase_status[app] = analyzed_app_status["percase"]
        apps_percase_failed[app] = list()
        apps_percase_passed[app] = list()
        # per case for 1 app
        for case in analyzed_app_status["percase"]:
            if analyzed_app_status["percase"][case]["expected"] == False:
                apps_percase_failed[app].append(case)
            else:
                apps_percase_passed[app].append(case)
        # per app
        if apps_status[app]["expected"] == True:
            passed_apps[app] = copy.deepcopy(apps_status[app])
        else:
            failed_apps[app] = copy.deepcopy(apps_status[app])
    # Create report_dict
    report_dict = {"passed": passed_apps, "failed": failed_apps, "status": apps_status, "configs": build_cfgs, \
        "percase": {"status": apps_percase_status, "failed": apps_percase_failed, "passed": apps_percase_passed}}
    return report_dict

def gen_mdtxt(key, value=None, bold=True):
    if bold:
        key = "**%s**" % (key)
    return "[%s](%s)" % (key, value) if value else key

def generate_build_run_status_md(appresult, logdir, casefail=True):
    if isinstance(appresult, dict) == False:
        if casefail:
            return "**False**", "**False**", "-", "-"
        else:
            return "False", "False", "-", "-"
    else:
        appblog = appresult["logs"].get("build", None)
        apprlog = appresult["logs"].get("run", None)
        appbsts = appresult["status"].get("build", False)
        apprsts = appresult["status"].get("run", False)
        appbtm = appresult["time"].get("build", "-")
        apprtm = appresult["time"].get("run", "-")
        def gen_sts_md(sts, log, bold=True):
            if log:
                log = os.path.relpath(log, logdir)
            return gen_mdtxt(sts, log, bold)
        bsts_md = gen_sts_md(appbsts, appblog, casefail)
        rsts_md = gen_sts_md(apprsts, apprlog, casefail)
        return bsts_md, rsts_md, appbtm, apprtm

def generate_build_cfgname_md(cfgname, appresult, logdir, casefail=False):
    if isinstance(appresult, dict) == False or appresult["logs"].get("build", None) is None:
        caselogdir = None
    else:
        appblog = appresult["logs"].get("build", None)
        caselogdir = os.path.dirname(os.path.relpath(appblog, logdir))
    return gen_mdtxt(cfgname, caselogdir, casefail)

def md2html(mdfile, htmlfile):
    if MARKDOWN_PLUGIN == False or os.path.isfile(mdfile) == False:
        return
    css_style = """
<style>
table, th, td {
    border: 1px solid #1132e791 !important;
    border-collapse: collapse;
    padding: 3px;
    text-align: center;
}
td:first-child {
    text-align: left;
}
</style>
"""
    with open(mdfile) as mdf:
        mdtxt = mdf.read()
        mdhtml = markdown.markdown(mdtxt, extensions=["extra"])
        mdhtml = css_style + mdhtml
        with open(htmlfile, 'w') as htf:
            htf.write(mdhtml)

def generate_report(config, result, rptfile, rpthtml, logdir, runapp=False):
    if not(isinstance(config, dict) and isinstance(result, dict) and isinstance(rptfile, str)):
        return None
    report = analyze_report(config, result, runapp)
    rpt_passtxt = os.path.join(os.path.dirname(rptfile), "app_passed.txt")
    rpt_failtxt = os.path.join(os.path.dirname(rptfile), "app_failed.txt")
    rpt_summary = dict()
    # generate markdown file
    with open(rptfile, "w") as rf:
        # generate overall status
        rf.write("# Overall Status\n\n")
        passcnt = len(report["passed"])
        failcnt = len(report["failed"])
        totalcnt = passcnt + failcnt
        # guard against an empty result set to avoid division by zero
        passrate = str(round(((passcnt / totalcnt) * 100), 2)) + "%" if totalcnt > 0 else "-"
        x = PrettyTable()
        x.set_style(MARKDOWN)
        x.field_names = ["Total", "Pass", "Fail", "PassRate"]
        x.add_row([totalcnt, passcnt, failcnt, passrate])
        rf.write(str(x))
        rf.write("\n\n")
        passed_appsstr = "* **Passed**: %s\n" % (",".join(report["passed"].keys()))
        failed_appsstr = "* **Failed**: %s\n" % (",".join(report["failed"].keys()))
        #rf.write(passed_appsstr)
        if failcnt > 0:
            rf.write(failed_appsstr)
        rf.write("\n\n")
        # generate detailed status
        rf.write("# Tested Nuclei SDK Applications/Test Cases\n\n")
        if len(report["passed"]) > 0:
            rf.write("\n## Passed\n\n")
            x = PrettyTable()
            x.set_style(MARKDOWN)
            x.field_names = ["App/Test Case", "All as Expected", "Build As Expected", "Run As Expected", "Build Status", "Run Status"]
            for app in report["passed"]:
                app_sts = report["passed"][app]
                x.add_row([app, app_sts["expected"], app_sts["exp_build"], app_sts["exp_run"], \
                    app_sts["build"], app_sts["run"]])
            rf.write(str(x))
            rf.write("\n")
        if len(report["failed"]) > 0:
            rf.write("\n## Failed\n\n")
            x = PrettyTable()
            x.set_style(MARKDOWN)
            x.field_names = ["App/Test Case", "All as Expected", "Build As Expected", "Run As Expected", "Build Status", "Run Status"]
            for app in report["failed"]:
                app_sts = report["failed"][app]
                x.add_row([app, app_sts["expected"], app_sts["exp_build"], app_sts["exp_run"], \
                    app_sts["build"], app_sts["run"]])
            rf.write(str(x))
            rf.write("\n")
        # Build configurations
        rf.write("\n# Build configurations\n\n")
        x = PrettyTable()
        x.set_style(MARKDOWN)
        x.field_names = ["Case Name", "Make Options"]
        for cfgname in report["configs"]:
            make_options = " ".join([ "%s=%s" % (key, value) for key, value in report["configs"][cfgname].items() ])
            x.add_row([cfgname, make_options])
        rf.write(str(x))
        rf.write("\n")
        # Build and run status
        rf.write("\n# Build and run status\n\n")
        x = PrettyTable()
        x.set_style(MARKDOWN)
        sts_title = ["App/Test Case", "Case Name", "Build Status", "Run Status", "Build Time", \
            "Run Time", "Type", "Value", "Total", "Text", "Data", "Bss"]
        x.field_names = sts_title
        apps_buildsts = result
        percase_status = report["percase"]
        # failed status
        failed_sts = []
        # summary of all app cases
        tot_cases_count = 0
        tot_cases_btm = []
        tot_cases_rtm = []
        tot_cases_sztot = []
        tot_cases_sztext = []
        tot_cases_szdata = []
        tot_cases_szbss = []
        for app in apps_buildsts:
            app_sts = apps_buildsts[app]
            for cfgname in app_sts:
                tot_cases_count += 1
                caseisfail = False
                if cfgname in percase_status["failed"][app]:
                    caseisfail = True
                size = app_sts[cfgname]["size"]
                apprsttype, apprstval = get_app_runresult(app_sts[cfgname].get("result", dict()))
                bsts_md, rsts_md, appbtm, apprtm = generate_build_run_status_md(app_sts[cfgname], logdir, caseisfail)
                cfgname_md = generate_build_cfgname_md(cfgname, app_sts[cfgname], logdir, caseisfail)
                sts_row = [app, cfgname_md, bsts_md, rsts_md, appbtm, apprtm, apprsttype, apprstval, \
                    size["total"], size["text"], size["data"], size["bss"]]
                x.add_row(sts_row)
                # add failed status into list
                if caseisfail:
                    failed_sts.append(sts_row)
                # record total cases build and run time
                appwithcfg = "%s:%s" % (app, cfgname)
                tot_cases_btm.append((str(appbtm), appwithcfg))
                tot_cases_rtm.append((str(apprtm), appwithcfg))
                tot_cases_sztot.append((str(size["total"]), appwithcfg))
                tot_cases_sztext.append((str(size["text"]), appwithcfg))
                tot_cases_szdata.append((str(size["data"]), appwithcfg))
                tot_cases_szbss.append((str(size["bss"]), appwithcfg))
        rf.write(str(x))
        rf.write("\n")
        # save report summary
        rpt_summary["count"] = tot_cases_count
        rpt_summary["buildtime"] = tot_cases_btm
        rpt_summary["runtime"] = tot_cases_rtm
        rpt_summary["sztotal"] = tot_cases_sztot
        rpt_summary["sztext"] = tot_cases_sztext
        rpt_summary["szdata"] = tot_cases_szdata
        rpt_summary["szbss"] = tot_cases_szbss
        # show only failed cases
        if len(failed_sts) > 0:
            rf.write("\n# Failed Cases Details\n\n")
            x = PrettyTable()
            x.set_style(MARKDOWN)
            x.field_names = sts_title
            for row in failed_sts:
                x.add_row(row)
            rf.write(str(x))
            rf.write("\n")
        # Real expected pass or fail cases
        percase_status = report["percase"]
        rf.write("\n# Passed Cases (as expected) Per Application\n\n")
        x = PrettyTable()
        x.set_style(MARKDOWN)
        x.field_names = ["App", "Passed Cases"]
        with open(rpt_passtxt, "w") as rpt_pf:
            for app in percase_status["passed"]:
                tmptxt = ", ".join(percase_status["passed"][app])
                if (len(tmptxt) > 0):
                    rpt_pf.write("- %s : %s\n" % (app, tmptxt))
                x.add_row([app, tmptxt])
        rf.write(str(x))
        rf.write("\n")
        rf.write("\n# Failed Cases (as expected) Per Application\n\n")
        x = PrettyTable()
        x.set_style(MARKDOWN)
        x.field_names = ["App", "Failed Cases"]
        with open(rpt_failtxt, "w") as rpt_ff:
            for app in percase_status["failed"]:
                tmptxt = ", ".join(percase_status["failed"][app])
                if (len(tmptxt) > 0):
                    rpt_ff.write("- %s : %s\n" % (app, tmptxt))
                x.add_row([app, tmptxt])
        rf.write(str(x))
        rf.write("\n")
        # expected build or run failed cases
        x = PrettyTable()
        x.set_style(MARKDOWN)
        x.field_names = ["App/Test Case", "Case Name", "Expected Build", "Expected Run"]
        apps_buildsts = result
        with_expect = False
        for app in apps_buildsts:
            app_sts = apps_buildsts[app]
            for cfgname in app_sts:
                app_cfg_expected = get_expected(config, app, cfgname)
                expected_build = get_expected_build(app_cfg_expected)
                expected_run = get_expected_run(app_cfg_expected)
                if expected_build == False or expected_run == False:
                    with_expect = True
                    x.add_row([app, cfgname, expected_build, expected_run])
        if with_expect:
            rf.write("\n# Expected Build or Run Failed Cases\n\n")
            rf.write(str(x))
            rf.write("\n")
    # generate html from markdown
    md2html(rptfile, rpthtml)
    return rpt_summary

# check whether the result json is generated by nsdk_bench.py
def is_bench_result(result):
    if isinstance(result, dict) == False:
        return False
    check = True
    try:
        for app in result:
            for cfgname in result[app]:
                if "app" not in result[app][cfgname]:
                    check = False
                    break
            if check == False:
                break
    except:
        check = False
    return check
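# For reference, a sketch of the per-application result.json entries this check and the
# report generation above assume (key names come from the code; values are illustrative):
#
#   {
#       "demo_timer": {
#           "somecase": {
#               "app": "application/baremetal/demo_timer",
#               "status": {"build": true, "run": true},
#               "logs": {"build": "demo_timer/somecase/build.log", "run": "demo_timer/somecase/run.log"},
#               "time": {"build": 1.2, "run": 3.4},
#               "size": {"total": 10000, "text": 8000, "data": 1000, "bss": 1000},
#               "result": {"type": "unknown", "subtype": "", "value": {}}
#           }
#       }
#   }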

def update_list_items(list1, list2):
    for i in range(0, len(list2)):
        if list2[i] not in list1:
            list1.append(list2[i])
    return list1

def merge_runconfig(all_mergedcfg, config, reldir):
    if "run_config" not in all_mergedcfg:
        all_mergedcfg["run_config"] = config["run_config"]
    if "build_target" not in all_mergedcfg:
        all_mergedcfg["build_target"] = config["build_target"]
    if "parallel" not in all_mergedcfg:
        all_mergedcfg["parallel"] = config.get("parallel", "-j")
    if "build_config" not in all_mergedcfg:
        all_mergedcfg["build_config"] = dict()
    if "build_configs" not in all_mergedcfg:
        all_mergedcfg["build_configs"] = dict()
    if "expecteds" not in all_mergedcfg:
        all_mergedcfg["expecteds"] = dict()
    if "checks" not in all_mergedcfg:
        all_mergedcfg["checks"] = config.get("checks", dict())
    if "appdirs" not in all_mergedcfg:
        all_mergedcfg["appdirs"] = config.get("appdirs", [])
    if "appdirs_ignore" not in all_mergedcfg:
        all_mergedcfg["appdirs_ignore"] = config.get("appdirs_ignore", [])
    if "appconfig" not in all_mergedcfg:
        all_mergedcfg["appconfig"] = config.get("appconfig", dict())
    # TODO handle expecteds and expected
    if "expected" in config:
        for app in config["expected"]:
            if app not in all_mergedcfg["expecteds"]:
                all_mergedcfg["expecteds"][app] = dict()
            newcfgname = reldir.replace("\\", "/")
            all_mergedcfg["expecteds"][app][newcfgname] = config["expected"][app]
    if "expecteds" in config:
        for app in config["expecteds"]:
            if app not in all_mergedcfg["expecteds"]:
                all_mergedcfg["expecteds"][app] = dict()
            for cfgname in config["expecteds"][app]:
                newcfgname = os.path.join(reldir, cfgname).replace("\\", "/")
                all_mergedcfg["expecteds"][app][newcfgname] = config["expecteds"][app][cfgname]

    def merge_buildcfgs(dstcfg, srccfg, rel):
        if "build_configs" in srccfg:
            for bcfg in srccfg["build_configs"]:
                new_bcfg = os.path.join(rel, bcfg).replace("\\", "/")
                dstcfg["build_configs"][new_bcfg] = merge_two_config(srccfg.get("build_config", None), srccfg["build_configs"][bcfg])

    merge_buildcfgs(all_mergedcfg, config, reldir)
    all_mergedcfg["appdirs"] = update_list_items(all_mergedcfg.get("appdirs", []), config.get("appdirs", []))
    all_mergedcfg["appdirs_ignore"] = update_list_items(all_mergedcfg.get("appdirs_ignore", []), config.get("appdirs_ignore", []))
    appconfigs = config.get("appconfig", dict())
    for app in appconfigs:
        if app not in all_mergedcfg["appconfig"]:
            all_mergedcfg["appconfig"][app] = {"build_config": {}, "build_configs": {}, "checks": appconfigs[app].get("checks", dict())}
        merge_buildcfgs(all_mergedcfg["appconfig"][app], appconfigs[app], reldir)
    return

def merge_split_config_and_result(logdir):
    mergedcfg_files = find_files(logdir, "**/mergedcfg.json", True)
    all_mergedcfg = dict()
    all_result = dict()
    print("Start to merge split config and result json files in %s" % (logdir))
    for mergedcfg_file in mergedcfg_files:
        configfile = mergedcfg_file
        resultdir = os.path.dirname(mergedcfg_file)
        reldir = os.path.relpath(resultdir, logdir)
        resultfile = os.path.join(resultdir, "result.json")
        if os.path.isfile(resultfile) == True:
            _, config = load_json(configfile)
            _, result = load_json(resultfile)
            if is_bench_result(result):
                for app in result:
                    for cfg in result[app]:
                        app_cfg = os.path.join(reldir, cfg).replace("\\", "/")
                        if app not in all_result:
                            all_result[app] = dict()
                        all_result[app][app_cfg] = result[app][cfg]
                merge_runconfig(all_mergedcfg, config, reldir)
            else:
                print("result json file %s is not generated by nsdk_bench.py" % (resultfile))
    return all_mergedcfg, all_result

def merge_all_config_and_result(logdir):
    mergedcfg_files = find_files(logdir, "**/mergedcfg.json", True)
    all_mergedcfg = dict()
    all_result = dict()
    print("Start to merge config and result json files in %s" % (logdir))
    for mergedcfg_file in mergedcfg_files:
        configfile = mergedcfg_file
        resultdir = os.path.dirname(mergedcfg_file)
        resultfile = os.path.join(resultdir, "result.json")
        if os.path.isfile(resultfile) == True:
            _, config = load_json(configfile)
            _, result = load_json(resultfile)
            if is_bench_result(result):
                print("Merging config json file %s, result json file %s" % (configfile, resultfile))
                all_mergedcfg = merge_two_config(all_mergedcfg, config)
                all_result = merge_two_config(all_result, result)
            else:
                print("result json file %s is not generated by nsdk_bench.py" % (resultfile))
    return all_mergedcfg, all_result

def parse_result2dict(result):
    if not(isinstance(result, dict)):
        return None
    csvdict = dict()
    for app in result:
        appresult = result[app]
        for cfg in appresult:
            if cfg not in csvdict:
                csvdict[cfg] = dict()
            runsts = appresult[cfg]["status"].get("run", False)
            if runsts == False:
                continue
            apptype = appresult[cfg]["result"]["type"]
            appsubtype = appresult[cfg]["result"].get("subtype", "")
            if appsubtype == "":
                appsubtype = "default"
            if apptype == "unknown":
                continue
            if apptype not in csvdict[cfg]:
                csvdict[cfg][apptype] = dict()
            if appsubtype not in csvdict[cfg][apptype]:
                csvdict[cfg][apptype][appsubtype] = dict()
            csvdict[cfg][apptype][appsubtype]["meta"] = appresult[cfg].get("toolver", dict())
            if csvdict[cfg][apptype][appsubtype]["meta"] == None:
                csvdict[cfg][apptype][appsubtype]["meta"] = dict()
            csvdict[cfg][apptype][appsubtype]["meta"].update(appresult[cfg].get("flags", dict()))
            # record ci information
            ciinfo = get_ci_info()
            if ciinfo:
                csvdict[cfg][apptype][appsubtype]["meta"]["ci"] = ciinfo
            if "value" not in csvdict[cfg][apptype][appsubtype]:
                csvdict[cfg][apptype][appsubtype]["value"] = dict()
            csvdict[cfg][apptype][appsubtype]["value"].update(appresult[cfg]["result"]["value"])
            csvdict[cfg][apptype][appsubtype]["size"] = appresult[cfg]["size"]
    return csvdict
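# parse_result2dict() returns a nested dict shaped as
#   csvdict[case_name][result_type][result_subtype] = {"meta": {...}, "value": {...}, "size": {...}}
# which save_runresult() below flattens into one worksheet per result type.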

def show_failed_apps(logdir):
    rpt_failtxt = os.path.join(logdir, "app_failed.txt")
    if os.path.isfile(rpt_failtxt) == False:
        return
    with open(rpt_failtxt, "r") as rpt_ff:
        failed_lines = rpt_ff.readlines()
        if len(failed_lines) > 0:
            print("Here is the list of failed applications:")
            for line in failed_lines:
                print(line)
    return

def show_report_summary(summary, sfl):
    if not(isinstance(summary, dict)):
        return
    if len(summary) == 0:
        return
    def decsort(key):
        try:
            return float(key[0])
        except:
            return 0
    summary["buildtime"].sort(reverse=True, key=decsort)
    summary["runtime"].sort(reverse=True, key=decsort)
    summary["sztotal"].sort(reverse=True, key=decsort)
    summary["sztext"].sort(reverse=True, key=decsort)
    summary["szdata"].sort(reverse=True, key=decsort)
    summary["szbss"].sort(reverse=True, key=decsort)
    mostnum = min(10, summary["count"])
    with open(sfl, "w") as sflh:
        sflh.write("%d cases were executed in total\r\n" % (summary["count"]))
        sflh.write("The %d most build-time-consuming cases are %s\r\n" % (mostnum, summary["buildtime"][:mostnum]))
        sflh.write("The %d most run-time-consuming cases are %s\r\n" % (mostnum, summary["runtime"][:mostnum]))
        sflh.write("The %d largest cases by program total size are %s\r\n" % (mostnum, summary["sztotal"][:mostnum]))
        sflh.write("The %d largest cases by text section size are %s\r\n" % (mostnum, summary["sztext"][:mostnum]))
        sflh.write("The %d largest cases by data section size are %s\r\n" % (mostnum, summary["szdata"][:mostnum]))
        sflh.write("The %d largest cases by bss section size are %s\r\n" % (mostnum, summary["szbss"][:mostnum]))
    print("\r\n=====Here is the report summary:=====")
    with open(sfl, "r") as sflh:
        for line in sflh.readlines():
            print(line)
    pass

def save_report_files(logdir, config, result, run=False):
    if os.path.isdir(logdir) == False:
        os.makedirs(logdir)
    rptfile = os.path.join(logdir, "report.md")
    rpthtml = os.path.join(logdir, "report.html")
    rptsumfile = os.path.join(logdir, "summary.txt")
    rptsum = generate_report(config, result, rptfile, rpthtml, logdir, run)
    csvfile = os.path.join(logdir, "result.csv")
    save_bench_csv(result, csvfile)
    print("Generated report csv file to %s" % (csvfile))
    print("Generated report markdown file to %s" % (rptfile))
    if run:
        csvdata = parse_result2dict(result)
        csvdatafile = os.path.join(logdir, "runresult.json")
        save_json(csvdatafile, csvdata)
        runresultexcel = os.path.join(logdir, "runresult.xlsx")
        save_runresult(csvdata, runresultexcel)
    show_report_summary(rptsum, rptsumfile)
    # show failed apps
    show_failed_apps(logdir)
    pass

# save excel and csv for the selected csv table
def save_excel(csvtable, excelfile):
    if len(csvtable) == 0:
        print("No run result found, no need to generate excel!")
        return
    newcsvtable = {}
    csvsummary = []
    keylens = [ len(key) for key in csvtable.keys() ]
    keylens.sort()
    maxlen = keylens[-1]
    for cfg in csvtable:
        if len(csvtable[cfg]) > 1:
            csvsummary.append([cfg])
            csvsummary.extend(csvtable[cfg])
        # Excel sheet names are limited to 31 characters, so drop the first '-' segment of long config names
        if maxlen >= 31:
            cfglist = cfg.split('-')
            newcfg = "-".join(cfglist[1:])
            newcsvtable[newcfg] = csvtable[cfg]
        else:
            newcsvtable[cfg] = csvtable[cfg]
    newcsvtable["summary"] = csvsummary
    pe.isave_book_as(bookdict=newcsvtable, dest_file_name=excelfile)
    print("Generated run result excel file to %s" % (excelfile))
    csvfile = excelfile + ".csv"
    with open(csvfile, "w") as cf:
        for row in csvsummary:
            cf.write("%s\n" % (",".join(str(e) for e in row)))
    print("Generated run result csv file to %s" % (csvfile))
    pass

def save_runresult(runresult, excelfile):
    if not(isinstance(runresult, dict)):
        return False
    # Get csv header for each result type and column 1 & column 2
    csv_headers = dict()
    csv_col1 = dict()
    csv_col2 = dict()
    for cfg in runresult:
        splitcfgs = cfg.split('/')
        pretype = ""
        if len(splitcfgs) > 1:
            pretype = '-'.join(splitcfgs[:-1])
        runcfg = splitcfgs[-1]
        for apptype in runresult[cfg]:
            for subtype in runresult[cfg][apptype]:
                if pretype != "":
                    final_apptype = pretype + "_" + apptype
                else:
                    final_apptype = apptype
                if final_apptype not in csv_headers:
                    csv_headers[final_apptype] = ["RUNCONFIG", "SUBTYPE"]
                    csv_col1[final_apptype] = []
                    csv_col2[final_apptype] = []
                # fill header and col1 / col2
                if runcfg not in csv_headers[final_apptype]:
                    csv_headers[final_apptype].append(runcfg)
                rstvalues = runresult[cfg][apptype][subtype]["value"]
                for key in rstvalues:
                    if key not in csv_col1[final_apptype]:
                        csv_col1[final_apptype].append(key)
                        csv_col2[final_apptype].append(subtype)
    # Fill the csvtable with -
    csvtable = dict()
    for cfg in csv_headers:
        csvtable[cfg] = [csv_headers[cfg]]
        for i in range(0, len(csv_col1[cfg])):
            rowlist = [csv_col1[cfg][i], csv_col2[cfg][i]]
            for j in range(0, len(csv_headers[cfg]) - 2):
                rowlist.append('-')
            csvtable[cfg].append(rowlist)
    # Fill the csvtable with the real value if the key is found
    for cfg in runresult:
        splitcfgs = cfg.split('/')
        pretype = ""
        if len(splitcfgs) > 1:
            pretype = '-'.join(splitcfgs[:-1])
        runcfg = splitcfgs[-1]
        for apptype in runresult[cfg]:
            for subtype in runresult[cfg][apptype]:
                if pretype != "":
                    final_apptype = pretype + "_" + apptype
                else:
                    final_apptype = apptype
                rstvalues = runresult[cfg][apptype][subtype]["value"]
                header = csvtable[final_apptype][0]
                index = header.index(runcfg)
                for key in rstvalues:
                    for i in range(0, len(csvtable[final_apptype])):
                        if key == csvtable[final_apptype][i][0]:
                            csvtable[final_apptype][i][index] = rstvalues[key]
                            break
    # Fill csvdict using csvtable
    csvdict = dict()
    for cfg in csvtable:
        csvdict[cfg] = dict()
        for csvlist in csvtable[cfg]:
            csvdict[cfg][csvlist[0]] = csvlist[1:]
    # Save to excel
    try:
        csvtable_jf = excelfile + ".csvtable.json"
        csvdict_jf = excelfile + ".csvdict.json"
        save_json(csvtable_jf, csvtable)
        save_json(csvdict_jf, csvdict)
        save_excel(csvtable, excelfile)
    except Exception as exc:
        print("Exception happened while saving excel, error %s" % (exc))
        return False
    return True

def generate_report_for_logs(logdir, run=False, split=False):
    if logdir and os.path.isdir(logdir):
        if split == False:
            all_mergedcfg, all_result = merge_all_config_and_result(logdir)
        else:
            all_mergedcfg, all_result = merge_split_config_and_result(logdir)
        if all_mergedcfg and all_result:
            config_file = os.path.join(logdir, "allcfg.json")
            result_file = os.path.join(logdir, "allrst.json")
            print("Save all merged config file to %s" % (config_file))
            print("Save all result file to %s" % (result_file))
            save_json(config_file, all_mergedcfg)
            save_json(result_file, all_result)
            save_report_files(logdir, all_mergedcfg, all_result, run)
        else:
            print("Can't find any valid reports in %s generated by nsdk_bench.py" % (logdir))
    pass

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Nuclei SDK Bench Report Generation Tool")
    parser.add_argument('--logdir', required=True, help="logs directory where the report json files are saved")
    parser.add_argument('--split', action='store_true', help="Split for different configurations")
    parser.add_argument('--run', action='store_true', help="If specified, it means this is a runner report")
    args = parser.parse_args()

    if os.path.isdir(args.logdir) == False:
        print("The log directory doesn't exist, please check!")
        sys.exit(1)
    generate_report_for_logs(args.logdir, args.run, args.split)
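
# Example invocations (the log directory paths below are illustrative, not part of the script):
#   python3 nsdk_report.py --logdir logs/ci_build           # build/bench report only
#   python3 nsdk_report.py --logdir logs/ci_run --run       # also aggregate run results into runresult.json/xlsx
#   python3 nsdk_report.py --logdir logs/ci_split --split   # merge per-configuration split results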