nsdk_report.py

#!/usr/bin/env python3

import os
import sys
import time
import copy
import glob
import tempfile
import argparse

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
requirement_file = os.path.abspath(os.path.join(SCRIPT_DIR, "..", "requirements.txt"))

MARKDOWN_PLUGIN = True
try:
    import serial
    import json
    import markdown
    import pyexcel as pe
    from prettytable import *
except Exception as exc:
    MARKDOWN_PLUGIN = False
    print("Import Error: %s" % (exc))
    print("Please install required packages using: pip3 install -r %s" % (requirement_file))
    sys.exit(1)

from nsdk_utils import *

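# Helpers for reading the "expected" / "expecteds" sections of a bench configuration.
# Illustrative shape only (key names taken from the code below, values are hypothetical):
#   "expected":  { "<app>": { "build": true, "run": false } }
#   "expecteds": { "<app>": { "<cfgname>": { "build": true, "run": false } } }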
def get_expected_build(expcfg):
    if isinstance(expcfg, dict) == False:
        return False
    return expcfg.get("build", True)

def get_expected_run(expcfg, build=None):
    if isinstance(expcfg, dict) == False:
        return False
    if build is None:
        build = get_expected_build(expcfg)
    # if expected build is false, expected run should be false
    if build == False:
        return False
    return expcfg.get("run", True)

def get_expected(config, app, cfg_name):
    if isinstance(config, dict) == False:
        return None
    found_app_expected = find_local_appconfig(app, config.get("expected", dict()))
    found_app_expecteds = find_local_appconfig(app, config.get("expecteds", dict()))
    if found_app_expected:
        app_expected = copy.deepcopy(config.get("expected", dict()).get(found_app_expected))
    else:
        app_expected = dict()
    if found_app_expecteds:
        allcfgs_expected = config.get("expecteds", dict()).get(found_app_expecteds)
        # find the expecteds config whose key cfg_name starts with
        if allcfgs_expected is not None and len(allcfgs_expected) > 0:
            if cfg_name not in allcfgs_expected:
                app_cfgexpected = dict()
                for key in allcfgs_expected:
                    if cfg_name.startswith(key):
                        app_cfgexpected = allcfgs_expected[key]
                        break
            else:
                app_cfgexpected = allcfgs_expected.get(cfg_name, dict())
        else:
            app_cfgexpected = dict()
    else:
        app_cfgexpected = dict()
    if found_app_expected and found_app_expecteds and \
        app == found_app_expected and app == found_app_expecteds:
        app_expected = merge_two_config(app_expected, app_cfgexpected)
    elif found_app_expected and app == found_app_expected and len(app_expected) > 0:
        app_expected = app_expected
    elif found_app_expecteds and app == found_app_expecteds and len(app_cfgexpected) > 0:
        app_expected = app_cfgexpected
    else:
        tmp_app_expected = merge_two_config(app_expected, app_cfgexpected)
        if len(app_cfgexpected) and len(app_expected):
            if len(found_app_expecteds) > len(found_app_expected):
                tmp_app_expected = app_cfgexpected
            elif len(found_app_expecteds) < len(found_app_expected):
                tmp_app_expected = app_expected
        app_expected = tmp_app_expected
    return app_expected

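# Compare the actual build/run status of every app case against its expected
# configuration; return True only if no unexpected failure is found (run status
# is only checked when run=True).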
def check_expected(build_status, config, run=False):
    if isinstance(build_status, dict) == False or isinstance(config, dict) == False:
        return False
    ret = True
    for app in build_status:
        app_allstatus = build_status[app]
        for cfgname in app_allstatus:
            app_status = app_allstatus[cfgname]["status"]
            build_ret = app_status.get("build", False)
            run_ret = app_status.get("run", False)
            app_cfg_expected = get_expected(config, app, cfgname)
            if isinstance(app_cfg_expected, dict):
                expected_build_ret = get_expected_build(app_cfg_expected)
                expected_run_ret = get_expected_run(app_cfg_expected)
                if build_ret == False and expected_build_ret != build_ret:
                    ret = False
                if run:
                    if run_ret == False and expected_run_ret != run_ret:
                        ret = False
            else:
                if build_ret == False:
                    ret = False
                if run:
                    if run_ret == False:
                        ret = False
    return ret

def save_results(appcfg, hwcfg, mergedcfg, result, savedir):
    if not (isinstance(savedir, str) and os.path.isdir(savedir)):
        return
    if isinstance(appcfg, dict):
        sfn = os.path.join(savedir, "appcfg.json")
        save_json(sfn, appcfg)
    if isinstance(hwcfg, dict):
        sfn = os.path.join(savedir, "hwcfg.json")
        save_json(sfn, hwcfg)
    if isinstance(mergedcfg, dict):
        sfn = os.path.join(savedir, "mergedcfg.json")
        save_json(sfn, mergedcfg)
    if isinstance(result, dict):
        sfn = os.path.join(savedir, "result.json")
        save_json(sfn, result)
    pass

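# Analyze the raw result dict against the configuration: collect the build
# configurations in use and classify applications and their cases into
# passed/failed compared with the expected status.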
def analyze_report(config, result, runapp=False):
    apps_status = dict()
    passed_apps = dict()
    failed_apps = dict()
    build_cfgs = dict()
    glb_buildcfg = config.get("build_config", dict())
    # TODO currently this feature only covers cases where the application build_configs
    # just extend the global build_configs
    # get build configs used per cfgname
    if "build_configs" not in config:
        build_cfgs["default"] = config.get("build_config", dict())
    else:
        sub_configs = config["build_configs"]
        for cfgname in sub_configs:
            bcfg = copy.deepcopy(glb_buildcfg)
            bcfg.update(sub_configs[cfgname])
            build_cfgs[cfgname] = bcfg
    if "appconfig" in config:
        appcfgs = config.get("appconfig", dict())
        for app in appcfgs:
            if "build_configs" in appcfgs[app]:
                appsub_configs = appcfgs[app]["build_configs"]
                for cfgname in appsub_configs:
                    bcfg = copy.deepcopy(glb_buildcfg)
                    bcfg.update(appsub_configs[cfgname])
                    build_cfgs[cfgname] = bcfg

    def check_app_status(status, expected, runapp=False):
        app_sts = {"expected": True, "exp_build": True, "exp_run": True, "build": True, "run": True}
        percase_sts = {"expected": True, "exp_build": True, "exp_run": True, "build": True, "run": True}
        app_percase_sts = dict()
        for cfgname in status:
            app_percase_sts[cfgname] = copy.deepcopy(percase_sts)
            app_cfg_expected = get_expected(config, app, cfgname)
            expected_build = get_expected_build(app_cfg_expected)
            expected_run = get_expected_run(app_cfg_expected)
            real_build = status[cfgname]["status"].get("build", False)
            real_run = status[cfgname]["status"].get("run", False)
            if real_build == False and expected_build != real_build:
                app_sts["exp_build"] = False
                app_percase_sts[cfgname]["exp_build"] = False
            if real_run == False and expected_run != real_run:
                app_sts["exp_run"] = False
                app_percase_sts[cfgname]["exp_run"] = False
            if real_build == False:
                app_sts["build"] = False
            if real_run == False:
                app_sts["run"] = False
            # get per case expected
            app_percase_sts[cfgname]["expected"] = app_sts["exp_build"]
            if runapp:
                app_percase_sts[cfgname]["expected"] = app_percase_sts[cfgname]["exp_build"] and app_percase_sts[cfgname]["exp_run"]
        if runapp:
            app_sts["expected"] = app_sts["exp_build"] and app_sts["exp_run"]
        else:
            app_sts["expected"] = app_sts["exp_build"]
        analayzed_app_status = {"summary": app_sts, "percase": app_percase_sts}
        return analayzed_app_status

    apps_expected = config.get("expected", dict())
    apps_percase_status = dict()
    apps_percase_failed = dict()
    apps_percase_passed = dict()
    # Get app status compared with expected
    for app in result:
        app_expected = apps_expected.get(app, dict())
        app_status = result[app]
        analayzed_app_status = check_app_status(app_status, app_expected, runapp)
        apps_status[app] = analayzed_app_status["summary"]
        apps_percase_status[app] = analayzed_app_status["percase"]
        apps_percase_failed[app] = list()
        apps_percase_passed[app] = list()
        # per case for 1 app
        for case in analayzed_app_status["percase"]:
            if analayzed_app_status["percase"][case]["expected"] == False:
                apps_percase_failed[app].append(case)
            else:
                apps_percase_passed[app].append(case)
        # per app
        if apps_status[app]["expected"] == True:
            passed_apps[app] = copy.deepcopy(apps_status[app])
        else:
            failed_apps[app] = copy.deepcopy(apps_status[app])
    # Create report_dict
    report_dict = {"passed": passed_apps, "failed": failed_apps, "status": apps_status, "configs": build_cfgs, \
        "percase": {"status": apps_percase_status, "failed": apps_percase_failed, "passed": apps_percase_passed} }
    return report_dict

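# Markdown helpers: render a table cell, optionally bold and linked to the
# corresponding log file (relative to the report directory).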
def gen_mdtxt(key, value=None, bold=True):
    if bold:
        key = "**%s**" % (key)
    return "[%s](%s)" % (key, value) if value else key

def generate_build_run_status_md(appresult, logdir, casefail=True):
    if isinstance(appresult, dict) == False:
        if casefail:
            return "**False**", "**False**", "-", "-"
        else:
            return "False", "False", "-", "-"
    else:
        appblog = appresult["logs"].get("build", None)
        apprlog = appresult["logs"].get("run", None)
        appbsts = appresult["status"].get("build", False)
        apprsts = appresult["status"].get("run", False)
        appbtm = appresult["time"].get("build", "-")
        apprtm = appresult["time"].get("run", "-")
        def gen_sts_md(sts, log, bold=True):
            if log:
                log = os.path.relpath(log, logdir)
            return gen_mdtxt(sts, log, bold)
        bsts_md = gen_sts_md(appbsts, appblog, casefail)
        rsts_md = gen_sts_md(apprsts, apprlog, casefail)
        return bsts_md, rsts_md, appbtm, apprtm

def generate_build_cfgname_md(cfgname, appresult, logdir, casefail=False):
    if isinstance(appresult, dict) == False or appresult["logs"].get("build", None) is None:
        caselogdir = None
    else:
        appblog = appresult["logs"].get("build", None)
        caselogdir = os.path.dirname(os.path.relpath(appblog, logdir))
    return gen_mdtxt(cfgname, caselogdir, casefail)

def md2html(mdfile, htmlfile):
    if MARKDOWN_PLUGIN == False or os.path.isfile(mdfile) == False:
        return
    css_style = \
"""
<style>
table, th, td {
    border: 1px solid #1132e791 !important;
    border-collapse: collapse;
    padding: 3px;
    text-align: center;
}
td:first-child {
    text-align: left;
}
</style>
"""
    with open(mdfile) as mdf:
        mdtxt = mdf.read()
        mdhtml = markdown.markdown(mdtxt, extensions=["extra"])
        mdhtml = css_style + mdhtml
        with open(htmlfile, 'w') as htf:
            htf.write(mdhtml)

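# Generate the markdown report (and an HTML version via md2html) together with
# app_passed.txt / app_failed.txt next to it; returns a summary dict consumed
# by show_report_summary.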
def generate_report(config, result, rptfile, rpthtml, logdir, runapp=False):
    if not(isinstance(config, dict) and isinstance(result, dict) and isinstance(rptfile, str)):
        return None
    report = analyze_report(config, result, runapp)
    rpt_passtxt = os.path.join(os.path.dirname(rptfile), "app_passed.txt")
    rpt_failtxt = os.path.join(os.path.dirname(rptfile), "app_failed.txt")
    rpt_summary = dict()
    # generate markdown file
    with open(rptfile, "w") as rf:
        # generate overall status
        rf.write("# Overall Status\n\n")
        passcnt = len(report["passed"])
        failcnt = len(report["failed"])
        totalcnt = passcnt + failcnt
        passrate = str(round(((passcnt / totalcnt) * 100), 2)) + "%"
        x = PrettyTable()
        x.set_style(MARKDOWN)
        x.field_names = ["Total", "Pass", "Fail", "PassRate"]
        x.add_row([totalcnt, passcnt, failcnt, passrate])
        rf.write(str(x))
        rf.write("\n\n")
        passed_appsstr = "* **Passed**: %s\n" % (",".join(report["passed"].keys()))
        failed_appsstr = "* **Failed**: %s\n" % (",".join(report["failed"].keys()))
        #rf.write(passed_appsstr)
        if failcnt > 0:
            rf.write(failed_appsstr)
        rf.write("\n\n")
        # generate detailed status
        rf.write("# Tested Nuclei SDK Applications/Test Cases\n\n")
        if len(report["passed"]) > 0:
            rf.write("\n## Passed\n\n")
            x = PrettyTable()
            x.set_style(MARKDOWN)
            x.field_names = ["App/Test Case", "All as Expected", "Build As Expected", "Run As Expected", "Build Status", "Run Status"]
            for app in report["passed"]:
                app_sts = report["passed"][app]
                x.add_row([app, app_sts["expected"], app_sts["exp_build"], app_sts["exp_run"], \
                    app_sts["build"], app_sts["run"]])
            rf.write(str(x))
            rf.write("\n")
        if len(report["failed"]) > 0:
            rf.write("\n## Failed\n\n")
            x = PrettyTable()
            x.set_style(MARKDOWN)
            x.field_names = ["App/Test Case", "All as Expected", "Build As Expected", "Run As Expected", "Build Status", "Run Status"]
            for app in report["failed"]:
                app_sts = report["failed"][app]
                x.add_row([app, app_sts["expected"], app_sts["exp_build"], app_sts["exp_run"], \
                    app_sts["build"], app_sts["run"]])
            rf.write(str(x))
            rf.write("\n")
        # Build configurations
        rf.write("\n# Build configurations\n\n")
        x = PrettyTable()
        x.set_style(MARKDOWN)
        x.field_names = ["Case Name", "Make Options"]
        for cfgname in report["configs"]:
            make_options = " ".join([ "%s=%s" % (key, value) for key, value in report["configs"][cfgname].items() ])
            x.add_row([cfgname, make_options])
        rf.write(str(x))
        rf.write("\n")
        # Build and run status
        rf.write("\n# Build and run status\n\n")
        x = PrettyTable()
        x.set_style(MARKDOWN)
        sts_title = ["App/Test Case", "Case Name", "Build Status", "Run Status", "Build Time", \
            "Run Time", "Type", "Value", "Total", "Text", "Data", "Bss"]
        x.field_names = sts_title
        apps_buildsts = result
        percase_status = report["percase"]
        # failed status
        failed_sts = []
        # summary of all app cases
        tot_cases_count = 0
        tot_cases_btm = []
        tot_cases_rtm = []
        tot_cases_sztot = []
        tot_cases_sztext = []
        tot_cases_szdata = []
        tot_cases_szbss = []
        for app in apps_buildsts:
            app_sts = apps_buildsts[app]
            for cfgname in app_sts:
                tot_cases_count += 1
                caseisfail = False
                if cfgname in percase_status["failed"][app]:
                    caseisfail = True
                size = app_sts[cfgname]["size"]
                apprsttype, apprstval = get_app_runresult(app_sts[cfgname].get("result", dict()))
                bsts_md, rsts_md, appbtm, apprtm = generate_build_run_status_md(app_sts[cfgname], logdir, caseisfail)
                cfgname_md = generate_build_cfgname_md(cfgname, app_sts[cfgname], logdir, caseisfail)
                sts_row = [app, cfgname_md, bsts_md, rsts_md, appbtm, apprtm, apprsttype, apprstval, \
                    size["total"], size["text"], size["data"], size["bss"]]
                x.add_row(sts_row)
                # add failed status into list
                if caseisfail:
                    failed_sts.append(sts_row)
                # record total cases build and run time
                appwithcfg = "%s:%s" % (app, cfgname)
                tot_cases_btm.append((str(appbtm), appwithcfg))
                tot_cases_rtm.append((str(apprtm), appwithcfg))
                tot_cases_sztot.append((str(size["total"]), appwithcfg))
                tot_cases_sztext.append((str(size["text"]), appwithcfg))
                tot_cases_szdata.append((str(size["data"]), appwithcfg))
                tot_cases_szbss.append((str(size["bss"]), appwithcfg))
        rf.write(str(x))
        rf.write("\n")
        # save report summary
        rpt_summary["count"] = tot_cases_count
        rpt_summary["buildtime"] = tot_cases_btm
        rpt_summary["runtime"] = tot_cases_rtm
        rpt_summary["sztotal"] = tot_cases_sztot
        rpt_summary["sztext"] = tot_cases_sztext
        rpt_summary["szdata"] = tot_cases_szdata
        rpt_summary["szbss"] = tot_cases_szbss
        # show only failed cases
        if len(failed_sts) > 0:
            rf.write("\n# Failed Cases Details\n\n")
            x = PrettyTable()
            x.set_style(MARKDOWN)
            x.field_names = sts_title
            for row in failed_sts:
                x.add_row(row)
            rf.write(str(x))
            rf.write("\n")
        # Real expected pass or fail cases
        percase_status = report["percase"]
        rf.write("\n# Passed Cases(as expected) Per Applications\n\n")
        x = PrettyTable()
        x.set_style(MARKDOWN)
        x.field_names = ["App", "Passed Cases"]
        with open(rpt_passtxt, "w") as rpt_pf:
            for app in percase_status["passed"]:
                tmptxt = ", ".join(percase_status["passed"][app])
                if (len(tmptxt) > 0):
                    rpt_pf.write("- %s : %s\n" % (app, tmptxt))
                x.add_row([app, tmptxt])
        rf.write(str(x))
        rf.write("\n")
        rf.write("\n# Failed Cases(as expected) Per Applications\n\n")
        x = PrettyTable()
        x.set_style(MARKDOWN)
        x.field_names = ["App", "Failed Cases"]
        with open(rpt_failtxt, "w") as rpt_ff:
            for app in percase_status["failed"]:
                tmptxt = ", ".join(percase_status["failed"][app])
                if (len(tmptxt) > 0):
                    rpt_ff.write("- %s : %s\n" % (app, tmptxt))
                x.add_row([app, tmptxt])
        rf.write(str(x))
        rf.write("\n")
        # expected build or run failed cases
        x = PrettyTable()
        x.set_style(MARKDOWN)
        x.field_names = ["App/Test Case", "Case Name", "Expected Build", "Expected Run"]
        apps_buildsts = result
        with_expect = False
        for app in apps_buildsts:
            app_sts = apps_buildsts[app]
            for cfgname in app_sts:
                app_cfg_expected = get_expected(config, app, cfgname)
                expected_build = get_expected_build(app_cfg_expected)
                expected_run = get_expected_run(app_cfg_expected)
                if expected_build == False or expected_run == False:
                    with_expect = True
                    x.add_row([app, cfgname, expected_build, expected_run])
        if with_expect:
            rf.write("\n# Expected Build or Run Failed Cases\n\n")
            rf.write(str(x))
            rf.write("\n")
    # generate html from markdown
    md2html(rptfile, rpthtml)
    return rpt_summary

# check whether the result json is generated by nsdk_bench.py
def is_bench_result(result):
    if isinstance(result, dict) == False:
        return False
    check = True
    try:
        for app in result:
            for cfgname in result[app]:
                if "app" not in result[app][cfgname]:
                    check = False
                    break
            if check == False:
                break
    except:
        check = False
    return check

def update_list_items(list1, list2):
    for i in range(0, len(list2)):
        if list2[i] not in list1:
            list1.append(list2[i])
    return list1

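# Merge one run configuration into the accumulated merged configuration,
# prefixing per-case and expected names with the relative log directory so
# cases coming from different sub-directories do not collide.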
def merge_runconfig(all_mergedcfg, config, reldir):
    if "run_config" not in all_mergedcfg:
        all_mergedcfg["run_config"] = config["run_config"]
    if "build_target" not in all_mergedcfg:
        all_mergedcfg["build_target"] = config["build_target"]
    if "parallel" not in all_mergedcfg:
        all_mergedcfg["parallel"] = config.get("parallel", "-j")
    if "build_config" not in all_mergedcfg:
        all_mergedcfg["build_config"] = dict()
    if "build_configs" not in all_mergedcfg:
        all_mergedcfg["build_configs"] = dict()
    if "expecteds" not in all_mergedcfg:
        all_mergedcfg["expecteds"] = dict()
    if "checks" not in all_mergedcfg:
        all_mergedcfg["checks"] = config.get("checks", dict())
    if "appdirs" not in all_mergedcfg:
        all_mergedcfg["appdirs"] = config.get("appdirs", [])
    if "appdirs_ignore" not in all_mergedcfg:
        all_mergedcfg["appdirs_ignore"] = config.get("appdirs_ignore", [])
    if "appconfig" not in all_mergedcfg:
        all_mergedcfg["appconfig"] = config.get("appconfig", dict())
    # TODO handle expecteds and expected
    if "expected" in config:
        for app in config["expected"]:
            if app not in all_mergedcfg["expecteds"]:
                all_mergedcfg["expecteds"][app] = dict()
            newcfgname = reldir.replace("\\", "/")
            all_mergedcfg["expecteds"][app][newcfgname] = config["expected"][app]
    if "expecteds" in config:
        for app in config["expecteds"]:
            if app not in all_mergedcfg["expecteds"]:
                all_mergedcfg["expecteds"][app] = dict()
            for cfgname in config["expecteds"][app]:
                newcfgname = os.path.join(reldir, cfgname).replace("\\", "/")
                all_mergedcfg["expecteds"][app][newcfgname] = config["expecteds"][app][cfgname]

    def merge_buildcfgs(dstcfg, srccfg, rel):
        if "build_configs" in srccfg:
            for bcfg in srccfg["build_configs"]:
                new_bcfg = os.path.join(rel, bcfg).replace("\\", "/")
                dstcfg["build_configs"][new_bcfg] = merge_two_config(srccfg.get("build_config", None), srccfg["build_configs"][bcfg])

    merge_buildcfgs(all_mergedcfg, config, reldir)
    all_mergedcfg["appdirs"] = update_list_items(all_mergedcfg.get("appdirs", []), config.get("appdirs", []))
    all_mergedcfg["appdirs_ignore"] = update_list_items(all_mergedcfg.get("appdirs_ignore", []), config.get("appdirs_ignore", []))
    appconfigs = config.get("appconfig", dict())
    for app in appconfigs:
        if app not in all_mergedcfg["appconfig"]:
            all_mergedcfg["appconfig"][app] = {"build_config": {}, "build_configs": {}, "checks": appconfigs[app].get("checks", dict())}
        merge_buildcfgs(all_mergedcfg["appconfig"][app], appconfigs[app], reldir)
    return

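# Walk logdir for mergedcfg.json / result.json pairs produced by nsdk_bench.py
# and merge them into one configuration and one result dict, keeping each
# sub-directory as a separate case name prefix.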
def merge_split_config_and_result(logdir):
    mergedcfg_files = find_files(logdir, "**/mergedcfg.json", True)
    all_mergedcfg = dict()
    all_result = dict()
    print("Start to merge config and result json files in %s" % (logdir))
    for mergedcfg_file in mergedcfg_files:
        configfile = mergedcfg_file
        resultdir = os.path.dirname(mergedcfg_file)
        reldir = os.path.relpath(resultdir, logdir)
        resultfile = os.path.join(resultdir, "result.json")
        if os.path.isfile(resultfile) == True:
            _, config = load_json(configfile)
            _, result = load_json(resultfile)
            if is_bench_result(result):
                for app in result:
                    for cfg in result[app]:
                        app_cfg = os.path.join(reldir, cfg).replace("\\", "/")
                        if app not in all_result:
                            all_result[app] = dict()
                        all_result[app][app_cfg] = result[app][cfg]
                merge_runconfig(all_mergedcfg, config, reldir)
            else:
                print("result json file %s is not generated by nsdk_bench.py" % (resultfile))
    return all_mergedcfg, all_result

def merge_all_config_and_result(logdir):
    mergedcfg_files = find_files(logdir, "**/mergedcfg.json", True)
    all_mergedcfg = dict()
    all_result = dict()
    print("Start to merge split config and result json files in %s" % (logdir))
    for mergedcfg_file in mergedcfg_files:
        configfile = mergedcfg_file
        resultdir = os.path.dirname(mergedcfg_file)
        resultfile = os.path.join(resultdir, "result.json")
        if os.path.isfile(resultfile) == True:
            _, config = load_json(configfile)
            _, result = load_json(resultfile)
            if is_bench_result(result):
                print("Merging config json file %s, result json file %s" % (configfile, resultfile))
                all_mergedcfg = merge_two_config(all_mergedcfg, config)
                all_result = merge_two_config(all_result, result)
            else:
                print("result json file %s is not generated by nsdk_bench.py" % (resultfile))
    return all_mergedcfg, all_result

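# Flatten passing run results into a nested dict keyed by case -> type -> subtype,
# carrying the measured values and size figures plus tool, flag and CI metadata;
# this is the input for the CSV/Excel export.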
def parse_result2dict(result):
    if not(isinstance(result, dict)):
        return None
    csvdict = dict()
    for app in result:
        appresult = result[app]
        for cfg in appresult:
            if cfg not in csvdict:
                csvdict[cfg] = dict()
            runsts = appresult[cfg]["status"].get("run", False)
            if runsts == False:
                continue
            apptype = appresult[cfg]["result"]["type"]
            appsubtype = appresult[cfg]["result"].get("subtype", "")
            if appsubtype == "":
                appsubtype = "default"
            if apptype == "unknown":
                continue
            if apptype not in csvdict[cfg]:
                csvdict[cfg][apptype] = dict()
            if appsubtype not in csvdict[cfg][apptype]:
                csvdict[cfg][apptype][appsubtype] = dict()
            csvdict[cfg][apptype][appsubtype]["meta"] = appresult[cfg].get("toolver", dict())
            if csvdict[cfg][apptype][appsubtype]["meta"] == None:
                csvdict[cfg][apptype][appsubtype]["meta"] = dict()
            csvdict[cfg][apptype][appsubtype]["meta"].update(appresult[cfg].get("flags", dict()))
            # record ci information
            ciinfo = get_ci_info()
            if ciinfo:
                csvdict[cfg][apptype][appsubtype]["meta"]["ci"] = ciinfo
            if "value" not in csvdict[cfg][apptype][appsubtype]:
                csvdict[cfg][apptype][appsubtype]["value"] = dict()
            csvdict[cfg][apptype][appsubtype]["value"].update(appresult[cfg]["result"]["value"])
            csvdict[cfg][apptype][appsubtype]["size"] = appresult[cfg]["size"]
    return csvdict

def show_failed_apps(logdir):
    rpt_failtxt = os.path.join(logdir, "app_failed.txt")
    if os.path.isfile(rpt_failtxt) == False:
        return
    with open(rpt_failtxt, "r") as rpt_ff:
        failed_lines = rpt_ff.readlines()
        if len(failed_lines) > 0:
            print("Here are the failed applications list below")
            for line in failed_lines:
                print(line)
    return

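# Print and save a short summary: total case count and the top N cases by build
# time, run time and program section sizes (total/text/data/bss).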
def show_report_summary(summary, sfl):
    if not(isinstance(summary, dict)):
        return
    if len(summary) == 0:
        return
    def decsort(key):
        try:
            return float(key[0])
        except:
            return 0
    summary["buildtime"].sort(reverse=True, key=decsort)
    summary["runtime"].sort(reverse=True, key=decsort)
    summary["sztotal"].sort(reverse=True, key=decsort)
    summary["sztext"].sort(reverse=True, key=decsort)
    summary["szdata"].sort(reverse=True, key=decsort)
    summary["szbss"].sort(reverse=True, key=decsort)
    mostnum = min(10, summary["count"])
    with open(sfl, "w") as sflh:
        sflh.write("There are %d cases executed\r\n" % (summary["count"]))
        sflh.write("The most %d build time costing cases are %s\r\n" % (mostnum, summary["buildtime"][:mostnum]))
        sflh.write("The most %d run time costing cases are %s\r\n" % (mostnum, summary["runtime"][:mostnum]))
        sflh.write("The most %d program total size costing cases are %s\r\n" % (mostnum, summary["sztotal"][:mostnum]))
        sflh.write("The most %d program text section size costing cases are %s\r\n" % (mostnum, summary["sztext"][:mostnum]))
        sflh.write("The most %d program data section size costing cases are %s\r\n" % (mostnum, summary["szdata"][:mostnum]))
        sflh.write("The most %d program bss section size costing cases are %s\r\n" % (mostnum, summary["szbss"][:mostnum]))
    print("\r\n=====Here is the report summary:=====")
    with open(sfl, "r") as sflh:
        for line in sflh.readlines():
            print(line)
    pass

def save_report_files(logdir, config, result, run=False):
    if os.path.isdir(logdir) == False:
        os.makedirs(logdir)
    rptfile = os.path.join(logdir, "report.md")
    rpthtml = os.path.join(logdir, "report.html")
    rptsumfile = os.path.join(logdir, "summary.txt")
    rptsum = generate_report(config, result, rptfile, rpthtml, logdir, run)
    csvfile = os.path.join(logdir, "result.csv")
    save_bench_csv(result, csvfile)
    print("Generate report csv file to %s" % (csvfile))
    print("Generate report markdown file to %s" % (rptfile))
    if run:
        csvdata = parse_result2dict(result)
        csvdatafile = os.path.join(logdir, "runresult.json")
        save_json(csvdatafile, csvdata)
        runresultexcel = os.path.join(logdir, "runresult.xlsx")
        save_runresult(csvdata, runresultexcel)
    show_report_summary(rptsum, rptsumfile)
    # show failed apps
    show_failed_apps(logdir)
    pass

# save excel and csv for selected csv table
def save_excel(csvtable, excelfile):
    if len(csvtable) == 0:
        print("No run result found, no need to generate excel!")
        return
    newcsvtable = {}
    csvsummary = []
    keylens = [ len(key) for key in csvtable.keys() ]
    keylens.sort()
    maxlen = keylens[-1]
    for cfg in csvtable:
        if len(csvtable[cfg]) > 1:
            csvsummary.append([cfg])
            csvsummary.extend(csvtable[cfg])
        # Excel sheet names are limited to 31 characters, so drop the first
        # '-' separated field from overly long config names
        if maxlen >= 31:
            cfglist = cfg.split('-')
            newcfg = "-".join(cfglist[1:])
            newcsvtable[newcfg] = csvtable[cfg]
        else:
            newcsvtable[cfg] = csvtable[cfg]
    newcsvtable["summary"] = csvsummary
    pe.isave_book_as(bookdict=newcsvtable, dest_file_name=excelfile)
    print("Generate run result excel file to %s" % (excelfile))
    csvfile = excelfile + ".csv"
    with open(csvfile, "w") as cf:
        for row in csvsummary:
            cf.write("%s\n" % (",".join(str(e) for e in row)))
    print("Generate run result csv file to %s" % (csvfile))
    pass

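# Build per-type sheets (a "RUNCONFIG"/"SUBTYPE" header plus one column per run
# configuration) from the parsed run results and save them as an Excel workbook
# plus JSON/CSV side files.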
def save_runresult(runresult, excelfile):
    if not(isinstance(runresult, dict)):
        return False
    # Get csv header plus column 1 & column 2 entries for each app type
    csv_headers = dict()
    csv_col1 = dict()
    csv_col2 = dict()
    for cfg in runresult:
        splitcfgs = cfg.split('/')
        pretype = ""
        if len(splitcfgs) > 1:
            pretype = '-'.join(splitcfgs[:-1])
        runcfg = splitcfgs[-1]
        for apptype in runresult[cfg]:
            for subtype in runresult[cfg][apptype]:
                if pretype != "":
                    final_apptype = pretype + "_" + apptype
                else:
                    final_apptype = apptype
                if final_apptype not in csv_headers:
                    csv_headers[final_apptype] = ["RUNCONFIG", "SUBTYPE"]
                    csv_col1[final_apptype] = []
                    csv_col2[final_apptype] = []
                # fill header and col1 / col2
                if runcfg not in csv_headers[final_apptype]:
                    csv_headers[final_apptype].append(runcfg)
                rstvalues = runresult[cfg][apptype][subtype]["value"]
                for key in rstvalues:
                    if key not in csv_col1[final_apptype]:
                        csv_col1[final_apptype].append(key)
                        csv_col2[final_apptype].append(subtype)
    # Fill the csvtable with -
    csvtable = dict()
    for cfg in csv_headers:
        csvtable[cfg] = [csv_headers[cfg]]
        for i in range(0, len(csv_col1[cfg])):
            rowlist = [csv_col1[cfg][i], csv_col2[cfg][i]]
            for j in range(0, len(csv_headers[cfg]) - 2):
                rowlist.append('-')
            csvtable[cfg].append(rowlist)
    # Fill the csvtable with real value if key found
    for cfg in runresult:
        splitcfgs = cfg.split('/')
        pretype = ""
        if len(splitcfgs) > 1:
            pretype = '-'.join(splitcfgs[:-1])
        runcfg = splitcfgs[-1]
        for apptype in runresult[cfg]:
            for subtype in runresult[cfg][apptype]:
                if pretype != "":
                    final_apptype = pretype + "_" + apptype
                else:
                    final_apptype = apptype
                rstvalues = runresult[cfg][apptype][subtype]["value"]
                header = csvtable[final_apptype][0]
                index = header.index(runcfg)
                for key in rstvalues:
                    for i in range(0, len(csvtable[final_apptype])):
                        if key == csvtable[final_apptype][i][0]:
                            csvtable[final_apptype][i][index] = rstvalues[key]
                            break
    # Fill csvdict using csvtable
    csvdict = dict()
    for cfg in csvtable:
        csvdict[cfg] = dict()
        for csvlist in csvtable[cfg]:
            csvdict[cfg][csvlist[0]] = csvlist[1:]
    # Save to excel
    try:
        csvtable_jf = excelfile + ".csvtable.json"
        csvdict_jf = excelfile + ".csvdict.json"
        save_json(csvtable_jf, csvtable)
        save_json(csvdict_jf, csvdict)
        save_excel(csvtable, excelfile)
    except Exception as exc:
        print("Exception happened during save excel, error %s" % (exc))
        return False
    return True

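# Top-level entry point: merge all bench configurations/results found under
# logdir and generate the report files there.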
def generate_report_for_logs(logdir, run=False, split=False):
    if logdir and os.path.isdir(logdir):
        if split == False:
            all_mergedcfg, all_result = merge_all_config_and_result(logdir)
        else:
            all_mergedcfg, all_result = merge_split_config_and_result(logdir)
        if all_mergedcfg and all_result:
            config_file = os.path.join(logdir, "allcfg.json")
            result_file = os.path.join(logdir, "allrst.json")
            print("Save all merged config file to %s" % (config_file))
            print("Save all result file to %s" % (result_file))
            save_json(config_file, all_mergedcfg)
            save_json(result_file, all_result)
            save_report_files(logdir, all_mergedcfg, all_result, run)
        else:
            print("Can't find any valid reports in %s generated by nsdk_bench.py" % (logdir))
    pass

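# Example invocation (the log directory path below is hypothetical):
#   python3 nsdk_report.py --logdir logs/barebench --run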
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Nuclei SDK bench report generation tool")
    parser.add_argument('--logdir', required=True, help="Logs directory where the report json files are saved")
    parser.add_argument('--split', action='store_true', help="Split for different configurations")
    parser.add_argument('--run', action='store_true', help="If specified, it means this is a runner report")
    args = parser.parse_args()

    if os.path.isdir(args.logdir) == False:
        print("The log directory doesn't exist, please check!")
        sys.exit(1)

    generate_report_for_logs(args.logdir, args.run, args.split)