#!/usr/bin/env python3
import os
import sys
import time
import copy
import glob
import tempfile
import argparse

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
requirement_file = os.path.abspath(os.path.join(SCRIPT_DIR, "..", "requirements.txt"))

MARKDOWN_PLUGIN = True
try:
    import serial
    import json
    import markdown
    import pyexcel as pe
    from prettytable import *
except Exception as exc:
    MARKDOWN_PLUGIN = False
    print("Import Error: %s" % (exc))
    print("Please install required packages using: pip3 install -r %s" % (requirement_file))
    sys.exit(1)

from nsdk_utils import *

def get_expected_build(expcfg):
    if isinstance(expcfg, dict) == False:
        return False
    return expcfg.get("build", True)

def get_expected_run(expcfg, build=None):
    if isinstance(expcfg, dict) == False:
        return False
    if build is None:
        build = get_expected_build(expcfg)
    # if expected build is false, expected run should be false
    if build == False:
        return False
    return expcfg.get("run", True)

def get_expected(config, app, cfg_name):
    if isinstance(config, dict) == False:
        return None
    found_app_expected = find_local_appconfig(app, config.get("expected", dict()))
    found_app_expecteds = find_local_appconfig(app, config.get("expecteds", dict()))
    if found_app_expected:
        app_expected = copy.deepcopy(config.get("expected", dict()).get(found_app_expected))
    else:
        app_expected = dict()
    if found_app_expecteds:
        allcfgs_expected = config.get("expecteds", dict()).get(found_app_expecteds)
        # find the expecteds entry whose key is a prefix of cfg_name (startswith match)
        if allcfgs_expected is not None and len(allcfgs_expected) > 0:
            if cfg_name not in allcfgs_expected:
                app_cfgexpected = dict()
                for key in allcfgs_expected:
                    if cfg_name.startswith(key):
                        app_cfgexpected = allcfgs_expected[key]
                        break
            else:
                app_cfgexpected = allcfgs_expected.get(cfg_name, dict())
        else:
            app_cfgexpected = dict()
    else:
        app_cfgexpected = dict()
    if found_app_expected and found_app_expecteds and \
        app == found_app_expected and app == found_app_expecteds:
        app_expected = merge_two_config(app_expected, app_cfgexpected)
    elif found_app_expected and app == found_app_expected and len(app_expected) > 0:
        app_expected = app_expected
    elif found_app_expecteds and app == found_app_expecteds and len(app_cfgexpected) > 0:
        app_expected = app_cfgexpected
    else:
        tmp_app_expected = merge_two_config(app_expected, app_cfgexpected)
        if len(app_cfgexpected) and len(app_expected):
            if len(found_app_expecteds) > len(found_app_expected):
                tmp_app_expected = app_cfgexpected
            elif len(found_app_expecteds) < len(found_app_expected):
                tmp_app_expected = app_expected
        app_expected = tmp_app_expected
    return app_expected
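
# Illustrative only (app and case names below are made-up examples, not taken
# from a real configuration): get_expected() resolves per-app expectations from
# two optional config sections, "expected" (one entry per app) and "expecteds"
# (one entry per app per case name, matched exactly or by prefix), e.g.:
#
#   "expected":  { "demo_timer": { "build": true, "run": false } },
#   "expecteds": { "demo_timer": { "mycase": { "build": true, "run": false } } }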

def check_expected(build_status, config, run=False):
    if isinstance(build_status, dict) == False or isinstance(config, dict) == False:
        return False
    ret = True
    for app in build_status:
        app_allstatus = build_status[app]
        for cfgname in app_allstatus:
            app_status = app_allstatus[cfgname]["status"]
            build_ret = app_status.get("build", False)
            run_ret = app_status.get("run", False)
            app_cfg_expected = get_expected(config, app, cfgname)
            if isinstance(app_cfg_expected, dict):
                expected_build_ret = get_expected_build(app_cfg_expected)
                expected_run_ret = get_expected_run(app_cfg_expected)
                if build_ret == False and expected_build_ret != build_ret:
                    ret = False
                if run:
                    if run_ret == False and expected_run_ret != run_ret:
                        ret = False
            else:
                if build_ret == False:
                    ret = False
                if run:
                    if run_ret == False:
                        ret = False
    return ret

def save_results(appcfg, hwcfg, mergedcfg, result, savedir):
    if not (isinstance(savedir, str) and os.path.isdir(savedir)):
        return
    if isinstance(appcfg, dict):
        sfn = os.path.join(savedir, "appcfg.json")
        save_json(sfn, appcfg)
    if isinstance(hwcfg, dict):
        sfn = os.path.join(savedir, "hwcfg.json")
        save_json(sfn, hwcfg)
    if isinstance(mergedcfg, dict):
        sfn = os.path.join(savedir, "mergedcfg.json")
        save_json(sfn, mergedcfg)
    if isinstance(result, dict):
        sfn = os.path.join(savedir, "result.json")
        save_json(sfn, result)
    pass

def analyze_report(config, result, runapp=False):
    apps_status = dict()
    passed_apps = dict()
    failed_apps = dict()
    build_cfgs = dict()
    glb_buildcfg = config.get("build_config", dict())
    # TODO currently this feature only covers cases where the application build_configs
    # just extend the global build_configs
    # get build configs used per cfgname
    if "build_configs" not in config:
        build_cfgs["default"] = config.get("build_config", dict())
    else:
        sub_configs = config["build_configs"]
        for cfgname in sub_configs:
            bcfg = copy.deepcopy(glb_buildcfg)
            bcfg.update(sub_configs[cfgname])
            build_cfgs[cfgname] = bcfg
    if "appconfig" in config:
        appcfgs = config.get("appconfig", dict())
        for app in appcfgs:
            if "build_configs" in appcfgs[app]:
                appsub_configs = appcfgs[app]["build_configs"]
                for cfgname in appsub_configs:
                    bcfg = copy.deepcopy(glb_buildcfg)
                    bcfg.update(appsub_configs[cfgname])
                    build_cfgs[cfgname] = bcfg

    def check_app_status(status, expected, runapp=False):
        app_sts = {"expected": True, "exp_build": True, "exp_run": True, "build": True, "run": True}
        percase_sts = {"expected": True, "exp_build": True, "exp_run": True, "build": True, "run": True}
        app_percase_sts = dict()
        for cfgname in status:
            app_percase_sts[cfgname] = copy.deepcopy(percase_sts)
            app_cfg_expected = get_expected(config, app, cfgname)
            expected_build = get_expected_build(app_cfg_expected)
            expected_run = get_expected_run(app_cfg_expected)
            real_build = status[cfgname]["status"].get("build", False)
            real_run = status[cfgname]["status"].get("run", False)
            if real_build == False and expected_build != real_build:
                app_sts["exp_build"] = False
                app_percase_sts[cfgname]["exp_build"] = False
            if real_run == False and expected_run != real_run:
                app_sts["exp_run"] = False
                app_percase_sts[cfgname]["exp_run"] = False
            if real_build == False:
                app_sts["build"] = False
            if real_run == False:
                app_sts["run"] = False
            # get per case expected
            app_percase_sts[cfgname]["expected"] = app_sts["exp_build"]
            if runapp:
                app_percase_sts[cfgname]["expected"] = app_percase_sts[cfgname]["exp_build"] and app_percase_sts[cfgname]["exp_run"]
        if runapp:
            app_sts["expected"] = app_sts["exp_build"] and app_sts["exp_run"]
        else:
            app_sts["expected"] = app_sts["exp_build"]
        analayzed_app_status = {"summary": app_sts, "percase": app_percase_sts}
        return analayzed_app_status

    apps_expected = config.get("expected", dict())
    apps_percase_status = dict()
    apps_percase_failed = dict()
    apps_percase_passed = dict()
    # Get app status compared with expected
    for app in result:
        app_expected = apps_expected.get(app, dict())
        app_status = result[app]
        analayzed_app_status = check_app_status(app_status, app_expected, runapp)
        apps_status[app] = analayzed_app_status["summary"]
        apps_percase_status[app] = analayzed_app_status["percase"]
        apps_percase_failed[app] = list()
        apps_percase_passed[app] = list()
        # per case for 1 app
        for case in analayzed_app_status["percase"]:
            if analayzed_app_status["percase"][case]["expected"] == False:
                apps_percase_failed[app].append(case)
            else:
                apps_percase_passed[app].append(case)
        # per app
        if apps_status[app]["expected"] == True:
            passed_apps[app] = copy.deepcopy(apps_status[app])
        else:
            failed_apps[app] = copy.deepcopy(apps_status[app])
    # Create report_dict
    report_dict = {"passed": passed_apps, "failed": failed_apps, "status": apps_status, "configs": build_cfgs, \
        "percase": {"status": apps_percase_status, "failed": apps_percase_failed, "passed": apps_percase_passed} }
    return report_dict

def gen_mdtxt(key, value=None, bold=True):
    if bold:
        key = "**%s**" % (key)
    return "[%s](%s)" % (key, value) if value else key

def generate_build_run_status_md(appresult, logdir, casefail=True):
    if isinstance(appresult, dict) == False:
        if casefail:
            return "**False**", "**False**", "-", "-"
        else:
            return "False", "False", "-", "-"
    else:
        appblog = appresult["logs"].get("build", None)
        apprlog = appresult["logs"].get("run", None)
        appbsts = appresult["status"].get("build", False)
        apprsts = appresult["status"].get("run", False)
        appbtm = appresult["time"].get("build", "-")
        apprtm = appresult["time"].get("run", "-")
        def gen_sts_md(sts, log, bold=True):
            if log:
                log = os.path.relpath(log, logdir)
            return gen_mdtxt(sts, log, bold)
        bsts_md = gen_sts_md(appbsts, appblog, casefail)
        rsts_md = gen_sts_md(apprsts, apprlog, casefail)
        return bsts_md, rsts_md, appbtm, apprtm

def generate_build_cfgname_md(cfgname, appresult, logdir, casefail=False):
    if isinstance(appresult, dict) == False or appresult["logs"].get("build", None) is None:
        caselogdir = None
    else:
        appblog = appresult["logs"].get("build", None)
        caselogdir = os.path.dirname(os.path.relpath(appblog, logdir))
    return gen_mdtxt(cfgname, caselogdir, casefail)

def md2html(mdfile, htmlfile):
    if MARKDOWN_PLUGIN == False or os.path.isfile(mdfile) == False:
        return
    css_style = \
"""
<style>
table, th, td {
    border: 1px solid #1132e791 !important;
    border-collapse: collapse;
    padding: 3px;
    text-align: center;
}
td:first-child {
    text-align: left;
}
</style>
"""
    with open(mdfile) as mdf:
        mdtxt = mdf.read()
        mdhtml = markdown.markdown(mdtxt, extensions=["extra"])
        mdhtml = css_style + mdhtml
        with open(htmlfile, 'w') as htf:
            htf.write(mdhtml)

def generate_report(config, result, rptfile, rpthtml, logdir, runapp=False):
    if not(isinstance(config, dict) and isinstance(result, dict) and isinstance(rptfile, str)):
        return None
    report = analyze_report(config, result, runapp)
    rpt_passtxt = os.path.join(os.path.dirname(rptfile), "app_passed.txt")
    rpt_failtxt = os.path.join(os.path.dirname(rptfile), "app_failed.txt")
    rpt_summary = dict()
    # generate markdown file
    with open(rptfile, "w") as rf:
        # generate overall status
        rf.write("# Overall Status\n\n")
        passcnt = len(report["passed"])
        failcnt = len(report["failed"])
        totalcnt = passcnt + failcnt
        passrate = str(round(((passcnt / totalcnt) * 100), 2)) + "%"
        x = PrettyTable()
        x.set_style(MARKDOWN)
        x.field_names = ["Total", "Pass", "Fail", "PassRate"]
        x.add_row([totalcnt, passcnt, failcnt, passrate])
        rf.write(str(x))
        rf.write("\n\n")
        passed_appsstr = "* **Passed**: %s\n" % (",".join(report["passed"].keys()))
        failed_appsstr = "* **Failed**: %s\n" % (",".join(report["failed"].keys()))
        #rf.write(passed_appsstr)
        if failcnt > 0:
            rf.write(failed_appsstr)
        rf.write("\n\n")
        # generate detailed status
        rf.write("# Tested Nuclei SDK Applications/Test Cases\n\n")
        if len(report["passed"]) > 0:
            rf.write("\n## Passed\n\n")
            x = PrettyTable()
            x.set_style(MARKDOWN)
            x.field_names = ["App/Test Case", "All as Expected", "Build As Expected", "Run As Expected", "Build Status", "Run Status"]
            for app in report["passed"]:
                app_sts = report["passed"][app]
                x.add_row([app, app_sts["expected"], app_sts["exp_build"], app_sts["exp_run"], \
                    app_sts["build"], app_sts["run"]])
            rf.write(str(x))
            rf.write("\n")
        if len(report["failed"]) > 0:
            rf.write("\n## Failed\n\n")
            x = PrettyTable()
            x.set_style(MARKDOWN)
            x.field_names = ["App/Test Case", "All as Expected", "Build As Expected", "Run As Expected", "Build Status", "Run Status"]
            for app in report["failed"]:
                app_sts = report["failed"][app]
                x.add_row([app, app_sts["expected"], app_sts["exp_build"], app_sts["exp_run"], \
                    app_sts["build"], app_sts["run"]])
            rf.write(str(x))
            rf.write("\n")
        # Build configurations
        rf.write("\n# Build configurations\n\n")
        x = PrettyTable()
        x.set_style(MARKDOWN)
        x.field_names = ["Case Name", "Make Options"]
        for cfgname in report["configs"]:
            make_options = " ".join([ "%s=%s" % (key, value) for key, value in report["configs"][cfgname].items() ])
            x.add_row([cfgname, make_options])
        rf.write(str(x))
        rf.write("\n")
        # Build and run status
        rf.write("\n# Build and run status\n\n")
        x = PrettyTable()
        x.set_style(MARKDOWN)
        sts_title = ["App/Test Case", "Case Name", "Build Status", "Run Status", "Build Time", \
            "Run Time", "Type", "Value", "Total", "Text", "Data", "Bss"]
        x.field_names = sts_title
        apps_buildsts = result
        percase_status = report["percase"]
        # failed status
        failed_sts = []
        # summary of all app cases
        tot_cases_count = 0
        tot_cases_btm = []
        tot_cases_rtm = []
        tot_cases_sztot = []
        tot_cases_sztext = []
        tot_cases_szdata = []
        tot_cases_szbss = []
        for app in apps_buildsts:
            app_sts = apps_buildsts[app]
            for cfgname in app_sts:
                tot_cases_count += 1
                caseisfail = False
                if cfgname in percase_status["failed"][app]:
                    caseisfail = True
                size = app_sts[cfgname]["size"]
                apprsttype, apprstval = get_app_runresult(app_sts[cfgname].get("result", dict()))
                bsts_md, rsts_md, appbtm, apprtm = generate_build_run_status_md(app_sts[cfgname], logdir, caseisfail)
                cfgname_md = generate_build_cfgname_md(cfgname, app_sts[cfgname], logdir, caseisfail)
                sts_row = [app, cfgname_md, bsts_md, rsts_md, appbtm, apprtm, apprsttype, apprstval, \
                    size["total"], size["text"], size["data"], size["bss"]]
                x.add_row(sts_row)
                # add failed status into list
                if caseisfail:
                    failed_sts.append(sts_row)
                # record total cases build and run time
                appwithcfg = "%s:%s" % (app, cfgname)
                tot_cases_btm.append((str(appbtm), appwithcfg))
                tot_cases_rtm.append((str(apprtm), appwithcfg))
                tot_cases_sztot.append((str(size["total"]), appwithcfg))
                tot_cases_sztext.append((str(size["text"]), appwithcfg))
                tot_cases_szdata.append((str(size["data"]), appwithcfg))
                tot_cases_szbss.append((str(size["bss"]), appwithcfg))
        rf.write(str(x))
        rf.write("\n")
        # save report summary
        rpt_summary["count"] = tot_cases_count
        rpt_summary["buildtime"] = tot_cases_btm
        rpt_summary["runtime"] = tot_cases_rtm
        rpt_summary["sztotal"] = tot_cases_sztot
        rpt_summary["sztext"] = tot_cases_sztext
        rpt_summary["szdata"] = tot_cases_szdata
        rpt_summary["szbss"] = tot_cases_szbss
        # show only failed cases
        if len(failed_sts) > 0:
            rf.write("\n# Failed Cases Details\n\n")
            x = PrettyTable()
            x.set_style(MARKDOWN)
            x.field_names = sts_title
            for row in failed_sts:
                x.add_row(row)
            rf.write(str(x))
            rf.write("\n")
        # Real expected pass or fail cases
        percase_status = report["percase"]
        rf.write("\n# Passed Cases(as expected) Per Applications\n\n")
        x = PrettyTable()
        x.set_style(MARKDOWN)
        x.field_names = ["App", "Passed Cases"]
        with open(rpt_passtxt, "w") as rpt_pf:
            for app in percase_status["passed"]:
                tmptxt = ", ".join(percase_status["passed"][app])
                if (len(tmptxt) > 0):
                    rpt_pf.write("- %s : %s\n" % (app, tmptxt))
                x.add_row([app, tmptxt])
        rf.write(str(x))
        rf.write("\n")
        rf.write("\n# Failed Cases(as expected) Per Applications\n\n")
        x = PrettyTable()
        x.set_style(MARKDOWN)
        x.field_names = ["App", "Failed Cases"]
        with open(rpt_failtxt, "w") as rpt_ff:
            for app in percase_status["failed"]:
                tmptxt = ", ".join(percase_status["failed"][app])
                if (len(tmptxt) > 0):
                    rpt_ff.write("- %s : %s\n" % (app, tmptxt))
                x.add_row([app, tmptxt])
        rf.write(str(x))
        rf.write("\n")
        # expected build or run failed cases
        x = PrettyTable()
        x.set_style(MARKDOWN)
        x.field_names = ["App/Test Case", "Case Name", "Expected Build", "Expected Run"]
        apps_buildsts = result
        with_expect = False
        for app in apps_buildsts:
            app_sts = apps_buildsts[app]
            for cfgname in app_sts:
                app_cfg_expected = get_expected(config, app, cfgname)
                expected_build = get_expected_build(app_cfg_expected)
                expected_run = get_expected_run(app_cfg_expected)
                if expected_build == False or expected_run == False:
                    with_expect = True
                    x.add_row([app, cfgname, expected_build, expected_run])
        if with_expect:
            rf.write("\n# Expected Build or Run Failed Cases\n\n")
            rf.write(str(x))
            rf.write("\n")
    # generate html from markdown
    md2html(rptfile, rpthtml)
    return rpt_summary

# check whether the result json is generated by nsdk_bench.py
def is_bench_result(result):
    if isinstance(result, dict) == False:
        return False
    check = True
    try:
        for app in result:
            for cfgname in result[app]:
                if "app" not in result[app][cfgname]:
                    check = False
                    break
            if check == False:
                break
    except:
        check = False
    return check
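
# For reference, each per-case result entry consumed in this script is assumed
# to look roughly like the sketch below (inferred from the dictionary accesses
# above, not an authoritative schema):
#
#   result[app][cfgname] = {
#       "app":    ...,
#       "status": { "build": bool, "run": bool },
#       "logs":   { "build": <path>, "run": <path> },
#       "time":   { "build": ..., "run": ... },
#       "size":   { "total": ..., "text": ..., "data": ..., "bss": ... },
#       "result": { "type": ..., "subtype": ..., "value": {...} }
#   }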

def merge_runconfig(all_mergedcfg, config, reldir):
    if "run_config" not in all_mergedcfg:
        all_mergedcfg["run_config"] = config["run_config"]
    if "build_target" not in all_mergedcfg:
        all_mergedcfg["build_target"] = config["build_target"]
    if "parallel" not in all_mergedcfg:
        all_mergedcfg["parallel"] = config.get("parallel", "-j")
    if "build_config" not in all_mergedcfg:
        all_mergedcfg["build_config"] = dict()
    if "build_configs" not in all_mergedcfg:
        all_mergedcfg["build_configs"] = dict()
    if "expecteds" not in all_mergedcfg:
        all_mergedcfg["expecteds"] = dict()
    if "checks" not in all_mergedcfg:
        all_mergedcfg["checks"] = config.get("checks", dict())
    if "appdirs" not in all_mergedcfg:
        all_mergedcfg["appdirs"] = config.get("appdirs", [])
    if "appdirs_ignore" not in all_mergedcfg:
        all_mergedcfg["appdirs_ignore"] = config.get("appdirs_ignore", [])
    if "appconfig" not in all_mergedcfg:
        all_mergedcfg["appconfig"] = config.get("appconfig", dict())
    # TODO handle expecteds and expected
    if "expected" in config:
        for app in config["expected"]:
            if app not in all_mergedcfg["expecteds"]:
                all_mergedcfg["expecteds"][app] = dict()
            newcfgname = reldir.replace("\\", "/")
            all_mergedcfg["expecteds"][app][newcfgname] = config["expected"][app]
    if "expecteds" in config:
        for app in config["expecteds"]:
            if app not in all_mergedcfg["expecteds"]:
                all_mergedcfg["expecteds"][app] = dict()
            for cfgname in config["expecteds"][app]:
                newcfgname = os.path.join(reldir, cfgname).replace("\\", "/")
                all_mergedcfg["expecteds"][app][newcfgname] = config["expecteds"][app][cfgname]

    def merge_buildcfgs(dstcfg, srccfg, rel):
        if "build_configs" in srccfg:
            for bcfg in srccfg["build_configs"]:
                new_bcfg = os.path.join(rel, bcfg).replace("\\", "/")
                dstcfg["build_configs"][new_bcfg] = merge_two_config(srccfg.get("build_config", None), srccfg["build_configs"][bcfg])

    merge_buildcfgs(all_mergedcfg, config, reldir)
    all_mergedcfg["appdirs"] = update_list_items(all_mergedcfg.get("appdirs", []), config.get("appdirs", []))
    all_mergedcfg["appdirs_ignore"] = update_list_items(all_mergedcfg.get("appdirs_ignore", []), config.get("appdirs_ignore", []))
    appconfigs = config.get("appconfig", dict())
    for app in appconfigs:
        if app not in all_mergedcfg["appconfig"]:
            all_mergedcfg["appconfig"][app] = {"build_config": {}, "build_configs": {}, "checks": appconfigs[app].get("checks", dict())}
        merge_buildcfgs(all_mergedcfg["appconfig"][app], appconfigs[app], reldir)
    return

def merge_split_config_and_result(logdir):
    mergedcfg_files = find_files(logdir, "**/mergedcfg.json", True)
    all_mergedcfg = dict()
    all_result = dict()
    print("Start to merge config and result json files in %s" % (logdir))
    for mergedcfg_file in mergedcfg_files:
        configfile = mergedcfg_file
        resultdir = os.path.dirname(mergedcfg_file)
        reldir = os.path.relpath(resultdir, logdir)
        resultfile = os.path.join(resultdir, "result.json")
        if os.path.isfile(resultfile) == True:
            _, config = load_json(configfile)
            _, result = load_json(resultfile)
            if is_bench_result(result):
                for app in result:
                    for cfg in result[app]:
                        app_cfg = os.path.join(reldir, cfg).replace("\\", "/")
                        if app not in all_result:
                            all_result[app] = dict()
                        all_result[app][app_cfg] = result[app][cfg]
                merge_runconfig(all_mergedcfg, config, reldir)
            else:
                print("result json file %s is not generated by nsdk_bench.py" % (resultfile))
    return all_mergedcfg, all_result

def merge_all_config_and_result(logdir):
    mergedcfg_files = find_files(logdir, "**/mergedcfg.json", True)
    all_mergedcfg = dict()
    all_result = dict()
    print("Start to merge split config and result json files in %s" % (logdir))
    for mergedcfg_file in mergedcfg_files:
        configfile = mergedcfg_file
        resultdir = os.path.dirname(mergedcfg_file)
        resultfile = os.path.join(resultdir, "result.json")
        if os.path.isfile(resultfile) == True:
            _, config = load_json(configfile)
            _, result = load_json(resultfile)
            if is_bench_result(result):
                print("Merging config json file %s, result json file %s" % (configfile, resultfile))
                all_mergedcfg = merge_two_config(all_mergedcfg, config)
                all_result = merge_two_config(all_result, result)
            else:
                print("result json file %s is not generated by nsdk_bench.py" % (resultfile))
    return all_mergedcfg, all_result

def parse_result2dict(result):
    if not(isinstance(result, dict)):
        return None
    csvdict = dict()
    for app in result:
        appresult = result[app]
        for cfg in appresult:
            if cfg not in csvdict:
                csvdict[cfg] = dict()
            runsts = appresult[cfg]["status"].get("run", False)
            if runsts == False:
                continue
            apptype = appresult[cfg]["result"]["type"]
            appsubtype = appresult[cfg]["result"].get("subtype", "")
            appvalue = appresult[cfg]["result"].get("value", None)
            if appsubtype == "":
                appsubtype = "default"
            if apptype == "unknown":
                continue
            if not appvalue:
                continue
            if apptype not in csvdict[cfg]:
                csvdict[cfg][apptype] = dict()
            if appsubtype not in csvdict[cfg][apptype]:
                csvdict[cfg][apptype][appsubtype] = dict()
            csvdict[cfg][apptype][appsubtype]["meta"] = appresult[cfg].get("toolver", dict())
            if csvdict[cfg][apptype][appsubtype]["meta"] == None:
                csvdict[cfg][apptype][appsubtype]["meta"] = dict()
            csvdict[cfg][apptype][appsubtype]["meta"].update(appresult[cfg].get("flags", dict()))
            # record ci information
            ciinfo = get_ci_info()
            if ciinfo:
                csvdict[cfg][apptype][appsubtype]["meta"]["ci"] = ciinfo
            if "value" not in csvdict[cfg][apptype][appsubtype]:
                csvdict[cfg][apptype][appsubtype]["value"] = dict()
            csvdict[cfg][apptype][appsubtype]["value"].update(appvalue)
            csvdict[cfg][apptype][appsubtype]["size"] = appresult[cfg]["size"]
    return csvdict
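
# Layout produced above (illustrative summary of the code, not a formal schema):
#   csvdict[cfg][apptype][subtype] = { "meta": {...}, "value": {...}, "size": {...} }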

def show_failed_apps(logdir):
    rpt_failtxt = os.path.join(logdir, "app_failed.txt")
    if os.path.isfile(rpt_failtxt) == False:
        return
    with open(rpt_failtxt, "r") as rpt_ff:
        failed_lines = rpt_ff.readlines()
        if len(failed_lines) > 0:
            print("Here are the failed applications listed below")
            for line in failed_lines:
                print(line)
    return

def show_report_summary(summary, sfl):
    if not(isinstance(summary, dict)):
        return
    if len(summary) == 0:
        return
    def decsort(key):
        try:
            return float(key[0])
        except:
            return 0
    summary["buildtime"].sort(reverse=True, key=decsort)
    summary["runtime"].sort(reverse=True, key=decsort)
    summary["sztotal"].sort(reverse=True, key=decsort)
    summary["sztext"].sort(reverse=True, key=decsort)
    summary["szdata"].sort(reverse=True, key=decsort)
    summary["szbss"].sort(reverse=True, key=decsort)
    mostnum = min(10, summary["count"])
    with open(sfl, "w") as sflh:
        sflh.write("There are %d cases executed\r\n" % (summary["count"]))
        sflh.write("The %d cases with the longest build time are %s\r\n" % (mostnum, summary["buildtime"][:mostnum]))
        sflh.write("The %d cases with the longest run time are %s\r\n" % (mostnum, summary["runtime"][:mostnum]))
        sflh.write("The %d cases with the largest total program size are %s\r\n" % (mostnum, summary["sztotal"][:mostnum]))
        sflh.write("The %d cases with the largest text section size are %s\r\n" % (mostnum, summary["sztext"][:mostnum]))
        sflh.write("The %d cases with the largest data section size are %s\r\n" % (mostnum, summary["szdata"][:mostnum]))
        sflh.write("The %d cases with the largest bss section size are %s\r\n" % (mostnum, summary["szbss"][:mostnum]))
    print("\r\n=====Here is the report summary:=====")
    with open(sfl, "r") as sflh:
        for line in sflh.readlines():
            print(line)
    pass

def save_report_files(logdir, config, result, run=False):
    if os.path.isdir(logdir) == False:
        os.makedirs(logdir)
    rptfile = os.path.join(logdir, "report.md")
    rpthtml = os.path.join(logdir, "report.html")
    rptsumfile = os.path.join(logdir, "summary.txt")
    rptsum = generate_report(config, result, rptfile, rpthtml, logdir, run)
    csvfile = os.path.join(logdir, "result.csv")
    save_bench_csv(result, csvfile)
    print("Generate report csv file to %s" % (csvfile))
    print("Generate report markdown file to %s" % (rptfile))
    if run:
        csvdata = parse_result2dict(result)
        csvdatafile = os.path.join(logdir, "runresult.json")
        save_json(csvdatafile, csvdata)
        runresultexcel = os.path.join(logdir, "runresult.xlsx")
        save_runresult(csvdata, runresultexcel)
    show_report_summary(rptsum, rptsumfile)
    # show failed apps
    show_failed_apps(logdir)
    pass

# save excel and csv for selected csv table
def save_excel(csvtable, excelfile):
    if len(csvtable) == 0:
        print("No run result found, no need to generate excel!")
        return
    newcsvtable = {}
    csvsummary = []
    keylens = [ len(key) for key in csvtable.keys() ]
    keylens.sort()
    maxlen = keylens[-1]
    for cfg in csvtable:
        if len(csvtable[cfg]) > 1:
            csvsummary.append([cfg])
            csvsummary.extend(csvtable[cfg])
        if maxlen >= 31:
            # sheet names longer than 31 characters are shortened by dropping
            # the first '-' separated field (Excel limits sheet names to 31 chars)
            cfglist = cfg.split('-')
            newcfg = "-".join(cfglist[1:])
            newcsvtable[newcfg] = csvtable[cfg]
        else:
            newcsvtable[cfg] = csvtable[cfg]
    newcsvtable["summary"] = csvsummary
    pe.isave_book_as(bookdict=newcsvtable, dest_file_name=excelfile)
    print("Generate run result excel file to %s" % (excelfile))
    csvfile = excelfile + ".csv"
    with open(csvfile, "w") as cf:
        for row in csvsummary:
            cf.write("%s\n" % (",".join(str(e) for e in row)))
    print("Generate run result csv file to %s" % (csvfile))
    pass

def save_runresult(runresult, excelfile):
    if not(isinstance(runresult, dict)):
        return False
    # Get csv header for each header and column 1 & column 2
    csv_headers = dict()
    csv_col1 = dict()
    csv_col2 = dict()
    for cfg in runresult:
        splitcfgs = cfg.split('/')
        pretype = ""
        if len(splitcfgs) > 1:
            pretype = '-'.join(splitcfgs[:-1])
        runcfg = splitcfgs[-1]
        for apptype in runresult[cfg]:
            for subtype in runresult[cfg][apptype]:
                if pretype != "":
                    final_apptype = pretype + "_" + apptype
                else:
                    final_apptype = apptype
                if final_apptype not in csv_headers:
                    csv_headers[final_apptype] = ["RUNCONFIG", "SUBTYPE"]
                    csv_col1[final_apptype] = []
                    csv_col2[final_apptype] = []
                # fill header and col1 / col2
                if runcfg not in csv_headers[final_apptype]:
                    csv_headers[final_apptype].append(runcfg)
                rstvalues = runresult[cfg][apptype][subtype]["value"]
                for key in rstvalues:
                    if key not in csv_col1[final_apptype]:
                        csv_col1[final_apptype].append(key)
                        csv_col2[final_apptype].append(subtype)
    # Fill the csvtable with -
    csvtable = dict()
    for cfg in csv_headers:
        csvtable[cfg] = [csv_headers[cfg]]
        for i in range(0, len(csv_col1[cfg])):
            rowlist = [csv_col1[cfg][i], csv_col2[cfg][i]]
            for j in range(0, len(csv_headers[cfg]) - 2):
                rowlist.append('-')
            csvtable[cfg].append(rowlist)
    # Fill the csvtable with real value if key found
    for cfg in runresult:
        splitcfgs = cfg.split('/')
        pretype = ""
        if len(splitcfgs) > 1:
            pretype = '-'.join(splitcfgs[:-1])
        runcfg = splitcfgs[-1]
        for apptype in runresult[cfg]:
            for subtype in runresult[cfg][apptype]:
                if pretype != "":
                    final_apptype = pretype + "_" + apptype
                else:
                    final_apptype = apptype
                rstvalues = runresult[cfg][apptype][subtype]["value"]
                header = csvtable[final_apptype][0]
                index = header.index(runcfg)
                for key in rstvalues:
                    for i in range(0, len(csvtable[final_apptype])):
                        if key == csvtable[final_apptype][i][0]:
                            csvtable[final_apptype][i][index] = rstvalues[key]
                            break
    # Fill csvdict using csvtable
    csvdict = dict()
    for cfg in csvtable:
        csvdict[cfg] = dict()
        for csvlist in csvtable[cfg]:
            csvdict[cfg][csvlist[0]] = csvlist[1:]
    # Save to excel
    try:
        csvtable_jf = excelfile + ".csvtable.json"
        csvdict_jf = excelfile + ".csvdict.json"
        save_json(csvtable_jf, csvtable)
        save_json(csvdict_jf, csvdict)
        save_excel(csvtable, excelfile)
    except Exception as exc:
        print("Exception happened during excel saving, error %s" % (exc))
        return False
    return True

def generate_report_for_logs(logdir, run=False, split=False):
    if logdir and os.path.isdir(logdir):
        if split == False:
            all_mergedcfg, all_result = merge_all_config_and_result(logdir)
        else:
            all_mergedcfg, all_result = merge_split_config_and_result(logdir)
        if all_mergedcfg and all_result:
            config_file = os.path.join(logdir, "allcfg.json")
            result_file = os.path.join(logdir, "allrst.json")
            print("Save all merged config file to %s" % (config_file))
            print("Save all result file to %s" % (result_file))
            save_json(config_file, all_mergedcfg)
            save_json(result_file, all_result)
            save_report_files(logdir, all_mergedcfg, all_result, run)
        else:
            print("Can't find any valid reports in %s generated by nsdk_bench.py" % (logdir))
    pass

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Nuclei SDK Bench Report Generation Tool")
    parser.add_argument('--logdir', required=True, help="logs directory where the report json files are saved")
    parser.add_argument('--split', action='store_true', help="Split for different configurations")
    parser.add_argument('--run', action='store_true', help="If specified, it means this is a runner report")
    args = parser.parse_args()

    if os.path.isdir(args.logdir) == False:
        print("The log directory doesn't exist, please check!")
        sys.exit(1)

    generate_report_for_logs(args.logdir, args.run, args.split)
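
# Example invocations (the <logdir> path is a placeholder; a log layout
# produced by nsdk_bench.py is assumed):
#   python3 nsdk_report.py --logdir <logdir>
#   python3 nsdk_report.py --logdir <logdir> --run --split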