Convolutions.py

import os.path
import numpy as np
import itertools
import Tools
import statsmodels.tsa.stattools

# Those patterns are used for tests and benchmarks.
# For tests, there is the need to add tests for saturation

def cartesian(*somelists):
    r = []
    for element in itertools.product(*somelists):
        r.append(element)
    return r
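# Note: cartesian is a small helper wrapping itertools.product and is not called
# below in this script. Illustrative example only:
#   cartesian([1, 2], ['a', 'b']) == [(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b')]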
def autocorr(x):
    result = np.correlate(x, x, mode='full')
    return result[result.size//2:]
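# Note: np.correlate(x, x, mode='full') returns 2*len(x)-1 samples with lag 0 at the
# centre, so the slice above keeps only the non-negative lags, i.e. an unnormalized
# one-sided autocorrelation. Illustrative check (not executed here):
#   autocorr(np.ones(4))   # -> array([4., 3., 2., 1.]), lags 0..3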
def writeTests(config, format):
    config.setOverwrite(False)
    NBSAMPLES = 128

    inputsA = np.random.randn(NBSAMPLES)
    inputsB = np.random.randn(NBSAMPLES)

    inputsA = Tools.normalize(inputsA)
    inputsB = Tools.normalize(inputsB)

    if format == 31:
        # To avoid overflow. There is no saturation in CMSIS code for Q31 conv/corr
        inputsA = inputsA / 16
        inputsB = inputsB / 16

    config.writeInput(1, inputsA, "InputsA")
    config.writeInput(1, inputsB, "InputsB")

    if format == 15:
        nbs = [(14, 15), (14, 16), (14, 17), (14, 18), (14, 33), (15, 15),
               (15, 16), (15, 17), (15, 18), (15, 33), (16, 15), (16, 16),
               (16, 17), (16, 18), (16, 33), (17, 15), (17, 16), (17, 17),
               (17, 18), (17, 33), (32, 15), (32, 16), (32, 17), (32, 18), (32, 33)]
    elif format == 7:
        nbs = [(30, 31), (30, 32), (30, 33), (30, 34), (30, 49), (31, 31),
               (31, 32), (31, 33), (31, 34), (31, 49), (32, 31), (32, 32),
               (32, 33), (32, 34), (32, 49), (33, 31), (33, 32), (33, 33), (33, 34),
               (33, 49), (48, 31), (48, 32), (48, 33), (48, 34), (48, 49)]
    else:
        nbs = [(4, 1), (4, 2), (4, 3), (4, 8), (4, 11), (5, 1), (5, 2), (5, 3),
               (5, 8), (5, 11), (6, 1), (6, 2), (6, 3), (6, 8), (6, 11), (9, 1),
               (9, 2), (9, 3), (9, 8), (9, 11), (10, 1), (10, 2), (10, 3), (10, 8),
               (10, 11), (11, 1), (11, 2), (11, 3), (11, 8), (11, 11), (12, 1),
               (12, 2), (12, 3), (12, 8), (12, 11), (13, 1), (13, 2), (13, 3),
               (13, 8), (13, 11)]

    nbTest = 1
    for (na, nb) in nbs:
        #print(na,nb)
        ref = np.correlate(inputsA[0:na], inputsB[0:nb], "full")
        if na > nb:
            padding = na - nb
            z = np.zeros(padding)
            ref = np.concatenate((z, ref))
        else:
            padding = nb - na
            z = np.zeros(padding)
            ref = np.concatenate((ref, z))
        config.writeReference(nbTest, ref)
        nbTest = nbTest + 1
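    # Note on the zero padding above: np.correlate(a, b, "full") produces
    # na + nb - 1 samples, and the |na - nb| added zeros extend each reference to
    # 2 * max(na, nb) - 1 samples. This presumably matches the output length
    # convention of the CMSIS-DSP arm_correlate_* functions, which pad the
    # result with zeros on the side of the shorter sequence.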
    for (na, nb) in nbs:
        #print(na,nb)
        ref = np.convolve(inputsA[0:na], inputsB[0:nb], "full")
        config.writeReference(nbTest, ref)
        nbTest = nbTest + 1

    # Levinson durbin tests
    a = [Tools.loopnb(format, Tools.TAILONLY),
         Tools.loopnb(format, Tools.BODYONLY),
         Tools.loopnb(format, Tools.BODYANDTAIL),
         ]
    a = list(np.unique(np.array(a)))
    #a = [3]

    # Errors of each levinson durbin test
    err = []
    errTestID = nbTest
    for na in a:
        s = np.random.randn(na + 1)
        s = Tools.normalize(s)
        phi = autocorr(s)
        phi = Tools.normalize(phi)
        config.writeInput(nbTest, phi, "InputPhi")
        sigmav, arcoef, pacf, sigma, phi = statsmodels.tsa.stattools.levinson_durbin(phi, nlags=na, isacov=True)
        err.append(sigmav)
        config.writeReference(nbTest, arcoef)
        nbTest = nbTest + 1
    config.writeReference(errTestID, err, "LDErrors")
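    # Note: for each test size na, the input pattern is the normalized autocorrelation
    # phi of a random signal. statsmodels' levinson_durbin(phi, nlags=na, isacov=True)
    # returns (sigma_v, arcoefs, pacf, sigma, phi); the AR coefficients arcoefs are
    # written as the per-test reference, and the prediction error variances sigma_v
    # are collected into one "LDErrors" reference written under errTestID, the
    # reference ID reserved before the loop.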
    # Partial convolutions
    config.setOverwrite(True)

    inputsA = np.random.randn(NBSAMPLES)
    inputsB = np.random.randn(NBSAMPLES)

    inputsA = Tools.normalize(inputsA)
    inputsB = Tools.normalize(inputsB)

    config.writeInput(2, inputsA, "InputsA")
    config.writeInput(2, inputsB, "InputsB")

    (na, nb) = (6, 8)
    # First = 3
    numPoints = 4
    ref = np.convolve(inputsA[0:na], inputsB[0:nb], "full")

    first = 3
    config.writeReference(nbTest, ref[first:first + numPoints])
    nbTest = nbTest + 1

    first = 9
    config.writeReference(nbTest, ref[first:first + numPoints])
    nbTest = nbTest + 1

    first = 7
    config.writeReference(nbTest, ref[first:first + numPoints])
    nbTest = nbTest + 1
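    # Note: the full 6 x 8 convolution has na + nb - 1 = 13 samples; the three
    # references above are the slices ref[3:7], ref[9:13] and ref[7:11]. These
    # presumably correspond to the firstIndex/numPoints arguments of the CMSIS-DSP
    # arm_conv_partial_* functions, which compute only that range of the full
    # convolution output.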
def generatePatterns():
    PATTERNDIR = os.path.join("Patterns", "DSP", "Filtering", "MISC", "MISC")
    PARAMDIR = os.path.join("Parameters", "DSP", "Filtering", "MISC", "MISC")

    configf64 = Tools.Config(PATTERNDIR, PARAMDIR, "f64")
    configf32 = Tools.Config(PATTERNDIR, PARAMDIR, "f32")
    configf16 = Tools.Config(PATTERNDIR, PARAMDIR, "f16")
    configq31 = Tools.Config(PATTERNDIR, PARAMDIR, "q31")
    configq15 = Tools.Config(PATTERNDIR, PARAMDIR, "q15")
    configq7 = Tools.Config(PATTERNDIR, PARAMDIR, "q7")

    configf32.setOverwrite(False)
    configf16.setOverwrite(False)
    configq31.setOverwrite(False)
    configq15.setOverwrite(False)
    configq7.setOverwrite(False)
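    # Note: the second argument of writeTests is the format selector used above to
    # pick the test sizes and the Q31 scaling: 31 -> Q31, 15 -> Q15, 7 -> Q7, while
    # Tools.F64, 0 and 16 are passed for the f64, f32 and f16 patterns.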
    writeTests(configf64, Tools.F64)
    writeTests(configf32, 0)
    writeTests(configf16, 16)
    writeTests(configq31, 31)
    writeTests(configq15, 15)
    writeTests(configq7, 7)

if __name__ == '__main__':
    generatePatterns()