/*
 * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
/*----------------------------------------------------------------------------------------------------
 * Abstraction layer for PSRAM. PSRAM device related registers and MMU/Cache related code should be
 * abstracted to lower layers.
 *
 * When we add more types of external RAM memory, this can be made into a more intelligent dispatcher.
 *----------------------------------------------------------------------------------------------------*/
#include <sys/param.h>
#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_err.h"
#include "esp_log.h"
#include "freertos/FreeRTOS.h"
#include "freertos/xtensa_api.h"
#include "esp_heap_caps_init.h"
#include "hal/mmu_hal.h"
#include "hal/cache_ll.h"
#include "esp_private/esp_psram_io.h"
#include "esp_private/esp_psram_extram.h"
#include "esp_private/mmu_psram_flash.h"
#include "esp_psram_impl.h"
#include "esp_psram.h"
#include "esp_private/esp_mmu_map_private.h"
#include "esp_mmu_map.h"

#if CONFIG_IDF_TARGET_ESP32
#include "esp32/himem.h"
#include "esp32/rom/cache.h"
#include "esp_private/esp_cache_esp32_private.h"
#endif

#if CONFIG_IDF_TARGET_ESP32
#if CONFIG_FREERTOS_UNICORE
#define PSRAM_MODE    PSRAM_VADDR_MODE_NORMAL
#else
#define PSRAM_MODE    PSRAM_VADDR_MODE_LOWHIGH
#endif
#else
#define PSRAM_MODE    PSRAM_VADDR_MODE_NORMAL
#endif
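
/*
 * A short gloss on the virtual address modes selected above (summarised from the
 * psram_vaddr_mode_t documentation, added here only as a reading aid):
 * - PSRAM_VADDR_MODE_NORMAL:  each CPU accesses external RAM through its own cache.
 * - PSRAM_VADDR_MODE_LOWHIGH: both CPUs share the external RAM caches, the PRO CPU covering the
 *   lower half of the range and the APP CPU the upper half ("low/high (2-core)" in the log below).
 * - PSRAM_VADDR_MODE_EVENODD: both CPUs share the caches, interleaved on even/odd 32-byte ranges.
 */
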
/**
 * Two types of PSRAM memory regions for now:
 * - 8bit aligned
 * - 32bit aligned
 */
#define PSRAM_MEM_TYPE_NUM          2
#define PSRAM_MEM_8BIT_ALIGNED      0
#define PSRAM_MEM_32BIT_ALIGNED     1

#if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY
extern uint8_t _ext_ram_bss_start;
extern uint8_t _ext_ram_bss_end;
#endif  //#if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY

#if CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY
extern uint8_t _ext_ram_noinit_start;
extern uint8_t _ext_ram_noinit_end;
#endif  //#if CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY

typedef struct {
    intptr_t vaddr_start;
    intptr_t vaddr_end;
    size_t size;    //in bytes
} psram_mem_t;
typedef struct {
    bool is_initialised;
    /**
     * @note 1
     * As we can't use the heap allocator during this stage, we need to statically declare these regions.
     * Luckily only S2 has two different types of memory regions:
     * - byte-aligned memory
     * - word-aligned memory
     * On the other hand, the number of region types usually won't be very big.
     *
     * On other chips, only one region is needed.
     * So for chips other than S2, the size of `regions_to_heap[1]` and `mapped_regions[1]` will always be zero.
     *
     * If in the future this gets worse (DBUS memory isn't consecutive), we need to delegate this context
     * to chip-specific files, and only keep a (void *) pointer here pointing to those chip-specific contexts.
     */
    psram_mem_t regions_to_heap[PSRAM_MEM_TYPE_NUM];    //memory regions that are available to be added to the heap allocator
    psram_mem_t mapped_regions[PSRAM_MEM_TYPE_NUM];     //mapped memory regions
} psram_ctx_t;
static psram_ctx_t s_psram_ctx;
static const char* TAG = "esp_psram";

#if CONFIG_IDF_TARGET_ESP32
//If no function in esp_himem.c is used, this function will be linked into the
//binary instead of the one in esp_himem.c, automatically making sure no memory
//is reserved if no himem function is used.
size_t __attribute__((weak)) esp_himem_reserved_area_size(void) {
    return 0;
}
static void IRAM_ATTR s_mapping(int v_start, int size)
{
    //Enable external RAM in MMU
    //(ROM cache_sram_mmu_set arguments, to our understanding: cpu_no, pid, vaddr, paddr, page size in KB, number of pages)
    cache_sram_mmu_set(0, 0, v_start, 0, 32, (size / 1024 / 32));
    //Flush and enable icache for APP CPU
#if !CONFIG_FREERTOS_UNICORE
    DPORT_CLEAR_PERI_REG_MASK(DPORT_APP_CACHE_CTRL1_REG, DPORT_APP_CACHE_MASK_DRAM1);
    cache_sram_mmu_set(1, 0, v_start, 0, 32, (size / 1024 / 32));
#endif
}
#endif  //CONFIG_IDF_TARGET_ESP32
esp_err_t esp_psram_init(void)
{
    if (s_psram_ctx.is_initialised) {
        return ESP_ERR_INVALID_STATE;
    }

    esp_err_t ret = ESP_FAIL;
    ret = esp_psram_impl_enable(PSRAM_MODE);
    if (ret != ESP_OK) {
#if CONFIG_SPIRAM_IGNORE_NOTFOUND
        ESP_EARLY_LOGE(TAG, "PSRAM enabled but initialization failed. Bailing out.");
#endif
        return ret;
    }
    s_psram_ctx.is_initialised = true;

    uint32_t psram_physical_size = 0;
    ret = esp_psram_impl_get_physical_size(&psram_physical_size);
    assert(ret == ESP_OK);

    ESP_EARLY_LOGI(TAG, "Found %dMB PSRAM device", psram_physical_size / (1024 * 1024));
    ESP_EARLY_LOGI(TAG, "Speed: %dMHz", CONFIG_SPIRAM_SPEED);
#if CONFIG_IDF_TARGET_ESP32
    ESP_EARLY_LOGI(TAG, "PSRAM initialized, cache is in %s mode.", \
                   (PSRAM_MODE == PSRAM_VADDR_MODE_EVENODD) ? "even/odd (2-core)" : \
                   (PSRAM_MODE == PSRAM_VADDR_MODE_LOWHIGH) ? "low/high (2-core)" : \
                   (PSRAM_MODE == PSRAM_VADDR_MODE_NORMAL) ? "normal (1-core)" : "ERROR");
#endif

    uint32_t psram_available_size = 0;
    ret = esp_psram_impl_get_available_size(&psram_available_size);
    assert(ret == ESP_OK);

    __attribute__((unused)) uint32_t total_available_size = psram_available_size;
    /**
     * `start_page` is the PSRAM physical address expressed in MMU pages.
     * The MMU page size on ESP32-S2 is 64KB,
     * e.g.: PSRAM physical address 16 is in page 0.
     *
     * Here we plan to copy the flash instructions to PSRAM physical address 0, which is page 0.
     */
    __attribute__((unused)) uint32_t start_page = 0;
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS || CONFIG_SPIRAM_RODATA
    uint32_t used_page = 0;
#endif
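
    /*
     * A worked example of the page bookkeeping below (illustrative numbers, assuming the 64 KB
     * MMU page size mentioned above): copying 300 KB of flash .text needs 5 pages (rounded up),
     * so `used_page` becomes 5, `start_page` advances from 0 to 5, and `psram_available_size`
     * shrinks by MMU_PAGE_TO_BYTES(5) == 320 KB.
     */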
    //------------------------------------Copy Flash .text to PSRAM-------------------------------------//
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
    ret = mmu_config_psram_text_segment(start_page, total_available_size, &used_page);
    if (ret != ESP_OK) {
        ESP_EARLY_LOGE(TAG, "Not enough PSRAM memory for instructions!");
        abort();
    }
    start_page += used_page;
    psram_available_size -= MMU_PAGE_TO_BYTES(used_page);
    ESP_EARLY_LOGV(TAG, "after copying .text, used_page is %d, start_page is %d, psram_available_size is %d B", used_page, start_page, psram_available_size);
#endif  //#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS

    //------------------------------------Copy Flash .rodata to PSRAM-------------------------------------//
#if CONFIG_SPIRAM_RODATA
    ret = mmu_config_psram_rodata_segment(start_page, total_available_size, &used_page);
    if (ret != ESP_OK) {
        ESP_EARLY_LOGE(TAG, "Not enough PSRAM memory for rodata!");
        abort();
    }
    start_page += used_page;
    psram_available_size -= MMU_PAGE_TO_BYTES(used_page);
    ESP_EARLY_LOGV(TAG, "after copying .rodata, used_page is %d, start_page is %d, psram_available_size is %d B", used_page, start_page, psram_available_size);
#endif  //#if CONFIG_SPIRAM_RODATA
    //----------------------------------Map the PSRAM physical range to MMU-----------------------------//
    /**
     * @note 2
     * Similar to @note 1, we expect the HW DBUS memory to be consecutive.
     *
     * If the situation gets worse in the future (the memory region isn't consecutive), we need to move this logic into chip-specific files.
     */
    size_t total_mapped_size = 0;
    size_t size_to_map = 0;
    size_t byte_aligned_size = 0;
#if CONFIG_IDF_TARGET_ESP32P4
    //TODO: IDF-7495
    ret = esp_mmu_map_get_max_consecutive_free_block_size(MMU_MEM_CAP_PSRAM, MMU_TARGET_PSRAM0, &byte_aligned_size);
#else
    ret = esp_mmu_map_get_max_consecutive_free_block_size(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &byte_aligned_size);
#endif
    assert(ret == ESP_OK);
    size_to_map = MIN(byte_aligned_size, psram_available_size);

    const void *v_start_8bit_aligned = NULL;
#if CONFIG_IDF_TARGET_ESP32P4
    //TODO: IDF-7495
    ret = esp_mmu_map_reserve_block_with_caps(size_to_map, MMU_MEM_CAP_PSRAM, MMU_TARGET_PSRAM0, &v_start_8bit_aligned);
#else
    ret = esp_mmu_map_reserve_block_with_caps(size_to_map, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &v_start_8bit_aligned);
#endif
    assert(ret == ESP_OK);

#if CONFIG_IDF_TARGET_ESP32
    s_mapping((int)v_start_8bit_aligned, size_to_map);
#else
    uint32_t actual_mapped_len = 0;
#if CONFIG_IDF_TARGET_ESP32P4
    //TODO: IDF-7495
    mmu_hal_map_region(1, MMU_TARGET_PSRAM0, (intptr_t)v_start_8bit_aligned, MMU_PAGE_TO_BYTES(start_page), size_to_map, &actual_mapped_len);
#else
    mmu_hal_map_region(0, MMU_TARGET_PSRAM0, (intptr_t)v_start_8bit_aligned, MMU_PAGE_TO_BYTES(start_page), size_to_map, &actual_mapped_len);
#endif
    start_page += BYTES_TO_MMU_PAGE(actual_mapped_len);
    ESP_EARLY_LOGV(TAG, "8bit-aligned-region: actual_mapped_len is 0x%x bytes", actual_mapped_len);

    cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)v_start_8bit_aligned, actual_mapped_len);
    cache_ll_l1_enable_bus(0, bus_mask);
#if !CONFIG_FREERTOS_UNICORE
    bus_mask = cache_ll_l1_get_bus(1, (uint32_t)v_start_8bit_aligned, actual_mapped_len);
    cache_ll_l1_enable_bus(1, bus_mask);
#endif
#endif  //#if CONFIG_IDF_TARGET_ESP32

    s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].size = size_to_map;
    s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_start = (intptr_t)v_start_8bit_aligned;
    s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_end = (intptr_t)v_start_8bit_aligned + size_to_map;
    s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size = size_to_map;
    s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].vaddr_start = (intptr_t)v_start_8bit_aligned;
    s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].vaddr_end = (intptr_t)v_start_8bit_aligned + size_to_map;
    ESP_EARLY_LOGV(TAG, "8bit-aligned-range: 0x%x B, starting from: 0x%x", s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].size, v_start_8bit_aligned);
    total_mapped_size += size_to_map;

#if CONFIG_IDF_TARGET_ESP32S2
    /**
     * On ESP32S2, there are 2 types of DBUS memory:
     * - byte-aligned-memory
     * - word-aligned-memory
     *
     * If byte-aligned-memory isn't enough, we search for word-aligned-memory to do the mapping
     */
    if (total_mapped_size < psram_available_size) {
        size_to_map = psram_available_size - total_mapped_size;

        size_t word_aligned_size = 0;
        ret = esp_mmu_map_get_max_consecutive_free_block_size(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &word_aligned_size);
        assert(ret == ESP_OK);
        size_to_map = MIN(word_aligned_size, size_to_map);

        const void *v_start_32bit_aligned = NULL;
        ret = esp_mmu_map_reserve_block_with_caps(size_to_map, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &v_start_32bit_aligned);
        assert(ret == ESP_OK);

        mmu_hal_map_region(0, MMU_TARGET_PSRAM0, (intptr_t)v_start_32bit_aligned, MMU_PAGE_TO_BYTES(start_page), size_to_map, &actual_mapped_len);
        ESP_EARLY_LOGV(TAG, "32bit-aligned-region: actual_mapped_len is 0x%x bytes", actual_mapped_len);

        cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)v_start_32bit_aligned, actual_mapped_len);
        cache_ll_l1_enable_bus(0, bus_mask);

        s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].size = size_to_map;
        s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_start = (intptr_t)v_start_32bit_aligned;
        s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_end = (intptr_t)v_start_32bit_aligned + size_to_map;
        s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].size = size_to_map;
        s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].vaddr_start = (intptr_t)v_start_32bit_aligned;
        s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].vaddr_end = (intptr_t)v_start_32bit_aligned + size_to_map;
        ESP_EARLY_LOGV(TAG, "32bit-aligned-range: 0x%x B, starting from: 0x%x", s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].size, v_start_32bit_aligned);
        total_mapped_size += size_to_map;
    }
#endif  // #if CONFIG_IDF_TARGET_ESP32S2

    if (total_mapped_size < psram_available_size) {
        ESP_EARLY_LOGW(TAG, "Virtual address range not enough for PSRAM, mapping as much as we can. %dMB is mapped", total_mapped_size / 1024 / 1024);
    }
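
    /*
     * Example of when the warning above fires (illustrative numbers): an 8 MB PSRAM module on a
     * chip whose external-RAM virtual address window is only 4 MB ends up with total_mapped_size
     * of 4 MB here, and only that portion is usable from this point on.
     */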
    /*------------------------------------------------------------------------------
     * After mapping, we DON'T care about the PSRAM PHYSICAL ADDRESS ANYMORE!
     *----------------------------------------------------------------------------*/

    //------------------------------------Configure .bss in PSRAM-------------------------------------//
#if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY
    //should never be a negative number
    uint32_t ext_bss_size = ((intptr_t)&_ext_ram_bss_end - (intptr_t)&_ext_ram_bss_start);
    ESP_EARLY_LOGV(TAG, "ext_bss_size is %d", ext_bss_size);
    s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].vaddr_start += ext_bss_size;
    s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size -= ext_bss_size;
#endif  //#if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY

#if CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY
    uint32_t ext_noinit_size = ((intptr_t)&_ext_ram_noinit_end - (intptr_t)&_ext_ram_noinit_start);
    ESP_EARLY_LOGV(TAG, "ext_noinit_size is %d", ext_noinit_size);
    s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].vaddr_start += ext_noinit_size;
    s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size -= ext_noinit_size;
#endif

#if CONFIG_IDF_TARGET_ESP32
    s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size -= esp_himem_reserved_area_size() - 1;
#endif

    //will be removed, TODO: IDF-6944
#if CONFIG_IDF_TARGET_ESP32
    cache_driver_t drv = {
        NULL,
        esp_psram_extram_writeback_cache,
    };
    cache_register_writeback(&drv);
#endif

    return ESP_OK;
}
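
/*
 * Typical call order during startup (a sketch, not code from this file): when PSRAM is enabled
 * in the project configuration, the startup code first calls esp_psram_init() and then, once
 * the heap component is ready, esp_psram_extram_add_to_heap_allocator():
 *
 *     ESP_ERROR_CHECK(esp_psram_init());
 *     ESP_ERROR_CHECK(esp_psram_extram_add_to_heap_allocator());
 */
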
esp_err_t esp_psram_extram_add_to_heap_allocator(void)
{
    esp_err_t ret = ESP_FAIL;
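
    /*
     * Note: heap_caps_add_region_with_caps() takes an array of capability masks, one entry
     * per capability priority tier; the three-entry arrays below follow that layout.
     */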
    uint32_t byte_aligned_caps[] = {MALLOC_CAP_SPIRAM | MALLOC_CAP_DEFAULT, 0, MALLOC_CAP_8BIT | MALLOC_CAP_32BIT};
    ret = heap_caps_add_region_with_caps(byte_aligned_caps,
                                         s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].vaddr_start,
                                         s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].vaddr_end);
    if (ret != ESP_OK) {
        return ret;
    }

    if (s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].size) {
        assert(s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].vaddr_start);
        uint32_t word_aligned_caps[] = {MALLOC_CAP_SPIRAM | MALLOC_CAP_DEFAULT, 0, MALLOC_CAP_32BIT};
        ret = heap_caps_add_region_with_caps(word_aligned_caps,
                                             s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].vaddr_start,
                                             s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].vaddr_end);
        if (ret != ESP_OK) {
            return ret;
        }
    }

    ESP_EARLY_LOGI(TAG, "Adding pool of %dK of PSRAM memory to heap allocator",
                   (s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size + s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].size) / 1024);

    return ESP_OK;
}
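
/*
 * Once the regions above are registered, application code can allocate from PSRAM explicitly
 * (illustrative snippet, not part of this file):
 *
 *     uint8_t *buf = heap_caps_malloc(100 * 1024, MALLOC_CAP_SPIRAM);
 *     if (buf == NULL) {
 *         // not enough external RAM available
 *     }
 */
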
bool IRAM_ATTR esp_psram_check_ptr_addr(const void *p)
{
    if (!s_psram_ctx.is_initialised) {
        return false;
    }

    return ((intptr_t)p >= s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_start && (intptr_t)p < s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_end) ||
           ((intptr_t)p >= s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_start && (intptr_t)p < s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_end);
}
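
/*
 * Example use of the check above (illustrative): code that needs internal, DMA-capable buffers
 * can detect a caller-supplied pointer that lives in external RAM and copy or reject it:
 *
 *     if (esp_psram_check_ptr_addr(buf)) {
 *         // buf is in PSRAM: copy it to internal RAM before starting DMA, or return an error
 *     }
 */
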
esp_err_t esp_psram_extram_reserve_dma_pool(size_t size)
{
    if (size == 0) {
        return ESP_OK;    //no-op
    }

    ESP_EARLY_LOGI(TAG, "Reserving pool of %dK of internal memory for DMA/internal allocations", size / 1024);
    /* Pool may be allocated in multiple non-contiguous chunks, depending on available RAM */
    while (size > 0) {
        size_t next_size = heap_caps_get_largest_free_block(MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
        next_size = MIN(next_size, size);

        ESP_EARLY_LOGD(TAG, "Allocating block of size %d bytes", next_size);
        uint8_t *dma_heap = heap_caps_malloc(next_size, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
        if (!dma_heap || next_size == 0) {
            return ESP_ERR_NO_MEM;
        }

        uint32_t caps[] = {0, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL, MALLOC_CAP_8BIT | MALLOC_CAP_32BIT};
        esp_err_t e = heap_caps_add_region_with_caps(caps, (intptr_t)dma_heap, (intptr_t)dma_heap + next_size - 1);
        if (e != ESP_OK) {
            return e;
        }
        size -= next_size;
    }
    return ESP_OK;
}
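
/*
 * This pool is typically reserved from startup code (to our understanding, when
 * CONFIG_SPIRAM_MALLOC_RESERVE_INTERNAL is set to a non-zero size) so that later DMA-capable
 * and internal-only allocations cannot be starved by malloc() traffic that spills into PSRAM.
 */
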
bool IRAM_ATTR __attribute__((pure)) esp_psram_is_initialized(void)
{
    return s_psram_ctx.is_initialised;
}

size_t esp_psram_get_size(void)
{
    uint32_t available_size = 0;
    esp_err_t ret = esp_psram_impl_get_available_size(&available_size);
    if (ret != ESP_OK) {
        //This means PSRAM isn't initialised. To keep backward compatibility, set the size to 0.
        available_size = 0;
    }
    return (size_t)available_size;
}
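
/*
 * Illustrative use of the getter above (not code from this file):
 *
 *     size_t psram_bytes = esp_psram_get_size();
 *     ESP_LOGI("app", "PSRAM available: %u bytes", (unsigned) psram_bytes);
 */
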
uint8_t esp_psram_io_get_cs_io(void)
{
    return esp_psram_impl_get_cs_io();
}

/*
 Simple RAM test. Writes a word every 32 bytes. Takes about a second to complete for 4MiB. Returns
 true when RAM seems OK, false when test fails. WARNING: Do not run this before the 2nd CPU has been
 initialized (in a two-core system) or after the heap allocator has taken ownership of the memory.
*/
static bool s_test_psram(intptr_t v_start, size_t size, intptr_t reserved_start, intptr_t reserved_end)
{
    volatile int *spiram = (volatile int *)v_start;

    size_t p;
    int errct = 0;
    int initial_err = -1;

    for (p = 0; p < (size / sizeof(int)); p += 8) {
        intptr_t addr = (intptr_t)&spiram[p];
        if ((reserved_start <= addr) && (addr < reserved_end)) {
            continue;
        }
        spiram[p] = p ^ 0xAAAAAAAA;
    }
    for (p = 0; p < (size / sizeof(int)); p += 8) {
        intptr_t addr = (intptr_t)&spiram[p];
        if ((reserved_start <= addr) && (addr < reserved_end)) {
            continue;
        }
        if (spiram[p] != (p ^ 0xAAAAAAAA)) {
            errct++;
            if (errct == 1) {
                initial_err = p * 4;
            }
        }
    }
    if (errct) {
        ESP_EARLY_LOGE(TAG, "SPI SRAM memory test fail. %d/%d writes failed, first @ %X\n", errct, size / 32, initial_err + v_start);
        return false;
    } else {
        ESP_EARLY_LOGI(TAG, "SPI SRAM memory test OK");
        return true;
    }
}
bool esp_psram_extram_test(void)
{
    bool test_success = false;

#if CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY
    intptr_t noinit_vstart = (intptr_t)&_ext_ram_noinit_start;
    intptr_t noinit_vend = (intptr_t)&_ext_ram_noinit_end;
#else
    intptr_t noinit_vstart = 0;
    intptr_t noinit_vend = 0;
#endif

    test_success = s_test_psram(s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_start,
                                s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].size,
                                noinit_vstart,
                                noinit_vend);
    if (!test_success) {
        return false;
    }

    if (s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].size) {
        test_success = s_test_psram(s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_start,
                                    s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].size,
                                    0,
                                    0);
    }
    if (!test_success) {
        return false;
    }

    return true;
}
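
/*
 * The self-test above is normally invoked from startup code when the PSRAM memory test option
 * (to our understanding, CONFIG_SPIRAM_MEMTEST) is enabled: after esp_psram_init() has mapped
 * the regions, but before they are handed to the heap allocator, e.g.:
 *
 *     assert(esp_psram_extram_test());
 */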