nvs_page.cpp

/*
 * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include "nvs_page.hpp"
#include <esp_rom_crc.h>
#include <cstdio>
#include <cstring>

namespace nvs
{

Page::Page() : mPartition(nullptr) { }

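// The header CRC intentionally starts at mSeqNumber and stops just before mCrc32, so the
// mState word at the start of the header is not covered. This lets the page state be
// rewritten in place later (see alterPageState()) without invalidating the stored CRC.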
uint32_t Page::Header::calculateCrc32()
{
    return esp_rom_crc32_le(0xffffffff,
                            reinterpret_cast<uint8_t*>(this) + offsetof(Header, mSeqNumber),
                            offsetof(Header, mCrc32) - offsetof(Header, mSeqNumber));
}

esp_err_t Page::load(Partition *partition, uint32_t sectorNumber)
{
    if (partition == nullptr) {
        return ESP_ERR_INVALID_ARG;
    }
    mPartition = partition;
    mBaseAddress = sectorNumber * SEC_SIZE;
    mUsedEntryCount = 0;
    mErasedEntryCount = 0;

    Header header;
    auto rc = mPartition->read_raw(mBaseAddress, &header, sizeof(header));
    if (rc != ESP_OK) {
        mState = PageState::INVALID;
        return rc;
    }
    if (header.mState == PageState::UNINITIALIZED) {
        mState = header.mState;
        // check if the whole page is really empty
        // reading the whole page takes ~40 times less time than erasing it
        const int BLOCK_SIZE = 128;
        uint32_t* block = new (std::nothrow) uint32_t[BLOCK_SIZE];
        if (!block) {
            return ESP_ERR_NO_MEM;
        }
        for (uint32_t i = 0; i < SPI_FLASH_SEC_SIZE; i += 4 * BLOCK_SIZE) {
            rc = mPartition->read_raw(mBaseAddress + i, block, 4 * BLOCK_SIZE);
            if (rc != ESP_OK) {
                mState = PageState::INVALID;
                delete[] block;
                return rc;
            }
            if (std::any_of(block, block + BLOCK_SIZE, [](uint32_t val) -> bool { return val != 0xffffffff; })) {
                // page isn't as empty after all, mark it as corrupted
                mState = PageState::CORRUPT;
                break;
            }
        }
        delete[] block;
    } else if (header.mCrc32 != header.calculateCrc32()) {
        header.mState = PageState::CORRUPT;
    } else {
        mState = header.mState;
        mSeqNumber = header.mSeqNumber;
        if (header.mVersion < NVS_VERSION) {
            return ESP_ERR_NVS_NEW_VERSION_FOUND;
        } else {
            mVersion = header.mVersion;
        }
    }

    switch (mState) {
    case PageState::UNINITIALIZED:
        break;

    case PageState::FULL:
    case PageState::ACTIVE:
    case PageState::FREEING:
        mLoadEntryTable();
        break;

    default:
        mState = PageState::CORRUPT;
        break;
    }

    return ESP_OK;
}

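// Writes a single entry to flash and then flips its slot in the entry state table to
// WRITTEN. The ordering matters: if power is lost between the two flash writes, the
// half-written entry is detected and discarded by mLoadEntryTable() on the next load.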
esp_err_t Page::writeEntry(const Item& item)
{
    esp_err_t err;

    err = mPartition->write(getEntryAddress(mNextFreeEntry), &item, sizeof(item));
    if (err != ESP_OK) {
        mState = PageState::INVALID;
        return err;
    }

    err = alterEntryState(mNextFreeEntry, EntryState::WRITTEN);
    if (err != ESP_OK) {
        return err;
    }

    if (mFirstUsedEntry == INVALID_ENTRY) {
        mFirstUsedEntry = mNextFreeEntry;
    }

    ++mUsedEntryCount;
    ++mNextFreeEntry;

    return ESP_OK;
}

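// Writes `size` bytes of variable-length payload as consecutive data entries in a single
// flash operation, then marks the whole range WRITTEN. `size` must already be padded to a
// multiple of ENTRY_SIZE by the caller (writeItem()).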
esp_err_t Page::writeEntryData(const uint8_t* data, size_t size)
{
    assert(size % ENTRY_SIZE == 0);
    assert(mNextFreeEntry != INVALID_ENTRY);
    assert(mFirstUsedEntry != INVALID_ENTRY);
    const uint16_t count = size / ENTRY_SIZE;

    const uint8_t* buf = data;

#if !defined LINUX_TARGET
    // TODO: check whether still necessary with esp_partition* API
    /* On the ESP32, data can come from DROM, which is not accessible by spi_flash_write
     * function. To work around this, we copy the data to heap if it came from DROM.
     * Hopefully this won't happen very often in practice. For data from DRAM, we should
     * still be able to write it to flash directly.
     * TODO: figure out how to make this platform-specific check nicer (probably by introducing
     * a platform-specific flash layer).
     */
    if ((uint32_t) data < 0x3ff00000) {
        buf = (uint8_t*) malloc(size);
        if (!buf) {
            return ESP_ERR_NO_MEM;
        }
        memcpy((void*)buf, data, size);
    }
#endif // ! LINUX_TARGET

    auto rc = mPartition->write(getEntryAddress(mNextFreeEntry), buf, size);

#if !defined LINUX_TARGET
    if (buf != data) {
        free((void*)buf);
    }
#endif // ! LINUX_TARGET

    if (rc != ESP_OK) {
        mState = PageState::INVALID;
        return rc;
    }

    auto err = alterEntryRangeState(mNextFreeEntry, mNextFreeEntry + count, EntryState::WRITTEN);
    if (err != ESP_OK) {
        return err;
    }

    mUsedEntryCount += count;
    mNextFreeEntry += count;

    return ESP_OK;
}

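// Writes a complete key/value item. Primitive values occupy a single entry; variable-length
// values (strings and blob chunks) occupy one header entry holding the size and data CRC,
// followed by ceil(dataSize / ENTRY_SIZE) data entries. For example, with 32-byte entries a
// 100-byte blob chunk uses 1 + 4 = 5 entries (span = 5). The item is also inserted into
// mHashList so later lookups can skip a full page scan.
//
// Illustrative call only (not part of this file), assuming a loaded ACTIVE page and the
// default chunkIdx declared in nvs_page.hpp:
//
//     uint32_t value = 42;
//     esp_err_t err = page.writeItem(nsIndex, ItemType::U32, "counter", &value, sizeof(value));
//     // ESP_ERR_NVS_PAGE_FULL means the caller should pick or allocate another page.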
esp_err_t Page::writeItem(uint8_t nsIndex, ItemType datatype, const char* key, const void* data, size_t dataSize, uint8_t chunkIdx)
{
    Item item;
    esp_err_t err;

    if (mState == PageState::INVALID) {
        return ESP_ERR_NVS_INVALID_STATE;
    }

    if (mState == PageState::UNINITIALIZED) {
        err = initialize();
        if (err != ESP_OK) {
            return err;
        }
    }

    if (mState == PageState::FULL) {
        return ESP_ERR_NVS_PAGE_FULL;
    }

    const size_t keySize = strlen(key);
    if (keySize > Item::MAX_KEY_LENGTH) {
        return ESP_ERR_NVS_KEY_TOO_LONG;
    }

    if (dataSize > Page::CHUNK_MAX_SIZE) {
        return ESP_ERR_NVS_VALUE_TOO_LONG;
    }

    if ((!isVariableLengthType(datatype)) && dataSize > 8) {
        return ESP_ERR_INVALID_ARG;
    }

    size_t totalSize = ENTRY_SIZE;
    size_t entriesCount = 1;
    if (isVariableLengthType(datatype)) {
        size_t roundedSize = (dataSize + ENTRY_SIZE - 1) & ~(ENTRY_SIZE - 1);
        totalSize += roundedSize;
        entriesCount += roundedSize / ENTRY_SIZE;
    }

    // primitive types should fit into one entry
    assert(totalSize == ENTRY_SIZE ||
           isVariableLengthType(datatype));

    if (mNextFreeEntry == INVALID_ENTRY || mNextFreeEntry + entriesCount > ENTRY_COUNT) {
        // page will not fit this amount of data
        return ESP_ERR_NVS_PAGE_FULL;
    }

    // write first item
    size_t span = (totalSize + ENTRY_SIZE - 1) / ENTRY_SIZE;
    item = Item(nsIndex, datatype, span, key, chunkIdx);

    err = mHashList.insert(item, mNextFreeEntry);
    if (err != ESP_OK) {
        return err;
    }

    if (!isVariableLengthType(datatype)) {
        memcpy(item.data, data, dataSize);
        item.crc32 = item.calculateCrc32();
        err = writeEntry(item);
        if (err != ESP_OK) {
            return err;
        }
    } else {
        const uint8_t* src = reinterpret_cast<const uint8_t*>(data);
        item.varLength.dataCrc32 = Item::calculateCrc32(src, dataSize);
        item.varLength.dataSize = dataSize;
        item.varLength.reserved = 0xffff;
        item.crc32 = item.calculateCrc32();
        err = writeEntry(item);
        if (err != ESP_OK) {
            return err;
        }

        size_t rest = dataSize % ENTRY_SIZE;
        size_t left = dataSize - rest;
        if (left > 0) {
            err = writeEntryData(static_cast<const uint8_t*>(data), left);
            if (err != ESP_OK) {
                return err;
            }
        }

        size_t tail = rest;
        if (tail > 0) {
            std::fill_n(item.rawData, ENTRY_SIZE, 0xff);
            memcpy(item.rawData, static_cast<const uint8_t*>(data) + left, tail);
            err = writeEntry(item);
            if (err != ESP_OK) {
                return err;
            }
        }
    }

    return ESP_OK;
}

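// Reads an item back: the header entry is located via findItem(), then for variable-length
// types the payload is reassembled from the following data entries and verified against the
// stored data CRC. A CRC mismatch erases the item and reports ESP_ERR_NVS_NOT_FOUND.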
esp_err_t Page::readItem(uint8_t nsIndex, ItemType datatype, const char* key, void* data, size_t dataSize, uint8_t chunkIdx, VerOffset chunkStart)
{
    size_t index = 0;
    Item item;

    if (mState == PageState::INVALID) {
        return ESP_ERR_NVS_INVALID_STATE;
    }

    esp_err_t rc = findItem(nsIndex, datatype, key, index, item, chunkIdx, chunkStart);
    if (rc != ESP_OK) {
        return rc;
    }

    if (!isVariableLengthType(datatype)) {
        if (dataSize != getAlignmentForType(datatype)) {
            return ESP_ERR_NVS_TYPE_MISMATCH;
        }

        memcpy(data, item.data, dataSize);
        return ESP_OK;
    }

    if (dataSize < static_cast<size_t>(item.varLength.dataSize)) {
        return ESP_ERR_NVS_INVALID_LENGTH;
    }

    uint8_t* dst = reinterpret_cast<uint8_t*>(data);
    size_t left = item.varLength.dataSize;
    for (size_t i = index + 1; i < index + item.span; ++i) {
        Item ditem;
        rc = readEntry(i, ditem);
        if (rc != ESP_OK) {
            return rc;
        }

        size_t willCopy = ENTRY_SIZE;
        willCopy = (left < willCopy) ? left : willCopy;
        memcpy(dst, ditem.rawData, willCopy);
        left -= willCopy;
        dst += willCopy;
    }

    if (Item::calculateCrc32(reinterpret_cast<uint8_t*>(data), item.varLength.dataSize) != item.varLength.dataCrc32) {
        rc = eraseEntryAndSpan(index);
        if (rc != ESP_OK) {
            return rc;
        }
        return ESP_ERR_NVS_NOT_FOUND;
    }

    return ESP_OK;
}

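// Same traversal as readItem(), but compares the stored payload against the caller's buffer
// entry by entry instead of copying it out. Returns ESP_ERR_NVS_CONTENT_DIFFERS on the
// first mismatching entry.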
esp_err_t Page::cmpItem(uint8_t nsIndex, ItemType datatype, const char* key, const void* data, size_t dataSize, uint8_t chunkIdx, VerOffset chunkStart)
{
    size_t index = 0;
    Item item;

    if (mState == PageState::INVALID) {
        return ESP_ERR_NVS_INVALID_STATE;
    }

    esp_err_t rc = findItem(nsIndex, datatype, key, index, item, chunkIdx, chunkStart);
    if (rc != ESP_OK) {
        return rc;
    }

    if (!isVariableLengthType(datatype)) {
        if (dataSize != getAlignmentForType(datatype)) {
            return ESP_ERR_NVS_TYPE_MISMATCH;
        }

        if (memcmp(data, item.data, dataSize)) {
            return ESP_ERR_NVS_CONTENT_DIFFERS;
        }
        return ESP_OK;
    }

    if (dataSize < static_cast<size_t>(item.varLength.dataSize)) {
        return ESP_ERR_NVS_INVALID_LENGTH;
    }

    const uint8_t* dst = reinterpret_cast<const uint8_t*>(data);
    size_t left = item.varLength.dataSize;
    for (size_t i = index + 1; i < index + item.span; ++i) {
        Item ditem;
        rc = readEntry(i, ditem);
        if (rc != ESP_OK) {
            return rc;
        }

        size_t willCopy = ENTRY_SIZE;
        willCopy = (left < willCopy) ? left : willCopy;
        if (memcmp(dst, ditem.rawData, willCopy)) {
            return ESP_ERR_NVS_CONTENT_DIFFERS;
        }
        left -= willCopy;
        dst += willCopy;
    }

    if (Item::calculateCrc32(reinterpret_cast<const uint8_t*>(data), item.varLength.dataSize) != item.varLength.dataCrc32) {
        return ESP_ERR_NVS_NOT_FOUND;
    }

    return ESP_OK;
}

esp_err_t Page::eraseItem(uint8_t nsIndex, ItemType datatype, const char* key, uint8_t chunkIdx, VerOffset chunkStart)
{
    size_t index = 0;
    Item item;
    esp_err_t rc = findItem(nsIndex, datatype, key, index, item, chunkIdx, chunkStart);
    if (rc != ESP_OK) {
        return rc;
    }
    return eraseEntryAndSpan(index);
}

esp_err_t Page::findItem(uint8_t nsIndex, ItemType datatype, const char* key, uint8_t chunkIdx, VerOffset chunkStart)
{
    size_t index = 0;
    Item item;
    return findItem(nsIndex, datatype, key, index, item, chunkIdx, chunkStart);
}

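// Marks the entry at `index` ERASED together with any data entries in its span, keeping
// mUsedEntryCount / mErasedEntryCount, mFirstUsedEntry and the hash list consistent.
// Entries whose stored CRC no longer matches are treated as single-entry items.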
esp_err_t Page::eraseEntryAndSpan(size_t index)
{
    uint32_t seq_num;
    getSeqNumber(seq_num);

    auto state = mEntryTable.get(index);
    size_t span = 1;

    if (state == EntryState::WRITTEN) {
        Item item;
        auto rc = readEntry(index, item);
        if (rc != ESP_OK) {
            return rc;
        }
        if (item.calculateCrc32() != item.crc32) {
            mHashList.erase(index);
            rc = alterEntryState(index, EntryState::ERASED);
            --mUsedEntryCount;
            ++mErasedEntryCount;
            if (rc != ESP_OK) {
                return rc;
            }
        } else {
            mHashList.erase(index);
            span = item.span;
            for (ptrdiff_t i = index + span - 1; i >= static_cast<ptrdiff_t>(index); --i) {
                if (mEntryTable.get(i) == EntryState::WRITTEN) {
                    --mUsedEntryCount;
                }
                ++mErasedEntryCount;
            }
            if (span == 1) {
                rc = alterEntryState(index, EntryState::ERASED);
            } else {
                rc = alterEntryRangeState(index, index + span, EntryState::ERASED);
            }
            if (rc != ESP_OK) {
                return rc;
            }
        }
    } else {
        auto rc = alterEntryState(index, EntryState::ERASED);
        if (rc != ESP_OK) {
            return rc;
        }
    }

    if (index == mFirstUsedEntry) {
        updateFirstUsedEntry(index, span);
    }

    if (index + span > mNextFreeEntry) {
        mNextFreeEntry = index + span;
    }

    return ESP_OK;
}

void Page::updateFirstUsedEntry(size_t index, size_t span)
{
    assert(index == mFirstUsedEntry);
    mFirstUsedEntry = INVALID_ENTRY;
    size_t end = mNextFreeEntry;
    if (end > ENTRY_COUNT) {
        end = ENTRY_COUNT;
    }
    for (size_t i = index + span; i < end; ++i) {
        if (mEntryTable.get(i) == EntryState::WRITTEN) {
            mFirstUsedEntry = i;
            break;
        }
    }
}

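// Copies every WRITTEN item (including all data entries of variable-length items) into
// `other`, initializing the destination page first if needed. This is typically used when
// live items are relocated off a page that is being freed so it can be erased and reused.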
esp_err_t Page::copyItems(Page& other)
{
    if (mFirstUsedEntry == INVALID_ENTRY) {
        return ESP_ERR_NVS_NOT_FOUND;
    }

    if (other.mState == PageState::UNINITIALIZED) {
        auto err = other.initialize();
        if (err != ESP_OK) {
            return err;
        }
    }

    Item entry;
    size_t readEntryIndex = mFirstUsedEntry;

    while (readEntryIndex < ENTRY_COUNT) {
        if (mEntryTable.get(readEntryIndex) != EntryState::WRITTEN) {
            assert(readEntryIndex != mFirstUsedEntry);
            readEntryIndex++;
            continue;
        }

        auto err = readEntry(readEntryIndex, entry);
        if (err != ESP_OK) {
            return err;
        }

        err = other.mHashList.insert(entry, other.mNextFreeEntry);
        if (err != ESP_OK) {
            return err;
        }

        err = other.writeEntry(entry);
        if (err != ESP_OK) {
            return err;
        }

        size_t span = entry.span;
        size_t end = readEntryIndex + span;
        assert(end <= ENTRY_COUNT);

        for (size_t i = readEntryIndex + 1; i < end; ++i) {
            readEntry(i, entry);
            err = other.writeEntry(entry);
            if (err != ESP_OK) {
                return err;
            }
        }

        readEntryIndex = end;
    }

    return ESP_OK;
}

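// Rebuilds the in-RAM view of the page from flash: reads the entry state table, recounts
// used/erased entries, and for an ACTIVE page also recovers from interrupted writes by
// discarding half-written entries, truncated variable-length items, and older duplicates of
// the same key. Any failure while repairing the table marks the page INVALID.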
esp_err_t Page::mLoadEntryTable()
{
    // for states where we actually care about data in the page, read entry state table
    if (mState == PageState::ACTIVE ||
            mState == PageState::FULL ||
            mState == PageState::FREEING) {
        auto rc = mPartition->read_raw(mBaseAddress + ENTRY_TABLE_OFFSET, mEntryTable.data(),
                                       mEntryTable.byteSize());
        if (rc != ESP_OK) {
            mState = PageState::INVALID;
            return rc;
        }
    }

    mErasedEntryCount = 0;
    mUsedEntryCount = 0;
    for (size_t i = 0; i < ENTRY_COUNT; ++i) {
        auto s = mEntryTable.get(i);
        if (s == EntryState::WRITTEN) {
            if (mFirstUsedEntry == INVALID_ENTRY) {
                mFirstUsedEntry = i;
            }
            ++mUsedEntryCount;
        } else if (s == EntryState::ERASED) {
            ++mErasedEntryCount;
        }
    }

    // for PageState::ACTIVE, we may have more data written to this page
    // as such, we need to figure out where the first unused entry is
    if (mState == PageState::ACTIVE) {
        for (size_t i = 0; i < ENTRY_COUNT; ++i) {
            if (mEntryTable.get(i) == EntryState::EMPTY) {
                mNextFreeEntry = i;
                break;
            }
        }

        // however, if power failed after some data was written into the entry,
        // but before the entry state table was altered, the entry located via the
        // entry state table may actually be half-written.
        // this is easy to check by reading the EntryHeader (i.e. the first word)
        while (mNextFreeEntry < ENTRY_COUNT) {
            uint32_t entryAddress = getEntryAddress(mNextFreeEntry);
            uint32_t header;
            auto rc = mPartition->read_raw(entryAddress, &header, sizeof(header));
            if (rc != ESP_OK) {
                mState = PageState::INVALID;
                return rc;
            }
            if (header != 0xffffffff) {
                auto oldState = mEntryTable.get(mNextFreeEntry);
                auto err = alterEntryState(mNextFreeEntry, EntryState::ERASED);
                if (err != ESP_OK) {
                    mState = PageState::INVALID;
                    return err;
                }
                ++mNextFreeEntry;
                if (oldState == EntryState::WRITTEN) {
                    --mUsedEntryCount;
                }
                ++mErasedEntryCount;
            } else {
                break;
            }
        }

        // check that all variable-length items are written or erased fully
        Item item;
        size_t lastItemIndex = INVALID_ENTRY;
        size_t end = mNextFreeEntry;
        if (end > ENTRY_COUNT) {
            end = ENTRY_COUNT;
        }
        size_t span;
        for (size_t i = 0; i < end; i += span) {
            span = 1;
            if (mEntryTable.get(i) == EntryState::ERASED) {
                lastItemIndex = INVALID_ENTRY;
                continue;
            }

            if (mEntryTable.get(i) == EntryState::ILLEGAL) {
                lastItemIndex = INVALID_ENTRY;
                auto err = eraseEntryAndSpan(i);
                if (err != ESP_OK) {
                    mState = PageState::INVALID;
                    return err;
                }
                continue;
            }

            lastItemIndex = i;

            auto err = readEntry(i, item);
            if (err != ESP_OK) {
                mState = PageState::INVALID;
                return err;
            }

            if (item.crc32 != item.calculateCrc32()) {
                err = eraseEntryAndSpan(i);
                if (err != ESP_OK) {
                    mState = PageState::INVALID;
                    return err;
                }
                continue;
            }

            err = mHashList.insert(item, i);
            if (err != ESP_OK) {
                mState = PageState::INVALID;
                return err;
            }

            // search for potential duplicate item
            size_t duplicateIndex = mHashList.find(0, item);

            if (isVariableLengthType(item.datatype)) {
                span = item.span;
                bool needErase = false;
                for (size_t j = i; j < i + span; ++j) {
                    if (mEntryTable.get(j) != EntryState::WRITTEN) {
                        needErase = true;
                        lastItemIndex = INVALID_ENTRY;
                        break;
                    }
                }
                if (needErase) {
                    eraseEntryAndSpan(i);
                    continue;
                }
            }

            /* Note that the duplicate detection logic works fine even when an old-format
             * blob is present along with a new-format blob-index for the same key on the
             * active page. Since the datatype is not used in the hash calculation, the
             * old-format blob will be removed. */
            if (duplicateIndex < i) {
                eraseEntryAndSpan(duplicateIndex);
            }
        }

        // check that the last item is not a duplicate
        if (lastItemIndex != INVALID_ENTRY) {
            size_t findItemIndex = 0;
            Item dupItem;
            if (findItem(item.nsIndex, item.datatype, item.key, findItemIndex, dupItem) == ESP_OK) {
                if (findItemIndex < lastItemIndex) {
                    auto err = eraseEntryAndSpan(findItemIndex);
                    if (err != ESP_OK) {
                        mState = PageState::INVALID;
                        return err;
                    }
                }
            }
        }
    } else if (mState == PageState::FULL || mState == PageState::FREEING) {
        // We have already filled mHashList for a page in the active state.
        // Do the same for the case when the page is in the full or freeing state.
        Item item;
        for (size_t i = mFirstUsedEntry; i < ENTRY_COUNT; ++i) {
            if (mEntryTable.get(i) != EntryState::WRITTEN) {
                continue;
            }

            auto err = readEntry(i, item);
            if (err != ESP_OK) {
                mState = PageState::INVALID;
                return err;
            }

            if (item.crc32 != item.calculateCrc32()) {
                err = eraseEntryAndSpan(i);
                if (err != ESP_OK) {
                    mState = PageState::INVALID;
                    return err;
                }
                continue;
            }

            assert(item.span > 0);

            err = mHashList.insert(item, i);
            if (err != ESP_OK) {
                mState = PageState::INVALID;
                return err;
            }

            size_t span = item.span;

            if (isVariableLengthType(item.datatype)) {
                for (size_t j = i + 1; j < i + span; ++j) {
                    if (mEntryTable.get(j) != EntryState::WRITTEN) {
                        eraseEntryAndSpan(i);
                        break;
                    }
                }
            }

            i += span - 1;
        }
    }

    return ESP_OK;
}

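// Transitions an UNINITIALIZED page to ACTIVE by writing the page header (state, sequence
// number, version and header CRC) and resetting the in-RAM entry table to all 1s (EMPTY).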
esp_err_t Page::initialize()
{
    assert(mState == PageState::UNINITIALIZED);
    mState = PageState::ACTIVE;
    Header header;
    header.mState = mState;
    header.mSeqNumber = mSeqNumber;
    header.mVersion = mVersion;
    header.mCrc32 = header.calculateCrc32();

    auto rc = mPartition->write_raw(mBaseAddress, &header, sizeof(header));
    if (rc != ESP_OK) {
        mState = PageState::INVALID;
        return rc;
    }

    mNextFreeEntry = 0;
    std::fill_n(mEntryTable.data(), mEntryTable.byteSize() / sizeof(uint32_t), 0xffffffff);
    return ESP_OK;
}

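// Entry states live in a small bitmap (mEntryTable) mirrored in flash at ENTRY_TABLE_OFFSET.
// State changes are encoded so that they only ever clear bits, so the affected 32-bit word
// can be rewritten in place with write_raw() without erasing the sector first.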
esp_err_t Page::alterEntryState(size_t index, EntryState state)
{
    assert(index < ENTRY_COUNT);
    mEntryTable.set(index, state);
    size_t wordToWrite = mEntryTable.getWordIndex(index);
    uint32_t word = mEntryTable.data()[wordToWrite];
    auto rc = mPartition->write_raw(mBaseAddress + ENTRY_TABLE_OFFSET + static_cast<uint32_t>(wordToWrite) * 4,
                                    &word, sizeof(word));
    if (rc != ESP_OK) {
        mState = PageState::INVALID;
        return rc;
    }
    return ESP_OK;
}

esp_err_t Page::alterEntryRangeState(size_t begin, size_t end, EntryState state)
{
    assert(end <= ENTRY_COUNT);
    assert(end > begin);
    size_t wordIndex = mEntryTable.getWordIndex(end - 1);
    for (ptrdiff_t i = end - 1; i >= static_cast<ptrdiff_t>(begin); --i) {
        mEntryTable.set(i, state);
        size_t nextWordIndex;
        if (i == static_cast<ptrdiff_t>(begin)) {
            nextWordIndex = (size_t) -1;
        } else {
            nextWordIndex = mEntryTable.getWordIndex(i - 1);
        }
        if (nextWordIndex != wordIndex) {
            uint32_t word = mEntryTable.data()[wordIndex];
            auto rc = mPartition->write_raw(mBaseAddress + ENTRY_TABLE_OFFSET + static_cast<uint32_t>(wordIndex) * 4,
                                            &word, 4);
            if (rc != ESP_OK) {
                return rc;
            }
        }
        wordIndex = nextWordIndex;
    }
    return ESP_OK;
}

esp_err_t Page::alterPageState(PageState state)
{
    uint32_t state_val = static_cast<uint32_t>(state);
    auto rc = mPartition->write_raw(mBaseAddress, &state_val, sizeof(state));
    if (rc != ESP_OK) {
        mState = PageState::INVALID;
        return rc;
    }
    mState = (PageState) state;
    return ESP_OK;
}

esp_err_t Page::readEntry(size_t index, Item& dst) const
{
    auto rc = mPartition->read(getEntryAddress(index), &dst, sizeof(dst));
    if (rc != ESP_OK) {
        return rc;
    }
    return ESP_OK;
}

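// Core lookup routine. When namespace, type and key are all specified, the hash list is
// consulted first to narrow the starting index; otherwise the page is scanned linearly from
// mFirstUsedEntry (or the caller-supplied itemIndex), skipping non-WRITTEN entries and
// erasing any entry whose CRC no longer matches.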
esp_err_t Page::findItem(uint8_t nsIndex, ItemType datatype, const char* key, size_t &itemIndex, Item& item, uint8_t chunkIdx, VerOffset chunkStart)
{
    if (mState == PageState::CORRUPT || mState == PageState::INVALID || mState == PageState::UNINITIALIZED) {
        return ESP_ERR_NVS_NOT_FOUND;
    }

    size_t findBeginIndex = itemIndex;
    if (findBeginIndex >= ENTRY_COUNT) {
        return ESP_ERR_NVS_NOT_FOUND;
    }

    size_t start = mFirstUsedEntry;
    if (findBeginIndex > mFirstUsedEntry && findBeginIndex < ENTRY_COUNT) {
        start = findBeginIndex;
    }

    size_t end = mNextFreeEntry;
    if (end > ENTRY_COUNT) {
        end = ENTRY_COUNT;
    }

    if (nsIndex != NS_ANY && datatype != ItemType::ANY && key != NULL) {
        size_t cachedIndex = mHashList.find(start, Item(nsIndex, datatype, 0, key, chunkIdx));
        if (cachedIndex < ENTRY_COUNT) {
            start = cachedIndex;
        } else {
            return ESP_ERR_NVS_NOT_FOUND;
        }
    }

    size_t next;
    for (size_t i = start; i < end; i = next) {
        next = i + 1;
        if (mEntryTable.get(i) != EntryState::WRITTEN) {
            continue;
        }

        auto rc = readEntry(i, item);
        if (rc != ESP_OK) {
            mState = PageState::INVALID;
            return rc;
        }

        auto crc32 = item.calculateCrc32();
        if (item.crc32 != crc32) {
            rc = eraseEntryAndSpan(i);
            if (rc != ESP_OK) {
                mState = PageState::INVALID;
                return rc;
            }
            continue;
        }

        if (isVariableLengthType(item.datatype)) {
            next = i + item.span;
        }

        if (nsIndex != NS_ANY && item.nsIndex != nsIndex) {
            continue;
        }

        if (key != nullptr && strncmp(key, item.key, Item::MAX_KEY_LENGTH) != 0) {
            continue;
        }

        /* For blob data, the chunkIndex should match */
        if (chunkIdx != CHUNK_ANY
                && datatype == ItemType::BLOB_DATA
                && item.chunkIndex != chunkIdx) {
            continue;
        }

        /* Blob-index will match the <ns,key> with blob data.
         * Skip data chunks when searching for a blob index */
        if (datatype == ItemType::BLOB_IDX
                && item.chunkIndex != CHUNK_ANY) {
            continue;
        }

        /* Match the version for blob-index */
        if (datatype == ItemType::BLOB_IDX
                && chunkStart != VerOffset::VER_ANY
                && item.blobIndex.chunkStart != chunkStart) {
            continue;
        }

        if (datatype != ItemType::ANY && item.datatype != datatype) {
            if (key == nullptr && nsIndex == NS_ANY && chunkIdx == CHUNK_ANY) {
                continue; // continue for brute-force search on blob indices
            }
            itemIndex = i;
            return ESP_ERR_NVS_TYPE_MISMATCH;
        }

        itemIndex = i;
        return ESP_OK;
    }

    return ESP_ERR_NVS_NOT_FOUND;
}

esp_err_t Page::getSeqNumber(uint32_t& seqNumber) const
{
    if (mState != PageState::UNINITIALIZED && mState != PageState::INVALID && mState != PageState::CORRUPT) {
        seqNumber = mSeqNumber;
        return ESP_OK;
    }
    return ESP_ERR_NVS_NOT_INITIALIZED;
}

esp_err_t Page::setSeqNumber(uint32_t seqNumber)
{
    if (mState != PageState::UNINITIALIZED) {
        return ESP_ERR_NVS_INVALID_STATE;
    }
    mSeqNumber = seqNumber;
    return ESP_OK;
}

esp_err_t Page::setVersion(uint8_t ver)
{
    if (mState != PageState::UNINITIALIZED) {
        return ESP_ERR_NVS_INVALID_STATE;
    }
    mVersion = ver;
    return ESP_OK;
}

esp_err_t Page::erase()
{
    auto rc = mPartition->erase_range(mBaseAddress, SPI_FLASH_SEC_SIZE);
    if (rc != ESP_OK) {
        mState = PageState::INVALID;
        return rc;
    }
    mUsedEntryCount = 0;
    mErasedEntryCount = 0;
    mFirstUsedEntry = INVALID_ENTRY;
    mNextFreeEntry = INVALID_ENTRY;
    mState = PageState::UNINITIALIZED;
    mHashList.clear();
    return ESP_OK;
}

esp_err_t Page::markFreeing()
{
    if (mState != PageState::FULL && mState != PageState::ACTIVE) {
        return ESP_ERR_NVS_INVALID_STATE;
    }
    return alterPageState(PageState::FREEING);
}

esp_err_t Page::markFull()
{
    if (mState != PageState::ACTIVE) {
        return ESP_ERR_NVS_INVALID_STATE;
    }
    return alterPageState(PageState::FULL);
}

size_t Page::getVarDataTailroom() const
{
    if (mState == PageState::UNINITIALIZED) {
        return CHUNK_MAX_SIZE;
    } else if (mState == PageState::FULL) {
        return 0;
    }
    /* Skip one entry for the blob-data item preceding the data */
    return ((mNextFreeEntry < (ENTRY_COUNT - 1)) ? ((ENTRY_COUNT - mNextFreeEntry - 1) * ENTRY_SIZE) : 0);
}

const char* Page::pageStateToName(PageState ps)
{
    switch (ps) {
    case PageState::CORRUPT:
        return "CORRUPT";
    case PageState::ACTIVE:
        return "ACTIVE";
    case PageState::FREEING:
        return "FREEING";
    case PageState::FULL:
        return "FULL";
    case PageState::INVALID:
        return "INVALID";
    case PageState::UNINITIALIZED:
        return "UNINITIALIZED";
    default:
        assert(0 && "invalid state value");
        return "";
    }
}

void Page::debugDump() const
{
    printf("state=%x (%s) addr=%x seq=%d\nfirstUsed=%d nextFree=%d used=%d erased=%d\n",
           (uint32_t) mState, pageStateToName(mState), mBaseAddress, mSeqNumber,
           static_cast<int>(mFirstUsedEntry), static_cast<int>(mNextFreeEntry),
           mUsedEntryCount, mErasedEntryCount);
    size_t skip = 0;
    for (size_t i = 0; i < ENTRY_COUNT; ++i) {
        printf("%3d: ", static_cast<int>(i));
        EntryState state = mEntryTable.get(i);
        if (state == EntryState::EMPTY) {
            printf("E\n");
        } else if (state == EntryState::ERASED) {
            printf("X\n");
        } else if (state == EntryState::WRITTEN) {
            Item item;
            readEntry(i, item);
            if (skip == 0) {
                printf("W ns=%2u type=%2u span=%3u key=\"%s\" chunkIdx=%d len=%d\n",
                       item.nsIndex, static_cast<unsigned>(item.datatype), item.span, item.key,
                       item.chunkIndex, (item.span != 1) ? ((int)item.varLength.dataSize) : -1);
                if (item.span > 0 && item.span <= ENTRY_COUNT - i) {
                    skip = item.span - 1;
                } else {
                    skip = 0;
                }
            } else {
                printf("D\n");
                skip--;
            }
        }
    }
}

esp_err_t Page::calcEntries(nvs_stats_t &nvsStats)
{
    assert(mState != PageState::FREEING);

    nvsStats.total_entries += ENTRY_COUNT;

    switch (mState) {
    case PageState::UNINITIALIZED:
    case PageState::CORRUPT:
        nvsStats.free_entries += ENTRY_COUNT;
        break;

    case PageState::FULL:
    case PageState::ACTIVE:
        nvsStats.used_entries += mUsedEntryCount;
        nvsStats.free_entries += ENTRY_COUNT - mUsedEntryCount; // equivalent to free + erased entries
        break;

    case PageState::INVALID:
        return ESP_ERR_INVALID_STATE;
        break;

    default:
        assert(false && "Unhandled state");
        break;
    }

    return ESP_OK;
}

} // namespace nvs