nvs_page.cpp

// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "nvs_page.hpp"
#include <esp_rom_crc.h>
#include <cstdio>
#include <cstring>
#include <cstddef>   // offsetof
#include <cstdlib>   // malloc, free
#include <cassert>
#include <algorithm> // std::any_of, std::fill_n

namespace nvs
{

Page::Page() : mPartition(nullptr) { }

uint32_t Page::Header::calculateCrc32()
{
    return esp_rom_crc32_le(0xffffffff,
                            reinterpret_cast<uint8_t*>(this) + offsetof(Header, mSeqNumber),
                            offsetof(Header, mCrc32) - offsetof(Header, mSeqNumber));
}
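
/* Binds this Page object to a flash sector and recovers its state.
 * Reads the page header, verifies its CRC, and, for an apparently
 * uninitialized page, scans the whole sector to confirm it is really
 * erased (any non-0xffffffff word marks the page CORRUPT). For ACTIVE,
 * FULL and FREEING pages the entry state table is loaded as well. */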
esp_err_t Page::load(Partition *partition, uint32_t sectorNumber)
{
    if (partition == nullptr) {
        return ESP_ERR_INVALID_ARG;
    }

    mPartition = partition;
    mBaseAddress = sectorNumber * SEC_SIZE;
    mUsedEntryCount = 0;
    mErasedEntryCount = 0;

    Header header;
    auto rc = mPartition->read_raw(mBaseAddress, &header, sizeof(header));
    if (rc != ESP_OK) {
        mState = PageState::INVALID;
        return rc;
    }
    if (header.mState == PageState::UNINITIALIZED) {
        mState = header.mState;
        // check if the whole page is really empty
        // reading the whole page takes ~40 times less than erasing it
        const int BLOCK_SIZE = 128;
        uint32_t* block = new (std::nothrow) uint32_t[BLOCK_SIZE];
        if (!block) {
            return ESP_ERR_NO_MEM;
        }
        for (uint32_t i = 0; i < SPI_FLASH_SEC_SIZE; i += 4 * BLOCK_SIZE) {
            rc = mPartition->read_raw(mBaseAddress + i, block, 4 * BLOCK_SIZE);
            if (rc != ESP_OK) {
                mState = PageState::INVALID;
                delete[] block;
                return rc;
            }
            if (std::any_of(block, block + BLOCK_SIZE, [](uint32_t val) -> bool { return val != 0xffffffff; })) {
                // page isn't as empty after all, mark it as corrupted
                mState = PageState::CORRUPT;
                break;
            }
        }
        delete[] block;
    } else if (header.mCrc32 != header.calculateCrc32()) {
        header.mState = PageState::CORRUPT;
    } else {
        mState = header.mState;
        mSeqNumber = header.mSeqNumber;
        if (header.mVersion < NVS_VERSION) {
            return ESP_ERR_NVS_NEW_VERSION_FOUND;
        } else {
            mVersion = header.mVersion;
        }
    }

    switch (mState) {
    case PageState::UNINITIALIZED:
        break;
    case PageState::FULL:
    case PageState::ACTIVE:
    case PageState::FREEING:
        mLoadEntryTable();
        break;
    default:
        mState = PageState::CORRUPT;
        break;
    }

    return ESP_OK;
}
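
/* Writes a single 32-byte entry at mNextFreeEntry and then flips its slot
 * in the entry state table to WRITTEN. Also tracks the first used entry
 * and the used-entry count. */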
esp_err_t Page::writeEntry(const Item& item)
{
    esp_err_t err;

    err = mPartition->write(getEntryAddress(mNextFreeEntry), &item, sizeof(item));
    if (err != ESP_OK) {
        mState = PageState::INVALID;
        return err;
    }

    err = alterEntryState(mNextFreeEntry, EntryState::WRITTEN);
    if (err != ESP_OK) {
        return err;
    }

    if (mFirstUsedEntry == INVALID_ENTRY) {
        mFirstUsedEntry = mNextFreeEntry;
    }

    ++mUsedEntryCount;
    ++mNextFreeEntry;

    return ESP_OK;
}
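
/* Writes the payload entries of a variable-length item (everything after
 * the header entry). The size must be a multiple of ENTRY_SIZE; callers
 * handle any non-multiple tail separately in writeItem(). */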
esp_err_t Page::writeEntryData(const uint8_t* data, size_t size)
{
    assert(size % ENTRY_SIZE == 0);
    assert(mNextFreeEntry != INVALID_ENTRY);
    assert(mFirstUsedEntry != INVALID_ENTRY);
    const uint16_t count = size / ENTRY_SIZE;

    const uint8_t* buf = data;

#if !defined LINUX_TARGET
    // TODO: check whether still necessary with esp_partition* API
    /* On the ESP32, data can come from DROM, which is not accessible by spi_flash_write
     * function. To work around this, we copy the data to heap if it came from DROM.
     * Hopefully this won't happen very often in practice. For data from DRAM, we should
     * still be able to write it to flash directly.
     * TODO: figure out how to make this platform-specific check nicer (probably by introducing
     * a platform-specific flash layer).
     */
    if ((uint32_t) data < 0x3ff00000) {
        buf = (uint8_t*) malloc(size);
        if (!buf) {
            return ESP_ERR_NO_MEM;
        }
        memcpy((void*)buf, data, size);
    }
#endif // ! LINUX_TARGET

    auto rc = mPartition->write(getEntryAddress(mNextFreeEntry), buf, size);

#if !defined LINUX_TARGET
    if (buf != data) {
        free((void*)buf);
    }
#endif // ! LINUX_TARGET

    if (rc != ESP_OK) {
        mState = PageState::INVALID;
        return rc;
    }

    auto err = alterEntryRangeState(mNextFreeEntry, mNextFreeEntry + count, EntryState::WRITTEN);
    if (err != ESP_OK) {
        return err;
    }

    mUsedEntryCount += count;
    mNextFreeEntry += count;

    return ESP_OK;
}
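
/* Top-level write. Primitive values fit into the single header entry;
 * variable-length values (strings, blobs) get a header entry followed by
 * enough data entries to hold the payload, with the item's span, length
 * and data CRC recorded in the header entry. */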
esp_err_t Page::writeItem(uint8_t nsIndex, ItemType datatype, const char* key, const void* data, size_t dataSize, uint8_t chunkIdx)
{
    Item item;
    esp_err_t err;

    if (mState == PageState::INVALID) {
        return ESP_ERR_NVS_INVALID_STATE;
    }

    if (mState == PageState::UNINITIALIZED) {
        err = initialize();
        if (err != ESP_OK) {
            return err;
        }
    }

    if (mState == PageState::FULL) {
        return ESP_ERR_NVS_PAGE_FULL;
    }

    const size_t keySize = strlen(key);
    if (keySize > Item::MAX_KEY_LENGTH) {
        return ESP_ERR_NVS_KEY_TOO_LONG;
    }

    if (dataSize > Page::CHUNK_MAX_SIZE) {
        return ESP_ERR_NVS_VALUE_TOO_LONG;
    }

    size_t totalSize = ENTRY_SIZE;
    size_t entriesCount = 1;
    if (isVariableLengthType(datatype)) {
        size_t roundedSize = (dataSize + ENTRY_SIZE - 1) & ~(ENTRY_SIZE - 1);
        totalSize += roundedSize;
        entriesCount += roundedSize / ENTRY_SIZE;
    }

    // primitive types should fit into one entry
    assert(totalSize == ENTRY_SIZE ||
           isVariableLengthType(datatype));

    if (mNextFreeEntry == INVALID_ENTRY || mNextFreeEntry + entriesCount > ENTRY_COUNT) {
        // page will not fit this amount of data
        return ESP_ERR_NVS_PAGE_FULL;
    }

    // write first item
    size_t span = (totalSize + ENTRY_SIZE - 1) / ENTRY_SIZE;
    item = Item(nsIndex, datatype, span, key, chunkIdx);
    err = mHashList.insert(item, mNextFreeEntry);
    if (err != ESP_OK) {
        return err;
    }

    if (!isVariableLengthType(datatype)) {
        memcpy(item.data, data, dataSize);
        item.crc32 = item.calculateCrc32();
        err = writeEntry(item);
        if (err != ESP_OK) {
            return err;
        }
    } else {
        const uint8_t* src = reinterpret_cast<const uint8_t*>(data);
        item.varLength.dataCrc32 = Item::calculateCrc32(src, dataSize);
        item.varLength.dataSize = dataSize;
        item.varLength.reserved = 0xffff;
        item.crc32 = item.calculateCrc32();
        err = writeEntry(item);
        if (err != ESP_OK) {
            return err;
        }

        size_t left = dataSize / ENTRY_SIZE * ENTRY_SIZE;
        if (left > 0) {
            err = writeEntryData(static_cast<const uint8_t*>(data), left);
            if (err != ESP_OK) {
                return err;
            }
        }

        size_t tail = dataSize - left;
        if (tail > 0) {
            std::fill_n(item.rawData, ENTRY_SIZE, 0xff);
            memcpy(item.rawData, static_cast<const uint8_t*>(data) + left, tail);
            err = writeEntry(item);
            if (err != ESP_OK) {
                return err;
            }
        }
    }
    return ESP_OK;
}
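
/* readItem() locates an item via findItem() and copies it out; for
 * variable-length items the payload is reassembled from the data entries
 * and checked against the stored data CRC (a mismatch erases the item and
 * reports it as not found). cmpItem() below follows the same walk but
 * compares instead of copying. */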
esp_err_t Page::readItem(uint8_t nsIndex, ItemType datatype, const char* key, void* data, size_t dataSize, uint8_t chunkIdx, VerOffset chunkStart)
{
    size_t index = 0;
    Item item;

    if (mState == PageState::INVALID) {
        return ESP_ERR_NVS_INVALID_STATE;
    }

    esp_err_t rc = findItem(nsIndex, datatype, key, index, item, chunkIdx, chunkStart);
    if (rc != ESP_OK) {
        return rc;
    }

    if (!isVariableLengthType(datatype)) {
        if (dataSize != getAlignmentForType(datatype)) {
            return ESP_ERR_NVS_TYPE_MISMATCH;
        }

        memcpy(data, item.data, dataSize);
        return ESP_OK;
    }

    if (dataSize < static_cast<size_t>(item.varLength.dataSize)) {
        return ESP_ERR_NVS_INVALID_LENGTH;
    }

    uint8_t* dst = reinterpret_cast<uint8_t*>(data);
    size_t left = item.varLength.dataSize;
    for (size_t i = index + 1; i < index + item.span; ++i) {
        Item ditem;
        rc = readEntry(i, ditem);
        if (rc != ESP_OK) {
            return rc;
        }

        size_t willCopy = ENTRY_SIZE;
        willCopy = (left < willCopy) ? left : willCopy;
        memcpy(dst, ditem.rawData, willCopy);
        left -= willCopy;
        dst += willCopy;
    }

    if (Item::calculateCrc32(reinterpret_cast<uint8_t*>(data), item.varLength.dataSize) != item.varLength.dataCrc32) {
        rc = eraseEntryAndSpan(index);
        if (rc != ESP_OK) {
            return rc;
        }
        return ESP_ERR_NVS_NOT_FOUND;
    }

    return ESP_OK;
}

esp_err_t Page::cmpItem(uint8_t nsIndex, ItemType datatype, const char* key, const void* data, size_t dataSize, uint8_t chunkIdx, VerOffset chunkStart)
{
    size_t index = 0;
    Item item;

    if (mState == PageState::INVALID) {
        return ESP_ERR_NVS_INVALID_STATE;
    }

    esp_err_t rc = findItem(nsIndex, datatype, key, index, item, chunkIdx, chunkStart);
    if (rc != ESP_OK) {
        return rc;
    }

    if (!isVariableLengthType(datatype)) {
        if (dataSize != getAlignmentForType(datatype)) {
            return ESP_ERR_NVS_TYPE_MISMATCH;
        }

        if (memcmp(data, item.data, dataSize)) {
            return ESP_ERR_NVS_CONTENT_DIFFERS;
        }
        return ESP_OK;
    }

    if (dataSize < static_cast<size_t>(item.varLength.dataSize)) {
        return ESP_ERR_NVS_INVALID_LENGTH;
    }

    const uint8_t* dst = reinterpret_cast<const uint8_t*>(data);
    size_t left = item.varLength.dataSize;
    for (size_t i = index + 1; i < index + item.span; ++i) {
        Item ditem;
        rc = readEntry(i, ditem);
        if (rc != ESP_OK) {
            return rc;
        }

        size_t willCopy = ENTRY_SIZE;
        willCopy = (left < willCopy) ? left : willCopy;
        if (memcmp(dst, ditem.rawData, willCopy)) {
            return ESP_ERR_NVS_CONTENT_DIFFERS;
        }
        left -= willCopy;
        dst += willCopy;
    }

    if (Item::calculateCrc32(reinterpret_cast<const uint8_t*>(data), item.varLength.dataSize) != item.varLength.dataCrc32) {
        return ESP_ERR_NVS_NOT_FOUND;
    }

    return ESP_OK;
}

esp_err_t Page::eraseItem(uint8_t nsIndex, ItemType datatype, const char* key, uint8_t chunkIdx, VerOffset chunkStart)
{
    size_t index = 0;
    Item item;
    esp_err_t rc = findItem(nsIndex, datatype, key, index, item, chunkIdx, chunkStart);
    if (rc != ESP_OK) {
        return rc;
    }
    return eraseEntryAndSpan(index);
}

esp_err_t Page::findItem(uint8_t nsIndex, ItemType datatype, const char* key, uint8_t chunkIdx, VerOffset chunkStart)
{
    size_t index = 0;
    Item item;
    return findItem(nsIndex, datatype, key, index, item, chunkIdx, chunkStart);
}
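
/* Marks the entry at `index` (and, for a valid variable-length item, its
 * whole span of data entries) as ERASED in the entry state table, removes
 * it from the hash list, and updates the used/erased counters as well as
 * the first-used and next-free markers. */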
esp_err_t Page::eraseEntryAndSpan(size_t index)
{
    uint32_t seq_num;
    getSeqNumber(seq_num);

    auto state = mEntryTable.get(index);
    size_t span = 1;
    if (state == EntryState::WRITTEN) {
        Item item;
        auto rc = readEntry(index, item);
        if (rc != ESP_OK) {
            return rc;
        }
        if (item.calculateCrc32() != item.crc32) {
            mHashList.erase(index);
            rc = alterEntryState(index, EntryState::ERASED);
            --mUsedEntryCount;
            ++mErasedEntryCount;
            if (rc != ESP_OK) {
                return rc;
            }
        } else {
            mHashList.erase(index);
            span = item.span;
            for (ptrdiff_t i = index + span - 1; i >= static_cast<ptrdiff_t>(index); --i) {
                if (mEntryTable.get(i) == EntryState::WRITTEN) {
                    --mUsedEntryCount;
                }
                ++mErasedEntryCount;
            }
            if (span == 1) {
                rc = alterEntryState(index, EntryState::ERASED);
            } else {
                rc = alterEntryRangeState(index, index + span, EntryState::ERASED);
            }
            if (rc != ESP_OK) {
                return rc;
            }
        }
    } else {
        auto rc = alterEntryState(index, EntryState::ERASED);
        if (rc != ESP_OK) {
            return rc;
        }
    }

    if (index == mFirstUsedEntry) {
        updateFirstUsedEntry(index, span);
    }

    if (index + span > mNextFreeEntry) {
        mNextFreeEntry = index + span;
    }

    return ESP_OK;
}

void Page::updateFirstUsedEntry(size_t index, size_t span)
{
    assert(index == mFirstUsedEntry);
    mFirstUsedEntry = INVALID_ENTRY;
    size_t end = mNextFreeEntry;
    if (end > ENTRY_COUNT) {
        end = ENTRY_COUNT;
    }
    for (size_t i = index + span; i < end; ++i) {
        if (mEntryTable.get(i) == EntryState::WRITTEN) {
            mFirstUsedEntry = i;
            break;
        }
    }
}
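
/* Copies every WRITTEN entry from this page into `other`, preserving item
 * spans, and registers the copied header entries in the destination page's
 * hash list. Items are appended starting at the destination's next free
 * entry. */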
esp_err_t Page::copyItems(Page& other)
{
    if (mFirstUsedEntry == INVALID_ENTRY) {
        return ESP_ERR_NVS_NOT_FOUND;
    }

    if (other.mState == PageState::UNINITIALIZED) {
        auto err = other.initialize();
        if (err != ESP_OK) {
            return err;
        }
    }

    Item entry;
    size_t readEntryIndex = mFirstUsedEntry;

    while (readEntryIndex < ENTRY_COUNT) {
        if (mEntryTable.get(readEntryIndex) != EntryState::WRITTEN) {
            assert(readEntryIndex != mFirstUsedEntry);
            readEntryIndex++;
            continue;
        }

        auto err = readEntry(readEntryIndex, entry);
        if (err != ESP_OK) {
            return err;
        }

        err = other.mHashList.insert(entry, other.mNextFreeEntry);
        if (err != ESP_OK) {
            return err;
        }

        err = other.writeEntry(entry);
        if (err != ESP_OK) {
            return err;
        }

        size_t span = entry.span;
        size_t end = readEntryIndex + span;
        assert(end <= ENTRY_COUNT);

        for (size_t i = readEntryIndex + 1; i < end; ++i) {
            readEntry(i, entry);
            err = other.writeEntry(entry);
            if (err != ESP_OK) {
                return err;
            }
        }

        readEntryIndex = end;
    }

    return ESP_OK;
}
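
/* Rebuilds the in-RAM view of the page from flash: reads the entry state
 * table, recounts used/erased entries, and repairs inconsistencies left by
 * power loss (half-written entries, partially written variable-length
 * spans, stale duplicates). For ACTIVE pages it also determines the next
 * free entry; for ACTIVE/FULL/FREEING pages it repopulates the hash list. */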
esp_err_t Page::mLoadEntryTable()
{
    // for states where we actually care about data in the page, read entry state table
    if (mState == PageState::ACTIVE ||
            mState == PageState::FULL ||
            mState == PageState::FREEING) {
        auto rc = mPartition->read_raw(mBaseAddress + ENTRY_TABLE_OFFSET, mEntryTable.data(),
                                       mEntryTable.byteSize());
        if (rc != ESP_OK) {
            mState = PageState::INVALID;
            return rc;
        }
    }

    mErasedEntryCount = 0;
    mUsedEntryCount = 0;
    for (size_t i = 0; i < ENTRY_COUNT; ++i) {
        auto s = mEntryTable.get(i);
        if (s == EntryState::WRITTEN) {
            if (mFirstUsedEntry == INVALID_ENTRY) {
                mFirstUsedEntry = i;
            }
            ++mUsedEntryCount;
        } else if (s == EntryState::ERASED) {
            ++mErasedEntryCount;
        }
    }

    // for PageState::ACTIVE, we may have more data written to this page
    // as such, we need to figure out where the first unused entry is
    if (mState == PageState::ACTIVE) {
        for (size_t i = 0; i < ENTRY_COUNT; ++i) {
            if (mEntryTable.get(i) == EntryState::EMPTY) {
                mNextFreeEntry = i;
                break;
            }
        }

        // however, if power failed after some data was written into the entry
        // but before the entry state table was altered, the entry located via
        // the entry state table may actually be half-written.
        // this is easy to check by reading the EntryHeader (i.e. the first word)
        while (mNextFreeEntry < ENTRY_COUNT) {
            uint32_t entryAddress = getEntryAddress(mNextFreeEntry);
            uint32_t header;
            auto rc = mPartition->read_raw(entryAddress, &header, sizeof(header));
            if (rc != ESP_OK) {
                mState = PageState::INVALID;
                return rc;
            }
            if (header != 0xffffffff) {
                auto oldState = mEntryTable.get(mNextFreeEntry);
                auto err = alterEntryState(mNextFreeEntry, EntryState::ERASED);
                if (err != ESP_OK) {
                    mState = PageState::INVALID;
                    return err;
                }
                ++mNextFreeEntry;
                if (oldState == EntryState::WRITTEN) {
                    --mUsedEntryCount;
                }
                ++mErasedEntryCount;
            } else {
                break;
            }
        }

        // check that all variable-length items are written or erased fully
        Item item;
        size_t lastItemIndex = INVALID_ENTRY;
        size_t end = mNextFreeEntry;
        if (end > ENTRY_COUNT) {
            end = ENTRY_COUNT;
        }
        size_t span;
        for (size_t i = 0; i < end; i += span) {
            span = 1;
            if (mEntryTable.get(i) == EntryState::ERASED) {
                lastItemIndex = INVALID_ENTRY;
                continue;
            }

            if (mEntryTable.get(i) == EntryState::ILLEGAL) {
                lastItemIndex = INVALID_ENTRY;
                auto err = eraseEntryAndSpan(i);
                if (err != ESP_OK) {
                    mState = PageState::INVALID;
                    return err;
                }
                continue;
            }

            lastItemIndex = i;

            auto err = readEntry(i, item);
            if (err != ESP_OK) {
                mState = PageState::INVALID;
                return err;
            }

            if (item.crc32 != item.calculateCrc32()) {
                err = eraseEntryAndSpan(i);
                if (err != ESP_OK) {
                    mState = PageState::INVALID;
                    return err;
                }
                continue;
            }

            err = mHashList.insert(item, i);
            if (err != ESP_OK) {
                mState = PageState::INVALID;
                return err;
            }

            // search for potential duplicate item
            size_t duplicateIndex = mHashList.find(0, item);

            if (isVariableLengthType(item.datatype)) {
                span = item.span;
                bool needErase = false;
                for (size_t j = i; j < i + span; ++j) {
                    if (mEntryTable.get(j) != EntryState::WRITTEN) {
                        needErase = true;
                        lastItemIndex = INVALID_ENTRY;
                        break;
                    }
                }
                if (needErase) {
                    eraseEntryAndSpan(i);
                    continue;
                }
            }

            /* Note that the logic for duplicate detection works fine even
             * when an old-format blob is present along with a new-format blob-index
             * for the same key on the active page. Since datatype is not used in hash calculation,
             * the old-format blob will be removed. */
            if (duplicateIndex < i) {
                eraseEntryAndSpan(duplicateIndex);
            }
        }

        // check that the last item is not a duplicate
        if (lastItemIndex != INVALID_ENTRY) {
            size_t findItemIndex = 0;
            Item dupItem;
            if (findItem(item.nsIndex, item.datatype, item.key, findItemIndex, dupItem) == ESP_OK) {
                if (findItemIndex < lastItemIndex) {
                    auto err = eraseEntryAndSpan(findItemIndex);
                    if (err != ESP_OK) {
                        mState = PageState::INVALID;
                        return err;
                    }
                }
            }
        }
    } else if (mState == PageState::FULL || mState == PageState::FREEING) {
        // We have already filled mHashList for a page in the active state.
        // Do the same for the case when the page is in the full or freeing state.
        Item item;
        for (size_t i = mFirstUsedEntry; i < ENTRY_COUNT; ++i) {
            if (mEntryTable.get(i) != EntryState::WRITTEN) {
                continue;
            }

            auto err = readEntry(i, item);
            if (err != ESP_OK) {
                mState = PageState::INVALID;
                return err;
            }

            if (item.crc32 != item.calculateCrc32()) {
                err = eraseEntryAndSpan(i);
                if (err != ESP_OK) {
                    mState = PageState::INVALID;
                    return err;
                }
                continue;
            }

            assert(item.span > 0);

            err = mHashList.insert(item, i);
            if (err != ESP_OK) {
                mState = PageState::INVALID;
                return err;
            }

            size_t span = item.span;

            if (isVariableLengthType(item.datatype)) {
                for (size_t j = i + 1; j < i + span; ++j) {
                    if (mEntryTable.get(j) != EntryState::WRITTEN) {
                        eraseEntryAndSpan(i);
                        break;
                    }
                }
            }
            i += span - 1;
        }
    }

    return ESP_OK;
}
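
/* Transitions an UNINITIALIZED page to ACTIVE by writing a fresh header
 * (state, sequence number, version, CRC) and resetting the in-RAM copy of
 * the entry state table to all 0xffffffff (empty). */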
esp_err_t Page::initialize()
{
    assert(mState == PageState::UNINITIALIZED);
    mState = PageState::ACTIVE;
    Header header;
    header.mState = mState;
    header.mSeqNumber = mSeqNumber;
    header.mVersion = mVersion;
    header.mCrc32 = header.calculateCrc32();

    auto rc = mPartition->write_raw(mBaseAddress, &header, sizeof(header));
    if (rc != ESP_OK) {
        mState = PageState::INVALID;
        return rc;
    }

    mNextFreeEntry = 0;
    std::fill_n(mEntryTable.data(), mEntryTable.byteSize() / sizeof(uint32_t), 0xffffffff);
    return ESP_OK;
}
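
/* Entry states are packed into the entry state table; a state change only
 * clears bits, so it can be committed by rewriting the affected 32-bit
 * table word in flash without an erase. alterEntryRangeState() walks the
 * range from the end so that each table word is written exactly once. */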
esp_err_t Page::alterEntryState(size_t index, EntryState state)
{
    assert(index < ENTRY_COUNT);
    mEntryTable.set(index, state);
    size_t wordToWrite = mEntryTable.getWordIndex(index);
    uint32_t word = mEntryTable.data()[wordToWrite];
    auto rc = mPartition->write_raw(mBaseAddress + ENTRY_TABLE_OFFSET + static_cast<uint32_t>(wordToWrite) * 4,
                                    &word, sizeof(word));
    if (rc != ESP_OK) {
        mState = PageState::INVALID;
        return rc;
    }
    return ESP_OK;
}

esp_err_t Page::alterEntryRangeState(size_t begin, size_t end, EntryState state)
{
    assert(end <= ENTRY_COUNT);
    assert(end > begin);
    size_t wordIndex = mEntryTable.getWordIndex(end - 1);
    for (ptrdiff_t i = end - 1; i >= static_cast<ptrdiff_t>(begin); --i) {
        mEntryTable.set(i, state);
        size_t nextWordIndex;
        if (i == static_cast<ptrdiff_t>(begin)) {
            nextWordIndex = (size_t) -1;
        } else {
            nextWordIndex = mEntryTable.getWordIndex(i - 1);
        }
        if (nextWordIndex != wordIndex) {
            uint32_t word = mEntryTable.data()[wordIndex];
            auto rc = mPartition->write_raw(mBaseAddress + ENTRY_TABLE_OFFSET + static_cast<uint32_t>(wordIndex) * 4,
                                            &word, 4);
            if (rc != ESP_OK) {
                return rc;
            }
        }
        wordIndex = nextWordIndex;
    }
    return ESP_OK;
}

esp_err_t Page::alterPageState(PageState state)
{
    uint32_t state_val = static_cast<uint32_t>(state);
    auto rc = mPartition->write_raw(mBaseAddress, &state_val, sizeof(state_val));
    if (rc != ESP_OK) {
        mState = PageState::INVALID;
        return rc;
    }
    mState = state;
    return ESP_OK;
}

esp_err_t Page::readEntry(size_t index, Item& dst) const
{
    auto rc = mPartition->read(getEntryAddress(index), &dst, sizeof(dst));
    if (rc != ESP_OK) {
        return rc;
    }
    return ESP_OK;
}
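
/* Core lookup routine. Uses the hash list to find a likely starting entry
 * for exact <namespace, type, key, chunk> queries, then scans WRITTEN
 * entries, skipping over the spans of variable-length items, until all of
 * the requested filters (namespace, key, chunk index, chunk-start version,
 * datatype) match. Entries with a bad CRC are erased along the way. */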
esp_err_t Page::findItem(uint8_t nsIndex, ItemType datatype, const char* key, size_t &itemIndex, Item& item, uint8_t chunkIdx, VerOffset chunkStart)
{
    if (mState == PageState::CORRUPT || mState == PageState::INVALID || mState == PageState::UNINITIALIZED) {
        return ESP_ERR_NVS_NOT_FOUND;
    }

    size_t findBeginIndex = itemIndex;
    if (findBeginIndex >= ENTRY_COUNT) {
        return ESP_ERR_NVS_NOT_FOUND;
    }

    size_t start = mFirstUsedEntry;
    if (findBeginIndex > mFirstUsedEntry && findBeginIndex < ENTRY_COUNT) {
        start = findBeginIndex;
    }

    size_t end = mNextFreeEntry;
    if (end > ENTRY_COUNT) {
        end = ENTRY_COUNT;
    }

    if (nsIndex != NS_ANY && datatype != ItemType::ANY && key != NULL) {
        size_t cachedIndex = mHashList.find(start, Item(nsIndex, datatype, 0, key, chunkIdx));
        if (cachedIndex < ENTRY_COUNT) {
            start = cachedIndex;
        } else {
            return ESP_ERR_NVS_NOT_FOUND;
        }
    }

    size_t next;
    for (size_t i = start; i < end; i = next) {
        next = i + 1;
        if (mEntryTable.get(i) != EntryState::WRITTEN) {
            continue;
        }

        auto rc = readEntry(i, item);
        if (rc != ESP_OK) {
            mState = PageState::INVALID;
            return rc;
        }

        auto crc32 = item.calculateCrc32();
        if (item.crc32 != crc32) {
            rc = eraseEntryAndSpan(i);
            if (rc != ESP_OK) {
                mState = PageState::INVALID;
                return rc;
            }
            continue;
        }

        if (isVariableLengthType(item.datatype)) {
            next = i + item.span;
        }

        if (nsIndex != NS_ANY && item.nsIndex != nsIndex) {
            continue;
        }

        if (key != nullptr && strncmp(key, item.key, Item::MAX_KEY_LENGTH) != 0) {
            continue;
        }

        /* For blob data, chunkIndex should match */
        if (chunkIdx != CHUNK_ANY
                && datatype == ItemType::BLOB_DATA
                && item.chunkIndex != chunkIdx) {
            continue;
        }

        /* Blob-index will match the <ns,key> with blob data.
         * Skip data chunks when searching for blob index */
        if (datatype == ItemType::BLOB_IDX
                && item.chunkIndex != CHUNK_ANY) {
            continue;
        }

        /* Match the version for blob-index */
        if (datatype == ItemType::BLOB_IDX
                && chunkStart != VerOffset::VER_ANY
                && item.blobIndex.chunkStart != chunkStart) {
            continue;
        }

        if (datatype != ItemType::ANY && item.datatype != datatype) {
            if (key == nullptr && nsIndex == NS_ANY && chunkIdx == CHUNK_ANY) {
                continue; // continue for brute-force search on blob indices
            }
            itemIndex = i;
            return ESP_ERR_NVS_TYPE_MISMATCH;
        }

        itemIndex = i;

        return ESP_OK;
    }

    return ESP_ERR_NVS_NOT_FOUND;
}

esp_err_t Page::getSeqNumber(uint32_t& seqNumber) const
{
    if (mState != PageState::UNINITIALIZED && mState != PageState::INVALID && mState != PageState::CORRUPT) {
        seqNumber = mSeqNumber;
        return ESP_OK;
    }
    return ESP_ERR_NVS_NOT_INITIALIZED;
}

esp_err_t Page::setSeqNumber(uint32_t seqNumber)
{
    if (mState != PageState::UNINITIALIZED) {
        return ESP_ERR_NVS_INVALID_STATE;
    }
    mSeqNumber = seqNumber;
    return ESP_OK;
}

esp_err_t Page::setVersion(uint8_t ver)
{
    if (mState != PageState::UNINITIALIZED) {
        return ESP_ERR_NVS_INVALID_STATE;
    }
    mVersion = ver;
    return ESP_OK;
}

esp_err_t Page::erase()
{
    auto rc = mPartition->erase_range(mBaseAddress, SPI_FLASH_SEC_SIZE);
    if (rc != ESP_OK) {
        mState = PageState::INVALID;
        return rc;
    }
    mUsedEntryCount = 0;
    mErasedEntryCount = 0;
    mFirstUsedEntry = INVALID_ENTRY;
    mNextFreeEntry = INVALID_ENTRY;
    mState = PageState::UNINITIALIZED;
    mHashList.clear();
    return ESP_OK;
}

esp_err_t Page::markFreeing()
{
    if (mState != PageState::FULL && mState != PageState::ACTIVE) {
        return ESP_ERR_NVS_INVALID_STATE;
    }
    return alterPageState(PageState::FREEING);
}

esp_err_t Page::markFull()
{
    if (mState != PageState::ACTIVE) {
        return ESP_ERR_NVS_INVALID_STATE;
    }
    return alterPageState(PageState::FULL);
}
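
/* Returns how many bytes of variable-length data can still be appended to
 * this page, reserving one entry for the item header that precedes the
 * data entries. */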
size_t Page::getVarDataTailroom() const
{
    if (mState == PageState::UNINITIALIZED) {
        return CHUNK_MAX_SIZE;
    } else if (mState == PageState::FULL) {
        return 0;
    }
    /* Skip one entry for the blob data item preceding the data */
    return ((mNextFreeEntry < (ENTRY_COUNT - 1)) ? ((ENTRY_COUNT - mNextFreeEntry - 1) * ENTRY_SIZE) : 0);
}

const char* Page::pageStateToName(PageState ps)
{
    switch (ps) {
    case PageState::CORRUPT:
        return "CORRUPT";
    case PageState::ACTIVE:
        return "ACTIVE";
    case PageState::FREEING:
        return "FREEING";
    case PageState::FULL:
        return "FULL";
    case PageState::INVALID:
        return "INVALID";
    case PageState::UNINITIALIZED:
        return "UNINITIALIZED";
    default:
        assert(0 && "invalid state value");
        return "";
    }
}

void Page::debugDump() const
{
    printf("state=%x (%s) addr=%x seq=%d\nfirstUsed=%d nextFree=%d used=%d erased=%d\n",
           (uint32_t) mState, pageStateToName(mState), mBaseAddress, mSeqNumber,
           static_cast<int>(mFirstUsedEntry), static_cast<int>(mNextFreeEntry),
           mUsedEntryCount, mErasedEntryCount);
    size_t skip = 0;
    for (size_t i = 0; i < ENTRY_COUNT; ++i) {
        printf("%3d: ", static_cast<int>(i));
        EntryState state = mEntryTable.get(i);
        if (state == EntryState::EMPTY) {
            printf("E\n");
        } else if (state == EntryState::ERASED) {
            printf("X\n");
        } else if (state == EntryState::WRITTEN) {
            Item item;
            readEntry(i, item);
            if (skip == 0) {
                printf("W ns=%2u type=%2u span=%3u key=\"%s\" chunkIdx=%d len=%d\n",
                       item.nsIndex, static_cast<unsigned>(item.datatype), item.span, item.key,
                       item.chunkIndex, (item.span != 1) ? ((int)item.varLength.dataSize) : -1);
                if (item.span > 0 && item.span <= ENTRY_COUNT - i) {
                    skip = item.span - 1;
                } else {
                    skip = 0;
                }
            } else {
                printf("D\n");
                skip--;
            }
        }
    }
}
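
/* Accumulates this page's contribution to the partition-wide statistics:
 * every page adds ENTRY_COUNT to total_entries, while the used/free counts
 * depend on the page state. Pages in the FREEING state are not expected
 * here, and INVALID pages report an error. */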
esp_err_t Page::calcEntries(nvs_stats_t &nvsStats)
{
    assert(mState != PageState::FREEING);

    nvsStats.total_entries += ENTRY_COUNT;

    switch (mState) {
    case PageState::UNINITIALIZED:
    case PageState::CORRUPT:
        nvsStats.free_entries += ENTRY_COUNT;
        break;
    case PageState::FULL:
    case PageState::ACTIVE:
        nvsStats.used_entries += mUsedEntryCount;
        nvsStats.free_entries += ENTRY_COUNT - mUsedEntryCount; // this is equivalent to free + erased entries
        break;
    case PageState::INVALID:
        return ESP_ERR_INVALID_STATE;
        break;
    default:
        assert(false && "Unhandled state");
        break;
    }

    return ESP_OK;
}

} // namespace nvs