Skip to content

Instantly share code, notes, and snippets.

@Staars
Last active September 7, 2025 04:58
Show Gist options
  • Select an option

  • Save Staars/2b0c4c4fef60b589a5655e9debd3e387 to your computer and use it in GitHub Desktop.

Select an option

Save Staars/2b0c4c4fef60b589a5655e9debd3e387 to your computer and use it in GitHub Desktop.
SHA ROM functions
#include "t_inner.h"
#if defined(USE_SHA_ROM)
#if defined(ESP_PLATFORM) && !defined(ESP8266)
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include "freertos/FreeRTOS.h"
#if __has_include("soc/sha_caps.h")
# include "soc/sha_caps.h"
#elif __has_include("soc/soc_caps.h")
# include "soc/soc_caps.h"
#else
# error "No SHA capability header found"
#endif
#if __has_include("esp_rom_sha.h")
# include "esp_rom_sha.h"
#elif __has_include("rom/sha.h")
# include "rom/sha.h"
#else
# error "No ROM SHA header found"
#endif
#define HAVE_ROM_SHA1 (SOC_SHA_SUPPORT_SHA1)
#define HAVE_ROM_SHA224 (SOC_SHA_SUPPORT_SHA224)
#define HAVE_ROM_SHA256 (SOC_SHA_SUPPORT_SHA256)
#define HAVE_ROM_SHA384 (SOC_SHA_SUPPORT_SHA384)
#define HAVE_ROM_SHA512 (SOC_SHA_SUPPORT_SHA512)
#if defined(SOC_SHA_SUPPORT_RESUME)
/* Newer chips: mode passed to init, update has no mode */
#define ROM_SHA_ENABLE() ets_sha_enable()
#define ROM_SHA_DISABLE() ets_sha_disable()
#define ROM_SHA_INIT_MODE(ctx, mode) ets_sha_init((ctx), (mode))
#define ROM_SHA_UPDATE_MODE(ctx, mode, d, l, last) \
ets_sha_update((ctx), (const unsigned char*)(d), (uint32_t)(l), (last))
#define ROM_SHA_FINISH_MODE(ctx, mode, out) ets_sha_finish((ctx), (unsigned char*)(out))
#define ROM_SHA_FINALIZE_MODE(ctx, mode, out) \
do { ROM_SHA_UPDATE_MODE((ctx), mode, NULL, 0, true); ROM_SHA_FINISH_MODE((ctx), mode, (out)); } while (0)
#else
/* Old ESP32: mode passed to update/finish, init takes only ctx */
#define ROM_SHA_ENABLE() ets_sha_enable()
#define ROM_SHA_DISABLE() ets_sha_disable()
#define ROM_SHA_INIT_MODE(ctx, mode) ets_sha_init((ctx))
#define ROM_SHA_UPDATE_MODE(ctx, mode, d, l, last) \
ets_sha_update((ctx), (mode), (const uint8_t*)(d), (size_t)((l) * 8))
#define ROM_SHA_FINISH_MODE(ctx, mode, out) ets_sha_finish((ctx), (mode), (uint8_t*)(out))
#define ROM_SHA_FINALIZE_MODE(ctx, mode, out) \
do { ROM_SHA_UPDATE_MODE((ctx), mode, NULL, 0, true); ROM_SHA_FINISH_MODE((ctx), mode, (out)); } while (0)
#endif
/* Enable periodic SHA finalization every BR_SHA256_FLUSH_SIZE bytes */
#if defined(CONFIG_IDF_TARGET_ESP32C3)
#define BR_SHA256_FLUSH_SIZE 512
#else
#define BR_SHA256_FLUSH_SIZE (((size_t)UINT32_MAX))
#endif
#ifndef BR_SHA256_DEBUG
#define BR_SHA256_DEBUG 0
#endif
#ifndef BR_SHA256_DEBUG_MORE
#define BR_SHA256_DEBUG_MORE 0
#endif
/* One global ROM context; accesses serialized across cores/tasks. */
static SHA_CTX rom_ctx;
static portMUX_TYPE s_sha_mux = portMUX_INITIALIZER_UNLOCKED;
#define SHA_ENTER() portENTER_CRITICAL(&s_sha_mux)
#define SHA_EXIT() portEXIT_CRITICAL(&s_sha_mux)
/* ================================================================
* SHA-1 (ROM path, no save/restore)
* ================================================================ */
#if HAVE_ROM_SHA1
/* Initialise a SHA-1 context and reset the shared ROM engine for SHA-1.
 * State lives in the single global rom_ctx, so this clobbers any other
 * in-flight ROM hash (no save/restore on this path). */
void br_sha1_init(br_sha1_context *cc)
{
    cc->vtable = &br_sha1_vtable;
    cc->count = 0;
    SHA_ENTER();
    ROM_SHA_ENABLE();
    ROM_SHA_INIT_MODE(&rom_ctx, SHA1);
    SHA_EXIT();
}
/* Feed len bytes into the shared ROM SHA-1 engine and bump cc->count. */
void br_sha1_update(br_sha1_context *cc, const void *data, size_t len)
{
    if (len == 0) {
        return;
    }
    SHA_ENTER();
    ROM_SHA_UPDATE_MODE(&rom_ctx, SHA1, data, len, false);
    cc->count += len;
    SHA_EXIT();
}
/* Finalize the shared ROM engine into out (20 bytes), then re-init the
 * engine for SHA-1. cc is unused: the state is global, not per-context. */
void br_sha1_out(const br_sha1_context *cc, void *out)
{
    (void)cc;
    SHA_ENTER();
    ROM_SHA_FINALIZE_MODE(&rom_ctx, SHA1, out);
    ROM_SHA_INIT_MODE(&rom_ctx, SHA1);
    SHA_EXIT();
}
/* BearSSL hash vtable for the ROM-backed SHA-1 implementation.
 * state()/set_state() are NULL: this path cannot export/import engine state. */
const br_hash_class br_sha1_vtable PROGMEM = {
sizeof(br_sha1_context),
BR_HASHDESC_ID(br_sha1_ID)
| BR_HASHDESC_OUT(20)
| BR_HASHDESC_LBLEN(6)
| BR_HASHDESC_MD_PADDING
| BR_HASHDESC_MD_PADDING_BE,
(void (*)(const br_hash_class **)) &br_sha1_init,
(void (*)(const br_hash_class **, const void *, size_t)) &br_sha1_update,
(void (*)(const br_hash_class *const *, void *)) &br_sha1_out,
NULL,
NULL
};
#endif
#define S256_SLAB_SIZE 1512 /* big enough for Matter worst-case */
#define S256_SLAB_COUNT 3 /* max concurrent big contexts */
static uint8_t s256_slabs[S256_SLAB_COUNT][S256_SLAB_SIZE];
static void *s256_slab_owner[S256_SLAB_COUNT];
static int s256_rr_next = 0;
static size_t s256_max_requested = 0;
static br_sha256_context *active_ctx = NULL;
/* Return the index of the slab whose storage contains p, or -1 when p is
 * not slab-backed (e.g. it points into a context's inline buffer). */
static inline int s256_slab_index_from_ptr(const uint8_t *p)
{
    uintptr_t v = (uintptr_t)p;
    for (int idx = 0; idx < S256_SLAB_COUNT; idx++) {
        uintptr_t lo = (uintptr_t)s256_slabs[idx];
        uintptr_t hi = lo + S256_SLAB_SIZE;
        if (v >= lo && v < hi) {
            return idx;
        }
    }
    return -1;
}
/*
 * Give ctx_owner a replay buffer for its SHA-2 history.
 *
 * Resolution order:
 *   0. A context whose full history fits the inline buffer keeps it
 *      (no slab assigned).
 *   1. A slab already owned by this context is reused.
 *   2. Otherwise the first free slab is assigned.
 *   3. Otherwise a slab is evicted in round-robin order.
 *
 * Always stores a non-NULL *buf_out and its capacity in *cap_out, so
 * callers' "if (new_buf)" checks are always true in practice.
 */
static void s256_claim_slab(void *ctx_owner, uint8_t **buf_out, size_t *cap_out)
{
br_sha256_context *cc = (br_sha256_context *)ctx_owner;
// If context fits entirely in inline buffer and is not requesting more, don't assign a slab
if (cc->replay_len <= S256_INLINE_CAP && cc->count <= S256_INLINE_CAP) {
#if BR_SHA256_DEBUG
ets_printf("[SHA2] s256_claim_slab: inline-only context — no slab assigned (len=%u count=%llu cap=%u)\n",
(unsigned)cc->replay_len, (unsigned long long)cc->count, (unsigned)S256_INLINE_CAP);
#endif
*buf_out = cc->replay_inline;
*cap_out = S256_INLINE_CAP;
return;
}
// Case 1: This context already owns a slab — reuse it
for (int i = 0; i < S256_SLAB_COUNT; i++) {
if (s256_slab_owner[i] == ctx_owner) {
#if BR_SHA256_DEBUG
ets_printf("[SHA2] s256_claim_slab: reuse slab #%d for ctx=%p\n",
i, ctx_owner);
#endif
*buf_out = s256_slabs[i];
*cap_out = S256_SLAB_SIZE;
return;
}
}
// Case 2: Find a free slab and assign it to this context
for (int i = 0; i < S256_SLAB_COUNT; i++) {
if (s256_slab_owner[i] == NULL) {
s256_slab_owner[i] = ctx_owner;
#if BR_SHA256_DEBUG
ets_printf("[SHA2] s256_claim_slab: assign NEW slab #%d to ctx=%p cap=%u\n",
i, ctx_owner, (unsigned)S256_SLAB_SIZE);
#endif
*buf_out = s256_slabs[i];
*cap_out = S256_SLAB_SIZE;
return;
}
}
// Case 3: No free slabs — evict one in round‑robin order
int victim = s256_rr_next;
s256_rr_next = (s256_rr_next + 1) % S256_SLAB_COUNT;
// Skip self as victim
// NOTE(review): defensive — Case 1 above already returned if this context
// owned a slab, so this branch looks unreachable. TODO confirm.
if (s256_slab_owner[victim] == ctx_owner) {
#if BR_SHA256_DEBUG
ets_printf("[SHA2] s256_claim_slab: victim is self, advancing\n");
#endif
victim = s256_rr_next;
s256_rr_next = (s256_rr_next + 1) % S256_SLAB_COUNT;
}
// Prevent eviction of inline-only contexts
// NOTE(review): after this single advance the new victim is NOT re-checked
// against ctx_owner or the inline-only rule; a second collision falls
// through to eviction below. TODO confirm acceptable for SLAB_COUNT == 3.
br_sha256_context *old_owner = (br_sha256_context *)s256_slab_owner[victim];
if (old_owner && old_owner->replay_len <= S256_INLINE_CAP && old_owner->count <= S256_INLINE_CAP) {
#if BR_SHA256_DEBUG
ets_printf("[SHA2] s256_claim_slab: cannot evict inline-only context %p (len=%u count=%llu)\n",
old_owner, (unsigned)old_owner->replay_len, (unsigned long long)old_owner->count);
#endif
// Try next victim
victim = s256_rr_next;
s256_rr_next = (s256_rr_next + 1) % S256_SLAB_COUNT;
}
#if BR_SHA256_DEBUG
ets_printf("[SHA2] s256_claim_slab: EVICT slab #%d old_owner=%p new_owner=%p\n",
victim, s256_slab_owner[victim], ctx_owner);
#endif
// The evicted owner detects the loss later via s256_slab_owner[] checks
// in ensure_active_ctx()/update()/state() and reclaims or falls back.
s256_slab_owner[victim] = ctx_owner;
*buf_out = s256_slabs[victim];
*cap_out = S256_SLAB_SIZE;
}
#if BR_SHA256_DEBUG
/* Debug helper: print the first up-to-16 bytes at p as spaced hex pairs
 * via ets_printf. tag labels the output line; n is the number of bytes
 * available at p. */
static void dump_hex(const char *tag, const uint8_t *p, size_t n)
{
    char buf[3 * 16 + 1];
    size_t m = (n > 16) ? 16 : n;
    static const char hx[] = "0123456789ABCDEF";
    /* Fix: when n == 0 the loop below never runs and buf used to be printed
     * uninitialized (unterminated) — keep it terminated unconditionally. */
    buf[0] = '\0';
    for (size_t i = 0; i < m; i++) {
        buf[3 * i + 0] = hx[p[i] >> 4];
        buf[3 * i + 1] = hx[p[i] & 15];
        buf[3 * i + 2] = (i + 1 < m) ? ' ' : '\0';
    }
    ets_printf("[SHA2] %s: first16=%s\n", tag, buf);
}
#endif
/* ================================================================
* SHA-224/256 common section
* ================================================================ */
#if HAVE_ROM_SHA224 || HAVE_ROM_SHA256
static void rom_sha2_update_common(br_sha224_context *cc, const void *data, size_t len);
/* BearSSL SHA-224 update entry point: delegates to the shared replay-buffer
 * SHA-224/256 path (br_sha256_update is aliased to the same helper). */
void br_sha224_update(br_sha224_context *cc, const void *data, size_t len) {
rom_sha2_update_common(cc, data, len);
}
// br_sha256_update is aliased in t_bearssl_hash.h
/*
 * Make the shared ROM engine hold cc's state up to the last
 * BR_SHA256_FLUSH_SIZE boundary.
 *
 * Fast path: cc already owns the engine and its history is fully captured.
 * Otherwise the engine is re-initialised and the replay buffer is re-fed in
 * whole flush-size chunks; the unflushed tail is fed later by update()/out().
 * Unreplayable or truncated contexts cannot be rebuilt and are left alone.
 */
static void
ensure_active_ctx(br_sha256_context *cc)
{
// Fast path: already active and valid, nothing to do
if (cc == active_ctx && cc->engine_valid && cc->replay_len == cc->count) {
#if BR_SHA256_DEBUG_MORE
ets_printf("[SHA2] context already active and complete: ctx=%p\n", cc);
#endif
return;
}
const uint8_t *rb = cc->replay_buf;
size_t rl = cc->replay_len;
// Engine mode follows the vtable: SHA-224 shares this code path
int mode = (cc->vtable == &br_sha256_vtable) ? SHA2_256 : SHA2_224;
// Cannot rebuild from history
if (!cc->replayable) {
if (cc == active_ctx && cc->engine_valid) {
// Still active in ROM engine — nothing to do
return;
}
#if BR_SHA256_DEBUG
ets_printf("[SHA2] ensure_active_ctx: unreplayable context — skipping restore (ctx=%p)\n", cc);
#endif
// Cold unreplayable: out() later falls back to cached digest or zeros.
return;
}
// Replay buffer shorter than count — truncated history
if (rl < cc->count) {
#if BR_SHA256_DEBUG
ets_printf("[SHA2] ensure_active_ctx: truncated replay — skipping restore (ctx=%p, replay_len=%u, count=%llu)\n",
cc, (unsigned)rl, (unsigned long long)cc->count);
#endif
return;
}
// Slab ownership check
int slab_idx = s256_slab_index_from_ptr(rb);
if (slab_idx >= 0 && s256_slab_owner[slab_idx] != cc) {
#if BR_SHA256_DEBUG
ets_printf("[SHA2] ensure_active_ctx: lost slab ownership; trying to reclaim\n");
#endif
uint8_t *new_buf; size_t new_cap;
s256_claim_slab(cc, &new_buf, &new_cap);
if (new_buf) {
// Copy history before the old slab's new owner overwrites it
memcpy(new_buf, rb, rl);
cc->replay_buf = new_buf;
cc->replay_cap = new_cap;
rb = new_buf;
} else {
// NOTE(review): s256_claim_slab always sets new_buf non-NULL, so this
// truncating fallback looks unreachable — TODO confirm.
#if BR_SHA256_DEBUG
ets_printf("[SHA2] ensure_active_ctx: reclaim failed; falling back to inline\n");
#endif
cc->replay_buf = cc->replay_inline;
cc->replay_cap = S256_INLINE_CAP;
if (rl > S256_INLINE_CAP) rl = S256_INLINE_CAP;
cc->replay_len = rl;
rb = cc->replay_inline;
}
#if BR_SHA256_DEBUG_MORE
} else if (slab_idx >= 0) {
ets_printf("[SHA2] ensure_active_ctx: still own slab #%d\n", slab_idx);
#endif
}
// Rebuild engine from replay buffer up to last flush boundary
SHA_ENTER();
ROM_SHA_ENABLE();
#if BR_SHA256_DEBUG_MORE
ets_printf("[SHA2] ensure_active_ctx: init mode=%d vtable=%p ctx=%p\n",
mode, cc->vtable, cc);
#endif
memset(&rom_ctx, 0, sizeof rom_ctx);
ROM_SHA_INIT_MODE(&rom_ctx, mode);
// Feed only whole flush-size chunks; callers replay the tail themselves
size_t chunks = rl / BR_SHA256_FLUSH_SIZE;
if (chunks > 0) {
size_t bytes = chunks * (size_t)BR_SHA256_FLUSH_SIZE;
ROM_SHA_UPDATE_MODE(&rom_ctx, mode, rb, bytes, false);
}
cc->flush_offset = (uint16_t)chunks;
SHA_EXIT();
active_ctx = cc;
cc->engine_valid = 1;
#if BR_SHA256_DEBUG_MORE
ets_printf("[SHA2] context switch: loaded ctx %p flush_offset=%u\n",
cc, (unsigned)cc->flush_offset);
if (rl) dump_hex("replay", cc->replay_buf, 64);
#endif
}
/*
 * Shared ROM-backed SHA-224/SHA-256 update().
 *
 * Strategy ("flush mode"): every input byte is appended to the per-context
 * replay buffer; only completed BR_SHA256_FLUSH_SIZE chunks are fed to the
 * ROM engine immediately (flush_offset counts fed chunks). This lets
 * ensure_active_ctx() rebuild the engine from the replay buffer after
 * another context has used it. If the replay buffer overflows, the context
 * becomes unreplayable and can only finalize while it still owns the engine.
 *
 * cc is declared br_sha224_context* but is freely cast to br_sha256_context*;
 * the two structs share layout (see the note at the end of this file).
 */
static void rom_sha2_update_common(br_sha224_context *cc, const void *data, size_t len)
{
if (!len) return;
#if BR_SHA256_DEBUG_MORE
ets_printf("[SHA2] ENTER update(): cc=%p len=%u\n", cc, (unsigned)len);
#endif
// Ensure engine reflects state up to the last flush boundary (no partial tails)
ensure_active_ctx((br_sha256_context *)cc);
int mode = (cc->vtable == &br_sha256_vtable) ? SHA2_256 : SHA2_224;
// Bump counters and invalidate cached digest first
cc->count += len;
cc->digest_valid = 0;
// Slab ownership check
{
int slab_idx = s256_slab_index_from_ptr(cc->replay_buf);
if (slab_idx >= 0 && s256_slab_owner[slab_idx] != cc) {
#if BR_SHA256_DEBUG
ets_printf("[SHA2] rom_sha2_update_common: lost slab ownership; attempting reclaim\n");
#endif
uint8_t *new_buf; size_t new_cap;
s256_claim_slab(cc, &new_buf, &new_cap);
if (new_buf) {
memcpy(new_buf, cc->replay_buf, cc->replay_len);
cc->replay_buf = new_buf;
cc->replay_cap = new_cap;
#if BR_SHA256_DEBUG
ets_printf("[SHA2] rom_sha2_update_common: reclaimed slab; cap=%u\n", (unsigned)new_cap);
#endif
} else {
// NOTE(review): s256_claim_slab always returns a buffer, so this
// branch (and the abort below) looks unreachable — TODO confirm.
#if BR_SHA256_DEBUG
ets_printf("[SHA2] rom_sha2_update_common: reclaim failed; falling back to inline\n");
#endif
if (cc->replay_len > S256_INLINE_CAP) {
ets_printf("[SHA2] FATAL: lost slab with replay_len=%u > inline cap=%u\n",
(unsigned)cc->replay_len, (unsigned)S256_INLINE_CAP);
abort();
}
cc->replay_buf = cc->replay_inline;
cc->replay_cap = S256_INLINE_CAP;
}
}
}
// Track peak replay-buffer demand (diagnostics only)
size_t need = cc->replay_len + len;
size_t room = (cc->replay_len < cc->replay_cap) ? (cc->replay_cap - cc->replay_len) : 0;
size_t intended = need <= cc->replay_cap ? need : cc->replay_cap;
if (intended > s256_max_requested) {
s256_max_requested = intended;
#if BR_SHA256_DEBUG
ets_printf("[SHA2] new peak requested=%u\n", (unsigned)intended);
#endif
}
#if BR_SHA256_DEBUG_MORE
ets_printf("[SHA2] rom_sha2_update_common: pre-update cap=%u len=%u need=%u room=%u\n",
(unsigned)cc->replay_cap, (unsigned)cc->replay_len,
(unsigned)need, (unsigned)room);
#endif
// Upgrade to slab if needed
if (need > cc->replay_cap && cc->replay_cap == S256_INLINE_CAP) {
uint8_t *new_buf; size_t new_cap;
#if BR_SHA256_DEBUG_MORE
ets_printf("[SHA2] rom_sha2_update_common: upgrade triggered (need=%u, cap=%u)\n",
(unsigned)need, (unsigned)cc->replay_cap);
#endif
s256_claim_slab(cc, &new_buf, &new_cap);
if (new_buf) {
memcpy(new_buf, cc->replay_buf, cc->replay_len);
cc->replay_buf = new_buf;
cc->replay_cap = new_cap;
room = cc->replay_cap - cc->replay_len;
#if BR_SHA256_DEBUG_MORE
ets_printf("[SHA2] rom_sha2_update_common: upgraded to slab: need=%u, new_cap=%u\n",
(unsigned)need, (unsigned)new_cap);
#endif
} else {
#if BR_SHA256_DEBUG
ets_printf("[SHA2] rom_sha2_update_common: slab pool exhausted — staying inline (cap=%u)\n",
(unsigned)cc->replay_cap);
#endif
}
}
// FLUSH MODE:
// 1) Append data to the replay buffer
size_t rl_prev = cc->replay_len;
room = (cc->replay_len < cc->replay_cap) ? (cc->replay_cap - cc->replay_len) : 0;
if (len <= room) {
memcpy(cc->replay_buf + cc->replay_len, data, len);
cc->replay_len += len;
cc->engine_valid = 1; // engine is current up to flush_offset*FLUSH_SIZE
} else {
// Overflow: keep whatever fits, then give up on replayability — the
// context can now only be finalized while it stays hot in the engine.
#if BR_SHA256_DEBUG
ets_printf("[SHA2] replay buffer overflow — stopping replay (need=%u, cap=%u, room=%u)\n",
(unsigned)(cc->replay_len + len), (unsigned)cc->replay_cap, (unsigned)room);
#endif
if (room) {
memcpy(cc->replay_buf + cc->replay_len, data, room);
cc->replay_len += room;
}
cc->replayable = 0;
cc->engine_valid = 1;
active_ctx = (br_sha256_context *)cc;
}
// 2) Feed only newly completed full flush-size chunks
size_t boundary = (size_t)cc->flush_offset * (size_t)BR_SHA256_FLUSH_SIZE;
while (cc->replay_len >= boundary + BR_SHA256_FLUSH_SIZE) {
const uint8_t *chunk_ptr = cc->replay_buf + boundary;
SHA_ENTER();
ROM_SHA_UPDATE_MODE(&rom_ctx, mode, chunk_ptr, BR_SHA256_FLUSH_SIZE, false);
SHA_EXIT();
cc->flush_offset += 1;
boundary += BR_SHA256_FLUSH_SIZE;
#if BR_SHA256_DEBUG_MORE
ets_printf("[SHA2] flush: ctx=%p new flush_offset=%u\n", cc, (unsigned)cc->flush_offset);
#endif
}
#if BR_SHA256_DEBUG_MORE
ets_printf("[SHA2] update(): ctx=%p count=%llu replay_len=%u cap=%u flush_offset=%u\n",
cc, (unsigned long long)cc->count, (unsigned)cc->replay_len,
(unsigned)cc->replay_cap, (unsigned)cc->flush_offset);
dump_hex("update()", cc->replay_buf, 64);
#endif
}
#endif /* HAVE_ROM_SHA224 || HAVE_ROM_SHA256 */
/* ================================================================
* SHA-224 (ROM path, no save/restore)
* ================================================================ */
#if HAVE_ROM_SHA224
/* Initialise a SHA-224 context and reset the shared ROM engine for SHA-224.
 * Like the other non-resumable paths, state lives only in global rom_ctx. */
void br_sha224_init(br_sha224_context *cc)
{
    cc->vtable = &br_sha224_vtable;
    cc->count = 0;
    SHA_ENTER();
    ROM_SHA_ENABLE();
    ROM_SHA_INIT_MODE(&rom_ctx, SHA2_224);
    SHA_EXIT();
}
/* Finalize the shared ROM engine as SHA-224: the digest is produced into a
 * 32-byte scratch buffer and its first 28 bytes are copied to out, then the
 * engine is re-initialised for SHA-224. */
void br_sha224_out(const br_sha224_context *cc, void *out)
{
    uint8_t digest[32];
    (void)cc;
    SHA_ENTER();
    ROM_SHA_FINALIZE_MODE(&rom_ctx, SHA2_224, digest);
    ROM_SHA_INIT_MODE(&rom_ctx, SHA2_224);
    SHA_EXIT();
    memcpy(out, digest, 28);
}
/* BearSSL hash vtable for the ROM-backed SHA-224 implementation.
 * state()/set_state() are NULL: this path cannot export/import engine state. */
const br_hash_class br_sha224_vtable PROGMEM = {
sizeof(br_sha224_context),
BR_HASHDESC_ID(br_sha224_ID)
| BR_HASHDESC_OUT(28)
| BR_HASHDESC_LBLEN(6)
| BR_HASHDESC_MD_PADDING
| BR_HASHDESC_MD_PADDING_BE,
(void (*)(const br_hash_class **)) &br_sha224_init,
(void (*)(const br_hash_class **, const void *, size_t)) &br_sha224_update,
(void (*)(const br_hash_class *const *, void *)) &br_sha224_out,
NULL,
NULL
};
#endif /* HAVE_ROM_SHA224 */
/* ================================================================
* SHA-256
* ================================================================ */
#if HAVE_ROM_SHA256 && defined(SOC_SHA_SUPPORT_RESUME)
#undef br_sha256_init
// #undef br_sha256_update
#undef br_sha256_out
#undef br_sha256_state
#undef br_sha256_set_state
/*
 * Initialise a resumable SHA-256 context.
 *
 * Resets the replay history to the inline buffer, invalidates any cached
 * digest, clears the shared ROM context and primes the engine for SHA-256.
 * On return this context owns the engine (active_ctx) and its engine state
 * matches its (empty) history.
 */
void br_sha256_init(br_sha256_context *cc)
{
    SHA_ENTER(); /* serialize access to the shared ROM engine */
    cc->vtable = &br_sha256_vtable;
    cc->count = 0;
    cc->replay_len = 0;
    cc->replay_buf = cc->replay_inline; /* history starts in the inline buffer */
    cc->replay_cap = S256_INLINE_CAP;
    cc->replayable = 1;   /* full history retained so far */
    cc->digest_valid = 0; /* no cached digest yet */
    cc->flush_offset = 0; /* nothing fed to the engine yet */
    memset(cc->digest, 0, sizeof cc->digest);
    memset(&rom_ctx, 0, sizeof rom_ctx); /* drop any stale engine state */
    ROM_SHA_ENABLE();
#if BR_SHA256_DEBUG_MORE
    ets_printf("[SHA2] init(): init mode=%d ctx=%p\n", SHA2_256, cc);
#endif
    ROM_SHA_INIT_MODE(&rom_ctx, SHA2_256);
    SHA_EXIT();
    active_ctx = cc;      /* this context now owns the engine */
    cc->engine_valid = 1; /* engine state matches context */
#if BR_SHA256_DEBUG_MORE
    ets_printf("[SHA256] init(): ctx=%p count=%llu flush_offset=%u\n",
               cc, (unsigned long long)cc->count, (unsigned)cc->flush_offset);
#endif
}
/*
 * Produce the 32-byte SHA-256 digest for cc into out.
 *
 * Resolution order:
 *   1. cached digest (digest_valid) — no engine access at all;
 *   2. unreplayable context: finalize directly if it is still hot in the
 *      engine, otherwise fall back to cached digest or zeros;
 *   3. normal: rebuild engine to the last flush boundary, feed the
 *      unflushed tail, finalize, and cache the digest.
 * Finalizing consumes the ROM engine state, so active_ctx is cleared and a
 * later update() on this context rebuilds from the replay buffer.
 */
void br_sha256_out(const br_sha256_context *cc_in, void *out)
{
br_sha256_context *cc = (br_sha256_context *)cc_in;
// Fast path: return cached digest without touching engine/history
if (cc->digest_valid) {
#if BR_SHA256_DEBUG
ets_printf("[SHA2] out(): using cached digest ctx=%p\n", cc);
#endif
memcpy(out, cc->digest, 32);
#if BR_SHA256_DEBUG_MORE
dump_hex("out() cached", (const uint8_t *)out, 32);
#endif
return;
}
// Unreplayable: handle hot vs cold safely
if (!cc->replayable) {
#if BR_SHA256_DEBUG
ets_printf("[SHA2] out(): unreplayable context ctx=%p\n", cc);
#endif
if (cc == active_ctx && cc->engine_valid) {
// Hot: engine is current — finalize directly
SHA_ENTER();
ROM_SHA_ENABLE();
ROM_SHA_FINALIZE_MODE(&rom_ctx, SHA2_256, cc->digest);
SHA_EXIT();
#if BR_SHA256_DEBUG_MORE
dump_hex("out() unreplayable-hot", (const uint8_t *)cc->digest, 32);
#endif
} else {
// Cold: cannot rebuild — use cached digest if any, else zero-fill
// NOTE(review): digest_valid is always 0 here (the fast path above
// returned otherwise), so this check is defensive only.
if (!cc->digest_valid) {
#if BR_SHA256_DEBUG
ets_printf("[SHA2] out(): cold unreplayable with no cached digest — returning zeros\n");
#endif
memset(cc->digest, 0, sizeof cc->digest);
}
}
memcpy(out, cc->digest, 32);
cc->digest_valid = 1;
cc->engine_valid = 0;
active_ctx = NULL;
return;
}
// Normal path: rebuild engine to last flush boundary if needed
ensure_active_ctx(cc);
size_t start = (size_t)cc->flush_offset * (size_t)BR_SHA256_FLUSH_SIZE;
size_t tail = (cc->replay_len > start) ? (cc->replay_len - start) : 0;
// Feed any unflushed tail and finalize
SHA_ENTER();
ROM_SHA_ENABLE();
if (tail) {
ROM_SHA_UPDATE_MODE(&rom_ctx, SHA2_256, cc->replay_buf + start, tail, false);
}
#if BR_SHA256_DEBUG_MORE
ets_printf("[SHA2] out(): finalising mode=%d ctx=%p tail=%u\n",
SHA2_256, cc, (unsigned)tail);
#endif
ROM_SHA_FINALIZE_MODE(&rom_ctx, SHA2_256, cc->digest);
SHA_EXIT();
memcpy(out, cc->digest, 32);
cc->digest_valid = 1;
cc->engine_valid = 0;
active_ctx = NULL;
#if BR_SHA256_DEBUG_MORE
dump_hex("out()", (const uint8_t *)out, 32);
ets_printf("[SHA256] digest: ctx=%p count=%llu replay_len=%u flush_offset=%u\n",
cc, (unsigned long long)cc->count,
(unsigned)cc->replay_len, (unsigned)cc->flush_offset);
#endif
}
/*
 * Export resumable state into dst; returns the number of bytes written.
 *
 * For a replayable context this is the raw replay history (replay_len
 * bytes, clamped to capacity). For an unreplayable context it is the
 * cached 32-byte digest instead.
 * NOTE(review): br_sha256_set_state() always interprets src as replay
 * history, so a caller restoring the digest form would replay the digest
 * bytes as input — TODO confirm callers never round-trip that form.
 */
uint64_t br_sha256_state(const br_sha256_context *cc, void *dst)
{
br_sha256_context *c = (br_sha256_context *)cc; // cast away const for internal checks
if (!c->replayable) { // unreplayable context — return cached digest instead
#if BR_SHA256_DEBUG
ets_printf("[SHA256] state(): using cached digest for unreplayable context ctx=%p\n", c);
#endif
memcpy(dst, c->digest, sizeof c->digest); // copy finalized digest
return sizeof c->digest; // length of digest
}
// If our replay buffer is in a slab but owned by someone else, try to reclaim it
int slab_idx = s256_slab_index_from_ptr(c->replay_buf); // -1 if not slab-backed
if (slab_idx >= 0 && s256_slab_owner[slab_idx] != c) { // lost slab ownership
#if BR_SHA256_DEBUG
ets_printf("[SHA256] state(): lost slab ownership; attempting reclaim\n");
#endif
uint8_t *new_buf; size_t new_cap;
s256_claim_slab(c, &new_buf, &new_cap); // try to get a slab back
if (new_buf) { // slab reclaimed
memcpy(new_buf, c->replay_buf, c->replay_len); // preserve full replay
c->replay_buf = new_buf;
c->replay_cap = new_cap;
#if BR_SHA256_DEBUG_MORE
ets_printf("[SHA256] state(): reclaimed slab; cap=%u\n", (unsigned)new_cap);
#endif
} else { // no slab available (NOTE(review): claim_slab never returns NULL — defensive)
#if BR_SHA256_DEBUG
ets_printf("[SHA256] state(): reclaim failed; staying inline\n");
#endif
if (c->replay_len > S256_INLINE_CAP) { // inline too small for current history
ets_printf("[SHA256] FATAL: lost slab with replay_len=%u > inline cap=%u\n",
(unsigned)c->replay_len, (unsigned)S256_INLINE_CAP);
abort(); // avoid silent truncation
}
c->replay_buf = c->replay_inline;
c->replay_cap = S256_INLINE_CAP;
}
}
size_t safe_len = (c->replay_len <= c->replay_cap) ? c->replay_len : c->replay_cap; // clamp to cap
memcpy(dst, c->replay_buf, safe_len); // copy replay buffer into dst
#if BR_SHA256_DEBUG_MORE
ets_printf("[SHA256] state(): ctx=%p count=%llu replay_len=%u (clamped=%u)\n",
c, (unsigned long long)c->count, (unsigned)c->replay_len, (unsigned)safe_len);
dump_hex("state buffer", c->replay_buf, 64);
#endif
return safe_len; // return number of bytes copied
}
/*
 * Restore a context from exported state: src is treated as `count` bytes of
 * raw input history (see br_sha256_state). The history is copied into the
 * inline buffer or an upgraded slab; if it does not fit, the context is
 * marked unreplayable and the history is truncated. Only fully-restored
 * contexts rebuild the ROM engine via ensure_active_ctx().
 */
void br_sha256_set_state(br_sha256_context *cc, const void *src, uint64_t count)
{
cc->count = count; // set total byte count
cc->replayable = 1; // assume replayable until proven otherwise
cc->digest_valid = 0; // no cached digest after restoring state
cc->engine_valid = 0; // force ensure_active_ctx() to rebuild engine
memset(cc->digest, 0, sizeof cc->digest); // clear cached digest
cc->replay_buf = cc->replay_inline; // start with inline buffer
cc->replay_cap = S256_INLINE_CAP; // inline capacity
size_t copy_len = (count <= cc->replay_cap)
? (size_t)count
: (size_t)cc->replay_cap; // bytes to copy initially
if ((size_t)count > cc->replay_cap) { // state won't fit inline, try to upgrade to slab
uint8_t *new_buf; size_t new_cap;
#if BR_SHA256_DEBUG_MORE
ets_printf("[SHA2] br_sha256_set_state: upgrade triggered (requested=%u, cap=%u)\n",
(unsigned)count, (unsigned)cc->replay_cap);
#endif
s256_claim_slab(cc, &new_buf, &new_cap);
if (new_buf) { // upgrade succeeded
cc->replay_buf = new_buf;
cc->replay_cap = new_cap;
copy_len = (count <= new_cap) ? (size_t)count : new_cap;
#if BR_SHA256_DEBUG
ets_printf("[SHA2] br_sha256_set_state: upgraded to slab: requested=%u, old_cap=%u, new_cap=%u\n",
(unsigned)count, (unsigned)S256_INLINE_CAP, (unsigned)new_cap);
#endif
} else { // no slab available (NOTE(review): claim_slab never returns NULL — defensive)
#if BR_SHA256_DEBUG
ets_printf("[SHA2] br_sha256_set_state: slab pool exhausted — staying inline (cap=%u)\n",
(unsigned)cc->replay_cap);
#endif
if (count > cc->replay_cap) {
// Can't fit full state — mark unreplayable and clamp
cc->replayable = 0;
copy_len = cc->replay_cap;
}
}
}
memcpy(cc->replay_buf, src, copy_len);
cc->replay_len = copy_len;
// Provisional value; ensure_active_ctx() recomputes it when it rebuilds.
cc->flush_offset = (uint16_t)(cc->replay_len / BR_SHA256_FLUSH_SIZE);
// If we truncated the state, mark unreplayable
if (copy_len < count) {
cc->replayable = 0;
}
// Only rebuild if we have the full history
// NOTE(review): a truncated (unreplayable) restore never rebuilds, so a
// later out() on it yields zeros — TODO confirm callers treat that as error.
if (cc->replayable) {
ensure_active_ctx(cc);
}
#if BR_SHA256_DEBUG_MORE
ets_printf("[SHA256] set_state(): replayed %u bytes, flush_offset=%u, replayable=%d\n",
(unsigned)copy_len, (unsigned)cc->flush_offset, cc->replayable);
dump_hex("set_state() replayed", cc->replay_buf, 64);
#endif
}
const br_hash_class br_sha256_vtable = {
sizeof(br_sha256_context),
BR_HASHDESC_ID(br_sha256_ID)
| BR_HASHDESC_OUT(32)
| BR_HASHDESC_STATE(192)
| BR_HASHDESC_LBLEN(6)
| BR_HASHDESC_MD_PADDING
| BR_HASHDESC_MD_PADDING_BE,
(void (*)(const br_hash_class **)) &br_sha256_init,
(void (*)(const br_hash_class **, const void *, size_t)) &rom_sha2_update_common,
(void (*)(const br_hash_class *const *, void *)) &br_sha256_out,
(uint64_t (*)(const br_hash_class *const *, void *)) &br_sha256_state,
(void (*)(const br_hash_class **, const void *, uint64_t)) &br_sha256_set_state
};
#endif /* HAVE_ROM_SHA256 && defined(SOC_SHA_SUPPORT_RESUME) */
/* ================================================================
* SHA-384 (ROM path, no save/restore)
* ================================================================ */
#if HAVE_ROM_SHA384
/* Initialise a SHA-384 context and reset the shared ROM engine for SHA-384.
 * Non-resumable: all hashing state lives in the global rom_ctx. */
void br_sha384_init(br_sha384_context *cc)
{
    cc->vtable = &br_sha384_vtable;
    cc->count = 0;
    SHA_ENTER();
    ROM_SHA_ENABLE();
    ROM_SHA_INIT_MODE(&rom_ctx, SHA2_384);
    SHA_EXIT();
}
/* Feed len bytes into the shared ROM SHA-384 engine and bump cc->count. */
void br_sha384_update(br_sha384_context *cc, const void *data, size_t len)
{
    if (len == 0) {
        return;
    }
    SHA_ENTER();
    ROM_SHA_UPDATE_MODE(&rom_ctx, SHA2_384, data, len, false);
    cc->count += len;
    SHA_EXIT();
}
/* Finalize the shared ROM engine as SHA-384: the result is produced into a
 * 64-byte scratch buffer and its first 48 bytes are copied to out, then the
 * engine is re-initialised for SHA-384. */
void br_sha384_out(const br_sha384_context *cc, void *out)
{
    uint8_t digest[64];
    (void)cc;
    SHA_ENTER();
    ROM_SHA_FINALIZE_MODE(&rom_ctx, SHA2_384, digest);
    ROM_SHA_INIT_MODE(&rom_ctx, SHA2_384);
    SHA_EXIT();
    memcpy(out, digest, 48);
}
/* BearSSL hash vtable for the ROM-backed SHA-384 implementation.
 * state()/set_state() are NULL: this path cannot export/import engine state. */
const br_hash_class br_sha384_vtable PROGMEM = {
sizeof(br_sha384_context),
BR_HASHDESC_ID(br_sha384_ID)
| BR_HASHDESC_OUT(48)
| BR_HASHDESC_LBLEN(7)
| BR_HASHDESC_MD_PADDING
| BR_HASHDESC_MD_PADDING_BE,
(void (*)(const br_hash_class **)) &br_sha384_init,
(void (*)(const br_hash_class **, const void *, size_t)) &br_sha384_update,
(void (*)(const br_hash_class *const *, void *)) &br_sha384_out,
NULL,
NULL
};
#endif
/* ================================================================
* SHA-512 (ROM path, no save/restore)
* ================================================================ */
#if HAVE_ROM_SHA512
#undef br_sha512_update
/* Initialise a SHA-512 context and reset the shared ROM engine for SHA-512.
 * Non-resumable: all hashing state lives in the global rom_ctx. */
void br_sha512_init(br_sha512_context *cc)
{
    cc->vtable = &br_sha512_vtable;
    cc->count = 0;
    SHA_ENTER();
    ROM_SHA_ENABLE();
    ROM_SHA_INIT_MODE(&rom_ctx, SHA2_512);
    SHA_EXIT();
}
/* Feed len bytes into the shared ROM SHA-512 engine and bump cc->count. */
void br_sha512_update(br_sha512_context *cc, const void *data, size_t len)
{
    if (len == 0) {
        return;
    }
    SHA_ENTER();
    ROM_SHA_UPDATE_MODE(&rom_ctx, SHA2_512, data, len, false);
    cc->count += len;
    SHA_EXIT();
}
/* Finalize the shared ROM engine into out (64 bytes), then re-init the
 * engine for SHA-512. cc is unused: the state is global, not per-context. */
void br_sha512_out(const br_sha512_context *cc, void *out)
{
    (void)cc;
    SHA_ENTER();
    ROM_SHA_FINALIZE_MODE(&rom_ctx, SHA2_512, out);
    ROM_SHA_INIT_MODE(&rom_ctx, SHA2_512);
    SHA_EXIT();
}
/* BearSSL hash vtable for the ROM-backed SHA-512 implementation.
 * state()/set_state() are NULL: this path cannot export/import engine state. */
const br_hash_class br_sha512_vtable PROGMEM = {
sizeof(br_sha512_context),
BR_HASHDESC_ID(br_sha512_ID)
| BR_HASHDESC_OUT(64)
| BR_HASHDESC_LBLEN(7)
| BR_HASHDESC_MD_PADDING
| BR_HASHDESC_MD_PADDING_BE,
(void (*)(const br_hash_class **)) &br_sha512_init,
(void (*)(const br_hash_class **, const void *, size_t)) &br_sha512_update,
(void (*)(const br_hash_class *const *, void *)) &br_sha512_out,
NULL,
NULL
};
#endif
#else
/* ===== ESP8266 - leave it unchanged ===== */
#endif
#endif //USE_SHA_ROM
/* needs this in t_bearssl_hash.h
typedef struct {
const br_hash_class *vtable;
#ifndef BR_DOXYGEN_IGNORE
#ifdef USE_SHA_ROM
unsigned char buf[0]; // would be wasted space with ROM functions
#else
unsigned char buf[64];
#endif
uint64_t count;
uint32_t val[8];
#endif
#if defined(USE_SHA_ROM)
#define S256_INLINE_CAP 192
uint8_t digest[32];
unsigned char replay_inline[S256_INLINE_CAP];
unsigned char *replay_buf;
size_t replay_len;
size_t replay_cap;
uint16_t flush_offset;
uint8_t replayable;
uint8_t digest_valid;
uint8_t engine_valid;
#endif // USE_SHA_ROM
} br_sha224_context;
*/
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment