@magik6k
Created November 23, 2022 20:21
This file has been truncated.
Only in b/vendor/github.com: armon
diff -r --color a/vendor/github.com/bep/debounce/README.md b/vendor/github.com/bep/debounce/README.md
3c3
< [![Build Status](https://travis-ci.org/bep/debounce.svg)](https://travis-ci.org/bep/debounce)
---
> [![Tests on Linux, MacOS and Windows](https://github.com/bep/debounce/workflows/Test/badge.svg)](https://github.com/bep/debounce/actions?query=workflow:Test)
Only in a/vendor/github.com/bep/debounce: .travis.yml
Only in b/vendor/github.com: boltdb
Only in a/vendor/github.com: cheekybits
diff -r --color a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go
178a179,183
> // Connected returns whether conn is connected
> func (c *Conn) Connected() bool {
>     return c.sysconn.Connected() && c.sigconn.Connected()
> }
>
diff -r --color a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go
419a420,442
> // GetUnitByPID returns the unit object path of the unit a process ID
> // belongs to. It takes a UNIX PID and returns the object path. The PID must
> // refer to an existing system process
> func (c *Conn) GetUnitByPID(ctx context.Context, pid uint32) (dbus.ObjectPath, error) {
>     var result dbus.ObjectPath
>
>     err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.GetUnitByPID", 0, pid).Store(&result)
>
>     return result, err
> }
>
> // GetUnitNameByPID returns the name of the unit a process ID belongs to. It
> // takes a UNIX PID and returns the object path. The PID must refer to an
> // existing system process
> func (c *Conn) GetUnitNameByPID(ctx context.Context, pid uint32) (string, error) {
>     path, err := c.GetUnitByPID(ctx, pid)
>     if err != nil {
>         return "", err
>     }
>
>     return unitName(path), nil
> }
>
829a853,863
> }
>
> // Freeze the cgroup associated with the unit.
> // Note that FreezeUnit and ThawUnit are only supported on systems running with cgroup v2.
> func (c *Conn) FreezeUnit(ctx context.Context, unit string) error {
>     return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.FreezeUnit", 0, unit).Store()
> }
>
> // Unfreeze the cgroup associated with the unit.
> func (c *Conn) ThawUnit(ctx context.Context, unit string) error {
>     return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ThawUnit", 0, unit).Store()
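
Note: the two hunks above add GetUnitByPID/GetUnitNameByPID plus FreezeUnit/ThawUnit, all thin wrappers over method calls on org.freedesktop.systemd1.Manager. For reference, a minimal C sketch of the same two D-Bus calls through libsystemd's sd-bus (an illustration only, not part of the diff; it assumes a systemd new enough to expose FreezeUnit, and the PID and unit name are placeholders):

    #include <stdio.h>
    #include <systemd/sd-bus.h>

    int main(void) {
        sd_bus *bus = NULL;
        sd_bus_message *reply = NULL;
        sd_bus_error error = SD_BUS_ERROR_NULL;
        const char *path;

        if (sd_bus_default_system(&bus) < 0)
            return 1;

        /* Manager.GetUnitByPID(u pid) -> (o path); PID 1234 is a placeholder */
        if (sd_bus_call_method(bus, "org.freedesktop.systemd1",
                               "/org/freedesktop/systemd1",
                               "org.freedesktop.systemd1.Manager",
                               "GetUnitByPID", &error, &reply, "u", 1234u) >= 0 &&
            sd_bus_message_read(reply, "o", &path) >= 0)
            printf("unit: %s\n", path);

        /* Manager.FreezeUnit(s name) -> (); cgroup v2 only, as the Go comment notes */
        sd_bus_call_method(bus, "org.freedesktop.systemd1",
                           "/org/freedesktop/systemd1",
                           "org.freedesktop.systemd1.Manager",
                           "FreezeUnit", &error, NULL, "s", "example.service");

        sd_bus_message_unref(reply);
        sd_bus_error_free(&error);
        sd_bus_unref(bus);
        return 0;
    }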
diff -r --color a/vendor/github.com/daaku/go.zipexe/zipexe.go b/vendor/github.com/daaku/go.zipexe/zipexe.go
128,129c128,131
<         if zfile, err := zip.NewReader(sect, int64(sect.Size)); err == nil {
<             return zfile, nil
---
>         if sect.ReaderAt != nil {
>             if zfile, err := zip.NewReader(sect, int64(sect.Size)); err == nil {
>                 return zfile, nil
>             }
diff -r --color a/vendor/github.com/DataDog/zstd/bitstream.h b/vendor/github.com/DataDog/zstd/bitstream.h
59a60,61
> #elif defined(__ICCARM__)
> # include <intrinsics.h>
165c167,169
< return 31 - __builtin_clz (val);
---
> return __builtin_clz (val) ^ 31;
> # elif defined(__ICCARM__) /* IAR Intrinsic */
> return 31 - __CLZ(val);
242a247
> assert(bitC->ptr <= bitC->endPtr);
245d249
< assert(bitC->ptr <= bitC->endPtr);
258a263
> assert(bitC->ptr <= bitC->endPtr);
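
Note: the rewrite above of 31 - __builtin_clz(val) into __builtin_clz(val) ^ 31 is behavior-preserving: for nonzero val, clz(val) lies in [0,31], and subtracting a value of at most 31 from 31 (binary 11111) never borrows, so the subtraction equals XOR with 31. A quick self-check sketch (illustration only; GCC/Clang builtins assumed):

    #include <assert.h>

    int main(void) {
        unsigned v;
        for (v = 1; v != 0; v <<= 1) {          /* covers clz results 31..0 */
            int const clz = __builtin_clz(v);
            assert(31 - clz == (clz ^ 31));
        }
        return 0;
    }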
diff -r --color a/vendor/github.com/DataDog/zstd/compiler.h b/vendor/github.com/DataDog/zstd/compiler.h
26c26
< #if defined(__GNUC__)
---
> #if defined(__GNUC__) || defined(__ICCARM__)
63a64,70
> /* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
> #if defined(__GNUC__)
> # define UNUSED_ATTR __attribute__((unused))
> #else
> # define UNUSED_ATTR
> #endif
>
68c75
< # ifdef __GNUC__
---
> # if defined(__GNUC__) || defined(__ICCARM__)
79c86
< #if defined(__GNUC__)
---
> #if defined(__GNUC__) || defined(__ICCARM__)
130c137,138
< /* vectorization */
---
> /* vectorization
> * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax */
132c140,144
< # define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
---
> # if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
> # define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
> # else
> # define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")")
> # endif
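
Note: UNUSED_ATTR, added in the compiler.h hunk above, silences unused-function warnings on GCC-compatible compilers and expands to nothing elsewhere. A usage sketch (illustration only; the macro definition is repeated so the snippet stands alone, and debug_only_helper is a hypothetical name):

    #if defined(__GNUC__)
    #  define UNUSED_ATTR __attribute__((unused))
    #else
    #  define UNUSED_ATTR
    #endif

    /* a helper some translation units never call; UNUSED_ATTR keeps
     * -Wunused-function quiet there */
    static UNUSED_ATTR int debug_only_helper(int x) { return x * 2; }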
diff -r --color a/vendor/github.com/DataDog/zstd/cover.c b/vendor/github.com/DataDog/zstd/cover.c
641,642c641,642
< "training on sources at least 10x, and up to 100x the "
< "size of the dictionary!\n", (U32)maxDictSize,
---
> "training on sources at least 10x, and preferably 100x "
> "the size of the dictionary! \n", (U32)maxDictSize,
922,923c922,926
< if (!dict) {
< return;
---
> if (dict) {
> memcpy(best->dict, dict, dictSize);
> best->dictSize = dictSize;
> best->parameters = parameters;
> best->compressedSize = compressedSize;
925,928d927
< memcpy(best->dict, dict, dictSize);
< best->dictSize = dictSize;
< best->parameters = parameters;
< best->compressedSize = compressedSize;
diff -r --color a/vendor/github.com/DataDog/zstd/fse_decompress.c b/vendor/github.com/DataDog/zstd/fse_decompress.c
54a55
> #ifndef CHECK_F
55a57
> #endif
diff -r --color a/vendor/github.com/DataDog/zstd/fse.h b/vendor/github.com/DataDog/zstd/fse.h
311c311
< #define FSE_BLOCKBOUND(size) (size + (size>>7))
---
> #define FSE_BLOCKBOUND(size) (size + (size>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */)
diff -r --color a/vendor/github.com/DataDog/zstd/huf_decompress.c b/vendor/github.com/DataDog/zstd/huf_decompress.c
63a64
> #ifndef CHECK_F
64a66
> #endif
diff -r --color a/vendor/github.com/DataDog/zstd/mem.h b/vendor/github.com/DataDog/zstd/mem.h
49a50,122
> /* detects whether we are being compiled under msan */
> #if defined (__has_feature)
> # if __has_feature(memory_sanitizer)
> # define MEMORY_SANITIZER 1
> # endif
> #endif
>
> #if defined (MEMORY_SANITIZER)
> /* Not all platforms that support msan provide sanitizers/msan_interface.h.
> * We therefore declare the functions we need ourselves, rather than trying to
> * include the header file... */
>
> #include <stdint.h> /* intptr_t */
>
> /* Make memory region fully initialized (without changing its contents). */
> void __msan_unpoison(const volatile void *a, size_t size);
>
> /* Make memory region fully uninitialized (without changing its contents).
> This is a legacy interface that does not update origin information. Use
> __msan_allocated_memory() instead. */
> void __msan_poison(const volatile void *a, size_t size);
>
> /* Returns the offset of the first (at least partially) poisoned byte in the
> memory range, or -1 if the whole range is good. */
> intptr_t __msan_test_shadow(const volatile void *x, size_t size);
> #endif
>
> /* detects whether we are being compiled under asan */
> #if defined (__has_feature)
> # if __has_feature(address_sanitizer)
> # define ADDRESS_SANITIZER 1
> # endif
> #elif defined(__SANITIZE_ADDRESS__)
> # define ADDRESS_SANITIZER 1
> #endif
>
> #if defined (ADDRESS_SANITIZER)
> /* Not all platforms that support asan provide sanitizers/asan_interface.h.
> * We therefore declare the functions we need ourselves, rather than trying to
> * include the header file... */
>
> /**
> * Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
> *
> * This memory must be previously allocated by your program. Instrumented
> * code is forbidden from accessing addresses in this region until it is
> * unpoisoned. This function is not guaranteed to poison the entire region -
> * it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
> * alignment restrictions.
> *
> * \note This function is not thread-safe because no two threads can poison or
> * unpoison memory in the same memory region simultaneously.
> *
> * \param addr Start of memory region.
> * \param size Size of memory region. */
> void __asan_poison_memory_region(void const volatile *addr, size_t size);
>
> /**
> * Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
> *
> * This memory must be previously allocated by your program. Accessing
> * addresses in this region is allowed until this region is poisoned again.
> * This function could unpoison a super-region of <c>[addr, addr+size)</c> due
> * to ASan alignment restrictions.
> *
> * \note This function is not thread-safe because no two threads can
> * poison or unpoison memory in the same memory region simultaneously.
> *
> * \param addr Start of memory region.
> * \param size Size of memory region. */
> void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
> #endif
>
105c178
< # elif defined(__INTEL_COMPILER) || defined(__GNUC__)
---
> # elif defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)
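
Note: the mem.h hunk above hand-declares the msan/asan entry points so zstd can poison and unpoison workspace memory without depending on the sanitizer headers. A sketch of the pattern those declarations enable (illustration only; with_scratch and the scratch buffer are hypothetical names, and the detection block mirrors the hunk):

    #include <stddef.h>

    #if defined (__has_feature)
    #  if __has_feature(address_sanitizer)
    #    define ADDRESS_SANITIZER 1
    #  endif
    #elif defined(__SANITIZE_ADDRESS__)
    #  define ADDRESS_SANITIZER 1
    #endif

    #if defined (ADDRESS_SANITIZER)
    void __asan_poison_memory_region(void const volatile *addr, size_t size);
    void __asan_unpoison_memory_region(void const volatile *addr, size_t size);

    static char scratch[256];

    /* keep the scratch region poisoned between uses so stray reads/writes
     * are reported, the same idea the new ZSTD_cwksp code applies to tables */
    static void with_scratch(void (*fn)(char *, size_t)) {
        __asan_unpoison_memory_region(scratch, sizeof scratch);
        fn(scratch, sizeof scratch);
        __asan_poison_memory_region(scratch, sizeof scratch);
    }
    #endif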
diff -r --color a/vendor/github.com/DataDog/zstd/pool.c b/vendor/github.com/DataDog/zstd/pool.c
130,132c130,136
<     (void)ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);
<     (void)ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL);
<     (void)ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);
---
>     {
>         int error = 0;
>         error |= ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);
>         error |= ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL);
>         error |= ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);
>         if (error) { POOL_free(ctx); return NULL; }
>     }
diff -r --color a/vendor/github.com/DataDog/zstd/README.md b/vendor/github.com/DataDog/zstd/README.md
3c3,4
< [C Zstd Homepage](https://github.com/Cyan4973/zstd)
---
> [![CircleCI](https://circleci.com/gh/DataDog/zstd/tree/1.x.svg?style=svg)](https://circleci.com/gh/DataDog/zstd/tree/1.x)
> [![GoDoc](https://godoc.org/github.com/DataDog/zstd?status.svg)](https://godoc.org/github.com/DataDog/zstd)
5,6c6,10
< The current headers and C files are from *v1.4.1* (Commit
< [52181f8](https://github.com/facebook/zstd/releases/tag/v1.4.1)).
---
>
> [C Zstd Homepage](https://github.com/facebook/zstd)
>
> The current headers and C files are from *v1.4.4* (Commit
> [10f0e699](https://github.com/facebook/zstd/releases/tag/v1.4.4)).
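
Note: with the bundled C sources now at zstd v1.4.4, the one-shot API is unchanged. A minimal round-trip against it (illustration only):

    #include <assert.h>
    #include <string.h>
    #include <zstd.h>

    int main(void) {
        const char src[] = "hello hello hello hello";
        char comp[256], back[256];   /* ample here; ZSTD_compressBound(srcSize) is the tight bound */

        size_t const csize = ZSTD_compress(comp, sizeof comp, src, sizeof src, 3);
        assert(!ZSTD_isError(csize));

        size_t const dsize = ZSTD_decompress(back, sizeof back, comp, csize);
        assert(!ZSTD_isError(dsize) && dsize == sizeof src);
        assert(memcmp(src, back, dsize) == 0);
        return 0;
    }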
diff -r --color a/vendor/github.com/DataDog/zstd/threading.c b/vendor/github.com/DataDog/zstd/threading.c
16a17,18
> #include "threading.h"
>
31d32
< #include "threading.h"
75a77,120
>
> #if defined(ZSTD_MULTITHREAD) && DEBUGLEVEL >= 1 && !defined(_WIN32)
>
> #include <stdlib.h>
>
> int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr)
> {
>     *mutex = (pthread_mutex_t*)malloc(sizeof(pthread_mutex_t));
>     if (!*mutex)
>         return 1;
>     return pthread_mutex_init(*mutex, attr);
> }
>
> int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex)
> {
>     if (!*mutex)
>         return 0;
>     {
>         int const ret = pthread_mutex_destroy(*mutex);
>         free(*mutex);
>         return ret;
>     }
> }
>
> int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr)
> {
>     *cond = (pthread_cond_t*)malloc(sizeof(pthread_cond_t));
>     if (!*cond)
>         return 1;
>     return pthread_cond_init(*cond, attr);
> }
>
> int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond)
> {
>     if (!*cond)
>         return 0;
>     {
>         int const ret = pthread_cond_destroy(*cond);
>         free(*cond);
>         return ret;
>     }
> }
>
> #endif
diff -r --color a/vendor/github.com/DataDog/zstd/threading.h b/vendor/github.com/DataDog/zstd/threading.h
15a16,17
> #include "debug.h"
>
78c80
< #elif defined(ZSTD_MULTITHREAD) /* posix assumed ; need a better detection method */
---
> #elif defined(ZSTD_MULTITHREAD) /* posix assumed ; need a better detection method */
81a84,85
> #if DEBUGLEVEL < 1
>
97a102,128
>
> #else /* DEBUGLEVEL >= 1 */
>
> /* Debug implementation of threading.
> * In this implementation we use pointers for mutexes and condition variables.
> * This way, if we forget to init/destroy them the program will crash or ASAN
> * will report leaks.
> */
>
> #define ZSTD_pthread_mutex_t pthread_mutex_t*
> int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr);
> int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex);
> #define ZSTD_pthread_mutex_lock(a) pthread_mutex_lock(*(a))
> #define ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock(*(a))
>
> #define ZSTD_pthread_cond_t pthread_cond_t*
> int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr);
> int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond);
> #define ZSTD_pthread_cond_wait(a, b) pthread_cond_wait(*(a), *(b))
> #define ZSTD_pthread_cond_signal(a) pthread_cond_signal(*(a))
> #define ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast(*(a))
>
> #define ZSTD_pthread_t pthread_t
> #define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
> #define ZSTD_pthread_join(a, b) pthread_join((a),(b))
>
> #endif
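
Note: with DEBUGLEVEL >= 1 the wrappers above heap-allocate the underlying pthread objects, so a missing destroy surfaces as a leak under ASAN, while callers are written exactly as before. A usage sketch (illustration only; requires a ZSTD_MULTITHREAD build, and demo is a hypothetical name):

    #include "threading.h"

    static ZSTD_pthread_mutex_t lock;

    static void demo(void) {
        ZSTD_pthread_mutex_init(&lock, NULL);
        ZSTD_pthread_mutex_lock(&lock);
        /* ... critical section ... */
        ZSTD_pthread_mutex_unlock(&lock);
        ZSTD_pthread_mutex_destroy(&lock);  /* forgetting this now leaks, and ASAN reports it */
    }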
diff -r --color a/vendor/github.com/DataDog/zstd/travis_test_32.sh b/vendor/github.com/DataDog/zstd/travis_test_32.sh
6,7c6,7
< wget -q https://dl.google.com/go/go1.11.1.linux-386.tar.gz
< tar -C /usr/local -xzf go1.11.1.linux-386.tar.gz
---
> wget -q https://dl.google.com/go/go1.13.linux-386.tar.gz
> tar -C /usr/local -xzf go1.13.linux-386.tar.gz
15d14
< cd zstd
Only in a/vendor/github.com/DataDog/zstd: .travis.yml
diff -r --color a/vendor/github.com/DataDog/zstd/xxhash.c b/vendor/github.com/DataDog/zstd/xxhash.c
56c56,57
< (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
---
> (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) || \
> defined(__ICCARM__)
123c124
< #if defined(__GNUC__)
---
> #if defined(__GNUC__) || defined(__ICCARM__)
208a210,213
> #if defined(__ICCARM__)
> # include <intrinsics.h>
> # define XXH_rotl32(x,r) __ROR(x,(32 - r))
> #else
209a215
> #endif
diff -r --color a/vendor/github.com/DataDog/zstd/zbuff.h b/vendor/github.com/DataDog/zstd/zbuff.h
39,42c39,43
< it is generally possible to disable them,
< typically with -Wno-deprecated-declarations for gcc
< or _CRT_SECURE_NO_WARNINGS in Visual.
< Otherwise, it's also possible to define ZBUFF_DISABLE_DEPRECATE_WARNINGS */
---
> * it is generally possible to disable them,
> * typically with -Wno-deprecated-declarations for gcc
> * or _CRT_SECURE_NO_WARNINGS in Visual.
> * Otherwise, it's also possible to define ZBUFF_DISABLE_DEPRECATE_WARNINGS
> */
48c49
< # elif (defined(__GNUC__) && (__GNUC__ >= 5)) || defined(__clang__)
---
> # elif (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) || defined(__clang__)
diff -r --color a/vendor/github.com/DataDog/zstd/zdict.c b/vendor/github.com/DataDog/zstd/zdict.c
574c574
< size_t p=0;;
---
> size_t p=0;
diff -r --color a/vendor/github.com/DataDog/zstd/zstd_compress.c b/vendor/github.com/DataDog/zstd/zstd_compress.c
23a24,25
> #include "zstd_compress_sequences.h"
> #include "zstd_compress_literals.h"
43d44
< void* dictBuffer;
46,47c47,48
< void* workspace;
< size_t workspaceSize;
---
> U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
> ZSTD_cwksp workspace;
51a53
> int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
85c87,88
< ZSTD_CCtx* const cctx = (ZSTD_CCtx*) workspace;
---
> ZSTD_cwksp ws;
> ZSTD_CCtx* cctx;
88c91,98
< memset(workspace, 0, workspaceSize); /* may be a bit generous, could memset be smaller ? */
---
> ZSTD_cwksp_init(&ws, workspace, workspaceSize);
>
> cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
> if (cctx == NULL) {
> return NULL;
> }
> memset(cctx, 0, sizeof(ZSTD_CCtx));
> ZSTD_cwksp_move(&cctx->workspace, &ws);
90,91d99
< cctx->workSpace = (void*)(cctx+1);
< cctx->workSpaceSize = workspaceSize - sizeof(ZSTD_CCtx);
94,101c102,106
< if (cctx->workSpaceSize < HUF_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t)) return NULL;
< assert(((size_t)cctx->workSpace & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
< cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)cctx->workSpace;
< cctx->blockState.nextCBlock = cctx->blockState.prevCBlock + 1;
< {
< void* const ptr = cctx->blockState.nextCBlock + 1;
< cctx->entropyWorkspace = (U32*)ptr;
< }
---
> if (!ZSTD_cwksp_check_available(&cctx->workspace, HUF_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
> cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
> cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
> cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(
> &cctx->workspace, HUF_WORKSPACE_SIZE);
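
Note: the hunks above re-plumb ZSTD_initStaticCCtx over the new ZSTD_cwksp allocator, so the CCtx struct itself is now carved out of the caller's buffer with ZSTD_cwksp_reserve_object. The public static-allocation API is unchanged; a sketch of the caller side (illustration only; compress_static is a hypothetical name, and the buffer size is arbitrary where it should really come from ZSTD_estimateCCtxSize):

    #define ZSTD_STATIC_LINKING_ONLY   /* static-CCtx API lives in the experimental section */
    #include <zstd.h>

    static size_t wksp[(1 << 20) / sizeof(size_t)];   /* size_t array keeps it aligned */

    size_t compress_static(void *dst, size_t dstCap, const void *src, size_t srcSize) {
        ZSTD_CCtx *const cctx = ZSTD_initStaticCCtx(wksp, sizeof wksp);
        if (cctx == NULL) return 0;   /* workspace too small */
        return ZSTD_compressCCtx(cctx, dst, dstCap, src, srcSize, 3);
    }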
129d133
< ZSTD_free(cctx->workSpace, cctx->customMem); cctx->workSpace = NULL;
133a138
> ZSTD_cwksp_free(&cctx->workspace, cctx->customMem);
141,142c146,152
< ZSTD_freeCCtxContent(cctx);
< ZSTD_free(cctx, cctx->customMem);
---
> {
> int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
> ZSTD_freeCCtxContent(cctx);
> if (!cctxInWorkspace) {
> ZSTD_free(cctx, cctx->customMem);
> }
> }
161c171,173
< return sizeof(*cctx) + cctx->workSpaceSize
---
> /* cctx may be in the workspace */
> return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx))
> + ZSTD_cwksp_sizeof(&cctx->workspace)
229a242
> assert(!ZSTD_checkCParams(params.cParams));
233d245
< assert(!ZSTD_checkCParams(params.cParams));
240c252
< ZSTD_CCtx_params cctxParams, ZSTD_parameters params)
---
> const ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
242c254,255
< ZSTD_CCtx_params ret = cctxParams;
---
> ZSTD_CCtx_params ret = *cctxParams;
> assert(!ZSTD_checkCParams(params.cParams));
246d258
< assert(!ZSTD_checkCParams(params.cParams));
379c391
< bounds.upperBound = ZSTD_dictForceCopy; /* note : how to ensure at compile time that this is the highest value enum ? */
---
> bounds.upperBound = ZSTD_dictForceLoad; /* note : how to ensure at compile time that this is the highest value enum ? */
392a405,409
> case ZSTD_c_srcSizeHint:
> bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN;
> bounds.upperBound = ZSTD_SRCSIZEHINT_MAX;
> return bounds;
>
400,411d416
< /* ZSTD_cParam_withinBounds:
< * @return 1 if value is within cParam bounds,
< * 0 otherwise */
< static int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
< {
< ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
< if (ZSTD_isError(bounds.error)) return 0;
< if (value < bounds.lowerBound) return 0;
< if (value > bounds.upperBound) return 0;
< return 1;
< }
<
460a466
> case ZSTD_c_srcSizeHint:
506a513
> case ZSTD_c_srcSizeHint:
530c537
< if (CCtxParams->compressionLevel >= 0) return CCtxParams->compressionLevel;
---
> if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;
537c544
< CCtxParams->cParams.windowLog = value;
---
> CCtxParams->cParams.windowLog = (U32)value;
543c550
< CCtxParams->cParams.hashLog = value;
---
> CCtxParams->cParams.hashLog = (U32)value;
549c556
< CCtxParams->cParams.chainLog = value;
---
> CCtxParams->cParams.chainLog = (U32)value;
555,556c562,563
< CCtxParams->cParams.searchLog = value;
< return value;
---
> CCtxParams->cParams.searchLog = (U32)value;
> return (size_t)value;
686a694,699
> case ZSTD_c_srcSizeHint :
> if (value!=0) /* 0 ==> default */
> BOUNDCHECK(ZSTD_c_srcSizeHint, value);
> CCtxParams->srcSizeHint = value;
> return CCtxParams->srcSizeHint;
>
791a805,807
> case ZSTD_c_srcSizeHint :
> *value = (int)CCtxParams->srcSizeHint;
> break;
1042c1058,1062
< ZSTD_compressionParameters cParams = ZSTD_getCParams(CCtxParams->compressionLevel, srcSizeHint, dictSize);
---
> ZSTD_compressionParameters cParams;
> if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
> srcSizeHint = CCtxParams->srcSizeHint;
> }
> cParams = ZSTD_getCParams(CCtxParams->compressionLevel, srcSizeHint, dictSize);
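
Note: ZSTD_c_srcSizeHint is the new compression parameter these hunks thread through: when the exact source size cannot be pledged, it steers parameter selection toward an expected input size. In v1.4.4 it is experimental, so it needs ZSTD_STATIC_LINKING_ONLY; a sketch (illustration only; setup is a hypothetical name and the 100 KB hint is a placeholder):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    static size_t setup(ZSTD_CCtx *cctx) {
        size_t const e = ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 5);
        if (ZSTD_isError(e)) return e;
        /* tune cparams for ~100 KB inputs without pledging an exact size */
        return ZSTD_CCtx_setParameter(cctx, ZSTD_c_srcSizeHint, 100 * 1024);
    }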
1062,1065c1082,1094
< size_t const h3Size = ((size_t)1) << hashLog3;
< size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
< size_t const optPotentialSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits)) * sizeof(U32)
< + (ZSTD_OPT_NUM+1) * (sizeof(ZSTD_match_t)+sizeof(ZSTD_optimal_t));
---
> size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
> /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't
> * surrounded by redzones in ASAN. */
> size_t const tableSpace = chainSize * sizeof(U32)
> + hSize * sizeof(U32)
> + h3Size * sizeof(U32);
> size_t const optPotentialSpace =
> ZSTD_cwksp_alloc_size((MaxML+1) * sizeof(U32))
> + ZSTD_cwksp_alloc_size((MaxLL+1) * sizeof(U32))
> + ZSTD_cwksp_alloc_size((MaxOff+1) * sizeof(U32))
> + ZSTD_cwksp_alloc_size((1<<Litbits) * sizeof(U32))
> + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
> + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
1082,1084c1111,1115
< size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
< size_t const entropySpace = HUF_WORKSPACE_SIZE;
< size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);
---
> size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
> + ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef))
> + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
> size_t const entropySpace = ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE);
> size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
1088c1119
< size_t const ldmSeqSpace = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize) * sizeof(rawSeq);
---
> size_t const ldmSeqSpace = ZSTD_cwksp_alloc_size(ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize) * sizeof(rawSeq));
1091a1123
> size_t const cctxSpace = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx));
1093,1095c1125,1127
< DEBUGLOG(5, "sizeof(ZSTD_CCtx) : %u", (U32)sizeof(ZSTD_CCtx));
< DEBUGLOG(5, "estimate workSpace : %u", (U32)neededSpace);
< return sizeof(ZSTD_CCtx) + neededSpace;
---
> DEBUGLOG(5, "sizeof(ZSTD_CCtx) : %u", (U32)cctxSpace);
> DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
> return cctxSpace + neededSpace;
1131c1163,1164
< size_t const streamingSize = inBuffSize + outBuffSize;
---
> size_t const streamingSize = ZSTD_cwksp_alloc_size(inBuffSize)
> + ZSTD_cwksp_alloc_size(outBuffSize);
1199,1209d1231
<
<
< static U32 ZSTD_equivalentCParams(ZSTD_compressionParameters cParams1,
< ZSTD_compressionParameters cParams2)
< {
< return (cParams1.hashLog == cParams2.hashLog)
< & (cParams1.chainLog == cParams2.chainLog)
< & (cParams1.strategy == cParams2.strategy) /* opt parser space */
< & ((cParams1.minMatch==3) == (cParams2.minMatch==3)); /* hashlog3 space */
< }
<
1224,1288d1245
< /** The parameters are equivalent if ldm is not enabled in both sets or
< * all the parameters are equivalent. */
< static U32 ZSTD_equivalentLdmParams(ldmParams_t ldmParams1,
< ldmParams_t ldmParams2)
< {
< return (!ldmParams1.enableLdm && !ldmParams2.enableLdm) ||
< (ldmParams1.enableLdm == ldmParams2.enableLdm &&
< ldmParams1.hashLog == ldmParams2.hashLog &&
< ldmParams1.bucketSizeLog == ldmParams2.bucketSizeLog &&
< ldmParams1.minMatchLength == ldmParams2.minMatchLength &&
< ldmParams1.hashRateLog == ldmParams2.hashRateLog);
< }
<
< typedef enum { ZSTDb_not_buffered, ZSTDb_buffered } ZSTD_buffered_policy_e;
<
< /* ZSTD_sufficientBuff() :
< * check internal buffers exist for streaming if buffPol == ZSTDb_buffered .
< * Note : they are assumed to be correctly sized if ZSTD_equivalentCParams()==1 */
< static U32 ZSTD_sufficientBuff(size_t bufferSize1, size_t maxNbSeq1,
< size_t maxNbLit1,
< ZSTD_buffered_policy_e buffPol2,
< ZSTD_compressionParameters cParams2,
< U64 pledgedSrcSize)
< {
< size_t const windowSize2 = MAX(1, (size_t)MIN(((U64)1 << cParams2.windowLog), pledgedSrcSize));
< size_t const blockSize2 = MIN(ZSTD_BLOCKSIZE_MAX, windowSize2);
< size_t const maxNbSeq2 = blockSize2 / ((cParams2.minMatch == 3) ? 3 : 4);
< size_t const maxNbLit2 = blockSize2;
< size_t const neededBufferSize2 = (buffPol2==ZSTDb_buffered) ? windowSize2 + blockSize2 : 0;
< DEBUGLOG(4, "ZSTD_sufficientBuff: is neededBufferSize2=%u <= bufferSize1=%u",
< (U32)neededBufferSize2, (U32)bufferSize1);
< DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbSeq2=%u <= maxNbSeq1=%u",
< (U32)maxNbSeq2, (U32)maxNbSeq1);
< DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbLit2=%u <= maxNbLit1=%u",
< (U32)maxNbLit2, (U32)maxNbLit1);
< return (maxNbLit2 <= maxNbLit1)
< & (maxNbSeq2 <= maxNbSeq1)
< & (neededBufferSize2 <= bufferSize1);
< }
<
< /** Equivalence for resetCCtx purposes */
< static U32 ZSTD_equivalentParams(ZSTD_CCtx_params params1,
< ZSTD_CCtx_params params2,
< size_t buffSize1,
< size_t maxNbSeq1, size_t maxNbLit1,
< ZSTD_buffered_policy_e buffPol2,
< U64 pledgedSrcSize)
< {
< DEBUGLOG(4, "ZSTD_equivalentParams: pledgedSrcSize=%u", (U32)pledgedSrcSize);
< if (!ZSTD_equivalentCParams(params1.cParams, params2.cParams)) {
< DEBUGLOG(4, "ZSTD_equivalentCParams() == 0");
< return 0;
< }
< if (!ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams)) {
< DEBUGLOG(4, "ZSTD_equivalentLdmParams() == 0");
< return 0;
< }
< if (!ZSTD_sufficientBuff(buffSize1, maxNbSeq1, maxNbLit1, buffPol2,
< params2.cParams, pledgedSrcSize)) {
< DEBUGLOG(4, "ZSTD_sufficientBuff() == 0");
< return 0;
< }
< return 1;
< }
<
1314,1341c1271,1279
< /*! ZSTD_continueCCtx() :
< * reuse CCtx without reset (note : requires no dictionary) */
< static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_CCtx_params params, U64 pledgedSrcSize)
< {
< size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
< size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
< DEBUGLOG(4, "ZSTD_continueCCtx: re-use context in place");
<
< cctx->blockSize = blockSize; /* previous block size could be different even for same windowLog, due to pledgedSrcSize */
< cctx->appliedParams = params;
< cctx->blockState.matchState.cParams = params.cParams;
< cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
< cctx->consumedSrcSize = 0;
< cctx->producedCSize = 0;
< if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
< cctx->appliedParams.fParams.contentSizeFlag = 0;
< DEBUGLOG(4, "pledged content size : %u ; flag : %u",
< (U32)pledgedSrcSize, cctx->appliedParams.fParams.contentSizeFlag);
< cctx->stage = ZSTDcs_init;
< cctx->dictID = 0;
< if (params.ldmParams.enableLdm)
< ZSTD_window_clear(&cctx->ldmState.window);
< ZSTD_referenceExternalSequences(cctx, NULL, 0);
< ZSTD_invalidateMatchState(&cctx->blockState.matchState);
< ZSTD_reset_compressedBlockState(cctx->blockState.prevCBlock);
< XXH64_reset(&cctx->xxhState, 0);
< return 0;
< }
---
> /**
> * Indicates whether this compression proceeds directly from user-provided
> * source buffer to user-provided destination buffer (ZSTDb_not_buffered), or
> * whether the context needs to buffer the input/output (ZSTDb_buffered).
> */
> typedef enum {
> ZSTDb_not_buffered,
> ZSTDb_buffered
> } ZSTD_buffered_policy_e;
1343c1281,1301
< typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset } ZSTD_compResetPolicy_e;
---
> /**
> * Controls, for this matchState reset, whether the tables need to be cleared /
> * prepared for the coming compression (ZSTDcrp_makeClean), or whether the
> * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a
> * subsequent operation will overwrite the table space anyways (e.g., copying
> * the matchState contents in from a CDict).
> */
> typedef enum {
> ZSTDcrp_makeClean,
> ZSTDcrp_leaveDirty
> } ZSTD_compResetPolicy_e;
>
> /**
> * Controls, for this matchState reset, whether indexing can continue where it
> * left off (ZSTDirp_continue), or whether it needs to be restarted from zero
> * (ZSTDirp_reset).
> */
> typedef enum {
> ZSTDirp_continue,
> ZSTDirp_reset
> } ZSTD_indexResetPolicy_e;
1345c1303,1306
< typedef enum { ZSTD_resetTarget_CDict, ZSTD_resetTarget_CCtx } ZSTD_resetTarget_e;
---
> typedef enum {
> ZSTD_resetTarget_CDict,
> ZSTD_resetTarget_CCtx
> } ZSTD_resetTarget_e;
1347c1308
< static void*
---
> static size_t
1349c1310
< void* ptr,
---
> ZSTD_cwksp* ws,
1351c1312,1314
< ZSTD_compResetPolicy_e const crp, ZSTD_resetTarget_e const forWho)
---
> const ZSTD_compResetPolicy_e crp,
> const ZSTD_indexResetPolicy_e forceResetIndex,
> const ZSTD_resetTarget_e forWho)
1356,1357c1319
< size_t const h3Size = ((size_t)1) << hashLog3;
< size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
---
> size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
1359c1321,1328
< assert(((size_t)ptr & 3) == 0);
---
> DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
> if (forceResetIndex == ZSTDirp_reset) {
> memset(&ms->window, 0, sizeof(ms->window));
> ms->window.dictLimit = 1; /* start from 1, so that 1st position is valid */
> ms->window.lowLimit = 1; /* it ensures first and later CCtx usages compress the same */
> ms->window.nextSrc = ms->window.base + 1; /* see issue #1241 */
> ZSTD_cwksp_mark_tables_dirty(ws);
> }
1362,1365c1331
< memset(&ms->window, 0, sizeof(ms->window));
< ms->window.dictLimit = 1; /* start from 1, so that 1st position is valid */
< ms->window.lowLimit = 1; /* it ensures first and later CCtx usages compress the same */
< ms->window.nextSrc = ms->window.base + 1; /* see issue #1241 */
---
>
1367a1334,1351
> assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */
>
> ZSTD_cwksp_clear_tables(ws);
>
> DEBUGLOG(5, "reserving table space");
> /* table Space */
> ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
> ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
> ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32));
> RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
> "failed a workspace allocation in ZSTD_reset_matchState");
>
> DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty);
> if (crp!=ZSTDcrp_leaveDirty) {
> /* reset tables only */
> ZSTD_cwksp_clean_tables(ws);
> }
>
1371,1379c1355,1360
< ms->opt.litFreq = (unsigned*)ptr;
< ms->opt.litLengthFreq = ms->opt.litFreq + (1<<Litbits);
< ms->opt.matchLengthFreq = ms->opt.litLengthFreq + (MaxLL+1);
< ms->opt.offCodeFreq = ms->opt.matchLengthFreq + (MaxML+1);
< ptr = ms->opt.offCodeFreq + (MaxOff+1);
< ms->opt.matchTable = (ZSTD_match_t*)ptr;
< ptr = ms->opt.matchTable + ZSTD_OPT_NUM+1;
< ms->opt.priceTable = (ZSTD_optimal_t*)ptr;
< ptr = ms->opt.priceTable + ZSTD_OPT_NUM+1;
---
> ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
> ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
> ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
> ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
> ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
> ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
1382,1390d1362
< /* table Space */
< DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_noMemset);
< assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
< if (crp!=ZSTDcrp_noMemset) memset(ptr, 0, tableSpace); /* reset tables only */
< ms->hashTable = (U32*)(ptr);
< ms->chainTable = ms->hashTable + hSize;
< ms->hashTable3 = ms->chainTable + chainSize;
< ptr = ms->hashTable3 + h3Size;
<
1393,1394c1365,1368
< assert(((size_t)ptr & 3) == 0);
< return ptr;
---
> RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
> "failed a workspace allocation in ZSTD_reset_matchState");
>
> return 0;
1410,1416d1383
< #define ZSTD_WORKSPACETOOLARGE_FACTOR 3 /* define "workspace is too large" as this number of times larger than needed */
< #define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128 /* when workspace is continuously too large
< * during at least this number of times,
< * context's memory usage is considered wasteful,
< * because it's sized to handle a worst case scenario which rarely happens.
< * In which case, resize it down to free some memory */
<
1424a1392
> ZSTD_cwksp* const ws = &zc->workspace;
1429,1448c1397
< if (crp == ZSTDcrp_continue) {
< if (ZSTD_equivalentParams(zc->appliedParams, params,
< zc->inBuffSize,
< zc->seqStore.maxNbSeq, zc->seqStore.maxNbLit,
< zbuff, pledgedSrcSize) ) {
< DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> consider continue mode");
< zc->workSpaceOversizedDuration += (zc->workSpaceOversizedDuration > 0); /* if it was too large, it still is */
< if (zc->workSpaceOversizedDuration <= ZSTD_WORKSPACETOOLARGE_MAXDURATION) {
< DEBUGLOG(4, "continue mode confirmed (wLog1=%u, blockSize1=%zu)",
< zc->appliedParams.cParams.windowLog, zc->blockSize);
< if (ZSTD_indexTooCloseToMax(zc->blockState.matchState.window)) {
< /* prefer a reset, faster than a rescale */
< ZSTD_reset_matchState(&zc->blockState.matchState,
< zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32,
< &params.cParams,
< crp, ZSTD_resetTarget_CCtx);
< }
< return ZSTD_continueCCtx(zc, params, pledgedSrcSize);
< } } }
< DEBUGLOG(4, "ZSTD_equivalentParams()==0 -> reset CCtx");
---
> zc->isFirstBlock = 1;
1462c1411,1413
< size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
---
> size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
> + ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef))
> + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
1467d1417
< void* ptr; /* used to partition workSpace */
1469,1472c1419,1431
< /* Check if workSpace is large enough, alloc a new one if needed */
< { size_t const entropySpace = HUF_WORKSPACE_SIZE;
< size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);
< size_t const bufferSpace = buffInSize + buffOutSize;
---
> ZSTD_indexResetPolicy_e needsIndexReset = ZSTDirp_continue;
>
> if (ZSTD_indexTooCloseToMax(zc->blockState.matchState.window)) {
> needsIndexReset = ZSTDirp_reset;
> }
>
> ZSTD_cwksp_bump_oversized_duration(ws, 0);
>
> /* Check if workspace is large enough, alloc a new one if needed */
> { size_t const cctxSpace = zc->staticSize ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;
> size_t const entropySpace = ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE);
> size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
> size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize) + ZSTD_cwksp_alloc_size(buffOutSize);
1474c1433,1443
< size_t const ldmSeqSpace = maxNbLdmSeq * sizeof(rawSeq);
---
> size_t const ldmSeqSpace = ZSTD_cwksp_alloc_size(maxNbLdmSeq * sizeof(rawSeq));
>
> size_t const neededSpace =
> cctxSpace +
> entropySpace +
> blockStateSpace +
> ldmSpace +
> ldmSeqSpace +
> matchStateSize +
> tokenSpace +
> bufferSpace;
1476,1483c1445,1446
< size_t const neededSpace = entropySpace + blockStateSpace + ldmSpace +
< ldmSeqSpace + matchStateSize + tokenSpace +
< bufferSpace;
<
< int const workSpaceTooSmall = zc->workSpaceSize < neededSpace;
< int const workSpaceTooLarge = zc->workSpaceSize > ZSTD_WORKSPACETOOLARGE_FACTOR * neededSpace;
< int const workSpaceWasteful = workSpaceTooLarge && (zc->workSpaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION);
< zc->workSpaceOversizedDuration = workSpaceTooLarge ? zc->workSpaceOversizedDuration+1 : 0;
---
> int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
> int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
1489,1491c1452,1454
< if (workSpaceTooSmall || workSpaceWasteful) {
< DEBUGLOG(4, "Resize workSpaceSize from %zuKB to %zuKB",
< zc->workSpaceSize >> 10,
---
> if (workspaceTooSmall || workspaceWasteful) {
> DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
> ZSTD_cwksp_sizeof(ws) >> 10,
1496,1501c1459,1462
< zc->workSpaceSize = 0;
< ZSTD_free(zc->workSpace, zc->customMem);
< zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
< RETURN_ERROR_IF(zc->workSpace == NULL, memory_allocation);
< zc->workSpaceSize = neededSpace;
< zc->workSpaceOversizedDuration = 0;
---
> needsIndexReset = ZSTDirp_reset;
>
> ZSTD_cwksp_free(ws, zc->customMem);
> FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem));
1502a1464
> DEBUGLOG(5, "reserving object space");
1506,1511c1468,1474
< assert(((size_t)zc->workSpace & 3) == 0); /* ensure correct alignment */
< assert(zc->workSpaceSize >= 2 * sizeof(ZSTD_compressedBlockState_t));
< zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)zc->workSpace;
< zc->blockState.nextCBlock = zc->blockState.prevCBlock + 1;
< ptr = zc->blockState.nextCBlock + 1;
< zc->entropyWorkspace = (U32*)ptr;
---
> assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
> zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
> RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
> zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
> RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
> zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, HUF_WORKSPACE_SIZE);
> RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
1513a1477,1478
> ZSTD_cwksp_clear(ws);
>
1532,1560d1496
< ptr = ZSTD_reset_matchState(&zc->blockState.matchState,
< zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32,
< &params.cParams,
< crp, ZSTD_resetTarget_CCtx);
<
< /* ldm hash table */
< /* initialize bucketOffsets table later for pointer alignment */
< if (params.ldmParams.enableLdm) {
< size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
< memset(ptr, 0, ldmHSize * sizeof(ldmEntry_t));
< assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
< zc->ldmState.hashTable = (ldmEntry_t*)ptr;
< ptr = zc->ldmState.hashTable + ldmHSize;
< zc->ldmSequences = (rawSeq*)ptr;
< ptr = zc->ldmSequences + maxNbLdmSeq;
< zc->maxNbLdmSequences = maxNbLdmSeq;
<
< memset(&zc->ldmState.window, 0, sizeof(zc->ldmState.window));
< }
< assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
<
< /* sequences storage */
< zc->seqStore.maxNbSeq = maxNbSeq;
< zc->seqStore.sequencesStart = (seqDef*)ptr;
< ptr = zc->seqStore.sequencesStart + maxNbSeq;
< zc->seqStore.llCode = (BYTE*) ptr;
< zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
< zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
< zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
1563a1500
> zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
1565c1502,1507
< ptr = zc->seqStore.litStart + blockSize + WILDCOPY_OVERLENGTH;
---
>
> /* buffers */
> zc->inBuffSize = buffInSize;
> zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
> zc->outBuffSize = buffOutSize;
> zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);
1568a1511
> /* TODO: avoid memset? */
1572,1575c1515,1516
< memset(ptr, 0, ldmBucketSize);
< zc->ldmState.bucketOffsets = (BYTE*)ptr;
< ptr = zc->ldmState.bucketOffsets + ldmBucketSize;
< ZSTD_window_clear(&zc->ldmState.window);
---
> zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, ldmBucketSize);
> memset(zc->ldmState.bucketOffsets, 0, ldmBucketSize);
1576a1518,1519
>
> /* sequences storage */
1577a1521,1533
> zc->seqStore.maxNbSeq = maxNbSeq;
> zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
> zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
> zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
> zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));
>
> FORWARD_IF_ERROR(ZSTD_reset_matchState(
> &zc->blockState.matchState,
> ws,
> &params.cParams,
> crp,
> needsIndexReset,
> ZSTD_resetTarget_CCtx));
1579,1583c1535,1548
< /* buffers */
< zc->inBuffSize = buffInSize;
< zc->inBuff = (char*)ptr;
< zc->outBuffSize = buffOutSize;
< zc->outBuff = zc->inBuff + buffInSize;
---
> /* ldm hash table */
> if (params.ldmParams.enableLdm) {
> /* TODO: avoid memset? */
> size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
> zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
> memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
> zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
> zc->maxNbLdmSequences = maxNbLdmSeq;
>
> memset(&zc->ldmState.window, 0, sizeof(zc->ldmState.window));
> ZSTD_window_clear(&zc->ldmState.window);
> }
>
> DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));
1617c1582
< ZSTD_CCtx_params params,
---
> const ZSTD_CCtx_params* params,
1623,1625c1588,1590
< || params.attachDictPref == ZSTD_dictForceAttach )
< && params.attachDictPref != ZSTD_dictForceCopy
< && !params.forceWindow; /* dictMatchState isn't correctly
---
> || params->attachDictPref == ZSTD_dictForceAttach )
> && params->attachDictPref != ZSTD_dictForceCopy
> && !params->forceWindow; /* dictMatchState isn't correctly
1643,1644c1608,1609
< ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
< ZSTDcrp_continue, zbuff);
---
> FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
> ZSTDcrp_makeClean, zbuff));
1692,1693c1657,1658
< ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
< ZSTDcrp_noMemset, zbuff);
---
> FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
> ZSTDcrp_leaveDirty, zbuff));
1698a1664,1665
> ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
>
1702,1707c1669,1675
< size_t const tableSpace = (chainSize + hSize) * sizeof(U32);
< assert((U32*)cctx->blockState.matchState.chainTable == (U32*)cctx->blockState.matchState.hashTable + hSize); /* chainTable must follow hashTable */
< assert((U32*)cctx->blockState.matchState.hashTable3 == (U32*)cctx->blockState.matchState.chainTable + chainSize);
< assert((U32*)cdict->matchState.chainTable == (U32*)cdict->matchState.hashTable + hSize); /* chainTable must follow hashTable */
< assert((U32*)cdict->matchState.hashTable3 == (U32*)cdict->matchState.chainTable + chainSize);
< memcpy(cctx->blockState.matchState.hashTable, cdict->matchState.hashTable, tableSpace); /* presumes all tables follow each other */
---
>
> memcpy(cctx->blockState.matchState.hashTable,
> cdict->matchState.hashTable,
> hSize * sizeof(U32));
> memcpy(cctx->blockState.matchState.chainTable,
> cdict->matchState.chainTable,
> chainSize * sizeof(U32));
1711c1679,1680
< { size_t const h3Size = (size_t)1 << cctx->blockState.matchState.hashLog3;
---
> { int const h3log = cctx->blockState.matchState.hashLog3;
> size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
1715a1685,1686
> ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
>
1737c1708
< ZSTD_CCtx_params params,
---
> const ZSTD_CCtx_params* params,
1747c1718
< cctx, cdict, params, pledgedSrcSize, zbuff);
---
> cctx, cdict, *params, pledgedSrcSize, zbuff);
1750c1721
< cctx, cdict, params, pledgedSrcSize, zbuff);
---
> cctx, cdict, *params, pledgedSrcSize, zbuff);
1776c1747
< ZSTDcrp_noMemset, zbuff);
---
> ZSTDcrp_leaveDirty, zbuff);
1783a1755,1756
> ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);
>
1787,1791c1760,1771
< size_t const h3Size = (size_t)1 << srcCCtx->blockState.matchState.hashLog3;
< size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
< assert((U32*)dstCCtx->blockState.matchState.chainTable == (U32*)dstCCtx->blockState.matchState.hashTable + hSize); /* chainTable must follow hashTable */
< assert((U32*)dstCCtx->blockState.matchState.hashTable3 == (U32*)dstCCtx->blockState.matchState.chainTable + chainSize);
< memcpy(dstCCtx->blockState.matchState.hashTable, srcCCtx->blockState.matchState.hashTable, tableSpace); /* presumes all tables follow each other */
---
> int const h3log = srcCCtx->blockState.matchState.hashLog3;
> size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
>
> memcpy(dstCCtx->blockState.matchState.hashTable,
> srcCCtx->blockState.matchState.hashTable,
> hSize * sizeof(U32));
> memcpy(dstCCtx->blockState.matchState.chainTable,
> srcCCtx->blockState.matchState.chainTable,
> chainSize * sizeof(U32));
> memcpy(dstCCtx->blockState.matchState.hashTable3,
> srcCCtx->blockState.matchState.hashTable3,
> h3Size * sizeof(U32));
1793a1774,1775
> ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);
>
1843a1826,1839
>
> #if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
> /* To validate that the table re-use logic is sound, and that we don't
> * access table space that we haven't cleaned, we re-"poison" the table
> * space every time we mark it dirty.
> *
> * This function however is intended to operate on those dirty tables and
> * re-clean them. So when this function is used correctly, we can unpoison
> * the memory it operated on. This introduces a blind spot though, since
> * if we now try to operate on __actually__ poisoned memory, we will not
> * detect that. */
> __msan_unpoison(table, size * sizeof(U32));
> #endif
>
1906,2054d1901
< static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
< {
< BYTE* const ostart = (BYTE* const)dst;
< U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
<
< RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall);
<
< switch(flSize)
< {
< case 1: /* 2 - 1 - 5 */
< ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
< break;
< case 2: /* 2 - 2 - 12 */
< MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
< break;
< case 3: /* 2 - 2 - 20 */
< MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
< break;
< default: /* not necessary : flSize is {1,2,3} */
< assert(0);
< }
<
< memcpy(ostart + flSize, src, srcSize);
< return srcSize + flSize;
< }
<
< static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
< {
< BYTE* const ostart = (BYTE* const)dst;
< U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
<
< (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */
<
< switch(flSize)
< {
< case 1: /* 2 - 1 - 5 */
< ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
< break;
< case 2: /* 2 - 2 - 12 */
< MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
< break;
< case 3: /* 2 - 2 - 20 */
< MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
< break;
< default: /* not necessary : flSize is {1,2,3} */
< assert(0);
< }
<
< ostart[flSize] = *(const BYTE*)src;
< return flSize+1;
< }
<
<
< /* ZSTD_minGain() :
< * minimum compression required
< * to generate a compress block or a compressed literals section.
< * note : use same formula for both situations */
< static size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
< {
< U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
< ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
< assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
< return (srcSize >> minlog) + 2;
< }
<
< static size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
< ZSTD_hufCTables_t* nextHuf,
< ZSTD_strategy strategy, int disableLiteralCompression,
< void* dst, size_t dstCapacity,
< const void* src, size_t srcSize,
< void* workspace, size_t wkspSize,
< const int bmi2)
< {
< size_t const minGain = ZSTD_minGain(srcSize, strategy);
< size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
< BYTE* const ostart = (BYTE*)dst;
< U32 singleStream = srcSize < 256;
< symbolEncodingType_e hType = set_compressed;
< size_t cLitSize;
<
< DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i)",
< disableLiteralCompression);
<
< /* Prepare nextEntropy assuming reusing the existing table */
< memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
<
< if (disableLiteralCompression)
< return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
<
< /* small ? don't even attempt compression (speed opt) */
< # define COMPRESS_LITERALS_SIZE_MIN 63
< { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
< if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
< }
<
< RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
< { HUF_repeat repeat = prevHuf->repeatMode;
< int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
< if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
< cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
< workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2)
< : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
< workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
< if (repeat != HUF_repeat_none) {
< /* reused the existing table */
< hType = set_repeat;
< }
< }
<
< if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
< memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
< return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
< }
< if (cLitSize==1) {
< memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
< return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
< }
<
< if (hType == set_compressed) {
< /* using a newly constructed table */
< nextHuf->repeatMode = HUF_repeat_check;
< }
<
< /* Build header */
< switch(lhSize)
< {
< case 3: /* 2 - 2 - 10 - 10 */
< { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
< MEM_writeLE24(ostart, lhc);
< break;
< }
< case 4: /* 2 - 2 - 14 - 14 */
< { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
< MEM_writeLE32(ostart, lhc);
< break;
< }
< case 5: /* 2 - 2 - 18 - 18 */
< { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
< MEM_writeLE32(ostart, lhc);
< ostart[4] = (BYTE)(cLitSize >> 10);
< break;
< }
< default: /* not possible : lhSize is {3,4,5} */
< assert(0);
< }
< return lhSize+cLitSize;
< }
<
<
2077,2488d1923
<
< /**
< * -log2(x / 256) lookup table for x in [0, 256).
< * If x == 0: Return 0
< * Else: Return floor(-log2(x / 256) * 256)
< */
< static unsigned const kInverseProbabilityLog256[256] = {
< 0, 2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
< 1130, 1100, 1073, 1047, 1024, 1001, 980, 960, 941, 923, 906, 889,
< 874, 859, 844, 830, 817, 804, 791, 779, 768, 756, 745, 734,
< 724, 714, 704, 694, 685, 676, 667, 658, 650, 642, 633, 626,
< 618, 610, 603, 595, 588, 581, 574, 567, 561, 554, 548, 542,
< 535, 529, 523, 517, 512, 506, 500, 495, 489, 484, 478, 473,
< 468, 463, 458, 453, 448, 443, 438, 434, 429, 424, 420, 415,
< 411, 407, 402, 398, 394, 390, 386, 382, 377, 373, 370, 366,
< 362, 358, 354, 350, 347, 343, 339, 336, 332, 329, 325, 322,
< 318, 315, 311, 308, 305, 302, 298, 295, 292, 289, 286, 282,
< 279, 276, 273, 270, 267, 264, 261, 258, 256, 253, 250, 247,
< 244, 241, 239, 236, 233, 230, 228, 225, 222, 220, 217, 215,
< 212, 209, 207, 204, 202, 199, 197, 194, 192, 190, 187, 185,
< 182, 180, 178, 175, 173, 171, 168, 166, 164, 162, 159, 157,
< 155, 153, 151, 149, 146, 144, 142, 140, 138, 136, 134, 132,
< 130, 128, 126, 123, 121, 119, 117, 115, 114, 112, 110, 108,
< 106, 104, 102, 100, 98, 96, 94, 93, 91, 89, 87, 85,
< 83, 82, 80, 78, 76, 74, 73, 71, 69, 67, 66, 64,
< 62, 61, 59, 57, 55, 54, 52, 50, 49, 47, 46, 44,
< 42, 41, 39, 37, 36, 34, 33, 31, 30, 28, 26, 25,
< 23, 22, 20, 19, 17, 16, 14, 13, 11, 10, 8, 7,
< 5, 4, 2, 1,
< };
<
<
< /**
< * Returns the cost in bits of encoding the distribution described by count
< * using the entropy bound.
< */
< static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)
< {
< unsigned cost = 0;
< unsigned s;
< for (s = 0; s <= max; ++s) {
< unsigned norm = (unsigned)((256 * count[s]) / total);
< if (count[s] != 0 && norm == 0)
< norm = 1;
< assert(count[s] < total);
< cost += count[s] * kInverseProbabilityLog256[norm];
< }
< return cost >> 8;
< }
<
<
< /**
< * Returns the cost in bits of encoding the distribution in count using the
< * table described by norm. The max symbol support by norm is assumed >= max.
< * norm must be valid for every symbol with non-zero probability in count.
< */
< static size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
< unsigned const* count, unsigned const max)
< {
< unsigned const shift = 8 - accuracyLog;
< size_t cost = 0;
< unsigned s;
< assert(accuracyLog <= 8);
< for (s = 0; s <= max; ++s) {
< unsigned const normAcc = norm[s] != -1 ? norm[s] : 1;
< unsigned const norm256 = normAcc << shift;
< assert(norm256 > 0);
< assert(norm256 < 256);
< cost += count[s] * kInverseProbabilityLog256[norm256];
< }
< return cost >> 8;
< }
<
<
< static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
< void const* ptr = ctable;
< U16 const* u16ptr = (U16 const*)ptr;
< U32 const maxSymbolValue = MEM_read16(u16ptr + 1);
< return maxSymbolValue;
< }
<
<
< /**
< * Returns the cost in bits of encoding the distribution in count using ctable.
< * Returns an error if ctable cannot represent all the symbols in count.
< */
< static size_t ZSTD_fseBitCost(
< FSE_CTable const* ctable,
< unsigned const* count,
< unsigned const max)
< {
< unsigned const kAccuracyLog = 8;
< size_t cost = 0;
< unsigned s;
< FSE_CState_t cstate;
< FSE_initCState(&cstate, ctable);
< RETURN_ERROR_IF(ZSTD_getFSEMaxSymbolValue(ctable) < max, GENERIC,
< "Repeat FSE_CTable has maxSymbolValue %u < %u",
< ZSTD_getFSEMaxSymbolValue(ctable), max);
< for (s = 0; s <= max; ++s) {
< unsigned const tableLog = cstate.stateLog;
< unsigned const badCost = (tableLog + 1) << kAccuracyLog;
< unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
< if (count[s] == 0)
< continue;
< RETURN_ERROR_IF(bitCost >= badCost, GENERIC,
< "Repeat FSE_CTable has Prob[%u] == 0", s);
< cost += count[s] * bitCost;
< }
< return cost >> kAccuracyLog;
< }
<
< /**
< * Returns the cost in bytes of encoding the normalized count header.
< * Returns an error if any of the helper functions return an error.
< */
< static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
< size_t const nbSeq, unsigned const FSELog)
< {
< BYTE wksp[FSE_NCOUNTBOUND];
< S16 norm[MaxSeq + 1];
< const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
< FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max));
< return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
< }
<
<
< typedef enum {
< ZSTD_defaultDisallowed = 0,
< ZSTD_defaultAllowed = 1
< } ZSTD_defaultPolicy_e;
<
< MEM_STATIC symbolEncodingType_e
< ZSTD_selectEncodingType(
< FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
< size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
< FSE_CTable const* prevCTable,
< short const* defaultNorm, U32 defaultNormLog,
< ZSTD_defaultPolicy_e const isDefaultAllowed,
< ZSTD_strategy const strategy)
< {
< ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
< if (mostFrequent == nbSeq) {
< *repeatMode = FSE_repeat_none;
< if (isDefaultAllowed && nbSeq <= 2) {
< /* Prefer set_basic over set_rle when there are 2 or less symbols,
< * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
< * If basic encoding isn't possible, always choose RLE.
< */
< DEBUGLOG(5, "Selected set_basic");
< return set_basic;
< }
< DEBUGLOG(5, "Selected set_rle");
< return set_rle;
< }
< if (strategy < ZSTD_lazy) {
< if (isDefaultAllowed) {
< size_t const staticFse_nbSeq_max = 1000;
< size_t const mult = 10 - strategy;
< size_t const baseLog = 3;
< size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog; /* 28-36 for offset, 56-72 for lengths */
< assert(defaultNormLog >= 5 && defaultNormLog <= 6); /* xx_DEFAULTNORMLOG */
< assert(mult <= 9 && mult >= 7);
< if ( (*repeatMode == FSE_repeat_valid)
< && (nbSeq < staticFse_nbSeq_max) ) {
< DEBUGLOG(5, "Selected set_repeat");
< return set_repeat;
< }
< if ( (nbSeq < dynamicFse_nbSeq_min)
< || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {
< DEBUGLOG(5, "Selected set_basic");
< /* The format allows default tables to be repeated, but it isn't useful.
< * When using simple heuristics to select encoding type, we don't want
< * to confuse these tables with dictionaries. When running more careful
< * analysis, we don't need to waste time checking both repeating tables
< * and default tables.
< */
< *repeatMode = FSE_repeat_none;
< return set_basic;
< }
< }
< } else {
< size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);
< size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);
< size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
< size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);
<
< if (isDefaultAllowed) {
< assert(!ZSTD_isError(basicCost));
< assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));
< }
< assert(!ZSTD_isError(NCountCost));
< assert(compressedCost < ERROR(maxCode));
< DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
< (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost);
< if (basicCost <= repeatCost && basicCost <= compressedCost) {
< DEBUGLOG(5, "Selected set_basic");
< assert(isDefaultAllowed);
< *repeatMode = FSE_repeat_none;
< return set_basic;
< }
< if (repeatCost <= compressedCost) {
< DEBUGLOG(5, "Selected set_repeat");
< assert(!ZSTD_isError(repeatCost));
< return set_repeat;
< }
< assert(compressedCost < basicCost && compressedCost < repeatCost);
< }
< DEBUGLOG(5, "Selected set_compressed");
< *repeatMode = FSE_repeat_check;
< return set_compressed;
< }
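
For orientation: the removed ZSTD_selectEncodingType above chooses among set_basic, set_rle, set_repeat and set_compressed by comparing estimated bit costs. A minimal standalone C sketch of that comparison follows; the three costs are hypothetical placeholders standing in for ZSTD_crossEntropyCost, ZSTD_fseBitCost and ZSTD_NCountCost, and the local enum only mimics the real symbolEncodingType_e:

#include <stdio.h>
#include <stddef.h>

typedef enum { set_basic, set_rle, set_compressed, set_repeat } encType_e; /* local stand-in */

static encType_e pickCheapest(size_t basicCost, size_t repeatCost, size_t compressedCost)
{
    if (basicCost <= repeatCost && basicCost <= compressedCost) return set_basic;
    if (repeatCost <= compressedCost) return set_repeat;
    return set_compressed;
}

int main(void)
{
    size_t const basicCost = 1200;   /* hypothetical estimates, in bits */
    size_t const repeatCost = 1050;
    size_t const nCountBytes = 12;   /* serialized table header */
    size_t const entropyBits = 900;  /* estimated payload */
    size_t const compressedCost = (nCountBytes << 3) + entropyBits; /* same shape as above */
    printf("choice=%d (996 bits beats 1200 and 1050)\n",
           (int)pickCheapest(basicCost, repeatCost, compressedCost));
    return 0;
}
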
<
< MEM_STATIC size_t
< ZSTD_buildCTable(void* dst, size_t dstCapacity,
< FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
< unsigned* count, U32 max,
< const BYTE* codeTable, size_t nbSeq,
< const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
< const FSE_CTable* prevCTable, size_t prevCTableSize,
< void* workspace, size_t workspaceSize)
< {
< BYTE* op = (BYTE*)dst;
< const BYTE* const oend = op + dstCapacity;
< DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity);
<
< switch (type) {
< case set_rle:
< FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max));
< RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall);
< *op = codeTable[0];
< return 1;
< case set_repeat:
< memcpy(nextCTable, prevCTable, prevCTableSize);
< return 0;
< case set_basic:
< FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize)); /* note : could be pre-calculated */
< return 0;
< case set_compressed: {
< S16 norm[MaxSeq + 1];
< size_t nbSeq_1 = nbSeq;
< const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
< if (count[codeTable[nbSeq-1]] > 1) {
< count[codeTable[nbSeq-1]]--;
< nbSeq_1--;
< }
< assert(nbSeq_1 > 1);
< FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
< { size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
< FORWARD_IF_ERROR(NCountSize);
< FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize));
< return NCountSize;
< }
< }
< default: assert(0); RETURN_ERROR(GENERIC);
< }
< }
<
< FORCE_INLINE_TEMPLATE size_t
< ZSTD_encodeSequences_body(
< void* dst, size_t dstCapacity,
< FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
< FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
< FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
< seqDef const* sequences, size_t nbSeq, int longOffsets)
< {
< BIT_CStream_t blockStream;
< FSE_CState_t stateMatchLength;
< FSE_CState_t stateOffsetBits;
< FSE_CState_t stateLitLength;
<
< RETURN_ERROR_IF(
< ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)),
< dstSize_tooSmall, "not enough space remaining");
< DEBUGLOG(6, "available space for bitstream : %i (dstCapacity=%u)",
< (int)(blockStream.endPtr - blockStream.startPtr),
< (unsigned)dstCapacity);
<
< /* first symbols */
< FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
< FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]);
< FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]);
< BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
< if (MEM_32bits()) BIT_flushBits(&blockStream);
< BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
< if (MEM_32bits()) BIT_flushBits(&blockStream);
< if (longOffsets) {
< U32 const ofBits = ofCodeTable[nbSeq-1];
< int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
< if (extraBits) {
< BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
< BIT_flushBits(&blockStream);
< }
< BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
< ofBits - extraBits);
< } else {
< BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
< }
< BIT_flushBits(&blockStream);
<
< { size_t n;
< for (n=nbSeq-2 ; n<nbSeq ; n--) { /* intentional underflow */
< BYTE const llCode = llCodeTable[n];
< BYTE const ofCode = ofCodeTable[n];
< BYTE const mlCode = mlCodeTable[n];
< U32 const llBits = LL_bits[llCode];
< U32 const ofBits = ofCode;
< U32 const mlBits = ML_bits[mlCode];
< DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
< (unsigned)sequences[n].litLength,
< (unsigned)sequences[n].matchLength + MINMATCH,
< (unsigned)sequences[n].offset);
< /* 32b*/ /* 64b*/
< /* (7)*/ /* (7)*/
< FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */
< FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */
< if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/
< FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */
< if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
< BIT_flushBits(&blockStream); /* (7)*/
< BIT_addBits(&blockStream, sequences[n].litLength, llBits);
< if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
< BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
< if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
< if (longOffsets) {
< int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
< if (extraBits) {
< BIT_addBits(&blockStream, sequences[n].offset, extraBits);
< BIT_flushBits(&blockStream); /* (7)*/
< }
< BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
< ofBits - extraBits); /* 31 */
< } else {
< BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */
< }
< BIT_flushBits(&blockStream); /* (7)*/
< DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
< } }
<
< DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
< FSE_flushCState(&blockStream, &stateMatchLength);
< DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog);
< FSE_flushCState(&blockStream, &stateOffsetBits);
< DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog);
< FSE_flushCState(&blockStream, &stateLitLength);
<
< { size_t const streamSize = BIT_closeCStream(&blockStream);
< RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space");
< return streamSize;
< }
< }
<
< static size_t
< ZSTD_encodeSequences_default(
< void* dst, size_t dstCapacity,
< FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
< FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
< FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
< seqDef const* sequences, size_t nbSeq, int longOffsets)
< {
< return ZSTD_encodeSequences_body(dst, dstCapacity,
< CTable_MatchLength, mlCodeTable,
< CTable_OffsetBits, ofCodeTable,
< CTable_LitLength, llCodeTable,
< sequences, nbSeq, longOffsets);
< }
<
<
< #if DYNAMIC_BMI2
<
< static TARGET_ATTRIBUTE("bmi2") size_t
< ZSTD_encodeSequences_bmi2(
< void* dst, size_t dstCapacity,
< FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
< FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
< FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
< seqDef const* sequences, size_t nbSeq, int longOffsets)
< {
< return ZSTD_encodeSequences_body(dst, dstCapacity,
< CTable_MatchLength, mlCodeTable,
< CTable_OffsetBits, ofCodeTable,
< CTable_LitLength, llCodeTable,
< sequences, nbSeq, longOffsets);
< }
<
< #endif
<
< static size_t ZSTD_encodeSequences(
< void* dst, size_t dstCapacity,
< FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
< FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
< FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
< seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
< {
< DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
< #if DYNAMIC_BMI2
< if (bmi2) {
< return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
< CTable_MatchLength, mlCodeTable,
< CTable_OffsetBits, ofCodeTable,
< CTable_LitLength, llCodeTable,
< sequences, nbSeq, longOffsets);
< }
< #endif
< (void)bmi2;
< return ZSTD_encodeSequences_default(dst, dstCapacity,
< CTable_MatchLength, mlCodeTable,
< CTable_OffsetBits, ofCodeTable,
< CTable_LitLength, llCodeTable,
< sequences, nbSeq, longOffsets);
< }
<
2512c1947
< void* workspace, size_t wkspSize,
---
> void* entropyWorkspace, size_t entropyWkspSize,
2529c1964
< size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
---
> size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
2532a1968
> DEBUGLOG(5, "ZSTD_compressSequences_internal (nbSeq=%zu)", nbSeq);
2534d1969
< DEBUGLOG(5, "ZSTD_compressSequences_internal");
2538c1973
< size_t const litSize = seqStorePtr->lit - literals;
---
> size_t const litSize = (size_t)(seqStorePtr->lit - literals);
2545c1980
< workspace, wkspSize,
---
> entropyWorkspace, entropyWkspSize,
2555c1990
< if (nbSeq < 0x7F)
---
> if (nbSeq < 128) {
2557,2560c1992,2000
< else if (nbSeq < LONGNBSEQ)
< op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
< else
< op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
---
> } else if (nbSeq < LONGNBSEQ) {
> op[0] = (BYTE)((nbSeq>>8) + 0x80);
> op[1] = (BYTE)nbSeq;
> op+=2;
> } else {
> op[0]=0xFF;
> MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ));
> op+=3;
> }
2565c2005
< return op - ostart;
---
> return (size_t)(op - ostart);
2576c2016
< size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
---
> size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
2586,2589c2026,2033
< { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
< count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
< prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable),
< workspace, wkspSize);
---
> { size_t const countSize = ZSTD_buildCTable(
> op, (size_t)(oend - op),
> CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
> count, max, llCodeTable, nbSeq,
> LL_defaultNorm, LL_defaultNormLog, MaxLL,
> prevEntropy->fse.litlengthCTable,
> sizeof(prevEntropy->fse.litlengthCTable),
> entropyWorkspace, entropyWkspSize);
2598c2042,2043
< size_t const mostFrequent = HIST_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
---
> size_t const mostFrequent = HIST_countFast_wksp(
> count, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
2609,2612c2054,2061
< { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
< count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
< prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable),
< workspace, wkspSize);
---
> { size_t const countSize = ZSTD_buildCTable(
> op, (size_t)(oend - op),
> CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
> count, max, ofCodeTable, nbSeq,
> OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
> prevEntropy->fse.offcodeCTable,
> sizeof(prevEntropy->fse.offcodeCTable),
> entropyWorkspace, entropyWkspSize);
2621c2070,2071
< size_t const mostFrequent = HIST_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
---
> size_t const mostFrequent = HIST_countFast_wksp(
> count, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
2630,2633c2080,2087
< { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
< count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
< prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable),
< workspace, wkspSize);
---
> { size_t const countSize = ZSTD_buildCTable(
> op, (size_t)(oend - op),
> CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
> count, max, mlCodeTable, nbSeq,
> ML_defaultNorm, ML_defaultNormLog, MaxML,
> prevEntropy->fse.matchlengthCTable,
> sizeof(prevEntropy->fse.matchlengthCTable),
> entropyWorkspace, entropyWkspSize);
2644c2098
< op, oend - op,
---
> op, (size_t)(oend - op),
2671c2125
< return op - ostart;
---
> return (size_t)(op - ostart);
2681c2135
< void* workspace, size_t wkspSize,
---
> void* entropyWorkspace, size_t entropyWkspSize,
2687c2141
< workspace, wkspSize, bmi2);
---
> entropyWorkspace, entropyWkspSize, bmi2);
2837a2292,2372
> static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
> {
> const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
> const seqDef* seqs = seqStore->sequencesStart;
> size_t seqsSize = seqStore->sequences - seqs;
>
> ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
> size_t i; size_t position; int repIdx;
>
> assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
> for (i = 0, position = 0; i < seqsSize; ++i) {
> outSeqs[i].offset = seqs[i].offset;
> outSeqs[i].litLength = seqs[i].litLength;
> outSeqs[i].matchLength = seqs[i].matchLength + MINMATCH;
>
> if (i == seqStore->longLengthPos) {
> if (seqStore->longLengthID == 1) {
> outSeqs[i].litLength += 0x10000;
> } else if (seqStore->longLengthID == 2) {
> outSeqs[i].matchLength += 0x10000;
> }
> }
>
> if (outSeqs[i].offset <= ZSTD_REP_NUM) {
> outSeqs[i].rep = outSeqs[i].offset;
> repIdx = (unsigned int)i - outSeqs[i].offset;
>
> if (outSeqs[i].litLength == 0) {
> if (outSeqs[i].offset < 3) {
> --repIdx;
> } else {
> repIdx = (unsigned int)i - 1;
> }
> ++outSeqs[i].rep;
> }
> assert(repIdx >= -3);
> outSeqs[i].offset = repIdx >= 0 ? outSeqs[repIdx].offset : repStartValue[-repIdx - 1];
> if (outSeqs[i].rep == 4) {
> --outSeqs[i].offset;
> }
> } else {
> outSeqs[i].offset -= ZSTD_REP_NUM;
> }
>
> position += outSeqs[i].litLength;
> outSeqs[i].matchPos = (unsigned int)position;
> position += outSeqs[i].matchLength;
> }
> zc->seqCollector.seqIndex += seqsSize;
> }
>
> size_t ZSTD_getSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
> size_t outSeqsSize, const void* src, size_t srcSize)
> {
> const size_t dstCapacity = ZSTD_compressBound(srcSize);
> void* dst = ZSTD_malloc(dstCapacity, ZSTD_defaultCMem);
> SeqCollector seqCollector;
>
> RETURN_ERROR_IF(dst == NULL, memory_allocation);
>
> seqCollector.collectSequences = 1;
> seqCollector.seqStart = outSeqs;
> seqCollector.seqIndex = 0;
> seqCollector.maxSequences = outSeqsSize;
> zc->seqCollector = seqCollector;
>
> ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
> ZSTD_free(dst, ZSTD_defaultCMem);
> return zc->seqCollector.seqIndex;
> }
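
The ZSTD_getSequences entry point added above is part of zstd's experimental API. A minimal usage sketch, assuming a zstd build that exposes the declaration under ZSTD_STATIC_LINKING_ONLY; the input string and the 64-sequence bound are arbitrary illustration choices:

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const char src[] = "abcabcabcabcabcabcabcabcabcabc";
    size_t const maxSeqs = 64;              /* arbitrary bound for this toy input */
    ZSTD_Sequence* seqs = malloc(maxSeqs * sizeof(ZSTD_Sequence));
    ZSTD_CCtx* cctx = ZSTD_createCCtx();
    size_t n, i;
    if (!seqs || !cctx) return 1;
    n = ZSTD_getSequences(cctx, seqs, maxSeqs, src, sizeof(src) - 1);
    for (i = 0; i < n; ++i)                 /* fields filled by the copy loop above */
        printf("lit=%u match=%u off=%u\n",
               seqs[i].litLength, seqs[i].matchLength, seqs[i].offset);
    ZSTD_freeCCtx(cctx);
    free(seqs);
    return 0;
}
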
>
> /* Returns true if the given block is an RLE block */
> static int ZSTD_isRLE(const BYTE *ip, size_t length) {
> size_t i;
> if (length < 2) return 1;
> for (i = 1; i < length; ++i) {
> if (ip[0] != ip[i]) return 0;
> }
> return 1;
> }
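
Companion sketch for the RLE path: once ZSTD_isRLE holds, the frame loop later in this diff stores a single byte and builds a 3-byte block header with block type bt_rle (1 in the zstd format) and the regenerated size, matching the cBlockHeader computation in the hunk further down. Constants are inlined here for illustration:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

static int isRLE(const unsigned char* ip, size_t length) /* same predicate as above */
{
    size_t i;
    if (length < 2) return 1;
    for (i = 1; i < length; ++i)
        if (ip[0] != ip[i]) return 0;
    return 1;
}

int main(void)
{
    unsigned char block[100];
    memset(block, 0x42, sizeof(block));
    if (isRLE(block, sizeof(block))) {
        unsigned const lastBlock = 1;
        unsigned const bt_rle = 1;  /* zstd block types: 0=raw, 1=rle, 2=compressed */
        unsigned const header24 = lastBlock + (bt_rle << 1) + ((unsigned)sizeof(block) << 3);
        printf("stored byte 0x%02x, header 0x%06x\n", block[0], header24);
    }
    return 0;
}
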
>
2840c2375
< const void* src, size_t srcSize)
---
> const void* src, size_t srcSize, U32 frame)
2841a2377,2381
> /* This is the upper bound for the length of an RLE block.
> * This isn't the actual upper bound. Finding the real threshold
> * needs further investigation.
> */
> const U32 rleMaxLength = 25;
2842a2383,2384
> const BYTE* ip = (const BYTE*)src;
> BYTE* op = (BYTE*)dst;
2844c2386,2387
< (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate);
---
> (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
> (unsigned)zc->blockState.matchState.nextToUpdate);
2850a2394,2398
> if (zc->seqCollector.collectSequences) {
> ZSTD_copyBlockSequences(zc);
> return 0;
> }
>
2859a2408,2420
> if (frame &&
> /* We don't want to emit our first block as an RLE even if it qualifies because
> * doing so will cause the decoder (cli only) to throw a "should consume all input" error.
> * This is only an issue for zstd <= v1.4.3
> */
> !zc->isFirstBlock &&
> cSize < rleMaxLength &&
> ZSTD_isRLE(ip, srcSize))
> {
> cSize = 1;
> op[0] = ip[0];
> }
>
2861c2422
< if (!ZSTD_isError(cSize) && cSize != 0) {
---
> if (!ZSTD_isError(cSize) && cSize > 1) {
2878c2439,2443
< static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, void const* ip, void const* iend)
---
> static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
> ZSTD_cwksp* ws,
> ZSTD_CCtx_params const* params,
> void const* ip,
> void const* iend)
2886a2452
> ZSTD_cwksp_mark_tables_dirty(ws);
2887a2454
> ZSTD_cwksp_mark_tables_clean(ws);
2896d2462
<
2930c2496,2497
< ZSTD_overflowCorrectIfNeeded(ms, &cctx->appliedParams, ip, ip + blockSize);
---
> ZSTD_overflowCorrectIfNeeded(
> ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
2938c2505
< ip, blockSize);
---
> ip, blockSize, 1 /* frame */);
2940d2506
<
2945,2946c2511,2514
< U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
< MEM_writeLE24(op, cBlockHeader24);
---
> const U32 cBlockHeader = cSize == 1 ?
> lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
> lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
> MEM_writeLE24(op, cBlockHeader);
2955a2524
> cctx->isFirstBlock = 0;
2966c2535
< ZSTD_CCtx_params params, U64 pledgedSrcSize, U32 dictID)
---
> const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
2969,2974c2538,2543
< U32 const dictIDSizeCode = params.fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */
< U32 const checksumFlag = params.fParams.checksumFlag>0;
< U32 const windowSize = (U32)1 << params.cParams.windowLog;
< U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
< BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
< U32 const fcsCode = params.fParams.contentSizeFlag ?
---
> U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */
> U32 const checksumFlag = params->fParams.checksumFlag>0;
> U32 const windowSize = (U32)1 << params->cParams.windowLog;
> U32 const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
> BYTE const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
> U32 const fcsCode = params->fParams.contentSizeFlag ?
2979c2548
< assert(!(params.fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
---
> assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
2982c2551
< !params.fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
---
> !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
2984c2553
< if (params.format == ZSTD_f_zstd1) {
---
> if (params->format == ZSTD_f_zstd1) {
3050c2619
< fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams,
---
> fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
3070c2639,2641
< ZSTD_overflowCorrectIfNeeded(ms, &cctx->appliedParams, src, (BYTE const*)src + srcSize);
---
> ZSTD_overflowCorrectIfNeeded(
> ms, &cctx->workspace, &cctx->appliedParams,
> src, (BYTE const*)src + srcSize);
3076c2647
< ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
---
> ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
3112,3113c2683,2685
< size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
< RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong);
---
> DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
> { size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
> RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong); }
3121a2694
> ZSTD_cwksp* ws,
3138c2711
< size_t const remaining = iend - ip;
---
> size_t const remaining = (size_t)(iend - ip);
3142c2715
< ZSTD_overflowCorrectIfNeeded(ms, params, ip, ichunk);
---
> ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, ichunk);
3201c2774
< * dictSize supposed > 8
---
> * dictSize supposed >= 8
3204a2778
> ZSTD_cwksp* ws,
3217c2791
< assert(dictSize > 8);
---
> assert(dictSize >= 8);
3300c2874,2875
< FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize, dtlm));
---
> FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
> ms, ws, params, dictPtr, dictContentSize, dtlm));
3309a2885
> ZSTD_cwksp* ws,
3317c2893,2896
< if ((dict==NULL) || (dictSize<=8)) return 0;
---
> if ((dict==NULL) || (dictSize<8)) {
> RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong);
> return 0;
> }
3323c2902
< return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
---
> return ZSTD_loadDictionaryContent(ms, ws, params, dict, dictSize, dtlm);
3328c2907,2908
< return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
---
> return ZSTD_loadDictionaryContent(
> ms, ws, params, dict, dictSize, dtlm);
3335c2915,2916
< return ZSTD_loadZstdDictionary(bs, ms, params, dict, dictSize, dtlm, workspace);
---
> return ZSTD_loadZstdDictionary(
> bs, ms, ws, params, dict, dictSize, dtlm, workspace);
3337a2919,2921
> #define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
> #define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6)
>
3345c2929
< ZSTD_CCtx_params params, U64 pledgedSrcSize,
---
> const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
3348c2932
< DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params.cParams.windowLog);
---
> DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
3350c2934
< assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
---
> assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
3352,3353c2936,2942
<
< if (cdict && cdict->dictContentSize>0) {
---
> if ( (cdict)
> && (cdict->dictContentSize > 0)
> && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
> || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
> || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
> || cdict->compressionLevel == 0)
> && (params->attachDictPref != ZSTD_dictForceLoad) ) {
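
The attach condition above, restated as a standalone predicate; a sketch only, with the two macros from this hunk inlined (128 KB cutoff, dictionary-size multiplier of 6) and ZSTD_CONTENTSIZE_UNKNOWN spelled out as its (0ULL - 1) value:

#include <stdio.h>
#include <stddef.h>

/* Prefer the cdict's cached parameters only for small-enough inputs (or unknown
 * size / default-level cdicts), and never when ZSTD_dictForceLoad is requested. */
static int useCDictParams(unsigned long long pledgedSrcSize, size_t dictContentSize,
                          int cdictLevel, int forceLoad)
{
    unsigned long long const cutoff = 128 * 1024;        /* ..._SRCSIZE_CUTOFF */
    unsigned long long const unknown = 0ULL - 1;         /* ZSTD_CONTENTSIZE_UNKNOWN */
    return ( pledgedSrcSize < cutoff
          || pledgedSrcSize < dictContentSize * 6ULL     /* ..._DICTSIZE_MULTIPLIER */
          || pledgedSrcSize == unknown
          || cdictLevel == 0 )
        && !forceLoad;
}

int main(void)
{
    printf("%d %d\n",
           useCDictParams(64 * 1024, 8 * 1024, 3, 0),   /* 1: below cutoff, attach params */
           useCDictParams(1u << 30, 8 * 1024, 3, 0));   /* 0: huge input, small dict */
    return 0;
}
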
3357,3361c2946,2956
< FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
< ZSTDcrp_continue, zbuff) );
< { size_t const dictID = ZSTD_compress_insertDictionary(
< cctx->blockState.prevCBlock, &cctx->blockState.matchState,
< &params, dict, dictSize, dictContentType, dtlm, cctx->entropyWorkspace);
---
> FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, *params, pledgedSrcSize,
> ZSTDcrp_makeClean, zbuff) );
> { size_t const dictID = cdict ?
> ZSTD_compress_insertDictionary(
> cctx->blockState.prevCBlock, &cctx->blockState.matchState,
> &cctx->workspace, params, cdict->dictContent, cdict->dictContentSize,
> dictContentType, dtlm, cctx->entropyWorkspace)
> : ZSTD_compress_insertDictionary(
> cctx->blockState.prevCBlock, &cctx->blockState.matchState,
> &cctx->workspace, params, dict, dictSize,
> dictContentType, dtlm, cctx->entropyWorkspace);
3374c2969
< ZSTD_CCtx_params params,
---
> const ZSTD_CCtx_params* params,
3377c2972
< DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params.cParams.windowLog);
---
> DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog);
3379c2974
< FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
---
> FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) );
3394c2989
< ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
---
> ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, params);
3398c2993
< cctxParams, pledgedSrcSize);
---
> &cctxParams, pledgedSrcSize);
3405c3000
< ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
---
> ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, params);
3408c3003
< cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
---
> &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
3431c3026
< fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, 0, 0);
---
> fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
3492c3087
< ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
---
> ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, params);
3498c3093
< cctxParams);
---
> &cctxParams);
3522c3117
< ZSTD_CCtx_params params)
---
> const ZSTD_CCtx_params* params)
3538c3133
< ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
---
> ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, params);
3540c3135
< return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, cctxParams);
---
> return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctxParams);
3575,3576c3170,3174
< return sizeof(ZSTD_CDict) + HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
< + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
---
> return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
> + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
> + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
> + (dictLoadMethod == ZSTD_dlm_byRef ? 0
> : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
3589c3187,3189
< return cdict->workspaceSize + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict);
---
> /* cdict may be in the workspace */
> return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict))
> + ZSTD_cwksp_sizeof(&cdict->workspace);
3603d3202
< cdict->dictBuffer = NULL;
3606,3608c3205
< void* const internalBuffer = ZSTD_malloc(dictSize, cdict->customMem);
< cdict->dictBuffer = internalBuffer;
< cdict->dictContent = internalBuffer;
---
> void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
3609a3207
> cdict->dictContent = internalBuffer;
3613a3212,3214
> cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);
>
>
3616,3622c3217,3223
< { void* const end = ZSTD_reset_matchState(&cdict->matchState,
< (U32*)cdict->workspace + HUF_WORKSPACE_SIZE_U32,
< &cParams,
< ZSTDcrp_continue, ZSTD_resetTarget_CDict);
< assert(end == (char*)cdict->workspace + cdict->workspaceSize);
< (void)end;
< }
---
> FORWARD_IF_ERROR(ZSTD_reset_matchState(
> &cdict->matchState,
> &cdict->workspace,
> &cParams,
> ZSTDcrp_makeClean,
> ZSTDirp_reset,
> ZSTD_resetTarget_CDict));
3624c3225
< * Skips loading the dictionary if it is <= 8 bytes.
---
> * Skips loading the dictionary if it is < 8 bytes.
3632,3634c3233,3235
< &cdict->cBlockState, &cdict->matchState, &params,
< cdict->dictContent, cdict->dictContentSize,
< dictContentType, ZSTD_dtlm_full, cdict->workspace);
---
> &cdict->cBlockState, &cdict->matchState, &cdict->workspace,
> &params, cdict->dictContent, cdict->dictContentSize,
> dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);
3652,3653c3253,3258
< { ZSTD_CDict* const cdict = (ZSTD_CDict*)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
< size_t const workspaceSize = HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
---
> { size_t const workspaceSize =
> ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
> ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
> ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0) +
> (dictLoadMethod == ZSTD_dlm_byRef ? 0
> : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
3654a3260,3261
> ZSTD_cwksp ws;
> ZSTD_CDict* cdict;
3656,3657c3263
< if (!cdict || !workspace) {
< ZSTD_free(cdict, customMem);
---
> if (!workspace) {
3660a3267,3272
>
> ZSTD_cwksp_init(&ws, workspace, workspaceSize);
>
> cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
> assert(cdict != NULL);
> ZSTD_cwksp_move(&cdict->workspace, &ws);
3662,3663c3274,3275
< cdict->workspace = workspace;
< cdict->workspaceSize = workspaceSize;
---
> cdict->compressionLevel = 0; /* signals advanced API usage */
>
3679,3681c3291,3296
< return ZSTD_createCDict_advanced(dict, dictSize,
< ZSTD_dlm_byCopy, ZSTD_dct_auto,
< cParams, ZSTD_defaultCMem);
---
> ZSTD_CDict* cdict = ZSTD_createCDict_advanced(dict, dictSize,
> ZSTD_dlm_byCopy, ZSTD_dct_auto,
> cParams, ZSTD_defaultCMem);
> if (cdict)
> cdict->compressionLevel = compressionLevel == 0 ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
> return cdict;
3696,3698c3311,3315
< ZSTD_free(cdict->workspace, cMem);
< ZSTD_free(cdict->dictBuffer, cMem);
< ZSTD_free(cdict, cMem);
---
> int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
> ZSTD_cwksp_free(&cdict->workspace, cMem);
> if (!cdictInWorkspace) {
> ZSTD_free(cdict, cMem);
> }
3724,3727c3341,3347
< size_t const neededSize = sizeof(ZSTD_CDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize)
< + HUF_WORKSPACE_SIZE + matchStateSize;
< ZSTD_CDict* const cdict = (ZSTD_CDict*) workspace;
< void* ptr;
---
> size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
> + (dictLoadMethod == ZSTD_dlm_byRef ? 0
> : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))
> + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
> + matchStateSize;
> ZSTD_CDict* cdict;
>
3728a3349,3357
>
> {
> ZSTD_cwksp ws;
> ZSTD_cwksp_init(&ws, workspace, workspaceSize);
> cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
> if (cdict == NULL) return NULL;
> ZSTD_cwksp_move(&cdict->workspace, &ws);
> }
>
3733,3742d3361
< if (dictLoadMethod == ZSTD_dlm_byCopy) {
< memcpy(cdict+1, dict, dictSize);
< dict = cdict+1;
< ptr = (char*)workspace + sizeof(ZSTD_CDict) + dictSize;
< } else {
< ptr = cdict+1;
< }
< cdict->workspace = ptr;
< cdict->workspaceSize = HUF_WORKSPACE_SIZE + matchStateSize;
<
3745c3364
< ZSTD_dlm_byRef, dictContentType,
---
> dictLoadMethod, dictContentType,
3767c3386,3394
< params.cParams = ZSTD_getCParamsFromCDict(cdict);
---
> params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
> || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
> || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
> || cdict->compressionLevel == 0 )
> && (params.attachDictPref != ZSTD_dictForceLoad) ?
> ZSTD_getCParamsFromCDict(cdict)
> : ZSTD_getCParams(cdict->compressionLevel,
> pledgedSrcSize,
> cdict->dictContentSize);
3781c3408
< params, pledgedSrcSize,
---
> &params, pledgedSrcSize,
3872c3499
< params, pledgedSrcSize,
---
> &params, pledgedSrcSize,
3906c3533,3534
< ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
---
> const ZSTD_CCtx_params* params,
> unsigned long long pledgedSrcSize)
3911,3912c3539,3540
< assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
< zcs->requestedParams = params;
---
> assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
> zcs->requestedParams = *params;
3951c3579
< * dict is loaded with default parameters ZSTD_dm_auto and ZSTD_dlm_byCopy. */
---
> * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
3965c3593
< zcs->requestedParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
---
> zcs->requestedParams = ZSTD_assignParamsToCCtxParams(&zcs->requestedParams, params);
4215c3843
< cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbWorkers, cctx->customMem);
---
> cctx->mtctx = ZSTDMT_createCCtx_advanced((U32)params.nbWorkers, cctx->customMem);
4343,4344c3971,3972
< { 21, 16, 17, 1, 5, 1, ZSTD_dfast }, /* level 3 */
< { 21, 18, 18, 1, 5, 1, ZSTD_dfast }, /* level 4 */
---
> { 21, 16, 17, 1, 5, 0, ZSTD_dfast }, /* level 3 */
> { 21, 18, 18, 1, 5, 0, ZSTD_dfast }, /* level 4 */
4368,4369c3996,3997
< { 18, 14, 14, 1, 5, 1, ZSTD_dfast }, /* level 2 */
< { 18, 16, 16, 1, 4, 1, ZSTD_dfast }, /* level 3 */
---
> { 18, 14, 14, 1, 5, 0, ZSTD_dfast }, /* level 2 */
> { 18, 16, 16, 1, 4, 0, ZSTD_dfast }, /* level 3 */
4395,4396c4023,4024
< { 17, 15, 16, 2, 5, 1, ZSTD_dfast }, /* level 3 */
< { 17, 17, 17, 2, 4, 1, ZSTD_dfast }, /* level 4 */
---
> { 17, 15, 16, 2, 5, 0, ZSTD_dfast }, /* level 3 */
> { 17, 17, 17, 2, 4, 0, ZSTD_dfast }, /* level 4 */
4421c4049
< { 14, 14, 15, 2, 4, 1, ZSTD_dfast }, /* level 3 */
---
> { 14, 14, 15, 2, 4, 0, ZSTD_dfast }, /* level 3 */
diff -r --color a/vendor/github.com/DataDog/zstd/zstd_compress_internal.h b/vendor/github.com/DataDog/zstd/zstd_compress_internal.h
21a22
> #include "zstd_cwksp.h"
137c138,144
< U32 loadedDictEnd; /* index of end of dictionary, within context's referential. When dict referential is copied into active context (i.e. not attached), effectively same value as dictSize, since referential starts from zero */
---
> U32 loadedDictEnd; /* index of end of dictionary, within context's referential.
> * When loadedDictEnd != 0, a dictionary is in use, and still valid.
> * This relies on a mechanism to set loadedDictEnd=0 when dictionary is no longer within distance.
> * Such mechanism is provided within ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity().
> * When dict referential is copied into active context (i.e. not attached),
> * loadedDictEnd == dictSize, since referential starts from zero.
> */
139c146
< U32 hashLog3; /* dispatch table : larger == faster, more memory */
---
> U32 hashLog3; /* dispatch table for matches of len==3 : larger == faster, more memory */
188a196,202
> typedef struct {
> int collectSequences;
> ZSTD_Sequence* seqStart;
> size_t seqIndex;
> size_t maxSequences;
> } SeqCollector;
>
199a214,216
> int srcSizeHint; /* User's best guess of source size.
> * Hint is not valid when srcSizeHint == 0.
> * There is no guarantee that hint is close to actual source size */
225,227c242
< int workSpaceOversizedDuration;
< void* workSpace;
< size_t workSpaceSize;
---
> ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
234a250,251
> SeqCollector seqCollector;
> int isFirstBlock;
309a327,365
> /* ZSTD_cParam_withinBounds:
> * @return 1 if value is within cParam bounds,
> * 0 otherwise */
> MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
> {
> ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
> if (ZSTD_isError(bounds.error)) return 0;
> if (value < bounds.lowerBound) return 0;
> if (value > bounds.upperBound) return 0;
> return 1;
> }
>
> /* ZSTD_minGain() :
> * minimum compression required
> * to generate a compress block or a compressed literals section.
> * note : use same formula for both situations */
> MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
> {
> U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
> ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
> assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
> return (srcSize >> minlog) + 2;
> }
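
A quick numeric check of ZSTD_minGain above, under the branch for strategies below ZSTD_btultra (minlog == 6); the 128 KB source size is just an example:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t const srcSize = 128 * 1024;  /* one full-size block */
    unsigned const minlog = 6;          /* any strategy below btultra */
    printf("minGain = %zu bytes\n", (srcSize >> minlog) + 2);  /* 2050 */
    return 0;
}
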
>
> /*! ZSTD_safecopyLiterals() :
> * memcpy() function that won't read more than WILDCOPY_OVERLENGTH bytes past ilimit_w.
> * Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single
> * large copies.
> */
> static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) {
> assert(iend > ilimit_w);
> if (ip <= ilimit_w) {
> ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);
> op += ilimit_w - ip;
> ip = ilimit_w;
> }
> while (ip < iend) *op++ = *ip++;
> }
>
311,312c367,368
< * Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
< * `offsetCode` : distance to match + 3 (values 1-3 are repCodes).
---
> * Store a sequence (litlen, litPtr, offCode and mlBase) into seqStore_t.
> * `offCode` : distance to match + ZSTD_REP_MOVE (values <= ZSTD_REP_MOVE are repCodes).
313a370
> * Allowed to overread literals up to litLimit.
315c372,373
< MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t mlBase)
---
> HINT_INLINE UNUSED_ATTR
> void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offCode, size_t mlBase)
316a375,376
> BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
> BYTE const* const litEnd = literals + litLength;
322c382
< pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offsetCode);
---
> pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offCode);
329c389,401
< ZSTD_wildcopy(seqStorePtr->lit, literals, litLength, ZSTD_no_overlap);
---
> assert(literals + litLength <= litLimit);
> if (litEnd <= litLimit_w) {
> /* Common case we can use wildcopy.
> * First copy 16 bytes, because literals are likely short.
> */
> assert(WILDCOPY_OVERLENGTH >= 16);
> ZSTD_copy16(seqStorePtr->lit, literals);
> if (litLength > 16) {
> ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);
> }
> } else {
> ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w);
> }
341c413
< seqStorePtr->sequences[0].offset = offsetCode + 1;
---
> seqStorePtr->sequences[0].offset = offCode + 1;
742c814,817
< * when input progresses beyond window size. */
---
> * when input progresses beyond window size.
> * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL)
> * loadedDictEnd uses same referential as window->base
> * maxDist is the window size */
744c819
< ZSTD_checkDictValidity(ZSTD_window_t* window,
---
> ZSTD_checkDictValidity(const ZSTD_window_t* window,
750,759c825,844
< U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
< U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
< DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
< (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
<
< if (loadedDictEnd && (blockEndIdx > maxDist + loadedDictEnd)) {
< /* On reaching window size, dictionaries are invalidated */
< if (loadedDictEndPtr) *loadedDictEndPtr = 0;
< if (dictMatchStatePtr) *dictMatchStatePtr = NULL;
< }
---
> assert(loadedDictEndPtr != NULL);
> assert(dictMatchStatePtr != NULL);
> { U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
> U32 const loadedDictEnd = *loadedDictEndPtr;
> DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
> (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
> assert(blockEndIdx >= loadedDictEnd);
>
> if (blockEndIdx > loadedDictEnd + maxDist) {
> /* On reaching window size, dictionaries are invalidated.
> * For simplification, if window size is reached anywhere within next block,
> * the dictionary is invalidated for the full block.
> */
> DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
> *loadedDictEndPtr = 0;
> *dictMatchStatePtr = NULL;
> } else {
> if (*loadedDictEndPtr != 0) {
> DEBUGLOG(6, "dictionary considered valid for current block");
> } } }
800a886,896
> MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 current, unsigned windowLog)
> {
> U32 const maxDistance = 1U << windowLog;
> U32 const lowestValid = ms->window.lowLimit;
> U32 const withinWindow = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid;
> U32 const isDictionary = (ms->loadedDictEnd != 0);
> U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
> return matchLowest;
> }
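
ZSTD_getLowestMatchIndex above, in miniature: without a dictionary the search floor is clamped to one window behind current; with a dictionary loaded the whole valid range stays searchable. The numbers are arbitrary:

#include <stdio.h>

typedef unsigned U32;

static U32 lowestMatchIndex(U32 current, unsigned windowLog, U32 lowestValid, U32 loadedDictEnd)
{
    U32 const maxDistance = 1U << windowLog;
    U32 const withinWindow = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid;
    return (loadedDictEnd != 0) ? lowestValid : withinWindow;   /* same selection as above */
}

int main(void)
{
    printf("%u\n", lowestMatchIndex(5000000, 20, 0, 0));    /* 5000000 - (1<<20) = 3951424 */
    printf("%u\n", lowestMatchIndex(5000000, 20, 0, 4096)); /* dictionary loaded: 0 */
    return 0;
}
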
>
>
859c955
< ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);
---
> const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize);
874c970
< ZSTD_CCtx_params params,
---
> const ZSTD_CCtx_params* params,
883c979
< ZSTD_CCtx_params params);
---
> const ZSTD_CCtx_params* params);
Only in b/vendor/github.com/DataDog/zstd: zstd_compress_literals.c
Only in b/vendor/github.com/DataDog/zstd: zstd_compress_literals.h
Only in b/vendor/github.com/DataDog/zstd: zstd_compress_sequences.c
Only in b/vendor/github.com/DataDog/zstd: zstd_compress_sequences.h
Only in b/vendor/github.com/DataDog/zstd: zstd_cwksp.h
diff -r --color a/vendor/github.com/DataDog/zstd/zstd_decompress_block.c b/vendor/github.com/DataDog/zstd/zstd_decompress_block.c
81a82
> DEBUGLOG(5, "ZSTD_decodeLiteralsBlock");
89a91
> DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block");
119c121
< litCSize = (lhc >> 22) + (istart[4] << 10);
---
> litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);
394c396,397
< symbolNext[s] = normalizedCounter[s];
---
> assert(normalizedCounter[s]>=0);
> symbolNext[s] = (U16)normalizedCounter[s];
572a576,603
> /*! ZSTD_overlapCopy8() :
> * Copies 8 bytes from ip to op and updates op and ip where ip <= op.
> * If the offset is < 8 then the offset is spread to at least 8 bytes.
> *
> * Precondition: *ip <= *op
> * Postcondition: *op - *ip >= 8
> */
> static void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) {
> assert(*ip <= *op);
> if (offset < 8) {
> /* close range match, overlap */
> static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
> static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
> int const sub2 = dec64table[offset];
> (*op)[0] = (*ip)[0];
> (*op)[1] = (*ip)[1];
> (*op)[2] = (*ip)[2];
> (*op)[3] = (*ip)[3];
> *ip += dec32table[offset];
> ZSTD_copy4(*op+4, *ip);
> *ip -= sub2;
> } else {
> ZSTD_copy8(*op, *ip);
> }
> *ip += 8;
> *op += 8;
> assert(*op - *ip >= 8);
> }
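
The offset-spreading trick above in isolation: for offset < 8, the dec32table/dec64table pair widens the effective offset so later copies can run 8 bytes at a time. A standalone sketch with memcpy standing in for ZSTD_copy4/ZSTD_copy8; the buffer layout is contrived so all pointer arithmetic stays in bounds:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

typedef unsigned char BYTE;

static void overlapCopy8(BYTE** op, BYTE const** ip, size_t offset)
{
    if (offset < 8) {
        static const unsigned dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
        static const int dec64table[] = { 8, 8, 8, 7, 8, 9, 10, 11 };    /* subtracted */
        int const sub2 = dec64table[offset];
        (*op)[0] = (*ip)[0];
        (*op)[1] = (*ip)[1];
        (*op)[2] = (*ip)[2];
        (*op)[3] = (*ip)[3];
        *ip += dec32table[offset];
        memcpy(*op + 4, *ip, 4);
        *ip -= sub2;
    } else {
        memcpy(*op, *ip, 8);
    }
    *ip += 8;
    *op += 8;
}

int main(void)
{
    BYTE buf[32] = "xxxxxxxxab";   /* 2-byte pattern "ab" starts at buf+8 */
    BYTE* op = buf + 10;           /* destination: right after the pattern */
    BYTE const* ip = buf + 8;      /* match source: offset 2 behind op */
    overlapCopy8(&op, &ip, 2);
    buf[18] = '\0';
    printf("%s\n", buf + 8);       /* "ababababab" */
    return 0;
}
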
574,580c605,659
< /* ZSTD_execSequenceLast7():
< * exceptional case : decompress a match starting within last 7 bytes of output buffer.
< * requires more careful checks, to ensure there is no overflow.
< * performance does not matter though.
< * note : this case is supposed to be never generated "naturally" by reference encoder,
< * since in most cases it needs at least 8 bytes to look for a match.
< * but it's allowed by the specification. */
---
> /*! ZSTD_safecopy() :
> * Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer
> * and write up to 16 bytes past oend_w (op >= oend_w is allowed).
> * This function is only called in the uncommon case where the sequence is near the end of the block. It
> * should be fast for a single long sequence, but can be slow for several short sequences.
> *
> * @param ovtype controls the overlap detection
> * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
> * - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart.
> * The src buffer must be before the dst buffer.
> */
> static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) {
> ptrdiff_t const diff = op - ip;
> BYTE* const oend = op + length;
>
> assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) ||
> (ovtype == ZSTD_overlap_src_before_dst && diff >= 0));
>
> if (length < 8) {
> /* Handle short lengths. */
> while (op < oend) *op++ = *ip++;
> return;
> }
> if (ovtype == ZSTD_overlap_src_before_dst) {
> /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */
> assert(length >= 8);
> ZSTD_overlapCopy8(&op, &ip, diff);
> assert(op - ip >= 8);
> assert(op <= oend);
> }
>
> if (oend <= oend_w) {
> /* No risk of overwrite. */
> ZSTD_wildcopy(op, ip, length, ovtype);
> return;
> }
> if (op <= oend_w) {
> /* Wildcopy until we get close to the end. */
> assert(oend > oend_w);
> ZSTD_wildcopy(op, ip, oend_w - op, ovtype);
> ip += oend_w - op;
> op = oend_w;
> }
> /* Handle the leftovers. */
> while (op < oend) *op++ = *ip++;
> }
>
> /* ZSTD_execSequenceEnd():
> * This version handles cases that are near the end of the output buffer. It requires
> * more careful checks to make sure there is no overflow. By separating out these hard
> * and unlikely cases, we can speed up the common cases.
> *
> * NOTE: This function needs to be fast for a single long sequence, but doesn't need
> * to be optimized for many small sequences, since those fall into ZSTD_execSequence().
> */
582,585c661,664
< size_t ZSTD_execSequenceLast7(BYTE* op,
< BYTE* const oend, seq_t sequence,
< const BYTE** litPtr, const BYTE* const litLimit,
< const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
---
> size_t ZSTD_execSequenceEnd(BYTE* op,
> BYTE* const oend, seq_t sequence,
> const BYTE** litPtr, const BYTE* const litLimit,
> const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
591a671
> BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
593,594c673,675
< /* check */
< RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must fit within dstBuffer");
---
> /* bounds checks */
> assert(oLitEnd < oMatchEnd);
> RETURN_ERROR_IF(oMatchEnd > oend, dstSize_tooSmall, "last match must fit within dstBuffer");
598c679,681
< while (op < oLitEnd) *op++ = *(*litPtr)++;
---
> ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap);
> op = oLitEnd;
> *litPtr = iLitEnd;
601c684
< if (sequence.offset > (size_t)(oLitEnd - base)) {
---
> if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
603,604c686,687
< RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - vBase),corruption_detected);
< match = dictEnd - (base-match);
---
> RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected);
> match = dictEnd - (prefixStart-match);
614c697
< match = base;
---
> match = prefixStart;
616c699
< while (op < oMatchEnd) *op++ = *match++;
---
> ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
620d702
<
634,643c716,734
< /* check */
< RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
< RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
< if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
<
< /* copy Literals */
< if (sequence.litLength > 8)
< ZSTD_wildcopy_16min(op, (*litPtr), sequence.litLength, ZSTD_no_overlap); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
< else
< ZSTD_copy8(op, *litPtr);
---
> /* Errors and uncommon cases handled here. */
> assert(oLitEnd < oMatchEnd);
> if (iLitEnd > litLimit || oMatchEnd > oend_w)
> return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
>
> /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */
> assert(iLitEnd <= litLimit /* Literal length is in bounds */);
> assert(oLitEnd <= oend_w /* Can wildcopy literals */);
> assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
>
> /* Copy Literals:
> * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9.
> * We likely don't need the full 32-byte wildcopy.
> */
> assert(WILDCOPY_OVERLENGTH >= 16);
> ZSTD_copy16(op, (*litPtr));
> if (sequence.litLength > 16) {
> ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap);
> }
647c738
< /* copy Match */
---
> /* Copy Match */
662,666d752
< if (op > oend_w || sequence.matchLength < MINMATCH) {
< U32 i;
< for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
< return sequenceLength;
< }
668,684c754,769
< /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
<
< /* match within prefix */
< if (sequence.offset < 8) {
< /* close range match, overlap */
< static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
< static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
< int const sub2 = dec64table[sequence.offset];
< op[0] = match[0];
< op[1] = match[1];
< op[2] = match[2];
< op[3] = match[3];
< match += dec32table[sequence.offset];
< ZSTD_copy4(op+4, match);
< match -= sub2;
< } else {
< ZSTD_copy8(op, match);
---
> /* Match within prefix of 1 or more bytes */
> assert(op <= oMatchEnd);
> assert(oMatchEnd <= oend_w);
> assert(match >= prefixStart);
> assert(sequence.matchLength >= 1);
>
> /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
> * without overlap checking.
> */
> if (sequence.offset >= WILDCOPY_VECLEN) {
> /* We bet on a full wildcopy for matches, since we expect matches to be
> * longer than literals (in general). In the Silesia corpus, ~10% of matches are longer
> * than 16 bytes.
> */
> ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
> return sequenceLength;
686,699c771
< op += 8; match += 8;
<
< if (oMatchEnd > oend-(16-MINMATCH)) {
< if (op < oend_w) {
< ZSTD_wildcopy(op, match, oend_w - op, ZSTD_overlap_src_before_dst);
< match += oend_w - op;
< op = oend_w;
< }
< while (op < oMatchEnd) *op++ = *match++;
< } else {
< ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst); /* works even if matchLength < 8 */
< }
< return sequenceLength;
< }
---
> assert(sequence.offset < WILDCOPY_VECLEN);
700a773,774
> /* Copy 8 bytes and spread the offset to be >= 8. */
> ZSTD_overlapCopy8(&op, &match, sequence.offset);
702,778c776,779
< HINT_INLINE
< size_t ZSTD_execSequenceLong(BYTE* op,
< BYTE* const oend, seq_t sequence,
< const BYTE** litPtr, const BYTE* const litLimit,
< const BYTE* const prefixStart, const BYTE* const dictStart, const BYTE* const dictEnd)
< {
< BYTE* const oLitEnd = op + sequence.litLength;
< size_t const sequenceLength = sequence.litLength + sequence.matchLength;
< BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
< BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
< const BYTE* const iLitEnd = *litPtr + sequence.litLength;
< const BYTE* match = sequence.match;
<
< /* check */
< RETURN_ERROR_IF(oMatchEnd > oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
< RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
< if (oLitEnd > oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, dictStart, dictEnd);
<
< /* copy Literals */
< if (sequence.litLength > 8)
< ZSTD_wildcopy_16min(op, *litPtr, sequence.litLength, ZSTD_no_overlap); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
< else
< ZSTD_copy8(op, *litPtr); /* note : op <= oLitEnd <= oend_w == oend - 8 */
<
< op = oLitEnd;
< *litPtr = iLitEnd; /* update for next sequence */
<
< /* copy Match */
< if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
< /* offset beyond prefix */
< RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - dictStart), corruption_detected);
< if (match + sequence.matchLength <= dictEnd) {
< memmove(oLitEnd, match, sequence.matchLength);
< return sequenceLength;
< }
< /* span extDict & currentPrefixSegment */
< { size_t const length1 = dictEnd - match;
< memmove(oLitEnd, match, length1);
< op = oLitEnd + length1;
< sequence.matchLength -= length1;
< match = prefixStart;
< if (op > oend_w || sequence.matchLength < MINMATCH) {
< U32 i;
< for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
< return sequenceLength;
< }
< } }
< assert(op <= oend_w);
< assert(sequence.matchLength >= MINMATCH);
<
< /* match within prefix */
< if (sequence.offset < 8) {
< /* close range match, overlap */
< static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
< static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
< int const sub2 = dec64table[sequence.offset];
< op[0] = match[0];
< op[1] = match[1];
< op[2] = match[2];
< op[3] = match[3];
< match += dec32table[sequence.offset];
< ZSTD_copy4(op+4, match);
< match -= sub2;
< } else {
< ZSTD_copy8(op, match);
< }
< op += 8; match += 8;
<
< if (oMatchEnd > oend-(16-MINMATCH)) {
< if (op < oend_w) {
< ZSTD_wildcopy(op, match, oend_w - op, ZSTD_overlap_src_before_dst);
< match += oend_w - op;
< op = oend_w;
< }
< while (op < oMatchEnd) *op++ = *match++;
< } else {
< ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst); /* works even if matchLength < 8 */
---
> /* If the match length is > 8 bytes, then continue with the wildcopy. */
> if (sequence.matchLength > 8) {
> assert(op < oMatchEnd);
> ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);
1098c1099
< size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
---
> size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
1109c1110
< size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
---
> size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
diff -r --color a/vendor/github.com/DataDog/zstd/zstd_decompress.c b/vendor/github.com/DataDog/zstd/zstd_decompress.c
91,94c91
< size_t const startingInputLength = (format==ZSTD_f_zstd1_magicless) ?
< ZSTD_FRAMEHEADERSIZE_PREFIX - ZSTD_FRAMEIDSIZE :
< ZSTD_FRAMEHEADERSIZE_PREFIX;
< ZSTD_STATIC_ASSERT(ZSTD_FRAMEHEADERSIZE_PREFIX >= ZSTD_FRAMEIDSIZE);
---
> size_t const startingInputLength = ZSTD_FRAMEHEADERSIZE_PREFIX(format);
379c376
< while (srcSize >= ZSTD_FRAMEHEADERSIZE_PREFIX) {
---
> while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) {
577c574
< insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
---
> * insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
579a577
> DEBUGLOG(5, "ZSTD_insertBlock: %u bytes", (unsigned)blockSize);
631c629
< remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN+ZSTD_blockHeaderSize,
---
> remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN(dctx->format)+ZSTD_blockHeaderSize,
635c633,634
< { size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_FRAMEHEADERSIZE_PREFIX);
---
> { size_t const frameHeaderSize = ZSTD_frameHeaderSize_internal(
> ip, ZSTD_FRAMEHEADERSIZE_PREFIX(dctx->format), dctx->format);
716c715
< while (srcSize >= ZSTD_FRAMEHEADERSIZE_PREFIX) {
---
> while (srcSize >= ZSTD_startingInputLength(dctx->format)) {
911a911
> RETURN_ERROR_IF(cBlockSize > dctx->fParams.blockSizeMax, corruption_detected, "Block Size Exceeds Maximum");
955a956
> RETURN_ERROR_IF(rSize > dctx->fParams.blockSizeMax, corruption_detected, "Decompressed Block Size Exceeds Maximum");
1098c1099
< RETURN_ERROR_IF(rep==0 || rep >= dictContentSize,
---
> RETURN_ERROR_IF(rep==0 || rep > dictContentSize,
1267c1268
< if (dict && dictSize >= 8) {
---
> if (dict && dictSize != 0) {
1300c1301
< * return : expected size, aka ZSTD_FRAMEHEADERSIZE_PREFIX.
---
> * return : expected size, aka ZSTD_startingInputLength().
1307c1308
< return ZSTD_FRAMEHEADERSIZE_PREFIX;
---
> return ZSTD_startingInputLength(zds->format);
1324c1325
< return ZSTD_FRAMEHEADERSIZE_PREFIX;
---
> return ZSTD_startingInputLength(dctx->format);
1328c1329
< * return : expected size, aka ZSTD_FRAMEHEADERSIZE_PREFIX.
---
> * return : expected size, aka ZSTD_startingInputLength().
1333c1334
< return ZSTD_FRAMEHEADERSIZE_PREFIX;
---
> return ZSTD_startingInputLength(dctx->format);
1564c1565
< return (MAX(ZSTD_FRAMEHEADERSIZE_MIN, hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
---
> return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
diff -r --color a/vendor/github.com/DataDog/zstd/zstd_double_fast.c b/vendor/github.com/DataDog/zstd/zstd_double_fast.c
67a68
> /* presumes that, if there is a dictionary, it must be using Attach mode */
150c151
< ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
---
> ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
159c160
< ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
---
> ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
249c250
< ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
---
> ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
280c281
< ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
---
> ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
299c300
< ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
---
> ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH);
372,374c373
< const U32 maxDistance = 1U << cParams->windowLog;
< const U32 lowestValid = ms->window.lowLimit;
< const U32 lowLimit = (endIndex - lowestValid > maxDistance) ? endIndex - maxDistance : lowestValid;
---
> const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
415c414
< ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
---
> ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
426c425
< ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
---
> ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
451c450
< ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
---
> ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
483c482
< ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
---
> ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
diff -r --color a/vendor/github.com/DataDog/zstd/zstd_fast.c b/vendor/github.com/DataDog/zstd/zstd_fast.c
11c11
< #include "zstd_compress_internal.h"
---
> #include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
46,47c46,47
< FORCE_INLINE_TEMPLATE
< size_t ZSTD_compressBlock_fast_generic(
---
> FORCE_INLINE_TEMPLATE size_t
> ZSTD_compressBlock_fast_generic(
73a74
> DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
76,77c77
< {
< U32 const maxRep = (U32)(ip0 - prefixStart);
---
> { U32 const maxRep = (U32)(ip0 - prefixStart);
120,121c120
< {
< size_t const step = ((ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
---
> { size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
140c139
< ZSTD_storeSeq(seqStore, ip0-anchor, anchor, offcode, mLength-MINMATCH);
---
> ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH);
152,154c151,152
< while ( (ip0 <= ilimit)
< && ( (offset_2>0)
< & (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) )) {
---
> while ( ((ip0 <= ilimit) & (offset_2>0)) /* offset_2==0 means offset_2 is invalidated */
> && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) {
157c155
< U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
---
> { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
161c159
< ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
---
> ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH);
181,182c179
< ZSTD_compressionParameters const* cParams = &ms->cParams;
< U32 const mls = cParams->minMatch;
---
> U32 const mls = ms->cParams.minMatch;
241a239
> DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
266c264
< ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
---
> ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
286c284
< ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
---
> ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
301c299
< ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
---
> ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
326c324
< ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
---
> ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
349,350c347
< ZSTD_compressionParameters const* cParams = &ms->cParams;
< U32 const mls = cParams->minMatch;
---
> U32 const mls = ms->cParams.minMatch;
382,384c379
< const U32 maxDistance = 1U << cParams->windowLog;
< const U32 validLow = ms->window.lowLimit;
< const U32 lowLimit = (endIndex - validLow > maxDistance) ? endIndex - maxDistance : validLow;
---
> const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
394a390,391
> DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic");
>
409d405
< size_t mLength;
415,416c411,412
< const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
< mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
---
> const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
> size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
418c414,416
< ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
---
> ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH);
> ip += rLength;
> anchor = ip;
426,429c424,427
< { const BYTE* matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
< const BYTE* lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
< U32 offset;
< mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
---
> { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
> const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
> U32 const offset = current - matchIndex;
> size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
431,434c429,432
< offset = current - matchIndex;
< offset_2 = offset_1;
< offset_1 = offset;
< ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
---
> offset_2 = offset_1; offset_1 = offset; /* update offset history */
> ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
> ip += mLength;
> anchor = ip;
437,440d434
< /* found a match : store it */
< ip += mLength;
< anchor = ip;
<
449c443
< const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
---
> const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
454,455c448,449
< U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
< ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
---
> { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
> ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH);
477,478c471
< ZSTD_compressionParameters const* cParams = &ms->cParams;
< U32 const mls = cParams->minMatch;
---
> U32 const mls = ms->cParams.minMatch;
diff -r --color a/vendor/github.com/DataDog/zstd/zstd.h b/vendor/github.com/DataDog/zstd/zstd.h
17a18
> #include <limits.h> /* INT_MAX */
74c75
< #define ZSTD_VERSION_RELEASE 1
---
> #define ZSTD_VERSION_RELEASE 4
199,201c200,206
< * Same as ZSTD_compress(), using an explicit ZSTD_CCtx
< * The function will compress at requested compression level,
< * ignoring any other parameter */
---
> * Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
> * Important : in order to behave similarly to `ZSTD_compress()`,
> * this function compresses at requested compression level,
> * __ignoring any other parameter__ .
> * If any advanced parameter was set using the advanced API,
> * they will all be reset. Only `compressionLevel` remains.
> */
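The one-shot variant above takes the level directly; a minimal sketch of what the note describes (helper name is illustrative, not part of the library):

    #include <zstd.h>

    /* One-shot compression at level 5; any advanced parameters previously
     * set on cctx are reset, per the doc comment above. */
    size_t one_shot(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity,
                    const void* src, size_t srcSize)
    {
        return ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, 5);
    }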
236c241
< * They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()
---
> * __They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()__ .
264,266c269,273
< * by the parameters used to construct the ZSTD_CDict. See ZSTD_CCtx_refCDict()
< * for more info (superseded-by-cdict). */
< ZSTD_c_compressionLevel=100, /* Update all compression parameters according to pre-defined cLevel table
---
> * by the parameters used to construct the ZSTD_CDict.
> * See ZSTD_CCtx_refCDict() for more info (superseded-by-cdict). */
> ZSTD_c_compressionLevel=100, /* Set compression parameters according to pre-defined cLevel table.
> * Note that exact compression parameters are dynamically determined,
> * depending on both compression level and srcSize (when known).
270c277,280
< * Note 2 : setting a level sets all default values of other compression parameters */
---
> * Note 2 : setting a level resets all other compression parameters to default */
> /* Advanced compression parameters :
> * It's possible to pin down compression parameters to some specific values.
> * In which case, these values are no longer dynamically selected by the compressor */
271a282,284
> * This will set a memory budget for streaming decompression,
> * with larger values requiring more memory
> * and typically compressing more.
275c288
< * requires explicitly allowing such window size at decompression stage if using streaming. */
---
> * requires explicitly allowing such size at streaming decompression stage. */
286c299
< * This parameter is useless when using "fast" strategy.
---
> * This parameter is useless for "fast" strategy.
292c305
< * This parameter is useless when using "fast" and "dFast" strategies.
---
> * This parameter is useless for "fast" and "dFast" strategies.
347c360
< * For streaming variants, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */
---
> * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */
366c379
< * The minimum size is automatically and transparently enforced */
---
> * The minimum size is automatically and transparently enforced. */
388a402
> * ZSTD_c_srcSizeHint
398a413
> ZSTD_c_experimentalParam7=1004
796,797c811,814
< * When compressing multiple messages / blocks using the same dictionary, it's recommended to load it only once.
< * ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup cost.
---
> * When compressing multiple messages or blocks using the same dictionary,
> * it's recommended to digest the dictionary only once, since it's a costly operation.
> * ZSTD_createCDict() will create a state from digesting a dictionary.
> * The resulting state can be used for future compression operations with very limited startup cost.
799,801c816,821
< * `dictBuffer` can be released after ZSTD_CDict creation, because its content is copied within CDict.
< * Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate `dictBuffer` content.
< * Note : A ZSTD_CDict can be created from an empty dictBuffer, but it is inefficient when used to compress small data. */
---
> * @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.
> * Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.
> * Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,
> * in which case the only thing that it transports is the @compressionLevel.
> * This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,
> * expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. */
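The digest-once pattern described above, sketched minimally in C (the helper name and buffer arguments are illustrative, not part of the library):

    #include <stddef.h>
    #include <zstd.h>

    /* Digest the dictionary once, then reuse the CDict across frames. */
    size_t compress_with_cdict(const void* dict, size_t dictSize,
                               const void* src, size_t srcSize,
                               void* dst, size_t dstCapacity)
    {
        ZSTD_CDict* const cdict = ZSTD_createCDict(dict, dictSize, 3 /* level */);
        ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
        /* cdict can now serve any number of compression calls */
        size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
                                                      src, srcSize, cdict);
        ZSTD_freeCCtx(cctx);
        ZSTD_freeCDict(cdict);
        return cSize; /* check with ZSTD_isError() */
    }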
928c948
< * Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dm_rawContent).
---
> * Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).
972c992
< * Note 3 : By default, the prefix is treated as raw content (ZSTD_dm_rawContent).
---
> * Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).
1017,1018c1037,1038
< #define ZSTD_FRAMEHEADERSIZE_PREFIX 5 /* minimum input size required to query frame header size */
< #define ZSTD_FRAMEHEADERSIZE_MIN 6
---
> #define ZSTD_FRAMEHEADERSIZE_PREFIX(format) ((format) == ZSTD_f_zstd1 ? 5 : 1) /* minimum input size required to query frame header size */
> #define ZSTD_FRAMEHEADERSIZE_MIN(format) ((format) == ZSTD_f_zstd1 ? 6 : 2)
1065a1086,1087
> #define ZSTD_SRCSIZEHINT_MIN 0
> #define ZSTD_SRCSIZEHINT_MAX INT_MAX
1075a1098,1115
> unsigned int matchPos; /* Match pos in dst */
> /* If seqDef.offset > 3, then this is seqDef.offset - 3
> * If seqDef.offset < 3, then this is the corresponding repeat offset
> * But if seqDef.offset < 3 and litLength == 0, this is the
> * repeat offset before the corresponding repeat offset
> * And if seqDef.offset == 3 and litLength == 0, this is the
> * most recent repeat offset - 1
> */
> unsigned int offset;
> unsigned int litLength; /* Literal length */
> unsigned int matchLength; /* Match length */
> /* 0 when seq not rep and seqDef.offset otherwise
> * when litLength == 0 this will be <= 4, otherwise <= 3 like normal
> */
> unsigned int rep;
> } ZSTD_Sequence;
>
> typedef struct {
1104c1144
< ZSTD_dlm_byRef = 1, /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
---
> ZSTD_dlm_byRef = 1 /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
1108,1116d1147
< /* Opened question : should we have a format ZSTD_f_auto ?
< * Today, it would mean exactly the same as ZSTD_f_zstd1.
< * But, in the future, should several formats become supported,
< * on the compression side, it would mean "default format".
< * On the decompression side, it would mean "automatic format detection",
< * so that ZSTD_f_zstd1 would mean "accept *only* zstd frames".
< * Since meaning is a little different, another option could be to define different enums for compression and decompression.
< * This question could be kept for later, when there are actually multiple formats to support,
< * but there is also the question of pinning enum values, and pinning value `0` is especially important */
1118c1149
< ZSTD_f_zstd1_magicless = 1, /* Variant of zstd frame format, without initial 4-bytes magic number.
---
> ZSTD_f_zstd1_magicless = 1 /* Variant of zstd frame format, without initial 4-bytes magic number.
1129c1160
< * Zstd currently supports the use of a CDict in two ways:
---
> * Zstd currently supports the use of a CDict in three ways:
1144a1176,1181
> * - The CDict's tables are not used at all, and instead we use the working
> * context alone to reload the dictionary and use params based on the source
> * size. See ZSTD_compress_insertDictionary() and ZSTD_compress_usingDict().
> * This method is effective when the dictionary sizes are very small relative
> * to the input size, and the input size is fairly large to begin with.
> *
1152a1190
> ZSTD_dictForceLoad = 3 /* Always reload the dictionary */
1161c1199
< ZSTD_lcm_uncompressed = 2, /**< Always emit uncompressed literals. */
---
> ZSTD_lcm_uncompressed = 2 /**< Always emit uncompressed literals. */
1212a1251,1259
> /*! ZSTD_getSequences() :
> * Extract sequences from the sequence store
> * zc can be used to insert custom compression params.
> * This function invokes ZSTD_compress2
> * @return : number of sequences extracted
> */
> ZSTDLIB_API size_t ZSTD_getSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
> size_t outSeqsSize, const void* src, size_t srcSize);
>
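A rough sketch of calling this new extraction entry point, based only on the prototype above; the output buffer sizing is a guess (one sequence per source byte as a generous upper bound) and error handling is elided:

    #define ZSTD_STATIC_LINKING_ONLY /* ZSTD_Sequence, ZSTD_getSequences */
    #include <stdlib.h>
    #include <zstd.h>

    size_t dump_sequences(const void* src, size_t srcSize)
    {
        ZSTD_CCtx* const zc = ZSTD_createCCtx();
        ZSTD_Sequence* const seqs = malloc(srcSize * sizeof(ZSTD_Sequence));
        size_t const nbSeqs = ZSTD_getSequences(zc, seqs, srcSize, src, srcSize);
        /* seqs[0..nbSeqs) now hold offset/litLength/matchLength/rep fields */
        free(seqs);
        ZSTD_freeCCtx(zc);
        return nbSeqs;
    }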
1219,1226c1266,1282
< * These functions make it possible to estimate memory usage
< * of a future {D,C}Ctx, before its creation.
< * ZSTD_estimateCCtxSize() will provide a budget large enough for any compression level up to selected one.
< * It will also consider src size to be arbitrarily "large", which is worst case.
< * If srcSize is known to always be small, ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
< * ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
< * ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
< * Note : CCtx size estimation is only correct for single-threaded compression. */
---
> * These functions make it possible to estimate memory usage of a future
> * {D,C}Ctx, before its creation.
> *
> * ZSTD_estimateCCtxSize() will provide a budget large enough for any
> * compression level up to selected one. Unlike ZSTD_estimateCStreamSize*(),
> * this estimate does not include space for a window buffer, so this estimate
> * is guaranteed to be enough for single-shot compressions, but not streaming
> * compressions. It will however assume the input may be arbitrarily large,
> * which is the worst case. If srcSize is known to always be small,
> * ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
> * ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with
> * ZSTD_getCParams() to create cParams from compressionLevel.
> * ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with
> * ZSTD_CCtxParams_setParameter().
> *
> * Note: only single-threaded compression is supported. This function will
> * return an error code if ZSTD_c_nbWorkers is >= 1. */
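For instance, comparing the worst-case budget against a small-source budget, per the note above (a sketch using the estimation helpers from this section; the 4 KB figure is an arbitrary example):

    #define ZSTD_STATIC_LINKING_ONLY /* estimation helpers */
    #include <stdio.h>
    #include <zstd.h>

    void print_cctx_budgets(void)
    {
        /* Enough for any input at level 19, assuming worst-case source size */
        size_t const anySrc = ZSTD_estimateCCtxSize(19);
        /* Tighter bound when sources are known to stay small (4 KB here) */
        ZSTD_compressionParameters const cParams = ZSTD_getCParams(19, 4096, 0);
        size_t const smallSrc = ZSTD_estimateCCtxSize_usingCParams(cParams);
        printf("any src: %zu bytes, small src: %zu bytes\n", anySrc, smallSrc);
    }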
1337c1393,1394
< * and its content must remain unmodified throughout the lifetime of CDict. */
---
> * and its content must remain unmodified throughout the lifetime of CDict.
> * note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */
1364c1421,1423
< * Same as ZSTD_compress_usingDict(), with fine-tune control over compression parameters (by structure) */
---
> * Note : this function is now DEPRECATED.
> * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
> * This prototype will be marked as deprecated and generate compilation warning on reaching v1.5.x */
1372c1431,1433
< * Same as ZSTD_compress_usingCDict(), with fine-tune control over frame parameters */
---
> * Note : this function is now REDUNDANT.
> * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.
> * This prototype will be marked as deprecated and generate compilation warning in some future version */
1443a1505,1510
> /* User's best guess of source size.
> * Hint is not valid when srcSizeHint == 0.
> * There is no guarantee that hint is close to actual source size,
> * but compression ratio may regress significantly if guess considerably underestimates */
> #define ZSTD_c_srcSizeHint ZSTD_c_experimentalParam7
>
1615a1683
> * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
1617c1685,1689
< ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);
---
> ZSTDLIB_API size_t
> ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
> int compressionLevel,
> unsigned long long pledgedSrcSize);
>
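The deprecation notes in this region point at the parameter-setting API as the replacement; spelled out for the srcSize variant, the approximate equivalent looks like this (a sketch against the v1.4.x advanced API; the helper name is illustrative):

    #include <zstd.h>

    size_t init_cstream_srcsize(ZSTD_CStream* zcs, int level,
                                unsigned long long pledgedSrcSize)
    {
        size_t err = ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
        if (!ZSTD_isError(err))
            err = ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, level);
        if (!ZSTD_isError(err))
            err = ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
        return err;
    }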
1626c1698
< * Note: dict is loaded with ZSTD_dm_auto (treated as a full zstd dictionary if
---
> * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if
1627a1700
> * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
1629c1702,1706
< ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel);
---
> ZSTDLIB_API size_t
> ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
> const void* dict, size_t dictSize,
> int compressionLevel);
>
1633c1710,1713
< * ZSTD_CCtx_setZstdParams(zcs, params); // Set the zstd params and leave the rest as-is
---
> * // Pseudocode: Set each zstd parameter and leave the rest as-is.
> * for ((param, value) : params) {
> * ZSTD_CCtx_setParameter(zcs, param, value);
> * }
1637,1641c1717,1727
< * pledgedSrcSize must be correct. If srcSize is not known at init time, use
< * value ZSTD_CONTENTSIZE_UNKNOWN. dict is loaded with ZSTD_dm_auto and ZSTD_dlm_byCopy.
< */
< ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
< ZSTD_parameters params, unsigned long long pledgedSrcSize);
---
> * dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.
> * pledgedSrcSize must be correct.
> * If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
> * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
> */
> ZSTDLIB_API size_t
> ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
> const void* dict, size_t dictSize,
> ZSTD_parameters params,
> unsigned long long pledgedSrcSize);
>
1647a1734
> * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
1649a1737
>
1651c1739
< * This function is deprecated, and is approximately equivalent to:
---
> * This function is DEPRECATED, and is approximately equivalent to:
1653c1741,1744
< * ZSTD_CCtx_setZstdFrameParams(zcs, fParams); // Set the zstd frame params and leave the rest as-is
---
> * // Pseudocode: Set each zstd frame parameter and leave the rest as-is.
> * for ((fParam, value) : fParams) {
> * ZSTD_CCtx_setParameter(zcs, fParam, value);
> * }
1659a1751
> * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
1661c1753,1757
< ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize);
---
> ZSTDLIB_API size_t
> ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
> const ZSTD_CDict* cdict,
> ZSTD_frameParameters fParams,
> unsigned long long pledgedSrcSize);
1675a1772
> * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
1720a1818
> * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
1722a1821
>
1729a1829
> * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
1731a1832
>
1737a1839
> * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
1911,1912c2013,2014
< Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
< User will have to take in charge required information to regenerate data, such as compressed and content sizes.
---
> Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
> But users will have to take in charge needed metadata to regenerate data, such as compressed and content sizes.
1923,1928c2025,2032
< + For inputs larger than a single block, really consider using regular ZSTD_compress() instead.
< Frame metadata is not that costly, and quickly becomes negligible as source size grows larger.
< - When a block is considered not compressible enough, ZSTD_compressBlock() result will be zero.
< In which case, nothing is produced into `dst` !
< + User must test for such outcome and deal directly with uncompressed data
< + ZSTD_decompressBlock() doesn't accept uncompressed data as input !!!
---
> + For inputs larger than a single block, consider using regular ZSTD_compress() instead.
> Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.
> - When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) !
> ===> In which case, nothing is produced into `dst` !
> + User __must__ test for such outcome and deal directly with uncompressed data
> + A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0.
> Doing so would mess up with statistics history, leading to potential data corruption.
> + ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!
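The zero-return rule above means callers must branch on the result themselves; a minimal sketch (assumes ZSTD_compressBegin() was already called on the cctx; helper name and raw-block handling are illustrative):

    #define ZSTD_STATIC_LINKING_ONLY /* block-level API */
    #include <string.h>
    #include <zstd.h>

    size_t emit_block(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity,
                      const void* src, size_t srcSize)
    {
        size_t const cSize = ZSTD_compressBlock(cctx, dst, dstCapacity, src, srcSize);
        if (ZSTD_isError(cSize)) return cSize;
        if (cSize == 0) {                /* not compressible: dst is untouched */
            memcpy(dst, src, srcSize);   /* store raw, and flag it as raw in   */
            return srcSize;              /* the container format yourself      */
        }
        return cSize;
    }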
diff -r --color a/vendor/github.com/DataDog/zstd/zstd_internal.h b/vendor/github.com/DataDog/zstd/zstd_internal.h
59,61c59,61
< * In debug modes, prints additional information. In order to do that
< * (particularly, printing the conditional that failed), this can't just wrap
< * RETURN_ERROR().
---
> * In debug modes, prints additional information.
> * In order to do that (particularly, printing the conditional that failed),
> * this can't just wrap RETURN_ERROR().
200,201c200,201
< #define WILDCOPY_OVERLENGTH 8
< #define VECLEN 16
---
> #define WILDCOPY_OVERLENGTH 32
> #define WILDCOPY_VECLEN 16
205c205
< ZSTD_overlap_src_before_dst,
---
> ZSTD_overlap_src_before_dst
210,236c210,215
< * custom version of memcpy(), can overwrite up to WILDCOPY_OVERLENGTH bytes (if length==0) */
< MEM_STATIC FORCE_INLINE_ATTR DONT_VECTORIZE
< void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e ovtype)
< {
< ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
< const BYTE* ip = (const BYTE*)src;
< BYTE* op = (BYTE*)dst;
< BYTE* const oend = op + length;
<
< assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff < -8));
< if (length < VECLEN || (ovtype == ZSTD_overlap_src_before_dst && diff < VECLEN)) {
< do
< COPY8(op, ip)
< while (op < oend);
< }
< else {
< if ((length & 8) == 0)
< COPY8(op, ip);
< do {
< COPY16(op, ip);
< }
< while (op < oend);
< }
< }
<
< /*! ZSTD_wildcopy_16min() :
< * same semantics as ZSTD_wilcopy() except guaranteed to be able to copy 16 bytes at the start */
---
> * Custom version of memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0)
> * @param ovtype controls the overlap detection
> * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
> * - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
> * The src buffer must be before the dst buffer.
> */
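Why the 8-byte gap matters: with a smaller gap, each 8-byte chunk would read bytes the previous chunk just wrote. A standalone illustration of the constraint (not the library code):

    #include <string.h>

    /* Safe only when dst is at least 8 bytes past src, mirroring the
     * ZSTD_overlap_src_before_dst contract above; like the real wildcopy,
     * it may write up to 7 bytes past dst+length (8 if length == 0). */
    static void copy8_chunks(void* dst, const void* src, size_t length)
    {
        unsigned char*       op   = (unsigned char*)dst;
        const unsigned char* ip   = (const unsigned char*)src;
        unsigned char* const oend = op + length;
        do { memcpy(op, ip, 8); op += 8; ip += 8; } while (op < oend);
    }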
238c217
< void ZSTD_wildcopy_16min(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e ovtype)
---
> void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
245,246c224
< assert(length >= 8);
< assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff < -8));
---
> assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff <= -WILDCOPY_VECLEN));
248,256c226,239
< if (ovtype == ZSTD_overlap_src_before_dst && diff < VECLEN) {
< do
< COPY8(op, ip)
< while (op < oend);
< }
< else {
< if ((length & 8) == 0)
< COPY8(op, ip);
< do {
---
> if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
> /* Handle short offset copies. */
> do {
> COPY8(op, ip)
> } while (op < oend);
> } else {
> assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
> /* Separate out the first two COPY16() calls because the copy length is
> * almost certain to be short, so the branches have different
> * probabilities.
> * On gcc-9 unrolling once is +1.6%, twice is +2%, thrice is +1.8%.
> * On clang-8 unrolling once is +1.4%, twice is +3.3%, thrice is +3%.
> */
> COPY16(op, ip);
258,259c241,246
< }
< while (op < oend);
---
> if (op >= oend) return;
> do {
> COPY16(op, ip);
> COPY16(op, ip);
> }
> while (op < oend);
263,272d249
< MEM_STATIC void ZSTD_wildcopy_e(void* dst, const void* src, void* dstEnd) /* should be faster for decoding, but strangely, not verified on all platform */
< {
< const BYTE* ip = (const BYTE*)src;
< BYTE* op = (BYTE*)dst;
< BYTE* const oend = (BYTE*)dstEnd;
< do
< COPY8(op, ip)
< while (op < oend);
< }
<
326c303,305
< return 31 - __builtin_clz(val);
---
> return __builtin_clz (val) ^ 31;
> # elif defined(__ICCARM__) /* IAR Intrinsic */
> return 31 - __CLZ(val);
diff -r --color a/vendor/github.com/DataDog/zstd/zstd_lazy.c b/vendor/github.com/DataDog/zstd/zstd_lazy.c
245,247c245
< U32 const maxDistance = 1U << cParams->windowLog;
< U32 const windowValid = ms->window.lowLimit;
< U32 const windowLow = (current - windowValid > maxDistance) ? current - maxDistance : windowValid;
---
> U32 const windowLow = ZSTD_getLowestMatchIndex(ms, current, cParams->windowLog);
500,501c498,501
< const U32 lowValid = ms->window.lowLimit;
< const U32 lowLimit = (current - lowValid > maxDistance) ? current - maxDistance : lowValid;
---
> const U32 lowestValid = ms->window.lowLimit;
> const U32 withinMaxDistance = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid;
> const U32 isDictionary = (ms->loadedDictEnd != 0);
> const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
622,623c622,625
< FORCE_INLINE_TEMPLATE
< size_t ZSTD_compressBlock_lazy_generic(
---
> typedef enum { search_hashChain, search_binaryTree } searchMethod_e;
>
> FORCE_INLINE_TEMPLATE size_t
> ZSTD_compressBlock_lazy_generic(
627c629
< const U32 searchMethod, const U32 depth,
---
> const searchMethod_e searchMethod, const U32 depth,
643,644c645,648
< (searchMethod ? ZSTD_BtFindBestMatch_dictMatchState_selectMLS : ZSTD_HcFindBestMatch_dictMatchState_selectMLS) :
< (searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS);
---
> (searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_dictMatchState_selectMLS
> : ZSTD_HcFindBestMatch_dictMatchState_selectMLS) :
> (searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_selectMLS
> : ZSTD_HcFindBestMatch_selectMLS);
809c813
< ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
---
> ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
827c831
< ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
---
> ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
842c846
< ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
---
> ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
853c857
< return iend - anchor;
---
> return (size_t)(iend - anchor);
861c865
< return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_noDict);
---
> return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
868c872
< return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_noDict);
---
> return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
875c879
< return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_noDict);
---
> return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
882c886
< return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_noDict);
---
> return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
889c893
< return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_dictMatchState);
---
> return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
896c900
< return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_dictMatchState);
---
> return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
903c907
< return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_dictMatchState);
---
> return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
910c914
< return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_dictMatchState);
---
> return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
919c923
< const U32 searchMethod, const U32 depth)
---
> const searchMethod_e searchMethod, const U32 depth)
937c941
< searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
---
> searchMax_f searchMax = searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
1050c1054
< ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
---
> ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
1065c1069
< ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
---
> ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
1078c1082
< return iend - anchor;
---
> return (size_t)(iend - anchor);
1086c1090
< return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 0);
---
> return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
1094c1098
< return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 1);
---
> return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
1102c1106
< return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 2);
---
> return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
1110c1114
< return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 1, 2);
---
> return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
diff -r --color a/vendor/github.com/DataDog/zstd/zstd_ldm.c b/vendor/github.com/DataDog/zstd/zstd_ldm.c
52,54c52,54
< size_t const ldmBucketSize =
< ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
< size_t const totalSize = ldmBucketSize + ldmHSize * sizeof(ldmEntry_t);
---
> size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
> size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
> + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
586c586
< ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength,
---
> ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
diff -r --color a/vendor/github.com/DataDog/zstd/zstdmt_compress.c b/vendor/github.com/DataDog/zstd/zstdmt_compress.c
671c671
< size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, jobParams, job->fullFrameSize);
---
> size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize);
683c683
< jobParams, pledgedSrcSize);
---
> &jobParams, pledgedSrcSize);
929a930,933
> /* Copy the mutex/cond out */
> ZSTD_pthread_mutex_t const mutex = mtctx->jobs[jobID].job_mutex;
> ZSTD_pthread_cond_t const cond = mtctx->jobs[jobID].job_cond;
>
932,933c936,940
< mtctx->jobs[jobID].dstBuff = g_nullBuffer;
< mtctx->jobs[jobID].cSize = 0;
---
>
> /* Clear the job description, but keep the mutex/cond */
> memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID]));
> mtctx->jobs[jobID].job_mutex = mutex;
> mtctx->jobs[jobID].job_cond = cond;
935d941
< memset(mtctx->jobs, 0, (mtctx->jobIDMask+1)*sizeof(ZSTDMT_jobDescription));
1031c1037
< static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
---
> static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(const ZSTD_CCtx_params* params)
1033c1039
< ZSTD_CCtx_params jobParams = params;
---
> ZSTD_CCtx_params jobParams = *params;
1154c1160
< static unsigned ZSTDMT_computeTargetJobLog(ZSTD_CCtx_params const params)
---
> static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params)
1157c1163
< if (params.ldmParams.enableLdm) {
---
> if (params->ldmParams.enableLdm) {
1161c1167
< jobLog = MAX(21, params.cParams.chainLog + 4);
---
> jobLog = MAX(21, params->cParams.chainLog + 4);
1163c1169
< jobLog = MAX(20, params.cParams.windowLog + 2);
---
> jobLog = MAX(20, params->cParams.windowLog + 2);
1196c1202
< static size_t ZSTDMT_computeOverlapSize(ZSTD_CCtx_params const params)
---
> static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params)
1198,1199c1204,1205
< int const overlapRLog = 9 - ZSTDMT_overlapLog(params.overlapLog, params.cParams.strategy);
< int ovLog = (overlapRLog >= 8) ? 0 : (params.cParams.windowLog - overlapRLog);
---
> int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy);
> int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog);
1201c1207
< if (params.ldmParams.enableLdm) {
---
> if (params->ldmParams.enableLdm) {
1206c1212
< ovLog = MIN(params.cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
---
> ovLog = MIN(params->cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
1210c1216
< DEBUGLOG(4, "overlapLog : %i", params.overlapLog);
---
> DEBUGLOG(4, "overlapLog : %i", params->overlapLog);
1216c1222
< ZSTDMT_computeNbJobs(ZSTD_CCtx_params params, size_t srcSize, unsigned nbWorkers)
---
> ZSTDMT_computeNbJobs(const ZSTD_CCtx_params* params, size_t srcSize, unsigned nbWorkers)
1239,1241c1245,1247
< ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(params);
< size_t const overlapSize = ZSTDMT_computeOverlapSize(params);
< unsigned const nbJobs = ZSTDMT_computeNbJobs(params, srcSize, params.nbWorkers);
---
> ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(&params);
> size_t const overlapSize = ZSTDMT_computeOverlapSize(&params);
> unsigned const nbJobs = ZSTDMT_computeNbJobs(&params, srcSize, params.nbWorkers);
1259c1265
< return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, jobParams);
---
> return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, &jobParams);
1407c1413
< ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(params);
---
> ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(&params);
1412c1418
< singleThreadParams, pledgedSrcSize);
---
> &singleThreadParams, pledgedSrcSize);
1438c1444
< mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(params);
---
> mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(&params);
1442c1448
< mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(params);
---
> mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(&params);
diff -r --color a/vendor/github.com/DataDog/zstd/zstd_opt.c b/vendor/github.com/DataDog/zstd/zstd_opt.c
555d554
< U32 const maxDistance = 1U << cParams->windowLog;
572,573c571
< U32 const windowValid = ms->window.lowLimit;
< U32 const windowLow = ((current - windowValid) > maxDistance) ? current - maxDistance : windowValid;
---
> U32 const windowLow = ZSTD_getLowestMatchIndex(ms, current, cParams->windowLog);
677d674
< size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
678a676
> size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
683a682
> if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
686a686
> assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
689c689
< match = base + matchIndex; /* prepare for match[matchLength] */
---
> match = base + matchIndex; /* prepare for match[matchLength] read */
1101c1101
< ZSTD_storeSeq(seqStore, llen, anchor, offCode, mlen-MINMATCH);
---
> ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen-MINMATCH);
diff -r --color a/vendor/github.com/DataDog/zstd/zstd_stream.go b/vendor/github.com/DataDog/zstd/zstd_stream.go
5c5
< #define ZBUFF_DISABLE_DEPRECATE_WARNINGS
---
> #include "stdint.h" // for uintptr_t
7c7,49
< #include "zbuff.h"
---
>
> typedef struct compressStream2_result_s {
> size_t return_code;
> size_t bytes_consumed;
> size_t bytes_written;
> } compressStream2_result;
>
> static void ZSTD_compressStream2_wrapper(compressStream2_result* result, ZSTD_CCtx* ctx, uintptr_t dst, size_t maxDstSize, const uintptr_t src, size_t srcSize) {
> ZSTD_outBuffer outBuffer = { (void*)dst, maxDstSize, 0 };
> ZSTD_inBuffer inBuffer = { (void*)src, srcSize, 0 };
> size_t retCode = ZSTD_compressStream2(ctx, &outBuffer, &inBuffer, ZSTD_e_continue);
>
> result->return_code = retCode;
> result->bytes_consumed = inBuffer.pos;
> result->bytes_written = outBuffer.pos;
> }
>
> static void ZSTD_compressStream2_finish(compressStream2_result* result, ZSTD_CCtx* ctx, uintptr_t dst, size_t maxDstSize, const uintptr_t src, size_t srcSize) {
> ZSTD_outBuffer outBuffer = { (void*)dst, maxDstSize, 0 };
> ZSTD_inBuffer inBuffer = { (void*)src, srcSize, 0 };
> size_t retCode = ZSTD_compressStream2(ctx, &outBuffer, &inBuffer, ZSTD_e_end);
>
> result->return_code = retCode;
> result->bytes_consumed = inBuffer.pos;
> result->bytes_written = outBuffer.pos;
> }
>
> // decompressStream2_result is the same as compressStream2_result, but kept as a separate struct for easier changes
> typedef struct decompressStream2_result_s {
> size_t return_code;
> size_t bytes_consumed;
> size_t bytes_written;
> } decompressStream2_result;
>
> static void ZSTD_decompressStream_wrapper(decompressStream2_result* result, ZSTD_DCtx* ctx, uintptr_t dst, size_t maxDstSize, const uintptr_t src, size_t srcSize) {
> ZSTD_outBuffer outBuffer = { (void*)dst, maxDstSize, 0 };
> ZSTD_inBuffer inBuffer = { (void*)src, srcSize, 0 };
> size_t retCode = ZSTD_decompressStream(ctx, &outBuffer, &inBuffer);
>
> result->return_code = retCode;
> result->bytes_consumed = inBuffer.pos;
> result->bytes_written = outBuffer.pos;
> }
14a57
> "sync"
18a62
> var errReaderClosed = errors.New("Reader is closed")
25a70
> srcBuffer []byte
28a74
> resultBuffer *C.compressStream2_result
63c109
< ctx := C.ZSTD_createCCtx()
---
> ctx := C.ZSTD_createCStream()
65,70c111,113
< if dict == nil {
< err = getError(int(C.ZSTD_compressBegin(ctx,
< C.int(level))))
< } else {
< err = getError(int(C.ZSTD_compressBegin_usingDict(
< ctx,
---
> 	// Load dictionary if any
> if dict != nil {
> err = getError(int(C.ZSTD_CCtx_loadDictionary(ctx,
73c116,121
< C.int(level))))
---
> )))
> }
>
> if err == nil {
> // Only set level if the ctx is not in error already
> err = getError(int(C.ZSTD_CCtx_setParameter(ctx, C.ZSTD_c_compressionLevel, C.int(level))))
79a128
> srcBuffer: make([]byte, 0),
82a132
> resultBuffer: new(C.compressStream2_result),
94a145
> w.dstBuffer = w.dstBuffer[0:cap(w.dstBuffer)]
99c150,164
< retCode := C.ZSTD_compressContinue(
---
> // Do not do an extra memcopy if zstd ingest all input data
> srcData := p
> fastPath := len(w.srcBuffer) == 0
> if !fastPath {
> w.srcBuffer = append(w.srcBuffer, p...)
> srcData = w.srcBuffer
> }
>
> srcPtr := C.uintptr_t(uintptr(0)) // Do not point anywhere, if src is empty
> if len(srcData) > 0 {
> srcPtr = C.uintptr_t(uintptr(unsafe.Pointer(&srcData[0])))
> }
>
> C.ZSTD_compressStream2_wrapper(
> w.resultBuffer,
101c166
< unsafe.Pointer(&w.dstBuffer[0]),
---
> C.uintptr_t(uintptr(unsafe.Pointer(&w.dstBuffer[0]))),
103,106c168,173
< unsafe.Pointer(&p[0]),
< C.size_t(len(p)))
<
< if err := getError(int(retCode)); err != nil {
---
> srcPtr,
> C.size_t(len(srcData)),
> )
> runtime.KeepAlive(p) // Ensure p is kept until here so pointer doesn't disappear during C call
> ret := int(w.resultBuffer.return_code)
> if err := getError(ret); err != nil {
109d175
< written := int(retCode)
110a177,194
> consumed := int(w.resultBuffer.bytes_consumed)
> if !fastPath {
> w.srcBuffer = w.srcBuffer[consumed:]
> } else {
> remaining := len(p) - consumed
> if remaining > 0 {
> // We still have some non-consumed data, copy remaining data to srcBuffer
> // Try to not reallocate w.srcBuffer if we already have enough space
> if cap(w.srcBuffer) >= remaining {
> w.srcBuffer = w.srcBuffer[0:remaining]
> } else {
> w.srcBuffer = make([]byte, remaining)
> }
> copy(w.srcBuffer, p[consumed:])
> }
> }
>
> written := int(w.resultBuffer.bytes_written)
125,133c209,210
< retCode := C.ZSTD_compressEnd(
< w.ctx,
< unsafe.Pointer(&w.dstBuffer[0]),
< C.size_t(len(w.dstBuffer)),
< unsafe.Pointer(nil),
< C.size_t(0))
<
< if err := getError(int(retCode)); err != nil {
< return err
---
> if w.firstError != nil {
> return w.firstError
135,136d211
< written := int(retCode)
< retCode = C.ZSTD_freeCCtx(w.ctx) // Safely close buffer before writing the end
138,140c213,218
< if err := getError(int(retCode)); err != nil {
< return err
< }
---
> ret := 1 // So we loop at least once
> for ret > 0 {
> srcPtr := C.uintptr_t(uintptr(0)) // Do not point anywhere, if src is empty
> if len(w.srcBuffer) > 0 {
> srcPtr = C.uintptr_t(uintptr(unsafe.Pointer(&w.srcBuffer[0])))
> }
142,144c220,241
< _, err := w.underlyingWriter.Write(w.dstBuffer[:written])
< if err != nil {
< return err
---
> C.ZSTD_compressStream2_finish(
> w.resultBuffer,
> w.ctx,
> C.uintptr_t(uintptr(unsafe.Pointer(&w.dstBuffer[0]))),
> C.size_t(len(w.dstBuffer)),
> srcPtr,
> C.size_t(len(w.srcBuffer)),
> )
> ret = int(w.resultBuffer.return_code)
> if err := getError(ret); err != nil {
> return err
> }
> w.srcBuffer = w.srcBuffer[w.resultBuffer.bytes_consumed:]
> written := int(w.resultBuffer.bytes_written)
> w.underlyingWriter.Write(w.dstBuffer[:written])
>
> if ret > 0 { // We have a hint if we need to resize the dstBuffer
> w.dstBuffer = w.dstBuffer[:cap(w.dstBuffer)]
> if len(w.dstBuffer) < ret {
> w.dstBuffer = make([]byte, ret)
> }
> }
146c243,286
< return nil
---
>
> return getError(int(C.ZSTD_freeCStream(w.ctx)))
> }
>
> // cSize is the recommended size of reader.compressionBuffer. This func and
> // invocation allow for a one-time check for validity.
> var cSize = func() int {
> v := int(C.ZSTD_DStreamInSize())
> if v <= 0 {
> panic(fmt.Errorf("ZSTD_DStreamInSize() returned invalid size: %v", v))
> }
> return v
> }()
>
> // dSize is the recommended size of reader.decompressionBuffer. This func and
> // invocation allow for a one-time check for validity.
> var dSize = func() int {
> v := int(C.ZSTD_DStreamOutSize())
> if v <= 0 {
> panic(fmt.Errorf("ZSTD_DStreamOutSize() returned invalid size: %v", v))
> }
> return v
> }()
>
> // cPool is a pool of buffers for use in reader.compressionBuffer. Buffers are
> // taken from the pool in NewReaderDict, returned in reader.Close(). Returns a
> // pointer to a slice to avoid the extra allocation of returning the slice as a
> // value.
> var cPool = sync.Pool{
> New: func() interface{} {
> buff := make([]byte, cSize)
> return &buff
> },
> }
>
> // dPool is a pool of buffers for use in reader.decompressionBuffer. Buffers are
> // taken from the pool in NewReaderDict, returned in reader.Close(). Returns a
> // pointer to a slice to avoid the extra allocation of returning the slice as a
> // value.
> var dPool = sync.Pool{
> New: func() interface{} {
> buff := make([]byte, dSize)
> return &buff
> },
151c291
< ctx *C.ZBUFF_DCtx
---
> ctx *C.ZSTD_DCtx
159a300
> resultBuffer *C.decompressStream2_result
175c316
< ctx := C.ZBUFF_createDCtx()
---
> ctx := C.ZSTD_createDStream()
177c318
< err = getError(int(C.ZBUFF_decompressInit(ctx)))
---
> err = getError(int(C.ZSTD_initDStream(ctx)))
179,190c320,327
< err = getError(int(C.ZBUFF_decompressInitDictionary(
< ctx,
< unsafe.Pointer(&dict[0]),
< C.size_t(len(dict)))))
< }
< cSize := int(C.ZBUFF_recommendedDInSize())
< dSize := int(C.ZBUFF_recommendedDOutSize())
< if cSize <= 0 {
< panic(fmt.Errorf("ZBUFF_recommendedDInSize() returned invalid size: %v", cSize))
< }
< if dSize <= 0 {
< panic(fmt.Errorf("ZBUFF_recommendedDOutSize() returned invalid size: %v", dSize))
---
> err = getError(int(C.ZSTD_DCtx_reset(ctx, C.ZSTD_reset_session_only)))
> if err == nil {
> 		// Only load dictionary if we successfully initialized the context
> err = getError(int(C.ZSTD_DCtx_loadDictionary(
> ctx,
> unsafe.Pointer(&dict[0]),
> C.size_t(len(dict)))))
> }
192,194c329,330
<
< compressionBuffer := make([]byte, cSize)
< decompressionBuffer := make([]byte, dSize)
---
> compressionBufferP := cPool.Get().(*[]byte)
> decompressionBufferP := dPool.Get().(*[]byte)
198,199c334,335
< compressionBuffer: compressionBuffer,
< decompressionBuffer: decompressionBuffer,
---
> compressionBuffer: *compressionBufferP,
> decompressionBuffer: *decompressionBufferP,
201a338
> resultBuffer: new(C.decompressStream2_result),
208c345,358
< return getError(int(C.ZBUFF_freeDCtx(r.ctx)))
---
> if r.firstError != nil {
> return r.firstError
> }
>
> cb := r.compressionBuffer
> db := r.decompressionBuffer
> 	// Ensure that we won't reuse the buffers
> r.firstError = errReaderClosed
> r.compressionBuffer = nil
> r.decompressionBuffer = nil
>
> cPool.Put(&cb)
> dPool.Put(&db)
> return getError(int(C.ZSTD_freeDStream(r.ctx)))
211a362,364
> if r.firstError != nil {
> return 0, r.firstError
> }
238,240c391,397
< cSrcSize := C.size_t(len(src))
< cDstSize := C.size_t(len(r.decompressionBuffer))
< retCode := int(C.ZBUFF_decompressContinue(
---
> srcPtr := C.uintptr_t(uintptr(0)) // Do not point anywhere, if src is empty
> if len(src) > 0 {
> srcPtr = C.uintptr_t(uintptr(unsafe.Pointer(&src[0])))
> }
>
> C.ZSTD_decompressStream_wrapper(
> r.resultBuffer,
242,245c399,404
< unsafe.Pointer(&r.decompressionBuffer[0]),
< &cDstSize,
< unsafe.Pointer(&src[0]),
< &cSrcSize))
---
> C.uintptr_t(uintptr(unsafe.Pointer(&r.decompressionBuffer[0]))),
> C.size_t(len(r.decompressionBuffer)),
> srcPtr,
> C.size_t(len(src)),
> )
> retCode := int(r.resultBuffer.return_code)
247c406
< 	// Keep src here even though, we reuse later, the code might be deleted at some point
---
> 	// Keep src here even though we reuse it later; the code might be deleted at some point
254,255c413,415
< if int(cSrcSize) < len(src) {
< left := src[int(cSrcSize):]
---
> bytesConsumed := int(r.resultBuffer.bytes_consumed)
> if bytesConsumed < len(src) {
> left := src[bytesConsumed:]
258,259c418,419
< r.compressionLeft = len(src) - int(cSrcSize)
< r.decompSize = int(cDstSize)
---
> r.compressionLeft = len(src) - int(bytesConsumed)
> r.decompSize = int(r.resultBuffer.bytes_written)
diff -r --color a/vendor/github.com/DataDog/zstd/zstd_v01.c b/vendor/github.com/DataDog/zstd/zstd_v01.c
349c349
< return 31 - __builtin_clz (val);
---
> return __builtin_clz (val) ^ 31;
diff -r --color a/vendor/github.com/DataDog/zstd/zstd_v02.c b/vendor/github.com/DataDog/zstd/zstd_v02.c
356c356
< return 31 - __builtin_clz (val);
---
> return __builtin_clz (val) ^ 31;
2891a2892
> if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
diff -r --color a/vendor/github.com/DataDog/zstd/zstd_v03.c b/vendor/github.com/DataDog/zstd/zstd_v03.c
359c359
< return 31 - __builtin_clz (val);
---
> return __builtin_clz (val) ^ 31;
2532a2533
> if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
diff -r --color a/vendor/github.com/DataDog/zstd/zstd_v04.c b/vendor/github.com/DataDog/zstd/zstd_v04.c
630c630
< return 31 - __builtin_clz (val);
---
> return __builtin_clz (val) ^ 31;
2657a2658
> if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
3036a3038,3040
> size_t litCSize;
>
> if (srcSize > BLOCKSIZE) return ERROR(corruption_detected);
3039c3043
< size_t litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
---
> litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
diff -r --color a/vendor/github.com/DataDog/zstd/zstd_v05.c b/vendor/github.com/DataDog/zstd/zstd_v05.c
221,225d220
< MEM_STATIC U32 MEM_readLE24(const void* memPtr)
< {
< return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
< }
<
764c759
< return 31 - __builtin_clz (val);
---
> return __builtin_clz (val) ^ 31;
3161,3164c3156,3163
< else if (dumps + 3 <= de) {
< litLength = MEM_readLE24(dumps);
< if (litLength&1) litLength>>=1, dumps += 3;
< else litLength = (U16)(litLength)>>1, dumps += 2;
---
> else if (dumps + 2 <= de) {
> litLength = MEM_readLE16(dumps);
> dumps += 2;
> if ((litLength & 1) && dumps < de) {
> litLength += *dumps << 16;
> dumps += 1;
> }
> litLength>>=1;
3194,3197c3193,3200
< else if (dumps + 3 <= de) {
< matchLength = MEM_readLE24(dumps);
< if (matchLength&1) matchLength>>=1, dumps += 3;
< else matchLength = (U16)(matchLength)>>1, dumps += 2;
---
> else if (dumps + 2 <= de) {
> matchLength = MEM_readLE16(dumps);
> dumps += 2;
> if ((matchLength & 1) && dumps < de) {
> matchLength += *dumps << 16;
> dumps += 1;
> }
> matchLength >>= 1;
diff -r --color a/vendor/github.com/DataDog/zstd/zstd_v06.c b/vendor/github.com/DataDog/zstd/zstd_v06.c
863c863
< return 31 - __builtin_clz (val);
---
> return __builtin_clz (val) ^ 31;
diff -r --color a/vendor/github.com/DataDog/zstd/zstd_v07.c b/vendor/github.com/DataDog/zstd/zstd_v07.c
533c533
< return 31 - __builtin_clz (val);
---
> return __builtin_clz (val) ^ 31;
diff -r --color a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go
5d4
< "regexp"
29c28
< type unitMap map[string]int64
---
> type unitMap map[byte]int64
32,34c31,32
< decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
< binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
< sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`)
---
> decimalMap = unitMap{'k': KB, 'm': MB, 'g': GB, 't': TB, 'p': PB}
> binaryMap = unitMap{'k': KiB, 'm': MiB, 'g': GiB, 't': TiB, 'p': PiB}
37,38c35,38
< var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
< var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
---
> var (
> decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
> binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
> )
92,93c92,96
< matches := sizeRegex.FindStringSubmatch(sizeStr)
< if len(matches) != 4 {
---
> // TODO: rewrite to use strings.Cut if there's a space
> // once Go < 1.18 is deprecated.
> sep := strings.LastIndexAny(sizeStr, "01234567890. ")
> if sep == -1 {
> // There should be at least a digit.
95a99,107
> var num, sfx string
> if sizeStr[sep] != ' ' {
> num = sizeStr[:sep+1]
> sfx = sizeStr[sep+1:]
> } else {
> // Omit the space separator.
> num = sizeStr[:sep]
> sfx = sizeStr[sep+1:]
> }
97c109
< size, err := strconv.ParseFloat(matches[1], 64)
---
> size, err := strconv.ParseFloat(num, 64)
100a113,120
> // Backward compatibility: reject negative sizes.
> if size < 0 {
> return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
> }
>
> if len(sfx) == 0 {
> return int64(size), nil
> }
102,103c122,136
< unitPrefix := strings.ToLower(matches[3])
< if mul, ok := uMap[unitPrefix]; ok {
---
> // Process the suffix.
>
> if len(sfx) > 3 { // Too long.
> goto badSuffix
> }
> sfx = strings.ToLower(sfx)
> // Trivial case: b suffix.
> if sfx[0] == 'b' {
> if len(sfx) > 1 { // no extra characters allowed after b.
> goto badSuffix
> }
> return int64(size), nil
> }
> // A suffix from the map.
> if mul, ok := uMap[sfx[0]]; ok {
104a138,147
> } else {
> goto badSuffix
> }
>
> // The suffix may have extra "b" or "ib" (e.g. KiB or MB).
> switch {
> case len(sfx) == 2 && sfx[1] != 'b':
> goto badSuffix
> case len(sfx) == 3 && sfx[1:] != "ib":
> goto badSuffix
107a151,153
>
> badSuffix:
> return -1, fmt.Errorf("invalid suffix: '%s'", sfx)
diff -r --color a/vendor/github.com/filecoin-project/filecoin-ffi/cgo/fvm.go b/vendor/github.com/filecoin-project/filecoin-ffi/cgo/fvm.go
11c11
< func CreateFvmMachine(fvmVersion FvmRegisteredVersion, chainEpoch, baseFeeHi, baseFeeLo, baseCircSupplyHi, baseCircSupplyLo, networkVersion uint64, stateRoot SliceRefUint8, manifestCid SliceRefUint8, tracing bool, blockstoreId, externsId uint64) (*FvmMachine, error) {
---
> func CreateFvmMachine(fvmVersion FvmRegisteredVersion, chainEpoch, chainTimestamp, baseFeeHi, baseFeeLo, baseCircSupplyHi, baseCircSupplyLo, networkVersion uint64, stateRoot SliceRefUint8, manifestCid SliceRefUint8, tracing bool, blockstoreId, externsId uint64) (*FvmMachine, error) {
14a15
> C.uint64_t(chainTimestamp),
38c39
< func CreateFvmDebugMachine(fvmVersion FvmRegisteredVersion, chainEpoch, baseFeeHi, baseFeeLo, baseCircSupplyHi, baseCircSupplyLo, networkVersion uint64, stateRoot SliceRefUint8, actorRedirect SliceRefUint8, tracing bool, blockstoreId, externsId uint64) (*FvmMachine, error) {
---
> func CreateFvmDebugMachine(fvmVersion FvmRegisteredVersion, chainEpoch, chainTimestamp, baseFeeHi, baseFeeLo, baseCircSupplyHi, baseCircSupplyLo, networkVersion uint64, stateRoot SliceRefUint8, actorRedirect SliceRefUint8, tracing bool, blockstoreId, externsId uint64) (*FvmMachine, error) {
41a43
> C.uint64_t(chainTimestamp),
diff -r --color a/vendor/github.com/filecoin-project/filecoin-ffi/cgo/types.go b/vendor/github.com/filecoin-project/filecoin-ffi/cgo/types.go
112a113,114
> Events []byte
> EventsRoot []byte
615d616
<
671a673,674
> Events: r.events.copy(),
> EventsRoot: r.events_root.copy(),
diff -r --color a/vendor/github.com/filecoin-project/filecoin-ffi/filcrypto.h b/vendor/github.com/filecoin-project/filecoin-ffi/filcrypto.h
201,236d200
< typedef enum FvmError
< #else
< typedef int32_t FvmError_t; enum
< #endif
< {
< /** \brief
< * The error code returned by cgo if the blockstore handle isn't valid.
< */
< FVM_ERROR_INVALID_HANDLE = -1,
< /** \brief
< * The error code returned by cgo when the block isn't found.
< */
< FVM_ERROR_NOT_FOUND = -2,
< /** \brief
< * The error code returned by cgo when there's some underlying system error.
< */
< FVM_ERROR_IO = -3,
< /** \brief
< * The error code returned by cgo when an argument is invalid.
< */
< FVM_ERROR_INVALID_ARGUMENT = -4,
< /** \brief
< * The error code returned by cgo when the application panics.
< */
< FVM_ERROR_PANIC = -5,
< }
< #ifdef DOXYGEN
< FvmError_t
< #endif
< ;
<
< void dummy (
< FvmError_t _error);
<
< /** \remark Has the same ABI as `int32_t` **/
< #ifdef DOXYGEN
335a300,493
> /** \remark Has the same ABI as `uint8_t` **/
> #ifdef DOXYGEN
> typedef enum FvmRegisteredVersion
> #else
> typedef uint8_t FvmRegisteredVersion_t; enum
> #endif
> {
> /** . */
> FVM_REGISTERED_VERSION_V1,
> }
> #ifdef DOXYGEN
> FvmRegisteredVersion_t
> #endif
> ;
>
> typedef struct InnerFvmMachine InnerFvmMachine_t;
>
> typedef struct Result_InnerFvmMachine_ptr {
>
> FCPResponseStatus_t status_code;
>
> slice_boxed_uint8_t error_msg;
>
> InnerFvmMachine_t * value;
>
> } Result_InnerFvmMachine_ptr_t;
>
> /** \brief
> * Note: the incoming args as u64 and odd conversions to i32/i64
> * for some types are due to the generated bindings not liking the

> * 32bit types as incoming args
> *
> */
> Result_InnerFvmMachine_ptr_t * create_fvm_machine (
> FvmRegisteredVersion_t fvm_version,
> uint64_t chain_epoch,
> uint64_t chain_timestamp,
> uint64_t base_fee_hi,
> uint64_t base_fee_lo,
> uint64_t base_circ_supply_hi,
> uint64_t base_circ_supply_lo,
> uint64_t network_version,
> slice_ref_uint8_t state_root,
> slice_ref_uint8_t manifest_cid,
> bool tracing,
> uint64_t blockstore_id,
> uint64_t externs_id);
>
> Result_InnerFvmMachine_ptr_t * create_fvm_debug_machine (
> FvmRegisteredVersion_t fvm_version,
> uint64_t chain_epoch,
> uint64_t chain_timestamp,
> uint64_t base_fee_hi,
> uint64_t base_fee_lo,
> uint64_t base_circ_supply_hi,
> uint64_t base_circ_supply_lo,
> uint64_t network_version,
> slice_ref_uint8_t state_root,
> slice_ref_uint8_t actor_redirect,
> bool tracing,
> uint64_t blockstore_id,
> uint64_t externs_id);
>
> typedef struct FvmMachineExecuteResponse {
>
> uint64_t exit_code;
>
> slice_boxed_uint8_t return_val;
>
> uint64_t gas_used;
>
> uint64_t penalty_hi;
>
> uint64_t penalty_lo;
>
> uint64_t miner_tip_hi;
>
> uint64_t miner_tip_lo;
>
> uint64_t base_fee_burn_hi;
>
> uint64_t base_fee_burn_lo;
>
> uint64_t over_estimation_burn_hi;
>
> uint64_t over_estimation_burn_lo;
>
> uint64_t refund_hi;
>
> uint64_t refund_lo;
>
> int64_t gas_refund;
>
> int64_t gas_burned;
>
> slice_boxed_uint8_t exec_trace;
>
> slice_boxed_uint8_t failure_info;
>
> slice_boxed_uint8_t events;
>
> slice_boxed_uint8_t events_root;
>
> } FvmMachineExecuteResponse_t;
>
> typedef struct Result_FvmMachineExecuteResponse {
>
> FCPResponseStatus_t status_code;
>
> slice_boxed_uint8_t error_msg;
>
> FvmMachineExecuteResponse_t value;
>
> } Result_FvmMachineExecuteResponse_t;
>
> Result_FvmMachineExecuteResponse_t * fvm_machine_execute_message (
> InnerFvmMachine_t const * executor,
> slice_ref_uint8_t message,
> uint64_t chain_len,
> uint64_t apply_kind);
>
> typedef struct Result_slice_boxed_uint8 {
>
> FCPResponseStatus_t status_code;
>
> slice_boxed_uint8_t error_msg;
>
> slice_boxed_uint8_t value;
>
> } Result_slice_boxed_uint8_t;
>
> Result_slice_boxed_uint8_t * fvm_machine_flush (
> InnerFvmMachine_t const * executor);
>
> /** \brief
> * Destroys the passed in `repr_c::Box<$type>`.
> */
> void drop_fvm_machine (
> InnerFvmMachine_t * ptr);
>
> /** \brief
> * Destroys the passed in `repr_c::Box<$type>`.
> */
> void destroy_create_fvm_machine_response (
> Result_InnerFvmMachine_ptr_t * ptr);
>
> /** \brief
> * Destroys the passed in `repr_c::Box<$type>`.
> */
> void destroy_fvm_machine_execute_response (
> Result_FvmMachineExecuteResponse_t * ptr);
>
> /** \brief
> * Destroys the passed in `repr_c::Box<$type>`.
> */
> void destroy_fvm_machine_flush_response (
> Result_slice_boxed_uint8_t * ptr);
>
> /** \remark Has the same ABI as `int32_t` **/
> #ifdef DOXYGEN
> typedef enum FvmError
> #else
> typedef int32_t FvmError_t; enum
> #endif
> {
> /** \brief
> * The error code returned by cgo if the blockstore handle isn't valid.
> */
> FVM_ERROR_INVALID_HANDLE = -1,
> /** \brief
> * The error code returned by cgo when the block isn't found.
> */
> FVM_ERROR_NOT_FOUND = -2,
> /** \brief
> * The error code returned by cgo when there's some underlying system error.
> */
> FVM_ERROR_IO = -3,
> /** \brief
> * The error code returned by cgo when an argument is invalid.
> */
> FVM_ERROR_INVALID_ARGUMENT = -4,
> /** \brief
> * The error code returned by cgo when the application panics.
> */
> FVM_ERROR_PANIC = -5,
> }
> #ifdef DOXYGEN
> FvmError_t
> #endif
> ;
>
> void dummy (
> FvmError_t _error);
>
514,523d671
< typedef struct Result_slice_boxed_uint8 {
<
< FCPResponseStatus_t status_code;
<
< slice_boxed_uint8_t error_msg;
<
< slice_boxed_uint8_t value;
<
< } Result_slice_boxed_uint8_t;
<
1636,1777d1783
<
< /** \remark Has the same ABI as `uint8_t` **/
< #ifdef DOXYGEN
< typedef enum FvmRegisteredVersion
< #else
< typedef uint8_t FvmRegisteredVersion_t; enum
< #endif
< {
< /** . */
< FVM_REGISTERED_VERSION_V1,
< }
< #ifdef DOXYGEN
< FvmRegisteredVersion_t
< #endif
< ;
<
< typedef struct InnerFvmMachine InnerFvmMachine_t;
<
< typedef struct Result_InnerFvmMachine_ptr {
<
< FCPResponseStatus_t status_code;
<
< slice_boxed_uint8_t error_msg;
<
< InnerFvmMachine_t * value;
<
< } Result_InnerFvmMachine_ptr_t;
<
< /** \brief
< * Note: the incoming args as u64 and odd conversions to i32/i64
< * for some types is due to the generated bindings not liking the
< * 32bit types as incoming args
< *
< */
< Result_InnerFvmMachine_ptr_t * create_fvm_machine (
< FvmRegisteredVersion_t fvm_version,
< uint64_t chain_epoch,
< uint64_t base_fee_hi,
< uint64_t base_fee_lo,
< uint64_t base_circ_supply_hi,
< uint64_t base_circ_supply_lo,
< uint64_t network_version,
< slice_ref_uint8_t state_root,
< slice_ref_uint8_t manifest_cid,
< bool tracing,
< uint64_t blockstore_id,
< uint64_t externs_id);
<
< Result_InnerFvmMachine_ptr_t * create_fvm_debug_machine (
< FvmRegisteredVersion_t fvm_version,
< uint64_t chain_epoch,
< uint64_t base_fee_hi,
< uint64_t base_fee_lo,
< uint64_t base_circ_supply_hi,
< uint64_t base_circ_supply_lo,
< uint64_t network_version,
< slice_ref_uint8_t state_root,
< slice_ref_uint8_t actor_redirect,
< bool tracing,
< uint64_t blockstore_id,
< uint64_t externs_id);
<
< typedef struct FvmMachineExecuteResponse {
<
< uint64_t exit_code;
<
< slice_boxed_uint8_t return_val;
<
< uint64_t gas_used;
<
< uint64_t penalty_hi;
<
< uint64_t penalty_lo;
<
< uint64_t miner_tip_hi;
<
< uint64_t miner_tip_lo;
<
< uint64_t base_fee_burn_hi;
<
< uint64_t base_fee_burn_lo;
<
< uint64_t over_estimation_burn_hi;
<
< uint64_t over_estimation_burn_lo;
<
< uint64_t refund_hi;
<
< uint64_t refund_lo;
<
< int64_t gas_refund;
<
< int64_t gas_burned;
<
< slice_boxed_uint8_t exec_trace;
<
< slice_boxed_uint8_t failure_info;
<
< } FvmMachineExecuteResponse_t;
<
< typedef struct Result_FvmMachineExecuteResponse {
<
< FCPResponseStatus_t status_code;
<
< slice_boxed_uint8_t error_msg;
<
< FvmMachineExecuteResponse_t value;
<
< } Result_FvmMachineExecuteResponse_t;
<
< Result_FvmMachineExecuteResponse_t * fvm_machine_execute_message (
< InnerFvmMachine_t const * executor,
< slice_ref_uint8_t message,
< uint64_t chain_len,
< uint64_t apply_kind);
<
< Result_slice_boxed_uint8_t * fvm_machine_flush (
< InnerFvmMachine_t const * executor);
<
< /** \brief
< * Destroys the passed in `repr_c::Box<$type>`.
< */
< void drop_fvm_machine (
< InnerFvmMachine_t * ptr);
<
< /** \brief
< * Destroys the passed in `repr_c::Box<$type>`.
< */
< void destroy_create_fvm_machine_response (
< Result_InnerFvmMachine_ptr_t * ptr);
<
< /** \brief
< * Destroys the passed in `repr_c::Box<$type>`.
< */
< void destroy_fvm_machine_execute_response (
< Result_FvmMachineExecuteResponse_t * ptr);
<
< /** \brief
< * Destroys the passed in `repr_c::Box<$type>`.
< */
< void destroy_fvm_machine_flush_response (
< Result_slice_boxed_uint8_t * ptr);
diff -r --color a/vendor/github.com/filecoin-project/filecoin-ffi/filcrypto.pc b/vendor/github.com/filecoin-project/filecoin-ffi/filcrypto.pc
2c2
< Version: f0a7de6991e037a7c355fdb62a0f1ae7fb7324e7
---
> Version: bad0c9c81dc8a1415f214f58e62c785dbf1c775f
diff -r --color a/vendor/github.com/filecoin-project/filecoin-ffi/fvm.go b/vendor/github.com/filecoin-project/filecoin-ffi/fvm.go
13a14
> "fmt"
38a40
> Timestamp uint64
65a68
> opts.Timestamp,
78a82
> opts.Timestamp,
86c90
< opts.Tracing,
---
> true,
129,142c133
< return &ApplyRet{
< Return: resp.ReturnVal,
< ExitCode: resp.ExitCode,
< GasUsed: int64(resp.GasUsed),
< MinerPenalty: reformBigInt(resp.PenaltyHi, resp.PenaltyLo),
< MinerTip: reformBigInt(resp.MinerTipHi, resp.MinerTipLo),
< BaseFeeBurn: reformBigInt(resp.BaseFeeBurnHi, resp.BaseFeeBurnLo),
< OverEstimationBurn: reformBigInt(resp.OverEstimationBurnHi, resp.OverEstimationBurnLo),
< Refund: reformBigInt(resp.RefundHi, resp.RefundLo),
< GasRefund: int64(resp.GasRefund),
< GasBurned: int64(resp.GasBurned),
< ExecTraceBytes: resp.ExecTrace,
< FailureInfo: resp.FailureInfo,
< }, nil
---
> return buildResponse(resp)
156a148,160
> return buildResponse(resp)
> }
>
> func buildResponse(resp cgo.FvmMachineExecuteResponseGo) (*ApplyRet, error) {
> var eventsRoot *cid.Cid
> if len(resp.EventsRoot) > 0 {
> if eventsRootCid, err := cid.Cast(resp.EventsRoot); err != nil {
> return nil, fmt.Errorf("failed to cast events root CID: %w", err)
> } else {
> eventsRoot = &eventsRootCid
> }
> }
>
169a174,175
> EventsRoot: eventsRoot,
> EventsBytes: resp.Events,
195a202,203
> EventsRoot *cid.Cid
> EventsBytes []byte
Binary files a/vendor/github.com/filecoin-project/filecoin-ffi/libfilcrypto.a and b/vendor/github.com/filecoin-project/filecoin-ffi/libfilcrypto.a differ
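[editor's note] The hi/lo uint64 pairs in FvmMachineExecuteResponse above exist because the FFI boundary cannot return 128-bit token amounts directly; fvm.go recombines them via reformBigInt. Its body is not part of this diff, so the following is only a sketch of the obvious recombination (hi<<64 | lo) using math/big; the real helper returns a Filecoin big-int type.

package main

import (
    "fmt"
    "math/big"
)

// recombine joins a 128-bit value split into two uint64 halves, the way
// penalty_hi/penalty_lo and friends are consumed. Illustrative only; the
// vendored reformBigInt is not shown in this diff.
func recombine(hi, lo uint64) *big.Int {
    n := new(big.Int).SetUint64(hi)
    n.Lsh(n, 64)
    return n.Or(n, new(big.Int).SetUint64(lo))
}

func main() {
    fmt.Println(recombine(1, 2)) // 18446744073709551618, i.e. 2^64 + 2
}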
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/piecestore/types.go b/vendor/github.com/filecoin-project/go-fil-markets/piecestore/types.go
55a56,59
> func (pi PieceInfo) Defined() bool {
> return pi.PieceCID.Defined() || len(pi.Deals) > 0
> }
>
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/client.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/client.go
6d5
< "fmt"
259,266c258
< // Check if there's already an active retrieval deal with the same peer
< // for the same payload CID
< err := c.checkForActiveDeal(payloadCID, p.ID)
< if err != nil {
< return 0, err
< }
<
< err = c.addMultiaddrs(ctx, p)
---
> err := c.addMultiaddrs(ctx, p)
308,332d299
< }
<
< // Check if there's already an active retrieval deal with the same peer
< // for the same payload CID
< func (c *Client) checkForActiveDeal(payloadCID cid.Cid, pid peer.ID) error {
< var deals []retrievalmarket.ClientDealState
< err := c.stateMachines.List(&deals)
< if err != nil {
< return err
< }
<
< for _, deal := range deals {
< match := deal.Sender == pid && deal.PayloadCID == payloadCID
< active := !clientstates.IsFinalityState(deal.Status)
< if match && active {
< msg := fmt.Sprintf("there is an active retrieval deal with peer %s ", pid)
< msg += fmt.Sprintf("for payload CID %s ", payloadCID)
< msg += fmt.Sprintf("(retrieval deal ID %d, state %s) - ",
< deal.ID, retrievalmarket.DealStatuses[deal.Status])
< msg += "existing deal must be cancelled before starting a new retrieval deal"
< err := xerrors.Errorf(msg)
< return err
< }
< }
< return nil
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/provider_environments.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/provider_environments.go
5a6
> "fmt"
34,36c35,43
< storageDeals, err := pve.p.storageDealsForPiece(pieceCid != nil, payloadCid, piece)
< if err != nil {
< return retrievalmarket.Ask{}, xerrors.Errorf("failed to fetch deals for payload: %w", err)
---
> pieces, piecesErr := pve.p.getAllPieceInfoForPayload(payloadCid)
> // err may be non-nil, but we may have successfully found >0 pieces, so defer error handling until
> // we have no other option.
> storageDeals := pve.p.getStorageDealsForPiece(pieceCid != nil, pieces, piece)
> if len(storageDeals) == 0 {
> if piecesErr != nil {
> return retrievalmarket.Ask{}, fmt.Errorf("failed to fetch deals for payload [%s]: %w", payloadCid.String(), piecesErr)
> }
> return retrievalmarket.Ask{}, fmt.Errorf("no storage deals found for payload [%s]", payloadCid.String())
57c64,74
< return pve.p.getPieceInfoFromCid(context.TODO(), c, inPieceCid)
---
> pieces, piecesErr := pve.p.getAllPieceInfoForPayload(c)
> // err may be non-nil, but we may have successfully found >0 pieces, so defer error handling until
> // we have no other option.
> pieceInfo, isUnsealed := pve.p.getBestPieceInfoMatch(context.TODO(), pieces, inPieceCid)
> if pieceInfo.Defined() {
> return pieceInfo, isUnsealed, nil
> }
> if piecesErr != nil {
> return piecestore.PieceInfoUndefined, false, piecesErr
> }
> return piecestore.PieceInfoUndefined, false, fmt.Errorf("unknown pieceCID %s", pieceCID.String())
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/provider.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/provider.go
34a35,42
> // MaxIdentityCIDBytes is the largest identity CID as a PayloadCID that we are
> // willing to decode
> const MaxIdentityCIDBytes = 2 << 10
>
> // MaxIdentityCIDLinks is the maximum number of links contained within an
> // identity CID that we are willing to check for matching pieces
> const MaxIdentityCIDLinks = 32
>
56a65
> subQueryEvt *pubsub.PubSub
126a136
> subQueryEvt: pubsub.New(queryEvtDispatcher),
251a262,267
> // SubscribeToValidationEvents subscribes to an event that is fired when the
> // provider validates a request for data
> func (p *Provider) SubscribeToValidationEvents(subscriber retrievalmarket.ProviderValidationSubscriber) retrievalmarket.Unsubscribe {
> return p.requestValidator.Subscribe(subscriber)
> }
>
277a294,312
> // SubscribeToQueryEvents subscribes to an event that is fired when a message
> // is received on the query protocol
> func (p *Provider) SubscribeToQueryEvents(subscriber retrievalmarket.ProviderQueryEventSubscriber) retrievalmarket.Unsubscribe {
> return retrievalmarket.Unsubscribe(p.subQueryEvt.Subscribe(subscriber))
> }
>
> func queryEvtDispatcher(evt pubsub.Event, subscriberFn pubsub.SubscriberFn) error {
> e, ok := evt.(retrievalmarket.ProviderQueryEvent)
> if !ok {
> return errors.New("wrong type of event")
> }
> cb, ok := subscriberFn.(retrievalmarket.ProviderQueryEventSubscriber)
> if !ok {
> return errors.New("wrong type of callback")
> }
> cb(e)
> return nil
> }
>
303a339,341
> msgEvt := retrievalmarket.ProviderQueryEvent{
> Response: resp,
> }
305c343,345
< log.Errorf("Retrieval query: writing query response: %s", err)
---
> err = fmt.Errorf("Retrieval query: writing query response: %w", err)
> log.Error(err)
> msgEvt.Error = err
306a347
> p.subQueryEvt.Publish(msgEvt)
319c360,362
< log.Errorf("Retrieval query: GetChainHead: %s", err)
---
> err = fmt.Errorf("Retrieval query: GetChainHead: %w", err)
> log.Error(err)
> p.subQueryEvt.Publish(retrievalmarket.ProviderQueryEvent{Error: err})
341,347c384,398
< pieceInfo, isUnsealed, err := p.getPieceInfoFromCid(ctx, query.PayloadCID, pieceCID)
< if err != nil {
< log.Errorf("Retrieval query: getPieceInfoFromCid: %s", err)
< if !xerrors.Is(err, retrievalmarket.ErrNotFound) {
< answer.Status = retrievalmarket.QueryResponseError
< answer.Message = fmt.Sprintf("failed to fetch piece to retrieve from: %s", err)
< } else {
---
>
> pieces, piecesErr := p.getAllPieceInfoForPayload(query.PayloadCID)
> // err may be non-nil, but we may have successfully found >0 pieces, so defer error handling until
> // we have no other option.
>
> pieceInfo, isUnsealed := p.getBestPieceInfoMatch(ctx, pieces, pieceCID)
> if !pieceInfo.Defined() {
> if piecesErr != nil {
> log.Errorf("Retrieval query: getPieceInfoFromCid: %s", piecesErr)
> if !errors.Is(piecesErr, retrievalmarket.ErrNotFound) {
> answer.Status = retrievalmarket.QueryResponseError
> answer.Message = fmt.Sprintf("failed to fetch piece to retrieve from: %s", piecesErr)
> }
> }
> if answer.Message == "" {
350d400
<
359,360c409,411
< storageDeals, err := p.storageDealsForPiece(query.PieceCID != nil, query.PayloadCID, pieceInfo)
< if err != nil {
---
> storageDeals := p.getStorageDealsForPiece(query.PieceCID != nil, pieces, pieceInfo)
>
> if len(storageDeals) == 0 {
363c414,418
< answer.Message = fmt.Sprintf("failed to fetch storage deals containing payload: %s", err)
---
> if piecesErr != nil {
> answer.Message = fmt.Sprintf("failed to fetch storage deals containing payload [%s]: %s", query.PayloadCID.String(), piecesErr.Error())
> } else {
> answer.Message = fmt.Sprintf("failed to fetch storage deals containing payload [%s]", query.PayloadCID.String())
> }
391,522d445
< }
<
< // Given the CID of a block, find a piece that contains that block.
< // If the client has specified which piece they want, return that piece.
< // Otherwise prefer pieces that are already unsealed.
< func (p *Provider) getPieceInfoFromCid(ctx context.Context, payloadCID, clientPieceCID cid.Cid) (piecestore.PieceInfo, bool, error) {
< // Get all pieces that contain the target block
< piecesWithTargetBlock, err := p.dagStore.GetPiecesContainingBlock(payloadCID)
< if err != nil {
< return piecestore.PieceInfoUndefined, false, xerrors.Errorf("getting pieces for cid %s: %w", payloadCID, err)
< }
<
< // For each piece that contains the target block
< var lastErr error
< var sealedPieceInfo *piecestore.PieceInfo
< for _, pieceWithTargetBlock := range piecesWithTargetBlock {
< // Get the deals for the piece
< pieceInfo, err := p.pieceStore.GetPieceInfo(pieceWithTargetBlock)
< if err != nil {
< lastErr = err
< continue
< }
<
< // if client wants to retrieve the payload from a specific piece, just return that piece.
< if clientPieceCID.Defined() && pieceInfo.PieceCID.Equals(clientPieceCID) {
< return pieceInfo, p.pieceInUnsealedSector(ctx, pieceInfo), nil
< }
<
< // if client doesn't have a preference for a particular piece, prefer a piece
< // for which an unsealed sector exists.
< if clientPieceCID.Equals(cid.Undef) {
< if p.pieceInUnsealedSector(ctx, pieceInfo) {
< // The piece is in an unsealed sector, so just return it
< return pieceInfo, true, nil
< }
<
< if sealedPieceInfo == nil {
< // The piece is not in an unsealed sector, so save it but keep
< // checking other pieces to see if there is one that is in an
< // unsealed sector
< sealedPieceInfo = &pieceInfo
< }
< }
<
< }
<
< // Found a piece containing the target block, piece is in a sealed sector
< if sealedPieceInfo != nil {
< return *sealedPieceInfo, false, nil
< }
<
< // Couldn't find a piece containing the target block
< if lastErr == nil {
< lastErr = xerrors.Errorf("unknown pieceCID %s", clientPieceCID.String())
< }
<
< // Error finding a piece containing the target block
< return piecestore.PieceInfoUndefined, false, xerrors.Errorf("could not locate piece: %w", lastErr)
< }
<
< func (p *Provider) pieceInUnsealedSector(ctx context.Context, pieceInfo piecestore.PieceInfo) bool {
< for _, di := range pieceInfo.Deals {
< isUnsealed, err := p.sa.IsUnsealed(ctx, di.SectorID, di.Offset.Unpadded(), di.Length.Unpadded())
< if err != nil {
< log.Errorf("failed to find out if sector %d is unsealed, err=%s", di.SectorID, err)
< continue
< }
< if isUnsealed {
< return true
< }
< }
<
< return false
< }
<
< func (p *Provider) storageDealsForPiece(clientSpecificPiece bool, payloadCID cid.Cid, pieceInfo piecestore.PieceInfo) ([]abi.DealID, error) {
< var storageDeals []abi.DealID
< var err error
< if clientSpecificPiece {
< // If the user wants to retrieve the payload from a specific piece,
< // we only need to inspect storage deals made for that piece to quote a price.
< for _, d := range pieceInfo.Deals {
< storageDeals = append(storageDeals, d.DealID)
< }
< } else {
< // If the user does NOT want to retrieve from a specific piece, we'll have to inspect all storage deals
< // made for that piece to quote a price.
< storageDeals, err = p.getAllDealsContainingPayload(payloadCID)
< if err != nil {
< return nil, xerrors.Errorf("failed to fetch deals for payload: %w", err)
< }
< }
<
< if len(storageDeals) == 0 {
< return nil, xerrors.New("no storage deals found")
< }
<
< return storageDeals, nil
< }
<
< func (p *Provider) getAllDealsContainingPayload(payloadCID cid.Cid) ([]abi.DealID, error) {
< // Get all pieces that contain the target block
< piecesWithTargetBlock, err := p.dagStore.GetPiecesContainingBlock(payloadCID)
< if err != nil {
< return nil, xerrors.Errorf("getting pieces for cid %s: %w", payloadCID, err)
< }
<
< // For each piece that contains the target block
< var lastErr error
< var dealsIds []abi.DealID
< for _, pieceWithTargetBlock := range piecesWithTargetBlock {
< // Get the deals for the piece
< pieceInfo, err := p.pieceStore.GetPieceInfo(pieceWithTargetBlock)
< if err != nil {
< lastErr = err
< continue
< }
<
< for _, d := range pieceInfo.Deals {
< dealsIds = append(dealsIds, d.DealID)
< }
< }
<
< if lastErr == nil && len(dealsIds) == 0 {
< return nil, xerrors.New("no deals found")
< }
<
< if lastErr != nil && len(dealsIds) == 0 {
< return nil, xerrors.Errorf("failed to fetch deals containing payload %s: %w", payloadCID, lastErr)
< }
<
< return dealsIds, nil
Only in b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl: provider_pieces.go
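[editor's note] The SubscribeToQueryEvents hook added above gives callers visibility into query-protocol traffic without touching the deal flow. A hypothetical consumer, assuming (as the retrievalmarket/provider.go diff further down suggests) that the method lands on the RetrievalProvider interface; the helper name and the printing are illustrative, only the subscription API comes from the diff.

package main

import (
    "fmt"

    "github.com/filecoin-project/go-fil-markets/retrievalmarket"
)

// watchQueries registers a query-event subscriber on a provider and returns
// the unsubscribe function, using only the API added in this diff
// (SubscribeToQueryEvents, ProviderQueryEvent, Unsubscribe).
func watchQueries(p retrievalmarket.RetrievalProvider) retrievalmarket.Unsubscribe {
    return p.SubscribeToQueryEvents(func(evt retrievalmarket.ProviderQueryEvent) {
        if evt.Error != nil {
            fmt.Println("retrieval query failed:", evt.Error)
            return
        }
        fmt.Println("retrieval query answered with status", evt.Response.Status)
    })
}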
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/requestvalidation/requestvalidation.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/requestvalidation/requestvalidation.go
8a9
> "github.com/hannahhoward/go-pubsub"
49c50,51
< env ValidationEnvironment
---
> env ValidationEnvironment
> psub *pubsub.PubSub
54c56,59
< return &ProviderRequestValidator{env}
---
> return &ProviderRequestValidator{
> env: env,
> psub: pubsub.New(queryValidationDispatcher),
> }
75a81,89
> rv.psub.Publish(retrievalmarket.ProviderValidationEvent{
> IsRestart: isRestart,
> Receiver: receiver,
> Proposal: proposal,
> BaseCid: baseCid,
> Selector: selector,
> Response: response,
> Error: err,
> })
196a211,227
> }
>
> func (rv *ProviderRequestValidator) Subscribe(subscriber retrievalmarket.ProviderValidationSubscriber) retrievalmarket.Unsubscribe {
> return retrievalmarket.Unsubscribe(rv.psub.Subscribe(subscriber))
> }
>
> func queryValidationDispatcher(evt pubsub.Event, subscriberFn pubsub.SubscriberFn) error {
> e, ok := evt.(retrievalmarket.ProviderValidationEvent)
> if !ok {
> return errors.New("wrong type of event")
> }
> cb, ok := subscriberFn.(retrievalmarket.ProviderValidationSubscriber)
> if !ok {
> return errors.New("wrong type of callback")
> }
> cb(e)
> return nil
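[editor's note] Both new event streams (query events in provider.go above, validation events here) reuse the same hannahhoward/go-pubsub shape: the bus is untyped, so each stream supplies a dispatcher that type-asserts the event and the callback before invoking it. A minimal self-contained version of the pattern, with a made-up event type:

package main

import (
    "errors"
    "fmt"

    pubsub "github.com/hannahhoward/go-pubsub"
)

type pingEvent struct{ Msg string }

type pingSubscriber func(pingEvent)

// pingDispatcher is the same dispatcher shape as queryEvtDispatcher and
// queryValidationDispatcher in the diff: assert both sides, then call.
func pingDispatcher(evt pubsub.Event, fn pubsub.SubscriberFn) error {
    e, ok := evt.(pingEvent)
    if !ok {
        return errors.New("wrong type of event")
    }
    cb, ok := fn.(pingSubscriber)
    if !ok {
        return errors.New("wrong type of callback")
    }
    cb(e)
    return nil
}

func main() {
    ps := pubsub.New(pingDispatcher)
    unsub := ps.Subscribe(pingSubscriber(func(e pingEvent) { fmt.Println(e.Msg) }))
    defer unsub()
    _ = ps.Publish(pingEvent{Msg: "hello"}) // prints "hello"
}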
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/migrations/maptypes/maptypes_cbor_gen.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/migrations/maptypes/maptypes_cbor_gen.go
14c14
< peer "github.com/libp2p/go-libp2p-core/peer"
---
> peer "github.com/libp2p/go-libp2p/core/peer"
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/migrations/migrations_cbor_gen.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/migrations/migrations_cbor_gen.go
15c15
< peer "github.com/libp2p/go-libp2p-core/peer"
---
> peer "github.com/libp2p/go-libp2p/core/peer"
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/provider.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/provider.go
5a6,7
> "github.com/filecoin-project/go-state-types/abi"
>
11a14,19
> // ProviderQueryEventSubscriber is a callback that is registered to listen for query message events
> type ProviderQueryEventSubscriber func(evt ProviderQueryEvent)
>
> // ProviderValidationSubscriber is a callback that is registered to listen for validation events
> type ProviderValidationSubscriber func(evt ProviderValidationEvent)
>
29a38,41
> // GetDynamicAsk quotes a dynamic price for the retrieval deal by calling the user configured
> // dynamic pricing function. It passes the static price parameters set in the Ask Store to the pricing function.
> GetDynamicAsk(ctx context.Context, input PricingInput, storageDeals []abi.DealID) (Ask, error)
>
31a44,51
>
> // SubscribeToQueryEvents subscribes to an event that is fired when a message
> // is received on the query protocol
> SubscribeToQueryEvents(subscriber ProviderQueryEventSubscriber) Unsubscribe
>
> // SubscribeToValidationEvents subscribes to an event that is fired when the
> // provider validates a request for data
> SubscribeToValidationEvents(subscriber ProviderValidationSubscriber) Unsubscribe
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/types_cbor_gen.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/types_cbor_gen.go
15c15
< peer "github.com/libp2p/go-libp2p-core/peer"
---
> peer "github.com/libp2p/go-libp2p/core/peer"
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/types.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/types.go
74a75,89
> type ProviderQueryEvent struct {
> Response QueryResponse
> Error error
> }
>
> type ProviderValidationEvent struct {
> IsRestart bool
> Receiver peer.ID
> Proposal *DealProposal
> BaseCid cid.Cid
> Selector ipld.Node
> Response *DealResponse
> Error error
> }
>
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/generators.go b/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/generators.go
18d17
< "github.com/filecoin-project/go-state-types/builtin/v8/market"
19a19
> "github.com/filecoin-project/go-state-types/builtin/v9/market"
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/mockindexprovider.go b/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/mockindexprovider.go
7a8
> "github.com/libp2p/go-libp2p-core/peer"
35c36
< func (m *MockIndexProvider) NotifyPut(ctx context.Context, contextID []byte, metadata metadata.Metadata) (cid.Cid, error) {
---
> func (m *MockIndexProvider) NotifyPut(ctx context.Context, addr *peer.AddrInfo, contextID []byte, metadata metadata.Metadata) (cid.Cid, error) {
44c45
< func (m *MockIndexProvider) NotifyRemove(ctx context.Context, contextID []byte) (cid.Cid, error) {
---
> func (m *MockIndexProvider) NotifyRemove(ctx context.Context, p peer.ID, contextID []byte) (cid.Cid, error) {
Only in b/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil: test_identitycid.go
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/client.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/client.go
22c22
< "github.com/filecoin-project/go-state-types/builtin/v8/market"
---
> "github.com/filecoin-project/go-state-types/builtin/v9/market"
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientutils/clientutils.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientutils/clientutils.go
18c18
< "github.com/filecoin-project/go-state-types/builtin/v8/market"
---
> "github.com/filecoin-project/go-state-types/builtin/v9/market"
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/provider_environments.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/provider_environments.go
17d16
< "github.com/filecoin-project/index-provider/metadata"
43,47c42
< mt := metadata.New(&metadata.GraphsyncFilecoinV1{
< PieceCID: deal.Proposal.PieceCID,
< FastRetrieval: deal.FastRetrieval,
< VerifiedDeal: deal.Proposal.VerifiedDeal,
< })
---
> mt := p.p.metadataForDeal(deal)
55c50
< return p.p.indexProvider.NotifyPut(ctx, deal.ProposalCid.Bytes(), mt)
---
> return p.p.indexProvider.NotifyPut(ctx, nil, deal.ProposalCid.Bytes(), mt)
59c54
< _, err := p.p.indexProvider.NotifyRemove(ctx, proposalCid.Bytes())
---
> _, err := p.p.indexProvider.NotifyRemove(ctx, peer.ID(""), proposalCid.Bytes())
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/provider.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/provider.go
4a5
> "errors"
14a16
> "github.com/libp2p/go-libp2p-core/peer"
61a64,73
> type MetadataFunc func(storagemarket.MinerDeal) metadata.Metadata
>
> func defaultMetadataFunc(deal storagemarket.MinerDeal) metadata.Metadata {
> return metadata.Default.New(&metadata.GraphsyncFilecoinV1{
> PieceCID: deal.Proposal.PieceCID,
> FastRetrieval: deal.FastRetrieval,
> VerifiedDeal: deal.Proposal.VerifiedDeal,
> })
> }
>
83,85c95,98
< dagStore stores.DAGStoreWrapper
< indexProvider provider.Interface
< stores *stores.ReadWriteBlockstores
---
> dagStore stores.DAGStoreWrapper
> indexProvider provider.Interface
> metadataForDeal MetadataFunc
> stores *stores.ReadWriteBlockstores
115a129,134
> func CustomMetadataGenerator(metadataFunc MetadataFunc) StorageProviderOption {
> return func(p *Provider) {
> p.metadataForDeal = metadataFunc
> }
> }
>
145a165
> metadataForDeal: defaultMetadataFunc,
544,549d563
< mt := metadata.New(&metadata.GraphsyncFilecoinV1{
< PieceCID: deal.Proposal.PieceCID,
< FastRetrieval: deal.FastRetrieval,
< VerifiedDeal: deal.Proposal.VerifiedDeal,
< })
<
554c568
< annCid, err := p.indexProvider.NotifyPut(ctx, deal.ProposalCid.Bytes(), mt)
---
> annCid, err := p.indexProvider.NotifyPut(ctx, nil, deal.ProposalCid.Bytes(), p.metadataForDeal(deal))
594,595c608,612
< merr = multierror.Append(merr, err)
< log.Errorw("failed to announce deal to Index provider", "proposalCid", d.ProposalCid, "err", err)
---
> // don't log already advertised errors as errors - just skip them
> if !errors.Is(err, provider.ErrAlreadyAdvertised) {
> merr = multierror.Append(merr, err)
> log.Errorw("failed to announce deal to Index provider", "proposalCid", d.ProposalCid, "err", err)
> }
790c807
< p.indexProvider.RegisterMultihashLister(func(ctx context.Context, contextID []byte) (provider.MultihashIterator, error) {
---
> p.indexProvider.RegisterMultihashLister(func(ctx context.Context, pid peer.ID, contextID []byte) (provider.MultihashIterator, error) {
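[editor's note] The CustomMetadataGenerator option above lets an integrator replace defaultMetadataFunc when constructing the storage provider. The sketch below type-checks against the API added in this diff; the metadata calls are copied from defaultMetadataFunc, while wiring it into the provider constructor is left as the assumed usage.

package example

import (
    "github.com/filecoin-project/go-fil-markets/storagemarket"
    storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl"
    "github.com/filecoin-project/index-provider/metadata"
)

// myMetadata mirrors defaultMetadataFunc from the diff; a real integrator
// could derive these fields differently per deal.
func myMetadata(deal storagemarket.MinerDeal) metadata.Metadata {
    return metadata.Default.New(&metadata.GraphsyncFilecoinV1{
        PieceCID:      deal.Proposal.PieceCID,
        FastRetrieval: deal.FastRetrieval,
        VerifiedDeal:  deal.Proposal.VerifiedDeal,
    })
}

// The option would be passed alongside the other options when constructing
// the provider; this assignment just proves it satisfies the new option type.
var _ storageimpl.StorageProviderOption = storageimpl.CustomMetadataGenerator(myMetadata)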
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates/provider_fsm.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates/provider_fsm.go
280d279
< storagemarket.StorageDealStaged,
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates/provider_states.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates/provider_states.go
18d17
< "github.com/filecoin-project/go-state-types/builtin/v8/market"
19a19
> "github.com/filecoin-project/go-state-types/builtin/v9/market"
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerutils/providerutils.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerutils/providerutils.go
12c12
< "github.com/filecoin-project/go-state-types/builtin/v8/market"
---
> "github.com/filecoin-project/go-state-types/builtin/v9/market"
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/migrations/migrations_cbor_gen.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/migrations/migrations_cbor_gen.go
13c13
< market "github.com/filecoin-project/go-state-types/builtin/v8/market"
---
> market "github.com/filecoin-project/go-state-types/builtin/v9/market"
16c16
< peer "github.com/libp2p/go-libp2p-core/peer"
---
> peer "github.com/libp2p/go-libp2p/core/peer"
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/migrations/migrations.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/migrations/migrations.go
16c16
< "github.com/filecoin-project/go-state-types/builtin/v8/market"
---
> "github.com/filecoin-project/go-state-types/builtin/v9/market"
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/migrations/migrations_mapenc_types_cbor_gen.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/migrations/migrations_mapenc_types_cbor_gen.go
17c17
< peer "github.com/libp2p/go-libp2p-core/peer"
---
> peer "github.com/libp2p/go-libp2p/core/peer"
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/types_cbor_gen.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/types_cbor_gen.go
12c12
< market "github.com/filecoin-project/go-state-types/builtin/v8/market"
---
> market "github.com/filecoin-project/go-state-types/builtin/v9/market"
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/types.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/types.go
7c7
< "github.com/filecoin-project/go-state-types/builtin/v8/market"
---
> "github.com/filecoin-project/go-state-types/builtin/v9/market"
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/nodes.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/nodes.go
10d9
< "github.com/filecoin-project/go-state-types/builtin/v8/market"
11a11
> "github.com/filecoin-project/go-state-types/builtin/v9/market"
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/types_cbor_gen.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/types_cbor_gen.go
14c14
< market "github.com/filecoin-project/go-state-types/builtin/v8/market"
---
> market "github.com/filecoin-project/go-state-types/builtin/v9/market"
17c17
< peer "github.com/libp2p/go-libp2p-core/peer"
---
> peer "github.com/libp2p/go-libp2p/core/peer"
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/types.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/types.go
16c16
< "github.com/filecoin-project/go-state-types/builtin/v8/market"
---
> "github.com/filecoin-project/go-state-types/builtin/v9/market"
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/stores/filestore.go b/vendor/github.com/filecoin-project/go-fil-markets/stores/filestore.go
45a46
> carv2.StoreIdentityCIDs(true),
diff -r --color a/vendor/github.com/filecoin-project/go-fil-markets/stores/rw_bstores.go b/vendor/github.com/filecoin-project/go-fil-markets/stores/rw_bstores.go
6a7
> carv2 "github.com/ipld/go-car/v2"
41c42
< bs, err := blockstore.OpenReadWrite(path, []cid.Cid{rootCid}, blockstore.UseWholeCIDs(true))
---
> bs, err := blockstore.OpenReadWrite(path, []cid.Cid{rootCid}, blockstore.UseWholeCIDs(true), carv2.StoreIdentityCIDs(true))
diff -r --color a/vendor/github.com/filecoin-project/go-jsonrpc/errors.go b/vendor/github.com/filecoin-project/go-jsonrpc/errors.go
35c35
< -1111111: reflect.TypeOf(RPCConnectionError{}),
---
> -1111111: reflect.TypeOf(&RPCConnectionError{}),
Only in b/vendor/github.com/filecoin-project/go-jsonrpc: README.md
Only in a/vendor/github.com/filecoin-project/go-legs: httpsync
Only in a/vendor/github.com/filecoin-project/go-legs: interface.go
Only in a/vendor/github.com/filecoin-project/go-legs: option.go
Only in a/vendor/github.com/filecoin-project/go-legs: README.md
Only in a/vendor/github.com/filecoin-project/go-legs: selector.go
Only in a/vendor/github.com/filecoin-project/go-legs: subscriber.go
Only in a/vendor/github.com/filecoin-project/go-legs: version.json
Only in b/vendor/github.com/filecoin-project/go-state-types/abi: address.go
Only in b/vendor/github.com/filecoin-project/go-state-types/abi: cbor_string.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/abi/key.go b/vendor/github.com/filecoin-project/go-state-types/abi/key.go
23a24,29
> type IdAddrKey address.Address
>
> func (k IdAddrKey) Key() string {
> return string(address.Address(k).Payload())
> }
>
26c32
< First address.Address
---
> First address.Address
36,37c42,43
< func NewAddrPairKey(first address.Address, second address.Address) *AddrPairKey{
< return &AddrPairKey{
---
> func NewAddrPairKey(first address.Address, second address.Address) *AddrPairKey {
> return &AddrPairKey{
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin: actor_tree.go
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin: cbor_gen.go
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin: message_accumulator.go
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin: method_meta.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/methods.go
8,9c8,10
< MethodSend = abi.MethodNum(0)
< MethodConstructor = abi.MethodNum(1)
---
> MethodSend = abi.MethodNum(0)
> MethodConstructor = abi.MethodNum(1)
> UniversalReceiverHookMethodNum = abi.MethodNum(3726118371)
13,16c14,18
< Constructor abi.MethodNum
< PubkeyAddress abi.MethodNum
< AuthenticateMessage abi.MethodNum
< }{MethodConstructor, 2, 3}
---
> Constructor abi.MethodNum
> PubkeyAddress abi.MethodNum
> AuthenticateMessage abi.MethodNum
> UniversalReceiverHook abi.MethodNum
> }{MethodConstructor, 2, 3, UniversalReceiverHookMethodNum}
45c47,48
< }{MethodConstructor, 2, 3, 4, 5, 6, 7, 8, 9}
---
> UniversalReceiverHook abi.MethodNum
> }{MethodConstructor, 2, 3, 4, 5, 6, 7, 8, 9, UniversalReceiverHookMethodNum}
110c113,114
< }{MethodConstructor, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
---
> ExtendSectorExpiration2 abi.MethodNum
> }{MethodConstructor, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
117,118c121,122
< UseBytes abi.MethodNum
< RestoreBytes abi.MethodNum
---
> Deprecated1 abi.MethodNum
> Deprecated2 abi.MethodNum
120c124,148
< }{MethodConstructor, 2, 3, 4, 5, 6, 7}
---
> RemoveExpiredAllocations abi.MethodNum
> ClaimAllocations abi.MethodNum
> GetClaims abi.MethodNum
> ExtendClaimTerms abi.MethodNum
> RemoveExpiredClaims abi.MethodNum
> UniversalReceiverHook abi.MethodNum
> }{MethodConstructor, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, UniversalReceiverHookMethodNum}
>
> var MethodsDatacap = struct {
> Constructor abi.MethodNum
> Mint abi.MethodNum
> Destroy abi.MethodNum
> Name abi.MethodNum
> Symbol abi.MethodNum
> TotalSupply abi.MethodNum
> BalanceOf abi.MethodNum
> Transfer abi.MethodNum
> TransferFrom abi.MethodNum
> IncreaseAllowance abi.MethodNum
> DecreaseAllowance abi.MethodNum
> RevokeAllowance abi.MethodNum
> Burn abi.MethodNum
> BurnFrom abi.MethodNum
> Allowance abi.MethodNum
> }{MethodConstructor, 2, 3, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21}
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/quantize.go b/vendor/github.com/filecoin-project/go-state-types/builtin/quantize.go
13a14,40
>
> var NoQuantization = NewQuantSpec(1, 0)
>
> func (q QuantSpec) QuantizeUp(e abi.ChainEpoch) abi.ChainEpoch {
> return QuantizeUp(e, q.unit, q.offset)
> }
>
> // Rounds e to the nearest exact multiple of the quantization unit offset by
> // offsetSeed % unit, rounding up.
> // This function is equivalent to `unit * ceil((e - (offsetSeed % unit)) / unit) + (offsetSeed % unit)`
> // where the variables/operations are over real numbers instead of ints.
> // Precondition: unit > 0, else behaviour is undefined
> func QuantizeUp(e abi.ChainEpoch, unit abi.ChainEpoch, offsetSeed abi.ChainEpoch) abi.ChainEpoch {
> offset := offsetSeed % unit
>
> remainder := (e - offset) % unit
> quotient := (e - offset) / unit
> // Don't round if epoch falls on a quantization epoch
> if remainder == 0 {
> return unit*quotient + offset
> }
> // Negative truncating division rounds up
> if e-offset < 0 {
> return unit*quotient + offset
> }
> return unit*(quotient+1) + offset
> }
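[editor's note] The closed form in the comment above is easiest to sanity-check with small numbers: with unit = 10 and offsetSeed = 3, every epoch rounds up to the next value congruent to 3 mod 10. A quick check against the vendored go-state-types module (the concrete numbers are illustrative):

package main

import (
    "fmt"

    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/builtin"
)

func main() {
    q := builtin.NewQuantSpec(10, 3)
    fmt.Println(q.QuantizeUp(abi.ChainEpoch(12))) // 13: rounds up to the next 3-mod-10 epoch
    fmt.Println(q.QuantizeUp(abi.ChainEpoch(13))) // 13: already on a quantization epoch
    fmt.Println(q.QuantizeUp(abi.ChainEpoch(14))) // 23: past 13, so up to the next one
}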
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/shared.go b/vendor/github.com/filecoin-project/go-state-types/builtin/shared.go
12a13,14
> const DefaultTokenActorBitwidth = 3
>
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/singletons.go b/vendor/github.com/filecoin-project/go-state-types/builtin/singletons.go
16a17
> DatacapActorAddr = mustMakeAddress(7)
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/account: invariants.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/account/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/account/methods.go
4a5
>
5a7
> "github.com/filecoin-project/go-state-types/builtin"
8,10c10,12
< var Methods = []interface{}{
< 1: *new(func(interface{}, *address.Address) *abi.EmptyValue), // Constructor
< 2: *new(func(interface{}, *abi.EmptyValue) *address.Address), // PubkeyAddress
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*address.Address) *abi.EmptyValue)}, // Constructor
> 2: {"PubkeyAddress", *new(func(*abi.EmptyValue) *address.Address)}, // PubkeyAddress
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v8: check.go
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/cron: invariants.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/cron/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/cron/methods.go
4a5
> "github.com/filecoin-project/go-state-types/builtin"
7,9c8,10
< var Methods = []interface{}{
< 1: *new(func(interface{}, *ConstructorParams) *abi.EmptyValue), // Constructor
< 2: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // EpochTick
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*ConstructorParams) *abi.EmptyValue)}, // Constructor
> 2: {"EpochTick", *new(func(*abi.EmptyValue) *abi.EmptyValue)}, // EpochTick
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/init: invariants.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/init/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/init/methods.go
4a5
> "github.com/filecoin-project/go-state-types/builtin"
7,9c8,10
< var Methods = []interface{}{
< 1: *new(func(interface{}, *ConstructorParams) *abi.EmptyValue), // Constructor
< 2: *new(func(interface{}, *ExecParams) *ExecReturn), // Exec
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*ConstructorParams) *abi.EmptyValue)}, // Constructor
> 2: {"Exec", *new(func(*ExecParams) *ExecReturn)}, // Exec
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market: invariants.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/methods.go
4a5
>
5a7
> "github.com/filecoin-project/go-state-types/builtin"
8,17c10,19
< var Methods = []interface{}{
< 1: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // Constructor
< 2: *new(func(interface{}, *address.Address) *abi.EmptyValue), // AddBalance
< 3: *new(func(interface{}, *WithdrawBalanceParams) *abi.TokenAmount), // WithdrawBalance
< 4: *new(func(interface{}, *PublishStorageDealsParams) *PublishStorageDealsReturn), // PublishStorageDeals
< 5: *new(func(interface{}, *VerifyDealsForActivationParams) *VerifyDealsForActivationReturn), // VerifyDealsForActivation
< 6: *new(func(interface{}, *ActivateDealsParams) *abi.EmptyValue), // ActivateDeals
< 7: *new(func(interface{}, *OnMinerSectorsTerminateParams) *abi.EmptyValue), // OnMinerSectorsTerminate
< 8: *new(func(interface{}, *ComputeDataCommitmentParams) *ComputeDataCommitmentReturn), // ComputeDataCommitment
< 9: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // CronTick
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*abi.EmptyValue) *abi.EmptyValue)}, // Constructor
> 2: {"AddBalance", *new(func(*address.Address) *abi.EmptyValue)}, // AddBalance
> 3: {"WithdrawBalance", *new(func(*WithdrawBalanceParams) *abi.TokenAmount)}, // WithdrawBalance
> 4: {"PublishStorageDeals", *new(func(*PublishStorageDealsParams) *PublishStorageDealsReturn)}, // PublishStorageDeals
> 5: {"VerifyDealsForActivation", *new(func(*VerifyDealsForActivationParams) *VerifyDealsForActivationReturn)}, // VerifyDealsForActivation
> 6: {"ActivateDeals", *new(func(*ActivateDealsParams) *abi.EmptyValue)}, // ActivateDeals
> 7: {"OnMinerSectorsTerminate", *new(func(*OnMinerSectorsTerminateParams) *abi.EmptyValue)}, // OnMinerSectorsTerminate
> 8: {"ComputeDataCommitment", *new(func(*ComputeDataCommitmentParams) *ComputeDataCommitmentReturn)}, // ComputeDataCommitment
> 9: {"CronTick", *new(func(*abi.EmptyValue) *abi.EmptyValue)}, // CronTick
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/policy.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/policy.go
8a9,11
> // The number of epochs between payment and other state processing for deals.
> const DealUpdatesInterval = builtin.EpochsInDay // PARAM_SPEC
>
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/set_multimap.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/set_multimap.go
3a4
> "github.com/filecoin-project/go-state-types/abi"
4a6,7
> cbg "github.com/whyrusleeping/cbor-gen"
> "golang.org/x/xerrors"
14a18,27
> // Interprets a store as a HAMT-based map of HAMT-based sets with root `r`.
> // Both inner and outer HAMTs are interpreted with branching factor 2^bitwidth.
> func AsSetMultimap(s adt.Store, r cid.Cid, outerBitwidth, innerBitwidth int) (*SetMultimap, error) {
> m, err := adt.AsMap(s, r, outerBitwidth)
> if err != nil {
> return nil, err
> }
> return &SetMultimap{mp: m, store: s, innerBitwidth: innerBitwidth}, nil
> }
>
36a50,88
> }
>
> func parseDealKey(s string) (abi.DealID, error) {
> key, err := abi.ParseUIntKey(s)
> return abi.DealID(key), err
> }
>
> func (mm *SetMultimap) get(key abi.Keyer) (*adt.Set, bool, error) {
> var setRoot cbg.CborCid
> found, err := mm.mp.Get(key, &setRoot)
> if err != nil {
> return nil, false, xerrors.Errorf("failed to load set key: %v: %w", key, err)
> }
> var set *adt.Set
> if found {
> set, err = adt.AsSet(mm.store, cid.Cid(setRoot), mm.innerBitwidth)
> if err != nil {
> return nil, false, err
> }
> }
> return set, found, nil
> }
>
> // Iterates all entries for a key; iteration halts if the function returns an error.
> func (mm *SetMultimap) ForEach(epoch abi.ChainEpoch, fn func(id abi.DealID) error) error {
> set, found, err := mm.get(abi.UIntKey(uint64(epoch)))
> if err != nil {
> return err
> }
> if found {
> return set.ForEach(func(k string) error {
> v, err := parseDealKey(k)
> if err != nil {
> return err
> }
> return fn(v)
> })
> }
> return nil
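[editor's note] The AsSetMultimap and ForEach additions above expose the market's deals-by-epoch multimap for read-side iteration. A hypothetical helper showing the intended shape; the bitwidth arguments and the adt.Store plumbing are assumptions, only the two APIs come from this diff.

package example

import (
    "fmt"

    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/builtin/v8/market"
    "github.com/filecoin-project/go-state-types/builtin/v8/util/adt"
    "github.com/ipfs/go-cid"
)

// listDealsAt loads the deals-by-epoch multimap at root r and prints every
// deal ID queued for the given epoch. The bitwidths (5, 5) are illustrative.
func listDealsAt(s adt.Store, r cid.Cid, epoch abi.ChainEpoch) error {
    mm, err := market.AsSetMultimap(s, r, 5, 5)
    if err != nil {
        return err
    }
    return mm.ForEach(epoch, func(id abi.DealID) error {
        fmt.Println("deal", id, "at epoch", epoch)
        return nil
    })
}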
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/deadline_state.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/deadline_state.go
98a99,137
> // Deadline (singular)
> //
>
> func ConstructDeadline(store adt.Store) (*Deadline, error) {
> emptyPartitionsArrayCid, err := adt.StoreEmptyArray(store, DeadlinePartitionsAmtBitwidth)
> if err != nil {
> return nil, xerrors.Errorf("failed to construct empty partitions array: %w", err)
> }
> emptyDeadlineExpirationArrayCid, err := adt.StoreEmptyArray(store, DeadlineExpirationAmtBitwidth)
> if err != nil {
> return nil, xerrors.Errorf("failed to construct empty deadline expiration array: %w", err)
> }
>
> emptySectorsSnapshotArrayCid, err := adt.StoreEmptyArray(store, SectorsAmtBitwidth)
> if err != nil {
> return nil, xerrors.Errorf("failed to construct empty sectors snapshot array: %w", err)
> }
>
> emptyPoStSubmissionsArrayCid, err := adt.StoreEmptyArray(store, DeadlineOptimisticPoStSubmissionsAmtBitwidth)
> if err != nil {
> return nil, xerrors.Errorf("failed to construct empty proofs array: %w", err)
> }
>
> return &Deadline{
> Partitions: emptyPartitionsArrayCid,
> ExpirationsEpochs: emptyDeadlineExpirationArrayCid,
> EarlyTerminations: bitfield.New(),
> LiveSectors: 0,
> TotalSectors: 0,
> FaultyPower: NewPowerPairZero(),
> PartitionsPoSted: bitfield.New(),
> OptimisticPoStSubmissions: emptyPoStSubmissionsArrayCid,
> PartitionsSnapshot: emptyPartitionsArrayCid,
> SectorsSnapshot: emptySectorsSnapshotArrayCid,
> OptimisticPoStSubmissionsSnapshot: emptyPoStSubmissionsArrayCid,
> }, nil
> }
>
> //
100a140,147
>
> func ConstructDeadlines(emptyDeadlineCid cid.Cid) *Deadlines {
> d := new(Deadlines)
> for i := range d.Due {
> d.Due[i] = emptyDeadlineCid
> }
> return d
> }
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner: invariants.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/methods.go
5a6
>
6a8
> "github.com/filecoin-project/go-state-types/builtin"
10,37c12,39
< var Methods = []interface{}{
< 1: *new(func(interface{}, *power.MinerConstructorParams) *abi.EmptyValue), // Constructor
< 2: *new(func(interface{}, *abi.EmptyValue) *GetControlAddressesReturn), // ControlAddresses
< 3: *new(func(interface{}, *ChangeWorkerAddressParams) *abi.EmptyValue), // ChangeWorkerAddress
< 4: *new(func(interface{}, *ChangePeerIDParams) *abi.EmptyValue), // ChangePeerID
< 5: *new(func(interface{}, *SubmitWindowedPoStParams) *abi.EmptyValue), // SubmitWindowedPoSt
< 6: *new(func(interface{}, *PreCommitSectorParams) *abi.EmptyValue), // PreCommitSector
< 7: *new(func(interface{}, *ProveCommitSectorParams) *abi.EmptyValue), // ProveCommitSector
< 8: *new(func(interface{}, *ExtendSectorExpirationParams) *abi.EmptyValue), // ExtendSectorExpiration
< 9: *new(func(interface{}, *TerminateSectorsParams) *TerminateSectorsReturn), // TerminateSectors
< 10: *new(func(interface{}, *DeclareFaultsParams) *abi.EmptyValue), // DeclareFaults
< 11: *new(func(interface{}, *DeclareFaultsRecoveredParams) *abi.EmptyValue), // DeclareFaultsRecovered
< 12: *new(func(interface{}, *DeferredCronEventParams) *abi.EmptyValue), // OnDeferredCronEvent
< 13: *new(func(interface{}, *CheckSectorProvenParams) *abi.EmptyValue), // CheckSectorProven
< 14: *new(func(interface{}, *ApplyRewardParams) *abi.EmptyValue), // ApplyRewards
< 15: *new(func(interface{}, *ReportConsensusFaultParams) *abi.EmptyValue), // ReportConsensusFault
< 16: *new(func(interface{}, *WithdrawBalanceParams) *abi.TokenAmount), // WithdrawBalance
< 17: *new(func(interface{}, *ConfirmSectorProofsParams) *abi.EmptyValue), // ConfirmSectorProofsValid
< 18: *new(func(interface{}, *ChangeMultiaddrsParams) *abi.EmptyValue), // ChangeMultiaddrs
< 19: *new(func(interface{}, *CompactPartitionsParams) *abi.EmptyValue), // CompactPartitions
< 20: *new(func(interface{}, *CompactSectorNumbersParams) *abi.EmptyValue), // CompactSectorNumbers
< 21: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // ConfirmUpdateWorkerKey
< 22: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // RepayDebt
< 23: *new(func(interface{}, *address.Address) *abi.EmptyValue), // ChangeOwnerAddress
< 24: *new(func(interface{}, *DisputeWindowedPoStParams) *abi.EmptyValue), // DisputeWindowedPoSt
< 25: *new(func(interface{}, *PreCommitSectorBatchParams) *abi.EmptyValue), // PreCommitSectorBatch
< 26: *new(func(interface{}, *ProveCommitAggregateParams) *abi.EmptyValue), // ProveCommitAggregate
< 27: *new(func(interface{}, *ProveReplicaUpdatesParams) *bitfield.BitField), // ProveReplicaUpdates
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*power.MinerConstructorParams) *abi.EmptyValue)}, // Constructor
> 2: {"ControlAddresses", *new(func(*abi.EmptyValue) *GetControlAddressesReturn)}, // ControlAddresses
> 3: {"ChangeWorkerAddress", *new(func(*ChangeWorkerAddressParams) *abi.EmptyValue)}, // ChangeWorkerAddress
> 4: {"ChangePeerID", *new(func(*ChangePeerIDParams) *abi.EmptyValue)}, // ChangePeerID
> 5: {"SubmitWindowedPoSt", *new(func(*SubmitWindowedPoStParams) *abi.EmptyValue)}, // SubmitWindowedPoSt
> 6: {"PreCommitSector", *new(func(*PreCommitSectorParams) *abi.EmptyValue)}, // PreCommitSector
> 7: {"ProveCommitSector", *new(func(*ProveCommitSectorParams) *abi.EmptyValue)}, // ProveCommitSector
> 8: {"ExtendSectorExpiration", *new(func(*ExtendSectorExpirationParams) *abi.EmptyValue)}, // ExtendSectorExpiration
> 9: {"TerminateSectors", *new(func(*TerminateSectorsParams) *TerminateSectorsReturn)}, // TerminateSectors
> 10: {"DeclareFaults", *new(func(*DeclareFaultsParams) *abi.EmptyValue)}, // DeclareFaults
> 11: {"DeclareFaultsRecovered", *new(func(*DeclareFaultsRecoveredParams) *abi.EmptyValue)}, // DeclareFaultsRecovered
> 12: {"OnDeferredCronEvent", *new(func(*DeferredCronEventParams) *abi.EmptyValue)}, // OnDeferredCronEvent
> 13: {"CheckSectorProven", *new(func(*CheckSectorProvenParams) *abi.EmptyValue)}, // CheckSectorProven
> 14: {"ApplyRewards", *new(func(*ApplyRewardParams) *abi.EmptyValue)}, // ApplyRewards
> 15: {"ReportConsensusFault", *new(func(*ReportConsensusFaultParams) *abi.EmptyValue)}, // ReportConsensusFault
> 16: {"WithdrawBalance", *new(func(*WithdrawBalanceParams) *abi.TokenAmount)}, // WithdrawBalance
> 17: {"ConfirmSectorProofsValid", *new(func(*ConfirmSectorProofsParams) *abi.EmptyValue)}, // ConfirmSectorProofsValid
> 18: {"ChangeMultiaddrs", *new(func(*ChangeMultiaddrsParams) *abi.EmptyValue)}, // ChangeMultiaddrs
> 19: {"CompactPartitions", *new(func(*CompactPartitionsParams) *abi.EmptyValue)}, // CompactPartitions
> 20: {"CompactSectorNumbers", *new(func(*CompactSectorNumbersParams) *abi.EmptyValue)}, // CompactSectorNumbers
> 21: {"ConfirmUpdateWorkerKey", *new(func(*abi.EmptyValue) *abi.EmptyValue)}, // ConfirmUpdateWorkerKey
> 22: {"RepayDebt", *new(func(*abi.EmptyValue) *abi.EmptyValue)}, // RepayDebt
> 23: {"ChangeOwnerAddress", *new(func(*address.Address) *abi.EmptyValue)}, // ChangeOwnerAddress
> 24: {"DisputeWindowedPoSt", *new(func(*DisputeWindowedPoStParams) *abi.EmptyValue)}, // DisputeWindowedPoSt
> 25: {"PreCommitSectorBatch", *new(func(*PreCommitSectorBatchParams) *abi.EmptyValue)}, // PreCommitSectorBatch
> 26: {"ProveCommitAggregate", *new(func(*ProveCommitAggregateParams) *abi.EmptyValue)}, // ProveCommitAggregate
> 27: {"ProveReplicaUpdates", *new(func(*ProveReplicaUpdatesParams) *bitfield.BitField)}, // ProveReplicaUpdates
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/miner_state.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/miner_state.go
301a302,313
>
> // pre-commit clean up
> func (st *State) QuantSpecEveryDeadline() builtin.QuantSpec {
> return builtin.NewQuantSpec(WPoStChallengeWindow, st.ProvingPeriodStart)
> }
>
> // Return true when the miner actor needs to continue scheduling deadline crons
> func (st *State) ContinueDeadlineCron() bool {
> return !st.PreCommitDeposits.IsZero() ||
> !st.InitialPledge.IsZero() ||
> !st.LockedFunds.IsZero()
> }
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/partition_state.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/partition_state.go
6a7
> "github.com/filecoin-project/go-state-types/builtin/v8/util/adt"
91a93,100
> func (d *Deadline) PartitionsSnapshotArray(store adt.Store) (*adt.Array, error) {
> arr, err := adt.AsArray(store, d.PartitionsSnapshot, DeadlinePartitionsAmtBitwidth)
> if err != nil {
> return nil, xerrors.Errorf("failed to load partitions snapshot: %w", err)
> }
> return arr, nil
> }
>
115a125,137
> }
>
> func (pp *PowerPair) Equals(other PowerPair) bool {
> return pp.Raw.Equals(other.Raw) && pp.QA.Equals(other.QA)
> }
>
> func (pp PowerPair) IsZero() bool {
> return pp.Raw.IsZero() && pp.QA.IsZero()
> }
>
> // Active power is power of non-faulty sectors.
> func (p *Partition) ActivePower() PowerPair {
> return p.LivePower.Sub(p.FaultyPower).Sub(p.UnprovenPower)
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/policy.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/policy.go
165a166,171
> // The quality-adjusted power for a sector.
> func QAPowerForSector(size abi.SectorSize, sector *SectorOnChainInfo) abi.StoragePower {
> duration := sector.Expiration - sector.Activation
> return QAPowerForWeight(size, duration, sector.DealWeight, sector.VerifiedDealWeight)
> }
>
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/multisig: invariants.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/multisig/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/multisig/methods.go
4a5
> "github.com/filecoin-project/go-state-types/builtin"
7,16c8,17
< var Methods = []interface{}{
< 1: *new(func(interface{}, *ConstructorParams) *abi.EmptyValue), // Constructor
< 2: *new(func(interface{}, *ProposeParams) *ProposeReturn), // Propose
< 3: *new(func(interface{}, *TxnIDParams) *ApproveReturn), // Approve
< 4: *new(func(interface{}, *TxnIDParams) *abi.EmptyValue), // Cancel
< 5: *new(func(interface{}, *AddSignerParams) *abi.EmptyValue), // AddSigner
< 6: *new(func(interface{}, *RemoveSignerParams) *abi.EmptyValue), // RemoveSigner
< 7: *new(func(interface{}, *SwapSignerParams) *abi.EmptyValue), // SwapSigner
< 8: *new(func(interface{}, *ChangeNumApprovalsThresholdParams) *abi.EmptyValue), // ChangeNumApprovalsThreshold
< 9: *new(func(interface{}, *LockBalanceParams) *abi.EmptyValue), // LockBalance
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*ConstructorParams) *abi.EmptyValue)}, // Constructor
> 2: {"Propose", *new(func(*ProposeParams) *ProposeReturn)}, // Propose
> 3: {"Approve", *new(func(*TxnIDParams) *ApproveReturn)}, // Approve
> 4: {"Cancel", *new(func(*TxnIDParams) *abi.EmptyValue)}, // Cancel
> 5: {"AddSigner", *new(func(*AddSignerParams) *abi.EmptyValue)}, // AddSigner
> 6: {"RemoveSigner", *new(func(*RemoveSignerParams) *abi.EmptyValue)}, // RemoveSigner
> 7: {"SwapSigner", *new(func(*SwapSignerParams) *abi.EmptyValue)}, // SwapSigner
> 8: {"ChangeNumApprovalsThreshold", *new(func(*ChangeNumApprovalsThresholdParams) *abi.EmptyValue)}, // ChangeNumApprovalsThreshold
> 9: {"LockBalance", *new(func(*LockBalanceParams) *abi.EmptyValue)}, // LockBalance
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/multisig: policy.go
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/paych: invariants.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/paych/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/paych/methods.go
4a5
> "github.com/filecoin-project/go-state-types/builtin"
7,11c8,12
< var Methods = []interface{}{
< 1: *new(func(interface{}, *ConstructorParams) *abi.EmptyValue), // Constructor
< 2: *new(func(interface{}, *UpdateChannelStateParams) *abi.EmptyValue), // UpdateChannelState
< 3: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // Settle
< 4: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // Collect
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*ConstructorParams) *abi.EmptyValue)}, // Constructor
> 2: {"UpdateChannelState", *new(func(*UpdateChannelStateParams) *abi.EmptyValue)}, // UpdateChannelState
> 3: {"Settle", *new(func(*abi.EmptyValue) *abi.EmptyValue)}, // Settle
> 4: {"Collect", *new(func(*abi.EmptyValue) *abi.EmptyValue)}, // Collect
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/power/cbor_gen.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/power/cbor_gen.go
1225a1226,1309
>
> var lengthBufCronEvent = []byte{130}
>
> func (t *CronEvent) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufCronEvent); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.MinerAddr (address.Address) (struct)
> if err := t.MinerAddr.MarshalCBOR(w); err != nil {
> return err
> }
>
> // t.CallbackPayload ([]uint8) (slice)
> if len(t.CallbackPayload) > cbg.ByteArrayMaxLen {
> return xerrors.Errorf("Byte array in field t.CallbackPayload was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.CallbackPayload))); err != nil {
> return err
> }
>
> if _, err := w.Write(t.CallbackPayload[:]); err != nil {
> return err
> }
> return nil
> }
>
> func (t *CronEvent) UnmarshalCBOR(r io.Reader) error {
> *t = CronEvent{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 2 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.MinerAddr (address.Address) (struct)
>
> {
>
> if err := t.MinerAddr.UnmarshalCBOR(br); err != nil {
> return xerrors.Errorf("unmarshaling t.MinerAddr: %w", err)
> }
>
> }
> // t.CallbackPayload ([]uint8) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.ByteArrayMaxLen {
> return fmt.Errorf("t.CallbackPayload: byte array too large (%d)", extra)
> }
> if maj != cbg.MajByteString {
> return fmt.Errorf("expected byte array")
> }
>
> if extra > 0 {
> t.CallbackPayload = make([]uint8, extra)
> }
>
> if _, err := io.ReadFull(br, t.CallbackPayload[:]); err != nil {
> return err
> }
> return nil
> }
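CronEvent gets generated CBOR codecs directly in go-state-types. A round-trip sketch; the printed form of the address assumes the default f-prefixed network:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/filecoin-project/go-address"
        power8 "github.com/filecoin-project/go-state-types/builtin/v8/power"
    )

    func main() {
        maddr, err := address.NewIDAddress(1000)
        if err != nil {
            panic(err)
        }
        ev := power8.CronEvent{MinerAddr: maddr, CallbackPayload: []byte{0x01, 0x02}}

        // Encode with the generated marshaller, then decode it back.
        var buf bytes.Buffer
        if err := ev.MarshalCBOR(&buf); err != nil {
            panic(err)
        }
        var out power8.CronEvent
        if err := out.UnmarshalCBOR(&buf); err != nil {
            panic(err)
        }
        fmt.Println(out.MinerAddr, out.CallbackPayload) // f01000 [1 2]
    }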
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/power: invariants.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/power/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/power/methods.go
4a5
> "github.com/filecoin-project/go-state-types/builtin"
8,17c9,18
< var Methods = []interface{}{
< 1: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // Constructor
< 2: *new(func(interface{}, *CreateMinerParams) *CreateMinerReturn), // CreateMiner
< 3: *new(func(interface{}, *UpdateClaimedPowerParams) *abi.EmptyValue), // UpdateClaimedPower
< 4: *new(func(interface{}, *EnrollCronEventParams) *abi.EmptyValue), // EnrollCronEvent
< 5: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // CronTick
< 6: *new(func(interface{}, *abi.TokenAmount) *abi.EmptyValue), // UpdatePledgeTotal
< 7: nil,
< 8: *new(func(interface{}, *proof.SealVerifyInfo) *abi.EmptyValue), // SubmitPoRepForBulkVerify
< 9: *new(func(interface{}, *abi.EmptyValue) *CurrentTotalPowerReturn), // CurrentTotalPower
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*abi.EmptyValue) *abi.EmptyValue)}, // Constructor
> 2: {"CreateMiner", *new(func(*CreateMinerParams) *CreateMinerReturn)}, // CreateMiner
> 3: {"UpdateClaimedPower", *new(func(*UpdateClaimedPowerParams) *abi.EmptyValue)}, // UpdateClaimedPower
> 4: {"EnrollCronEvent", *new(func(*EnrollCronEventParams) *abi.EmptyValue)}, // EnrollCronEvent
> 5: {"CronTick", *new(func(*abi.EmptyValue) *abi.EmptyValue)}, // CronTick
> 6: {"UpdatePledgeTotal", *new(func(*abi.TokenAmount) *abi.EmptyValue)}, // UpdatePledgeTotal
> 7: {"OnConsensusFault", nil}, // deprecated
> 8: {"SubmitPoRepForBulkVerify", *new(func(*proof.SealVerifyInfo) *abi.EmptyValue)}, // SubmitPoRepForBulkVerify
> 9: {"CurrentTotalPower", *new(func(*abi.EmptyValue) *CurrentTotalPowerReturn)}, // CurrentTotalPower
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/power/power_state.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/power/power_state.go
35c35,41
< const ConsensusMinerMinMiners = 4 // PARAM_SPEC
---
> const ConsensusMinerMinMiners = 4
>
> // PARAM_SPEC: Maximum number of prove-commits each miner can submit in one epoch.
> //
> // This limits the number of proof partitions we may need to load in the cron call path.
> // Onboarding 1EiB/year requires at least 32 prove-commits per epoch.
> const MaxMinerProveCommitsPerEpoch = 200 // PARAM_SPEC
106a113,117
> }
>
> type CronEvent struct {
> MinerAddr addr.Address
> CallbackPayload []byte
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/reward: invariants.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/reward/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/reward/methods.go
4a5
> "github.com/filecoin-project/go-state-types/builtin"
7,11c8,12
< var Methods = []interface{}{
< 1: *new(func(interface{}, *abi.StoragePower) *abi.EmptyValue), // Constructor
< 2: *new(func(interface{}, *AwardBlockRewardParams) *abi.EmptyValue), // AwardBlockReward
< 3: *new(func(interface{}, *abi.EmptyValue) *ThisEpochRewardReturn), // ThisEpochReward
< 4: *new(func(interface{}, *abi.StoragePower) *abi.EmptyValue), // UpdateNetworkKPI
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*abi.StoragePower) *abi.EmptyValue)}, // Constructor
> 2: {"AwardBlockReward", *new(func(*AwardBlockRewardParams) *abi.EmptyValue)}, // AwardBlockReward
> 3: {"ThisEpochReward", *new(func(*abi.EmptyValue) *ThisEpochRewardReturn)}, // ThisEpochReward
> 4: {"UpdateNetworkKPI", *new(func(*abi.StoragePower) *abi.EmptyValue)}, // UpdateNetworkKPI
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/system/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/system/methods.go
4a5
> "github.com/filecoin-project/go-state-types/builtin"
7,8c8,9
< var Methods = []interface{}{
< 1: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // Constructor
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*abi.EmptyValue) *abi.EmptyValue)}, // Constructor
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/adt/balancetable.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/adt/balancetable.go
40a41,51
>
> // Returns the total balance held by this BalanceTable
> func (t *BalanceTable) Total() (abi.TokenAmount, error) {
> total := big.Zero()
> var cur abi.TokenAmount
> err := (*Map)(t).ForEach(&cur, func(key string) error {
> total = big.Add(total, cur)
> return nil
> })
> return total, err
> }
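Total walks every entry in the table and sums it, which is what the new invariant checks need. A compilable sketch of loading a table and summing it; AsBalanceTable is assumed to be the package's existing constructor, mirroring specs-actors:

    package sketch

    import (
        "github.com/filecoin-project/go-state-types/abi"
        "github.com/filecoin-project/go-state-types/big"
        "github.com/filecoin-project/go-state-types/builtin/v8/util/adt"
        "github.com/ipfs/go-cid"
    )

    // totalAt loads the balance table rooted at root and sums all entries.
    func totalAt(store adt.Store, root cid.Cid) (abi.TokenAmount, error) {
        bt, err := adt.AsBalanceTable(store, root)
        if err != nil {
            return big.Zero(), err
        }
        return bt.Total()
    }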
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/adt/multimap.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/adt/multimap.go
3c3,6
< import "github.com/ipfs/go-cid"
---
> import (
> "github.com/ipfs/go-cid"
> cbg "github.com/whyrusleeping/cbor-gen"
> )
11a15,25
> // Interprets a store as a HAMT-based map of AMTs with root `r`.
> // The outer map is interpreted with a branching factor of 2^bitwidth.
> func AsMultimap(s Store, r cid.Cid, outerBitwidth, innerBitwidth int) (*Multimap, error) {
> m, err := AsMap(s, r, outerBitwidth)
> if err != nil {
> return nil, err
> }
>
> return &Multimap{m, innerBitwidth}, nil
> }
>
33a48,63
> }
>
> func (mm *Multimap) ForAll(fn func(k string, arr *Array) error) error {
> var arrRoot cbg.CborCid
> if err := mm.mp.ForEach(&arrRoot, func(k string) error {
> arr, err := AsArray(mm.mp.store, cid.Cid(arrRoot), mm.innerBitwidth)
> if err != nil {
> return err
> }
>
> return fn(k, arr)
> }); err != nil {
> return err
> }
>
> return nil
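AsMultimap and ForAll allow opening an existing HAMT-of-AMTs and walking every inner array without knowing the keys up front. A compilable sketch; the bitwidth arguments must match whatever the multimap was written with, and values are left as raw CBOR via cbg.Deferred:

    package sketch

    import (
        "fmt"

        "github.com/filecoin-project/go-state-types/builtin/v8/util/adt"
        "github.com/ipfs/go-cid"
        cbg "github.com/whyrusleeping/cbor-gen"
    )

    // dump prints every (key, index) pair reachable from the multimap root.
    func dump(s adt.Store, root cid.Cid, outerBW, innerBW int) error {
        mm, err := adt.AsMultimap(s, root, outerBW, innerBW)
        if err != nil {
            return err
        }
        return mm.ForAll(func(k string, arr *adt.Array) error {
            var raw cbg.Deferred // decode lazily; each value stays raw CBOR
            return arr.ForEach(&raw, func(i int64) error {
                fmt.Printf("%q[%d]: %d bytes\n", k, i, len(raw.Raw))
                return nil
            })
        })
    }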
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/adt: set.go
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util: bitfield.go
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util: bitfield_queue.go
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/verifreg: invariants.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/verifreg/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/verifreg/methods.go
4a5
>
5a7
> "github.com/filecoin-project/go-state-types/builtin"
8,15c10,17
< var Methods = []interface{}{
< 1: *new(func(interface{}, *address.Address) *abi.EmptyValue), // Constructor
< 2: *new(func(interface{}, *AddVerifierParams) *abi.EmptyValue), // AddVerifier
< 3: *new(func(interface{}, *address.Address) *abi.EmptyValue), // RemoveVerifier
< 4: *new(func(interface{}, *AddVerifiedClientParams) *abi.EmptyValue), // AddVerifiedClient
< 5: *new(func(interface{}, *UseBytesParams) *abi.EmptyValue), // UseBytes
< 6: *new(func(interface{}, *RestoreBytesParams) *abi.EmptyValue), // RestoreBytes
< 7: *new(func(interface{}, *RemoveDataCapParams) *RemoveDataCapReturn), // RemoveVerifiedClientDataCap
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*address.Address) *abi.EmptyValue)}, // Constructor
> 2: {"AddVerifier", *new(func(*AddVerifierParams) *abi.EmptyValue)}, // AddVerifier
> 3: {"RemoveVerifier", *new(func(*address.Address) *abi.EmptyValue)}, // RemoveVerifier
> 4: {"AddVerifiedClient", *new(func(*AddVerifiedClientParams) *abi.EmptyValue)}, // AddVerifiedClient
> 5: {"UseBytes", *new(func(*UseBytesParams) *abi.EmptyValue)}, // UseBytes
> 6: {"RestoreBytes", *new(func(*RestoreBytesParams) *abi.EmptyValue)}, // RestoreBytes
> 7: {"RemoveVerifiedClientDataCap", *new(func(*RemoveDataCapParams) *RemoveDataCapReturn)}, // RemoveVerifiedClientDataCap
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/account/cbor_gen.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/account/cbor_gen.go
61a62,165
>
> var lengthBufAuthenticateMessageParams = []byte{130}
>
> func (t *AuthenticateMessageParams) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufAuthenticateMessageParams); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.Signature ([]uint8) (slice)
> if len(t.Signature) > cbg.ByteArrayMaxLen {
> return xerrors.Errorf("Byte array in field t.Signature was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Signature))); err != nil {
> return err
> }
>
> if _, err := w.Write(t.Signature[:]); err != nil {
> return err
> }
>
> // t.Message ([]uint8) (slice)
> if len(t.Message) > cbg.ByteArrayMaxLen {
> return xerrors.Errorf("Byte array in field t.Message was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Message))); err != nil {
> return err
> }
>
> if _, err := w.Write(t.Message[:]); err != nil {
> return err
> }
> return nil
> }
>
> func (t *AuthenticateMessageParams) UnmarshalCBOR(r io.Reader) error {
> *t = AuthenticateMessageParams{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 2 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.Signature ([]uint8) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.ByteArrayMaxLen {
> return fmt.Errorf("t.Signature: byte array too large (%d)", extra)
> }
> if maj != cbg.MajByteString {
> return fmt.Errorf("expected byte array")
> }
>
> if extra > 0 {
> t.Signature = make([]uint8, extra)
> }
>
> if _, err := io.ReadFull(br, t.Signature[:]); err != nil {
> return err
> }
> // t.Message ([]uint8) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.ByteArrayMaxLen {
> return fmt.Errorf("t.Message: byte array too large (%d)", extra)
> }
> if maj != cbg.MajByteString {
> return fmt.Errorf("expected byte array")
> }
>
> if extra > 0 {
> t.Message = make([]uint8, extra)
> }
>
> if _, err := io.ReadFull(br, t.Message[:]); err != nil {
> return err
> }
> return nil
> }
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/account: invariants.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/account/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/account/methods.go
4a5
>
5a7
> "github.com/filecoin-project/go-state-types/builtin"
8,11c10,14
< var Methods = []interface{}{
< 1: *new(func(interface{}, *address.Address) *abi.EmptyValue), // Constructor
< 2: *new(func(interface{}, *abi.EmptyValue) *address.Address), // PubkeyAddress
< 3: *new(func(interface{}, *AuthenticateMessageParams) *abi.EmptyValue), // AuthenticateMessage
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*address.Address) *abi.EmptyValue)}, // Constructor
> 2: {"PubkeyAddress", *new(func(*abi.EmptyValue) *address.Address)}, // PubkeyAddress
> 3: {"AuthenticateMessage", *new(func(*AuthenticateMessageParams) *abi.EmptyValue)}, // AuthenticateMessage
> uint64(builtin.UniversalReceiverHookMethodNum): {"UniversalReceiverHook", *new(func(*[]byte) *abi.EmptyValue)}, // UniversalReceiverHook
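Beyond the map conversion, the v9 account actor registers UniversalReceiverHook under builtin.UniversalReceiverHookMethodNum, a fixed non-sequential method number exported by the builtin package for the receiver-hook convention. A lookup sketch; the Name field is the same assumption as above:

    package main

    import (
        "fmt"

        "github.com/filecoin-project/go-state-types/builtin"
        account9 "github.com/filecoin-project/go-state-types/builtin/v9/account"
    )

    func main() {
        num := uint64(builtin.UniversalReceiverHookMethodNum)
        meta, ok := account9.Methods[num]
        fmt.Println(num, ok, meta.Name) // expected: <hook method number> true UniversalReceiverHook
    }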
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v9: check.go
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/cron: invariants.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/cron/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/cron/methods.go
4a5
> "github.com/filecoin-project/go-state-types/builtin"
7,9c8,10
< var Methods = []interface{}{
< 1: *new(func(interface{}, *ConstructorParams) *abi.EmptyValue), // Constructor
< 2: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // EpochTick
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*ConstructorParams) *abi.EmptyValue)}, // Constructor
> 2: {"EpochTick", *new(func(*abi.EmptyValue) *abi.EmptyValue)}, // EpochTick
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v9: datacap
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/init: invariants.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/init/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/init/methods.go
4a5
> "github.com/filecoin-project/go-state-types/builtin"
7,9c8,10
< var Methods = []interface{}{
< 1: *new(func(interface{}, *ConstructorParams) *abi.EmptyValue), // Constructor
< 2: *new(func(interface{}, *ExecParams) *ExecReturn), // Exec
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*ConstructorParams) *abi.EmptyValue)}, // Constructor
> 2: {"Exec", *new(func(*ExecParams) *ExecReturn)}, // Exec
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/market/cbor_gen.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/market/cbor_gen.go
9a10
> verifreg "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
16c17
< var lengthBufState = []byte{139}
---
> var lengthBufState = []byte{140}
95a97,103
>
> // t.PendingDealAllocationIds (cid.Cid) (struct)
>
> if err := cbg.WriteCidBuf(scratch, w, t.PendingDealAllocationIds); err != nil {
> return xerrors.Errorf("failed to write cid field t.PendingDealAllocationIds: %w", err)
> }
>
113c121
< if extra != 11 {
---
> if extra != 12 {
254a263,274
> // t.PendingDealAllocationIds (cid.Cid) (struct)
>
> {
>
> c, err := cbg.ReadCid(br)
> if err != nil {
> return xerrors.Errorf("failed to read cid field t.PendingDealAllocationIds: %w", err)
> }
>
> t.PendingDealAllocationIds = c
>
> }
258c278
< var lengthBufDealState = []byte{131}
---
> var lengthBufDealState = []byte{132}
302a323,329
>
> // t.VerifiedClaim (verifreg.AllocationId) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.VerifiedClaim)); err != nil {
> return err
> }
>
320c347
< if extra != 3 {
---
> if extra != 4 {
398a426,439
> // t.VerifiedClaim (verifreg.AllocationId) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.VerifiedClaim = verifreg.AllocationId(extra)
>
> }
759c800
< var lengthBufActivateDealsResult = []byte{129}
---
> var lengthBufActivateDealsResult = []byte{130}
770,771c811,823
< // t.Weights (market.DealWeights) (struct)
< if err := t.Weights.MarshalCBOR(w); err != nil {
---
> scratch := make([]byte, 9)
>
> // t.NonVerifiedDealSpace (big.Int) (struct)
> if err := t.NonVerifiedDealSpace.MarshalCBOR(w); err != nil {
> return err
> }
>
> // t.VerifiedInfos ([]market.VerifiedDealInfo) (slice)
> if len(t.VerifiedInfos) > cbg.MaxLength {
> return xerrors.Errorf("Slice value in field t.VerifiedInfos was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.VerifiedInfos))); err != nil {
773a826,830
> for _, v := range t.VerifiedInfos {
> if err := v.MarshalCBOR(w); err != nil {
> return err
> }
> }
791c848
< if extra != 1 {
---
> if extra != 2 {
795c852
< // t.Weights (market.DealWeights) (struct)
---
> // t.NonVerifiedDealSpace (big.Int) (struct)
799,800c856,857
< if err := t.Weights.UnmarshalCBOR(br); err != nil {
< return xerrors.Errorf("unmarshaling t.Weights: %w", err)
---
> if err := t.NonVerifiedDealSpace.UnmarshalCBOR(br); err != nil {
> return xerrors.Errorf("unmarshaling t.NonVerifiedDealSpace: %w", err)
803a861,889
> // t.VerifiedInfos ([]market.VerifiedDealInfo) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.MaxLength {
> return fmt.Errorf("t.VerifiedInfos: array too large (%d)", extra)
> }
>
> if maj != cbg.MajArray {
> return fmt.Errorf("expected cbor array")
> }
>
> if extra > 0 {
> t.VerifiedInfos = make([]VerifiedDealInfo, extra)
> }
>
> for i := 0; i < int(extra); i++ {
>
> var v VerifiedDealInfo
> if err := v.UnmarshalCBOR(br); err != nil {
> return err
> }
>
> t.VerifiedInfos[i] = v
> }
>
1782c1868
< var lengthBufDealWeights = []byte{131}
---
> var lengthBufDealSpaces = []byte{130}
1784c1870
< func (t *DealWeights) MarshalCBOR(w io.Writer) error {
---
> func (t *DealSpaces) MarshalCBOR(w io.Writer) error {
1789c1875
< if _, err := w.Write(lengthBufDealWeights); err != nil {
---
> if _, err := w.Write(lengthBufDealSpaces); err != nil {
1793,1797c1879,1880
< scratch := make([]byte, 9)
<
< // t.DealSpace (uint64) (uint64)
<
< if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealSpace)); err != nil {
---
> // t.DealSpace (big.Int) (struct)
> if err := t.DealSpace.MarshalCBOR(w); err != nil {
1801,1807c1884,1885
< // t.DealWeight (big.Int) (struct)
< if err := t.DealWeight.MarshalCBOR(w); err != nil {
< return err
< }
<
< // t.VerifiedDealWeight (big.Int) (struct)
< if err := t.VerifiedDealWeight.MarshalCBOR(w); err != nil {
---
> // t.VerifiedDealSpace (big.Int) (struct)
> if err := t.VerifiedDealSpace.MarshalCBOR(w); err != nil {
1813,1814c1891,1892
< func (t *DealWeights) UnmarshalCBOR(r io.Reader) error {
< *t = DealWeights{}
---
> func (t *DealSpaces) UnmarshalCBOR(r io.Reader) error {
> *t = DealSpaces{}
1827c1905
< if extra != 3 {
---
> if extra != 2 {
1831,1845c1909
< // t.DealSpace (uint64) (uint64)
<
< {
<
< maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
< if err != nil {
< return err
< }
< if maj != cbg.MajUnsignedInt {
< return fmt.Errorf("wrong type for uint64 field")
< }
< t.DealSpace = uint64(extra)
<
< }
< // t.DealWeight (big.Int) (struct)
---
> // t.DealSpace (big.Int) (struct)
1849,1850c1913,1914
< if err := t.DealWeight.UnmarshalCBOR(br); err != nil {
< return xerrors.Errorf("unmarshaling t.DealWeight: %w", err)
---
> if err := t.DealSpace.UnmarshalCBOR(br); err != nil {
> return xerrors.Errorf("unmarshaling t.DealSpace: %w", err)
1854c1918
< // t.VerifiedDealWeight (big.Int) (struct)
---
> // t.VerifiedDealSpace (big.Int) (struct)
1858,1859c1922,1923
< if err := t.VerifiedDealWeight.UnmarshalCBOR(br); err != nil {
< return xerrors.Errorf("unmarshaling t.VerifiedDealWeight: %w", err)
---
> if err := t.VerifiedDealSpace.UnmarshalCBOR(br); err != nil {
> return xerrors.Errorf("unmarshaling t.VerifiedDealSpace: %w", err)
1980a2045,2159
> }
> return nil
> }
>
> var lengthBufVerifiedDealInfo = []byte{132}
>
> func (t *VerifiedDealInfo) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufVerifiedDealInfo); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.Client (abi.ActorID) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Client)); err != nil {
> return err
> }
>
> // t.AllocationId (verifreg.AllocationId) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.AllocationId)); err != nil {
> return err
> }
>
> // t.Data (cid.Cid) (struct)
>
> if err := cbg.WriteCidBuf(scratch, w, t.Data); err != nil {
> return xerrors.Errorf("failed to write cid field t.Data: %w", err)
> }
>
> // t.Size (abi.PaddedPieceSize) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Size)); err != nil {
> return err
> }
>
> return nil
> }
>
> func (t *VerifiedDealInfo) UnmarshalCBOR(r io.Reader) error {
> *t = VerifiedDealInfo{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 4 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.Client (abi.ActorID) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.Client = abi.ActorID(extra)
>
> }
> // t.AllocationId (verifreg.AllocationId) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.AllocationId = verifreg.AllocationId(extra)
>
> }
> // t.Data (cid.Cid) (struct)
>
> {
>
> c, err := cbg.ReadCid(br)
> if err != nil {
> return xerrors.Errorf("failed to read cid field t.Data: %w", err)
> }
>
> t.Data = c
>
> }
> // t.Size (abi.PaddedPieceSize) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.Size = abi.PaddedPieceSize(extra)
>
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/market/deal.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/market/deal.go
9a10,11
> "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
>
31a34
> VerifiedClaim verifreg.AllocationId
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/market: invariants.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/market/market_state.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/market/market_state.go
7a8
> "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
8a10
> cbg "github.com/whyrusleeping/cbor-gen"
52a55,57
>
> // Verified registry allocation IDs for deals that are not yet activated.
> PendingDealAllocationIds cid.Cid // HAMT[DealID]AllocationID
76a82,85
> emptyPendingDealAllocationMapCid, err := adt.StoreEmptyMap(store, builtin.DefaultHamtBitwidth)
> if err != nil {
> return nil, xerrors.Errorf("failed to create empty map: %w", err)
> }
79,86c88,96
< Proposals: emptyProposalsArrayCid,
< States: emptyStatesArrayCid,
< PendingProposals: emptyPendingProposalsMapCid,
< EscrowTable: emptyBalanceTableCid,
< LockedTable: emptyBalanceTableCid,
< NextID: abi.DealID(0),
< DealOpsByEpoch: emptyDealOpsHamtCid,
< LastCron: abi.ChainEpoch(-1),
---
> Proposals: emptyProposalsArrayCid,
> States: emptyStatesArrayCid,
> PendingProposals: emptyPendingProposalsMapCid,
> EscrowTable: emptyBalanceTableCid,
> LockedTable: emptyBalanceTableCid,
> NextID: abi.DealID(0),
> DealOpsByEpoch: emptyDealOpsHamtCid,
> LastCron: abi.ChainEpoch(-1),
> PendingDealAllocationIds: emptyPendingDealAllocationMapCid,
193a204,226
> }
>
> func (st *State) GetPendingDealAllocationIds(store adt.Store) (map[abi.DealID]verifreg.AllocationId, error) {
> adtMap, err := adt.AsMap(store, st.PendingDealAllocationIds, builtin.DefaultHamtBitwidth)
> if err != nil {
> return nil, xerrors.Errorf("couldn't get map: %x", err)
> }
>
> var dealIdToAllocId = make(map[abi.DealID]verifreg.AllocationId)
> var out cbg.CborInt
> err = adtMap.ForEach(&out, func(key string) error {
> uintKey, err := abi.ParseUIntKey(key)
> if err != nil {
> return xerrors.Errorf("couldn't parse key to uint: %w", err)
> }
> dealIdToAllocId[abi.DealID(uintKey)] = verifreg.AllocationId(out)
> return nil
> })
> if err != nil {
> return nil, err
> }
>
> return dealIdToAllocId, nil
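GetPendingDealAllocationIds materializes the whole HAMT into a Go map, which suits migrations and state inspection; for a single deal it is heavier than a direct HAMT lookup. A compilable sketch; using AllocationId(0) as the not-found sentinel is an assumption about the zero value:

    package sketch

    import (
        "github.com/filecoin-project/go-state-types/abi"
        market9 "github.com/filecoin-project/go-state-types/builtin/v9/market"
        adt9 "github.com/filecoin-project/go-state-types/builtin/v9/util/adt"
        verifreg9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
    )

    // pendingAllocation reports the verified-registry allocation backing a
    // not-yet-activated deal, if any.
    func pendingAllocation(store adt9.Store, st *market9.State, id abi.DealID) (verifreg9.AllocationId, bool, error) {
        m, err := st.GetPendingDealAllocationIds(store)
        if err != nil {
            return verifreg9.AllocationId(0), false, err
        }
        alloc, ok := m[id]
        return alloc, ok, nil
    }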
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/market/market_types.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/market/market_types.go
6a7,8
> "github.com/filecoin-project/go-state-types/big"
> "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
52c54,62
< Weights DealWeights
---
> NonVerifiedDealSpace big.Int
> VerifiedInfos []VerifiedDealInfo
> }
>
> type VerifiedDealInfo struct {
> Client abi.ActorID
> AllocationId verifreg.AllocationId
> Data cid.Cid
> Size abi.PaddedPieceSize
60,63c70,72
< type DealWeights struct {
< DealSpace uint64 // Total space in bytes of submitted deals.
< DealWeight abi.DealWeight // Total space*time of submitted deals.
< VerifiedDealWeight abi.DealWeight // Total space*time of submitted verified deals.
---
> type DealSpaces struct {
> DealSpace abi.DealWeight // Total space of submitted deals.
> VerifiedDealSpace abi.DealWeight // Total space of submitted verified deals.
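The rename from DealWeights to DealSpaces drops the space*time products: v9 reports byte totals only, since weight is derivable as space times duration. A worked sketch of that identity; the helper name is illustrative:

    package sketch

    import (
        "github.com/filecoin-project/go-state-types/abi"
        "github.com/filecoin-project/go-state-types/big"
    )

    // weightFromSpace reconstructs a v8-style deal weight (space*time) from a
    // v9 deal space and the deal's duration in epochs.
    func weightFromSpace(space abi.DealWeight, duration abi.ChainEpoch) abi.DealWeight {
        return big.Mul(space, big.NewInt(int64(duration)))
    }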
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/market/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/market/methods.go
4a5
>
5a7
> "github.com/filecoin-project/go-state-types/builtin"
8,17c10,19
< var Methods = []interface{}{
< 1: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // Constructor
< 2: *new(func(interface{}, *address.Address) *abi.EmptyValue), // AddBalance
< 3: *new(func(interface{}, *WithdrawBalanceParams) *abi.TokenAmount), // WithdrawBalance
< 4: *new(func(interface{}, *PublishStorageDealsParams) *PublishStorageDealsReturn), // PublishStorageDeals
< 5: *new(func(interface{}, *VerifyDealsForActivationParams) *VerifyDealsForActivationReturn), // VerifyDealsForActivation
< 6: *new(func(interface{}, *ActivateDealsParams) *abi.EmptyValue), // ActivateDeals
< 7: *new(func(interface{}, *OnMinerSectorsTerminateParams) *abi.EmptyValue), // OnMinerSectorsTerminate
< 8: *new(func(interface{}, *ComputeDataCommitmentParams) *ComputeDataCommitmentReturn), // ComputeDataCommitment
< 9: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // CronTick
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*abi.EmptyValue) *abi.EmptyValue)}, // Constructor
> 2: {"AddBalance", *new(func(*address.Address) *abi.EmptyValue)}, // AddBalance
> 3: {"WithdrawBalance", *new(func(*WithdrawBalanceParams) *abi.TokenAmount)}, // WithdrawBalance
> 4: {"PublishStorageDeals", *new(func(*PublishStorageDealsParams) *PublishStorageDealsReturn)}, // PublishStorageDeals
> 5: {"VerifyDealsForActivation", *new(func(*VerifyDealsForActivationParams) *VerifyDealsForActivationReturn)}, // VerifyDealsForActivation
> 6: {"ActivateDeals", *new(func(*ActivateDealsParams) *abi.EmptyValue)}, // ActivateDeals
> 7: {"OnMinerSectorsTerminate", *new(func(*OnMinerSectorsTerminateParams) *abi.EmptyValue)}, // OnMinerSectorsTerminate
> 8: {"ComputeDataCommitment", *new(func(*ComputeDataCommitmentParams) *ComputeDataCommitmentReturn)}, // ComputeDataCommitment
> 9: {"CronTick", *new(func(*abi.EmptyValue) *abi.EmptyValue)}, // CronTick
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/market/policy.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/market/policy.go
21a22,23
> var MarketDefaultAllocationTermBuffer = abi.ChainEpoch(90 * builtin.EpochsInDay)
>
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/market/set_multimap.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/market/set_multimap.go
3a4
> "github.com/filecoin-project/go-state-types/abi"
4a6,7
> cbg "github.com/whyrusleeping/cbor-gen"
> "golang.org/x/xerrors"
14a18,27
> // Interprets a store as a HAMT-based map of HAMT-based sets with root `r`.
> // Both inner and outer HAMTs are interpreted with branching factor 2^bitwidth.
> func AsSetMultimap(s adt.Store, r cid.Cid, outerBitwidth, innerBitwidth int) (*SetMultimap, error) {
> m, err := adt.AsMap(s, r, outerBitwidth)
> if err != nil {
> return nil, err
> }
> return &SetMultimap{mp: m, store: s, innerBitwidth: innerBitwidth}, nil
> }
>
36a50,88
> }
>
> func parseDealKey(s string) (abi.DealID, error) {
> key, err := abi.ParseUIntKey(s)
> return abi.DealID(key), err
> }
>
> func (mm *SetMultimap) get(key abi.Keyer) (*adt.Set, bool, error) {
> var setRoot cbg.CborCid
> found, err := mm.mp.Get(key, &setRoot)
> if err != nil {
> return nil, false, xerrors.Errorf("failed to load set key: %v: %w", key, err)
> }
> var set *adt.Set
> if found {
> set, err = adt.AsSet(mm.store, cid.Cid(setRoot), mm.innerBitwidth)
> if err != nil {
> return nil, false, err
> }
> }
> return set, found, nil
> }
>
> // Iterates all entries for a key, iteration halts if the function returns an error.
> func (mm *SetMultimap) ForEach(epoch abi.ChainEpoch, fn func(id abi.DealID) error) error {
> set, found, err := mm.get(abi.UIntKey(uint64(epoch)))
> if err != nil {
> return err
> }
> if found {
> return set.ForEach(func(k string) error {
> v, err := parseDealKey(k)
> if err != nil {
> return err
> }
> return fn(v)
> })
> }
> return nil
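AsSetMultimap plus the keyed ForEach let callers enumerate the deal IDs queued under one epoch, as in DealOpsByEpoch. A compilable sketch; passing builtin.DefaultHamtBitwidth for both levels is an assumption about how the structure was written:

    package sketch

    import (
        "github.com/filecoin-project/go-state-types/abi"
        "github.com/filecoin-project/go-state-types/builtin"
        market9 "github.com/filecoin-project/go-state-types/builtin/v9/market"
        adt9 "github.com/filecoin-project/go-state-types/builtin/v9/util/adt"
        "github.com/ipfs/go-cid"
    )

    // dealsAt collects every deal ID scheduled at the given epoch.
    func dealsAt(s adt9.Store, root cid.Cid, epoch abi.ChainEpoch) ([]abi.DealID, error) {
        mm, err := market9.AsSetMultimap(s, root, builtin.DefaultHamtBitwidth, builtin.DefaultHamtBitwidth)
        if err != nil {
            return nil, err
        }
        var out []abi.DealID
        err = mm.ForEach(epoch, func(id abi.DealID) error {
            out = append(out, id)
            return nil
        })
        return out, err
    }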
Only in a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration: cbor_gen.go
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration: datacap.go
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration: market.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/miner.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/miner.go
6c6,8
< "github.com/filecoin-project/go-state-types/builtin/v8/market"
---
> "github.com/filecoin-project/go-state-types/exitcode"
>
> adt9 "github.com/filecoin-project/go-state-types/builtin/v9/util/adt"
7a10,11
> "github.com/filecoin-project/go-address"
> "github.com/filecoin-project/go-amt-ipld/v4"
11a16
> "github.com/filecoin-project/go-state-types/builtin/v8/market"
13c18
< "github.com/filecoin-project/go-state-types/builtin/v8/util/adt"
---
> adt8 "github.com/filecoin-project/go-state-types/builtin/v8/util/adt"
21a27,32
> // The minerMigrator performs the following migrations:
> // FIP-0029: Sets the Beneficiary to the Owner, and sets empty values for BeneficiaryTerm and PendingBeneficiaryTerm
> // FIP-0034: For each SectorPreCommitOnChainInfo in PreCommittedSectors, calculates the unsealed CID (assuming there are deals)
> // FIP-0045: For each SectorOnChainInfo in Sectors, set SimpleQAPower = (DealWeight == 0 && VerifiedDealWeight == 0)
> // FIP-0045: For each Deadline in Deadlines: for each SectorOnChainInfo in SectorsSnapshot, set SimpleQAPower = (DealWeight == 0 && VerifiedDealWeight == 0)
>
23,24c34,40
< proposals *market.DealArray
< OutCodeCID cid.Cid
---
> emptyPrecommitOnChainInfosV9 cid.Cid
> emptyDeadlineV8 cid.Cid
> emptyDeadlinesV8 cid.Cid
> emptyDeadlineV9 cid.Cid
> emptyDeadlinesV9 cid.Cid
> proposals *market.DealArray
> OutCodeCID cid.Cid
27,29c43,44
< func (m minerMigrator) migratedCodeCID() cid.Cid {
< return m.OutCodeCID
< }
---
> func newMinerMigrator(ctx context.Context, store cbor.IpldStore, marketProposals *market.DealArray, outCode cid.Cid) (*minerMigrator, error) {
> ctxStore := adt8.WrapStore(ctx, store)
31,34c46,48
< func (m minerMigrator) migrateState(ctx context.Context, store cbor.IpldStore, in actorMigrationInput) (*actorMigrationResult, error) {
< var inState miner8.State
< if err := store.Get(ctx, in.head, &inState); err != nil {
< return nil, err
---
> emptyPrecommitMapCidV9, err := adt9.StoreEmptyMap(ctxStore, builtin.DefaultHamtBitwidth)
> if err != nil {
> return nil, xerrors.Errorf("failed to construct empty precommit map v9: %w", err)
36,38c50,53
< var inInfo miner8.MinerInfo
< if err := store.Get(ctx, inState.Info, &inInfo); err != nil {
< return nil, err
---
>
> edv8, err := miner8.ConstructDeadline(ctxStore)
> if err != nil {
> return nil, xerrors.Errorf("failed to construct empty deadline v8: %w", err)
40d54
< wrappedStore := adt.WrapStore(ctx, store)
42c56
< oldPrecommitOnChainInfos, err := adt.AsMap(wrappedStore, inState.PreCommittedSectors, builtin.DefaultHamtBitwidth)
---
> edv8cid, err := store.Put(ctx, edv8)
44c58
< return nil, xerrors.Errorf("failed to load old precommit onchain infos for miner %s: %w", in.address, err)
---
> return nil, xerrors.Errorf("failed to put empty deadline v8: %w", err)
47c61,62
< emptyMap, err := adt.StoreEmptyMap(wrappedStore, builtin.DefaultHamtBitwidth)
---
> edsv8 := miner8.ConstructDeadlines(edv8cid)
> edsv8cid, err := store.Put(ctx, edsv8)
49c64
< return nil, xerrors.Errorf("failed to make empty map: %w", err)
---
> return nil, xerrors.Errorf("failed to construct empty deadlines v8: %w", err)
52c67
< newPrecommitOnChainInfos, err := adt.AsMap(wrappedStore, emptyMap, builtin.DefaultHamtBitwidth)
---
> edv9, err := miner9.ConstructDeadline(ctxStore)
54c69
< return nil, xerrors.Errorf("failed to load empty map: %w", err)
---
> return nil, xerrors.Errorf("failed to construct empty deadline v9: %w", err)
57,66c72,75
< var info miner8.SectorPreCommitOnChainInfo
< err = oldPrecommitOnChainInfos.ForEach(&info, func(key string) error {
< var unsealedCid *cid.Cid
< if len(info.Info.DealIDs) != 0 {
< pieces := make([]abi.PieceInfo, len(info.Info.DealIDs))
< for i, dealID := range info.Info.DealIDs {
< deal, err := m.proposals.GetDealProposal(dealID)
< if err != nil {
< return xerrors.Errorf("error getting deal proposal: %w", err)
< }
---
> edv9cid, err := store.Put(ctx, edv9)
> if err != nil {
> return nil, xerrors.Errorf("failed to put empty deadline v9: %w", err)
> }
68,72c77,80
< pieces[i] = abi.PieceInfo{
< PieceCID: deal.PieceCID,
< Size: deal.PieceSize,
< }
< }
---
> edsv9 := miner9.ConstructDeadlines(edv9cid)
> edsv9cid, err := store.Put(ctx, edsv9)
> if err != nil {
> return nil, xerrors.Errorf("failed to construct empty deadlines v9: %w", err)
74,77c82
< commd, err := commp.GenerateUnsealedCID(info.Info.SealProof, pieces)
< if err != nil {
< return xerrors.Errorf("failed to generate unsealed CID: %w", err)
< }
---
> }
79,80c84,93
< unsealedCid = &commd
< }
---
> return &minerMigrator{
> emptyPrecommitOnChainInfosV9: emptyPrecommitMapCidV9,
> emptyDeadlineV8: edv8cid,
> emptyDeadlinesV8: edsv8cid,
> emptyDeadlineV9: edv9cid,
> emptyDeadlinesV9: edsv9cid,
> proposals: marketProposals,
> OutCodeCID: outCode,
> }, nil
> }
82,94c95,97
< err = newPrecommitOnChainInfos.Put(miner9.SectorKey(info.Info.SectorNumber), &miner9.SectorPreCommitOnChainInfo{
< Info: miner9.SectorPreCommitInfo{
< SealProof: info.Info.SealProof,
< SectorNumber: info.Info.SectorNumber,
< SealedCID: info.Info.SealedCID,
< SealRandEpoch: info.Info.SealRandEpoch,
< DealIDs: info.Info.DealIDs,
< Expiration: info.Info.Expiration,
< UnsealedCid: unsealedCid,
< },
< PreCommitDeposit: info.PreCommitDeposit,
< PreCommitEpoch: info.PreCommitEpoch,
< })
---
> func (m minerMigrator) migratedCodeCID() cid.Cid {
> return m.OutCodeCID
> }
96,98c99,108
< if err != nil {
< return xerrors.Errorf("failed to write new precommitinfo: %w", err)
< }
---
> func (m minerMigrator) migrateState(ctx context.Context, store cbor.IpldStore, in actorMigrationInput) (*actorMigrationResult, error) {
> var inState miner8.State
> if err := store.Get(ctx, in.head, &inState); err != nil {
> return nil, err
> }
> var inInfo miner8.MinerInfo
> if err := store.Get(ctx, inState.Info, &inInfo); err != nil {
> return nil, err
> }
> wrappedStore := adt8.WrapStore(ctx, store)
100,101c110,113
< return nil
< })
---
> newPrecommits, err := m.migratePrecommits(ctx, wrappedStore, inState.PreCommittedSectors)
> if err != nil {
> return nil, xerrors.Errorf("failed to migrate precommits for miner: %s: %w", in.address, err)
> }
102a115
> newSectors, err := migrateSectorsWithCache(ctx, wrappedStore, in.cache, in.address, inState.Sectors)
104c117
< return nil, xerrors.Errorf("failed to iterate over precommitinfos: %w", err)
---
> return nil, xerrors.Errorf("failed to migrate sectors for miner: %s: %w", in.address, err)
107c120
< newPrecommits, err := newPrecommitOnChainInfos.Root()
---
> newDeadlines, err := m.migrateDeadlines(ctx, wrappedStore, in.cache, inState.Deadlines)
109c122
< return nil, xerrors.Errorf("failed to flush new precommits: %w", err)
---
> return nil, xerrors.Errorf("failed to migrate deadlines: %w", err)
155c168
< Sectors: inState.Sectors,
---
> Sectors: newSectors,
158c171
< Deadlines: inState.Deadlines,
---
> Deadlines: newDeadlines,
167a181,442
> }
>
> func (m minerMigrator) migratePrecommits(ctx context.Context, wrappedStore adt8.Store, inRoot cid.Cid) (cid.Cid, error) {
> oldPrecommitOnChainInfos, err := adt8.AsMap(wrappedStore, inRoot, builtin.DefaultHamtBitwidth)
> if err != nil {
> return cid.Undef, xerrors.Errorf("failed to load old precommit onchain infos: %w", err)
> }
>
> newPrecommitOnChainInfos, err := adt9.AsMap(wrappedStore, m.emptyPrecommitOnChainInfosV9, builtin.DefaultHamtBitwidth)
> if err != nil {
> return cid.Undef, xerrors.Errorf("failed to load empty map: %w", err)
> }
>
> var info miner8.SectorPreCommitOnChainInfo
> err = oldPrecommitOnChainInfos.ForEach(&info, func(key string) error {
> var unsealedCid *cid.Cid
> var pieces []abi.PieceInfo
> for _, dealID := range info.Info.DealIDs {
> deal, err := m.proposals.GetDealProposal(dealID)
> if err != nil {
> // Possible for the proposal to be missing if it's expired (but the deal is still in a precommit that's yet to be cleaned up)
> // Just continue in this case: the sector can't be prove-committed anyway, so it will simply fail later
> if exitcode.Unwrap(err, exitcode.ErrIllegalState) != exitcode.ErrNotFound {
> return xerrors.Errorf("error getting deal proposal for sector: %d: %w", info.Info.SectorNumber, err)
> }
>
> continue
> }
>
> pieces = append(pieces, abi.PieceInfo{
> PieceCID: deal.PieceCID,
> Size: deal.PieceSize,
> })
> }
>
> if len(pieces) != 0 {
> commd, err := commp.GenerateUnsealedCID(info.Info.SealProof, pieces)
> if err != nil {
> return xerrors.Errorf("failed to generate unsealed CID: %w", err)
> }
>
> unsealedCid = &commd
> }
>
> err = newPrecommitOnChainInfos.Put(miner9.SectorKey(info.Info.SectorNumber), &miner9.SectorPreCommitOnChainInfo{
> Info: miner9.SectorPreCommitInfo{
> SealProof: info.Info.SealProof,
> SectorNumber: info.Info.SectorNumber,
> SealedCID: info.Info.SealedCID,
> SealRandEpoch: info.Info.SealRandEpoch,
> DealIDs: info.Info.DealIDs,
> Expiration: info.Info.Expiration,
> UnsealedCid: unsealedCid,
> },
> PreCommitDeposit: info.PreCommitDeposit,
> PreCommitEpoch: info.PreCommitEpoch,
> })
>
> if err != nil {
> return xerrors.Errorf("failed to write new precommitinfo: %w", err)
> }
>
> return nil
> })
>
> if err != nil {
> return cid.Undef, xerrors.Errorf("failed to iterate over precommitinfos: %w", err)
> }
>
> newPrecommits, err := newPrecommitOnChainInfos.Root()
> if err != nil {
> return cid.Undef, xerrors.Errorf("failed to flush new precommits: %w", err)
> }
>
> return newPrecommits, nil
> }
>
> func migrateSectorsWithCache(ctx context.Context, store adt8.Store, cache MigrationCache, minerAddr address.Address, inRoot cid.Cid) (cid.Cid, error) {
> return cache.Load(SectorsAmtKey(inRoot), func() (cid.Cid, error) {
> inArray, err := adt8.AsArray(store, inRoot, miner8.SectorsAmtBitwidth)
> if err != nil {
> return cid.Undef, xerrors.Errorf("failed to read sectors array: %w", err)
> }
>
> okIn, prevInRoot, err := cache.Read(MinerPrevSectorsInKey(minerAddr))
> if err != nil {
> return cid.Undef, xerrors.Errorf("failed to get previous inRoot from cache: %w", err)
> }
>
> okOut, prevOutRoot, err := cache.Read(MinerPrevSectorsOutKey(minerAddr))
> if err != nil {
> return cid.Undef, xerrors.Errorf("failed to get previous outRoot from cache: %w", err)
> }
>
> var outArray *adt9.Array
> if okIn && okOut {
> // we have previous work, but the AMT has changed -- diff them
> diffs, err := amt.Diff(ctx, store, store, prevInRoot, inRoot, amt.UseTreeBitWidth(miner9.SectorsAmtBitwidth))
> if err != nil {
> return cid.Undef, xerrors.Errorf("failed to diff old and new Sector AMTs: %w", err)
> }
>
> inSectors, err := miner8.LoadSectors(store, inRoot)
> if err != nil {
> return cid.Undef, xerrors.Errorf("failed to load inSectors: %w", err)
> }
>
> prevOutSectors, err := miner9.LoadSectors(store, prevOutRoot)
> if err != nil {
> return cid.Undef, xerrors.Errorf("failed to load prevOutSectors: %w", err)
> }
>
> for _, change := range diffs {
> switch change.Type {
> case amt.Remove:
> if err := prevOutSectors.Delete(change.Key); err != nil {
> return cid.Undef, xerrors.Errorf("failed to delete sector from prevOutSectors: %w", err)
> }
> case amt.Add:
> fallthrough
> case amt.Modify:
> sectorNo := abi.SectorNumber(change.Key)
> info, found, err := inSectors.Get(sectorNo)
> if err != nil {
> return cid.Undef, xerrors.Errorf("failed to get sector %d in inSectors: %w", sectorNo, err)
> }
>
> if !found {
> return cid.Undef, xerrors.Errorf("didn't find sector %d in inSectors", sectorNo)
> }
>
> if err := prevOutSectors.Set(change.Key, migrateSectorInfo(*info)); err != nil {
> return cid.Undef, xerrors.Errorf("failed to set migrated sector %d in prevOutSectors", sectorNo)
> }
> }
> }
>
> outArray = prevOutSectors.Array
> } else {
> // first time we're doing this, do all the work
> outArray, err = migrateSectorsFromScratch(ctx, store, inArray)
> if err != nil {
> return cid.Undef, xerrors.Errorf("failed to migrate sectors from scratch: %w", err)
> }
> }
>
> outRoot, err := outArray.Root()
> if err != nil {
> return cid.Undef, xerrors.Errorf("error writing new sectors AMT: %w", err)
> }
>
> if err = cache.Write(MinerPrevSectorsInKey(minerAddr), inRoot); err != nil {
> return cid.Undef, xerrors.Errorf("failed to write inkey to cache: %w", err)
> }
>
> if err = cache.Write(MinerPrevSectorsOutKey(minerAddr), outRoot); err != nil {
> return cid.Undef, xerrors.Errorf("failed to write inkey to cache: %w", err)
> }
>
> return outRoot, nil
> })
> }
>
> func migrateSectorsFromScratch(ctx context.Context, store adt8.Store, inArray *adt8.Array) (*adt9.Array, error) {
> outArray, err := adt9.MakeEmptyArray(store, miner9.SectorsAmtBitwidth)
> if err != nil {
> return nil, xerrors.Errorf("failed to construct new sectors array: %w", err)
> }
>
> var sectorInfo miner8.SectorOnChainInfo
> if err = inArray.ForEach(&sectorInfo, func(k int64) error {
> return outArray.Set(uint64(k), migrateSectorInfo(sectorInfo))
> }); err != nil {
> return nil, err
> }
>
> return outArray, err
> }
>
> func (m minerMigrator) migrateDeadlines(ctx context.Context, store adt8.Store, cache MigrationCache, deadlines cid.Cid) (cid.Cid, error) {
> if deadlines == m.emptyDeadlinesV8 {
> return m.emptyDeadlinesV9, nil
> }
>
> var inDeadlines miner8.Deadlines
> err := store.Get(store.Context(), deadlines, &inDeadlines)
> if err != nil {
> return cid.Undef, err
> }
>
> var outDeadlines miner9.Deadlines
> for i, c := range inDeadlines.Due {
> if c == m.emptyDeadlineV8 {
> outDeadlines.Due[i] = m.emptyDeadlineV9
> } else {
> var inDeadline miner8.Deadline
> if err = store.Get(ctx, c, &inDeadline); err != nil {
> return cid.Undef, err
> }
>
> outSectorsSnapshotCid, err := cache.Load(SectorsAmtKey(inDeadline.SectorsSnapshot), func() (cid.Cid, error) {
> inSectorsSnapshot, err := adt8.AsArray(store, inDeadline.SectorsSnapshot, miner8.SectorsAmtBitwidth)
> if err != nil {
> return cid.Undef, err
> }
>
> outSectorsSnapshot, err := migrateSectorsFromScratch(ctx, store, inSectorsSnapshot)
> if err != nil {
> return cid.Undef, xerrors.Errorf("failed to migrate sectors: %w", err)
> }
>
> return outSectorsSnapshot.Root()
> })
>
> if err != nil {
> return cid.Undef, xerrors.Errorf("failed to migrate sectors snapshot: %w", err)
> }
>
> outDeadline := miner9.Deadline{
> Partitions: inDeadline.Partitions,
> ExpirationsEpochs: inDeadline.ExpirationsEpochs,
> PartitionsPoSted: inDeadline.PartitionsPoSted,
> EarlyTerminations: inDeadline.EarlyTerminations,
> LiveSectors: inDeadline.LiveSectors,
> TotalSectors: inDeadline.TotalSectors,
> FaultyPower: miner9.PowerPair(inDeadline.FaultyPower),
> OptimisticPoStSubmissions: inDeadline.OptimisticPoStSubmissions,
> SectorsSnapshot: outSectorsSnapshotCid,
> PartitionsSnapshot: inDeadline.PartitionsSnapshot,
> OptimisticPoStSubmissionsSnapshot: inDeadline.OptimisticPoStSubmissionsSnapshot,
> }
>
> outDlCid, err := store.Put(ctx, &outDeadline)
> if err != nil {
> return cid.Undef, err
> }
>
> outDeadlines.Due[i] = outDlCid
> }
> }
>
> return store.Put(ctx, &outDeadlines)
> }
>
> func migrateSectorInfo(sectorInfo miner8.SectorOnChainInfo) *miner9.SectorOnChainInfo {
> return &miner9.SectorOnChainInfo{
> SectorNumber: sectorInfo.SectorNumber,
> SealProof: sectorInfo.SealProof,
> SealedCID: sectorInfo.SealedCID,
> DealIDs: sectorInfo.DealIDs,
> Activation: sectorInfo.Activation,
> Expiration: sectorInfo.Expiration,
> DealWeight: sectorInfo.DealWeight,
> VerifiedDealWeight: sectorInfo.VerifiedDealWeight,
> InitialPledge: sectorInfo.InitialPledge,
> ExpectedDayReward: sectorInfo.ExpectedDayReward,
> ExpectedStoragePledge: sectorInfo.ExpectedStoragePledge,
> ReplacedSectorAge: sectorInfo.ReplacedSectorAge,
> ReplacedDayReward: sectorInfo.ReplacedDayReward,
> SectorKeyCID: sectorInfo.SectorKeyCID,
> SimpleQAPower: sectorInfo.DealWeight.IsZero() && sectorInfo.VerifiedDealWeight.IsZero(),
> }
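The heart of the FIP-0045 sector migration is the SimpleQAPower bit: a sector qualifies only if it carried no deal weight at all, so its QA power can later be derived from size alone. The predicate, isolated into a compilable sketch (the helper name is illustrative); the surrounding migrateSectorsWithCache avoids recomputing unchanged AMTs by diffing against the previous run's cached input and output roots:

    package sketch

    import (
        miner8 "github.com/filecoin-project/go-state-types/builtin/v8/miner"
    )

    // simpleQAPower mirrors the assignment in migrateSectorInfo above: only
    // sectors with zero deal weight and zero verified deal weight qualify.
    func simpleQAPower(s *miner8.SectorOnChainInfo) bool {
        return s.DealWeight.IsZero() && s.VerifiedDealWeight.IsZero()
    }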
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/system.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/system.go
17a18,21
> func (m systemActorMigrator) migratedCodeCID() cid.Cid {
> return m.OutCodeCID
> }
>
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration: test
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/top.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/top.go
8a9,15
> init8 "github.com/filecoin-project/go-state-types/builtin/v8/init"
>
> verifreg8 "github.com/filecoin-project/go-state-types/builtin/v8/verifreg"
>
> "github.com/filecoin-project/go-state-types/big"
> adt9 "github.com/filecoin-project/go-state-types/builtin/v9/util/adt"
>
11c18
< "github.com/filecoin-project/go-state-types/builtin/v8/system"
---
> system8 "github.com/filecoin-project/go-state-types/builtin/v8/system"
65a73,89
> func SectorsAmtKey(sectorsAmt cid.Cid) string {
> sectorsAmtKey, err := sectorsAmt.StringOfBase(multibase.Base32)
> if err != nil {
> panic(err)
> }
>
> return "sectorsAmt-" + sectorsAmtKey
> }
>
> func MinerPrevSectorsInKey(m address.Address) string {
> return "prevSectorsIn-" + m.String()
> }
>
> func MinerPrevSectorsOutKey(m address.Address) string {
> return "prevSectorsOut-" + m.String()
> }
>
73a98,101
> emptyMapCid, err := adt9.StoreEmptyMap(adtStore, builtin.DefaultHamtBitwidth)
> if err != nil {
> return cid.Undef, xerrors.Errorf("failed to create empty map: %w", err)
> }
76c104
< actorsIn, err := LoadTree(adtStore, actorsRootIn)
---
> actorsIn, err := builtin.LoadTree(adtStore, actorsRootIn)
80c108
< actorsOut, err := NewTree(adtStore)
---
> actorsOut, err := builtin.NewTree(adtStore)
95c123
< var systemState system.State
---
> var systemState system8.State
117d144
<
119,121c146
< var deferredCodeIDs = map[cid.Cid]struct{}{
< // None
< }
---
> deferredCodeIDs := make(map[cid.Cid]struct{})
123,124c148,149
< // simple code migrations
< simpleMigrations := make(map[string]cid.Cid, len(oldManifestData.Entries))
---
> // Populated from oldManifestData
> oldCodeIDMap := make(map[string]cid.Cid, len(oldManifestData.Entries))
128,129c153,154
< simpleMigrations[entry.Name] = entry.Code
< if entry.Name == "storageminer" {
---
> oldCodeIDMap[entry.Name] = entry.Code
> if entry.Name == manifest.MinerKey {
138,142c163,170
< for name, oldCodeCID := range simpleMigrations { //nolint:nomaprange
< newCodeCID, ok := newManifest.Get(name)
< if !ok {
< return cid.Undef, xerrors.Errorf("code cid for %s actor not found in new manifest", name)
< }
---
> for name, oldCodeCID := range oldCodeIDMap { //nolint:nomaprange
> if name == manifest.MarketKey || name == manifest.VerifregKey {
> deferredCodeIDs[oldCodeCID] = struct{}{}
> } else {
> newCodeCID, ok := newManifest.Get(name)
> if !ok {
> return cid.Undef, xerrors.Errorf("code cid for %s actor not found in new manifest", name)
> }
144c172,173
< migrations[oldCodeCID] = codeMigrator{newCodeCID}
---
> migrations[oldCodeCID] = codeMigrator{newCodeCID}
> }
147c176,179
< // migrations that migrate both code and state
---
> // migrations that migrate both code and state, override entries in `migrations`
>
> // The System Actor
>
153,157d184
< miner9Cid, ok := newManifest.Get("storageminer")
< if !ok {
< return cid.Undef, xerrors.Errorf("code cid for miner actor not found in new manifest")
< }
<
159a187,188
> // The Miner Actor -- needs loading the market state
>
161c190
< marketActor, ok, err := actorsIn.GetActor(builtin.StorageMarketActorAddr)
---
> marketActorV8, ok, err := actorsIn.GetActor(builtin.StorageMarketActorAddr)
170,172c199,207
< var marketState market8.State
< if err := store.Get(ctx, marketActor.Head, &marketState); err != nil {
< return cid.Undef, xerrors.Errorf("failed to get system actor state: %w", err)
---
> var marketStateV8 market8.State
> if err := store.Get(ctx, marketActorV8.Head, &marketStateV8); err != nil {
> return cid.Undef, xerrors.Errorf("failed to get market actor state: %w", err)
> }
>
> // Find verified pending deals for both datacap and verifreg migrations
> pendingVerifiedDeals, pendingVerifiedDealSize, err := getPendingVerifiedDealsAndTotalSize(ctx, adtStore, marketStateV8)
> if err != nil {
> return cid.Undef, xerrors.Errorf("failed to get pending verified deals")
175c210
< proposals, err := market8.AsDealProposalArray(adtStore, marketState.Proposals)
---
> proposals, err := market8.AsDealProposalArray(adtStore, marketStateV8.Proposals)
180c215,225
< migrations[miner8Cid] = minerMigrator{proposals, miner9Cid}
---
> miner9Cid, ok := newManifest.Get(manifest.MinerKey)
> if !ok {
> return cid.Undef, xerrors.Errorf("code cid for miner actor not found in new manifest")
> }
>
> mm, err := newMinerMigrator(ctx, store, proposals, miner9Cid)
> if err != nil {
> return cid.Undef, xerrors.Errorf("failed to create miner migrator: %w", err)
> }
>
> migrations[miner8Cid] = cachedMigration(cache, *mm)
186a232,318
> // The DataCap actor -- needs to be created, and loading the verified registry state
>
> verifregActorV8, ok, err := actorsIn.GetActor(builtin.VerifiedRegistryActorAddr)
> if err != nil {
> return cid.Undef, xerrors.Errorf("failed to get verifreg actor: %w", err)
> }
>
> if !ok {
> return cid.Undef, xerrors.New("didn't find verifreg actor")
> }
>
> var verifregStateV8 verifreg8.State
> if err := adtStore.Get(ctx, verifregActorV8.Head, &verifregStateV8); err != nil {
> return cid.Undef, xerrors.Errorf("failed to get verifreg actor state: %w", err)
> }
>
> dataCapCode, ok := newManifest.Get(manifest.DataCapKey)
> if !ok {
> return cid.Undef, xerrors.Errorf("failed to find datacap code ID: %w", err)
> }
>
> if err = actorsIn.SetActor(builtin.DatacapActorAddr, &builtin.Actor{
> Code: dataCapCode,
> // we just need to put _something_ defined, this never gets read
> Head: emptyMapCid,
> CallSeqNum: 0,
> Balance: big.Zero(),
> }); err != nil {
> return cid.Undef, xerrors.Errorf("failed to set datacap actor: %w", err)
> }
>
> migrations[dataCapCode] = &datacapMigrator{
> emptyMapCid: emptyMapCid,
> verifregStateV8: verifregStateV8,
> OutCodeCID: dataCapCode,
> pendingVerifiedDealSize: pendingVerifiedDealSize,
> }
>
> // The Verifreg & Market Actor need special handling,
> // - they need to load the init actor state
> // - they need to be done in order -- the output of the verifreg migration is input to the market migration
>
> initActorV8, ok, err := actorsIn.GetActor(builtin.InitActorAddr)
> if err != nil {
> return cid.Undef, xerrors.Errorf("failed to load init actor: %w", err)
> }
>
> if !ok {
> return cid.Undef, xerrors.New("failed to find init actor")
> }
>
> var initStateV8 init8.State
> if err = adtStore.Get(ctx, initActorV8.Head, &initStateV8); err != nil {
> return cid.Undef, xerrors.Errorf("failed to load init state: %w", err)
> }
>
> type verifregMarketResult struct {
> verifregHead cid.Cid
> marketHead cid.Cid
> err error
> }
>
> verifregMarketResultCh := make(chan verifregMarketResult)
> go func() {
> ret := verifregMarketResult{
> verifregHead: cid.Undef,
> marketHead: cid.Undef,
> err: nil,
> }
> verifregHead, dealAllocationTuples, err := migrateVerifreg(ctx, adtStore, priorEpoch, initStateV8, marketStateV8, pendingVerifiedDeals, verifregStateV8, emptyMapCid)
> if err != nil {
> ret.err = xerrors.Errorf("failed to migrate verifreg actor: %w", err)
> verifregMarketResultCh <- ret
> }
>
> ret.verifregHead = verifregHead
>
> marketHead, err := migrateMarket(ctx, adtStore, dealAllocationTuples, marketStateV8, emptyMapCid)
> if err != nil {
> ret.err = xerrors.Errorf("failed to migrate market state: %w", err)
> verifregMarketResultCh <- ret
> }
>
> ret.marketHead = marketHead
> verifregMarketResultCh <- ret
> }()
>
200c332
< if err = actorsIn.ForEach(func(addr address.Address, actorIn *Actor) error {
---
> if err = actorsIn.ForEach(func(addr address.Address, actorIn *builtin.Actor) error {
247a380
>
307a441,473
> verifregCode, ok := newManifest.Get(manifest.VerifregKey)
> if !ok {
> return cid.Undef, xerrors.Errorf("failed to find verifreg code ID: %w", err)
> }
>
> marketCode, ok := newManifest.Get(manifest.MarketKey)
> if !ok {
> return cid.Undef, xerrors.Errorf("failed to find market code ID: %w", err)
> }
>
> verifregMarketHeads := <-verifregMarketResultCh
> if verifregMarketHeads.err != nil {
> return cid.Undef, xerrors.Errorf("failed to migrate verifreg and market: %w", err)
> }
>
> if err = actorsOut.SetActor(builtin.VerifiedRegistryActorAddr, &builtin.Actor{
> Code: verifregCode,
> Head: verifregMarketHeads.verifregHead,
> CallSeqNum: verifregActorV8.CallSeqNum,
> Balance: verifregActorV8.Balance,
> }); err != nil {
> return cid.Undef, xerrors.Errorf("failed to set verifreg actor: %w", err)
> }
>
> if err = actorsOut.SetActor(builtin.StorageMarketActorAddr, &builtin.Actor{
> Code: marketCode,
> Head: verifregMarketHeads.marketHead,
> CallSeqNum: marketActorV8.CallSeqNum,
> Balance: marketActorV8.Balance,
> }); err != nil {
> return cid.Undef, xerrors.Errorf("failed to set market actor: %w", err)
> }
>
310c476,477
< log.Log(rt.INFO, "All %d done after %v (%.0f/s). Flushing state tree root.", doneCount, elapsed, rate)
---
> log.Log(rt.INFO, "All %d done after %v (%.0f/s), flushing state root.", doneCount, elapsed, rate)
>
329a497
> migratedCodeCID() cid.Cid
334c502
< Actor
---
> builtin.Actor
341c509
< Actor
---
> builtin.Actor
359c527
< Actor{
---
> builtin.Actor{
377a546,579
> }
>
> func (n codeMigrator) migratedCodeCID() cid.Cid {
> return n.OutCodeCID
> }
>
> // Migrator that uses cached transformation if it exists
> type cachedMigrator struct {
> cache MigrationCache
> actorMigration
> }
>
> func (c cachedMigrator) migrateState(ctx context.Context, store cbor.IpldStore, in actorMigrationInput) (*actorMigrationResult, error) {
> newHead, err := c.cache.Load(ActorHeadKey(in.address, in.head), func() (cid.Cid, error) {
> result, err := c.actorMigration.migrateState(ctx, store, in)
> if err != nil {
> return cid.Undef, err
> }
> return result.newHead, nil
> })
> if err != nil {
> return nil, err
> }
> return &actorMigrationResult{
> newCodeCID: c.migratedCodeCID(),
> newHead: newHead,
> }, nil
> }
>
> func cachedMigration(cache MigrationCache, m actorMigration) actorMigration {
> return cachedMigrator{
> actorMigration: m,
> cache: cache,
> }
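
cachedMigrator above wraps any actorMigration so repeated migration runs can reuse per-actor results keyed by ActorHeadKey(address, head). A toy in-memory cache with the same Load shape (hypothetical; the real MigrationCache interface lives in this package and may also persist results):

package main

import (
	"fmt"
	"sync"

	"github.com/ipfs/go-cid"
)

// memCache is a hypothetical in-memory stand-in for MigrationCache: Load
// returns the cached head for a key, or runs compute once and remembers it.
type memCache struct {
	mu sync.Mutex
	m  map[string]cid.Cid
}

func newMemCache() *memCache { return &memCache{m: map[string]cid.Cid{}} }

func (c *memCache) Load(key string, compute func() (cid.Cid, error)) (cid.Cid, error) {
	c.mu.Lock()
	if v, ok := c.m[key]; ok {
		c.mu.Unlock()
		return v, nil
	}
	c.mu.Unlock()

	v, err := compute() // cache miss: run the real per-actor migration
	if err != nil {
		return cid.Undef, err
	}

	c.mu.Lock()
	c.m[key] = v
	c.mu.Unlock()
	return v, nil
}

func main() {
	c := newMemCache()
	head, _ := c.Load("addr|oldHead", func() (cid.Cid, error) {
		fmt.Println("computing once")
		return cid.Undef, nil
	})
	fmt.Println(head.Defined())
}
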
Only in a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration: tree.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/util.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/util.go
3a4
> "context"
5a7,12
> adt9 "github.com/filecoin-project/go-state-types/builtin/v9/util/adt"
>
> "github.com/filecoin-project/go-state-types/abi"
> "github.com/filecoin-project/go-state-types/builtin"
> market8 "github.com/filecoin-project/go-state-types/builtin/v8/market"
> adt8 "github.com/filecoin-project/go-state-types/builtin/v8/util/adt"
62a70,130
> }
>
> func getPendingVerifiedDealsAndTotalSize(ctx context.Context, adtStore adt8.Store, marketStateV8 market8.State) ([]abi.DealID, uint64, error) {
> pendingProposals, err := adt8.AsSet(adtStore, marketStateV8.PendingProposals, builtin.DefaultHamtBitwidth)
> if err != nil {
> return nil, 0, xerrors.Errorf("failed to load pending proposals: %w", err)
> }
>
> proposals, err := market8.AsDealProposalArray(adtStore, marketStateV8.Proposals)
> if err != nil {
> return nil, 0, xerrors.Errorf("failed to get proposals: %w", err)
> }
>
> // We only want those pending deals that haven't been activated -- an activated deal has an entry in dealStates8
> dealStates8, err := adt9.AsArray(adtStore, marketStateV8.States, market8.StatesAmtBitwidth)
> if err != nil {
> return nil, 0, xerrors.Errorf("failed to load v8 states array: %w", err)
> }
>
> var pendingVerifiedDeals []abi.DealID
> pendingSize := uint64(0)
> var proposal market8.DealProposal
> if err = proposals.ForEach(&proposal, func(dealID int64) error {
> // Nothing to do for unverified deals
> if !proposal.VerifiedDeal {
> return nil
> }
>
> pcid, err := proposal.Cid()
> if err != nil {
> return err
> }
>
> isPending, err := pendingProposals.Has(abi.CidKey(pcid))
> if err != nil {
> return xerrors.Errorf("failed to check pending: %w", err)
> }
>
> // Nothing to do for not-pending deals
> if !isPending {
> return nil
> }
>
> var _dealState8 market8.DealState
> found, err := dealStates8.Get(uint64(dealID), &_dealState8)
> if err != nil {
> return xerrors.Errorf("failed to lookup deal state: %w", err)
> }
>
> // the deal has an entry in deal states, which means it's already been allocated, nothing to do
> if found {
> return nil
> }
>
> pendingVerifiedDeals = append(pendingVerifiedDeals, abi.DealID(dealID))
> pendingSize += uint64(proposal.PieceSize)
> return nil
> }); err != nil {
> return nil, 0, xerrors.Errorf("failed to iterate over proposals: %w", err)
> }
> return pendingVerifiedDeals, pendingSize, nil
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration: verifreg.go
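
getPendingVerifiedDealsAndTotalSize feeds the pendingVerifiedDealSize handed to the datacapMigrator earlier: a v8 deal still needs a v9 datacap allocation iff it is a verified deal, still sits in PendingProposals, and has no entry in the deal-states AMT (i.e. was never activated). The filter restated as a hypothetical predicate:

package main

import "fmt"

// needsAllocation restates the three-way filter above: only verified,
// still-pending, never-activated deals are migrated to allocations, and
// their PieceSizes accumulate into the pending total.
func needsAllocation(verified, pending, activated bool) bool {
	return verified && pending && !activated
}

func main() {
	fmt.Println(needsAllocation(true, true, false))  // true: migrate this deal
	fmt.Println(needsAllocation(true, false, false)) // false: no longer pending
}
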
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/cbor_gen.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/cbor_gen.go
10a11
> verifreg "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
1786c1787
< var lengthBufSectorOnChainInfo = []byte{142}
---
> var lengthBufSectorOnChainInfo = []byte{143}
1910a1912,1915
> // t.SimpleQAPower (bool) (bool)
> if err := cbg.WriteBool(w, t.SimpleQAPower); err != nil {
> return err
> }
1928c1933
< if extra != 14 {
---
> if extra != 15 {
2166a2172,2188
> // t.SimpleQAPower (bool) (bool)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajOther {
> return fmt.Errorf("booleans must be major type 7")
> }
> switch extra {
> case 20:
> t.SimpleQAPower = false
> case 21:
> t.SimpleQAPower = true
> default:
> return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
> }
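
lengthBufSectorOnChainInfo changing from {142} to {143} is just the CBOR array header tracking the new field count: major type 4 (array) occupies the top three bits and, for counts below 24, the count sits in the low five bits. A one-liner to confirm:

package main

import "fmt"

func main() {
	// CBOR array header byte: 0b100_xxxxx = major type 4 with the element
	// count in the low five bits (valid for counts < 24).
	fmt.Println(0x80|14, 0x80|15) // 142 143 — 14 vs 15 fields in SectorOnChainInfo
}
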
3782a3805,3883
> var lengthBufExtendSectorExpiration2Params = []byte{129}
>
> func (t *ExtendSectorExpiration2Params) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufExtendSectorExpiration2Params); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.Extensions ([]miner.ExpirationExtension2) (slice)
> if len(t.Extensions) > cbg.MaxLength {
> return xerrors.Errorf("Slice value in field t.Extensions was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Extensions))); err != nil {
> return err
> }
> for _, v := range t.Extensions {
> if err := v.MarshalCBOR(w); err != nil {
> return err
> }
> }
> return nil
> }
>
> func (t *ExtendSectorExpiration2Params) UnmarshalCBOR(r io.Reader) error {
> *t = ExtendSectorExpiration2Params{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 1 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.Extensions ([]miner.ExpirationExtension2) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.MaxLength {
> return fmt.Errorf("t.Extensions: array too large (%d)", extra)
> }
>
> if maj != cbg.MajArray {
> return fmt.Errorf("expected cbor array")
> }
>
> if extra > 0 {
> t.Extensions = make([]ExpirationExtension2, extra)
> }
>
> for i := 0; i < int(extra); i++ {
>
> var v ExpirationExtension2
> if err := v.UnmarshalCBOR(br); err != nil {
> return err
> }
>
> t.Extensions[i] = v
> }
>
> return nil
> }
>
6490a6592,6910
> return nil
> }
>
> var lengthBufExpirationExtension2 = []byte{133}
>
> func (t *ExpirationExtension2) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufExpirationExtension2); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.Deadline (uint64) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil {
> return err
> }
>
> // t.Partition (uint64) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Partition)); err != nil {
> return err
> }
>
> // t.Sectors (bitfield.BitField) (struct)
> if err := t.Sectors.MarshalCBOR(w); err != nil {
> return err
> }
>
> // t.SectorsWithClaims ([]miner.SectorClaim) (slice)
> if len(t.SectorsWithClaims) > cbg.MaxLength {
> return xerrors.Errorf("Slice value in field t.SectorsWithClaims was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.SectorsWithClaims))); err != nil {
> return err
> }
> for _, v := range t.SectorsWithClaims {
> if err := v.MarshalCBOR(w); err != nil {
> return err
> }
> }
>
> // t.NewExpiration (abi.ChainEpoch) (int64)
> if t.NewExpiration >= 0 {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.NewExpiration)); err != nil {
> return err
> }
> } else {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.NewExpiration-1)); err != nil {
> return err
> }
> }
> return nil
> }
>
> func (t *ExpirationExtension2) UnmarshalCBOR(r io.Reader) error {
> *t = ExpirationExtension2{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 5 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.Deadline (uint64) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.Deadline = uint64(extra)
>
> }
> // t.Partition (uint64) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.Partition = uint64(extra)
>
> }
> // t.Sectors (bitfield.BitField) (struct)
>
> {
>
> if err := t.Sectors.UnmarshalCBOR(br); err != nil {
> return xerrors.Errorf("unmarshaling t.Sectors: %w", err)
> }
>
> }
> // t.SectorsWithClaims ([]miner.SectorClaim) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.MaxLength {
> return fmt.Errorf("t.SectorsWithClaims: array too large (%d)", extra)
> }
>
> if maj != cbg.MajArray {
> return fmt.Errorf("expected cbor array")
> }
>
> if extra > 0 {
> t.SectorsWithClaims = make([]SectorClaim, extra)
> }
>
> for i := 0; i < int(extra); i++ {
>
> var v SectorClaim
> if err := v.UnmarshalCBOR(br); err != nil {
> return err
> }
>
> t.SectorsWithClaims[i] = v
> }
>
> // t.NewExpiration (abi.ChainEpoch) (int64)
> {
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> var extraI int64
> if err != nil {
> return err
> }
> switch maj {
> case cbg.MajUnsignedInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 positive overflow")
> }
> case cbg.MajNegativeInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 negative oveflow")
> }
> extraI = -1 - extraI
> default:
> return fmt.Errorf("wrong type for int64 field: %d", maj)
> }
>
> t.NewExpiration = abi.ChainEpoch(extraI)
> }
> return nil
> }
>
> var lengthBufSectorClaim = []byte{131}
>
> func (t *SectorClaim) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufSectorClaim); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.SectorNumber (abi.SectorNumber) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil {
> return err
> }
>
> // t.MaintainClaims ([]verifreg.ClaimId) (slice)
> if len(t.MaintainClaims) > cbg.MaxLength {
> return xerrors.Errorf("Slice value in field t.MaintainClaims was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.MaintainClaims))); err != nil {
> return err
> }
> for _, v := range t.MaintainClaims {
> if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil {
> return err
> }
> }
>
> // t.DropClaims ([]verifreg.ClaimId) (slice)
> if len(t.DropClaims) > cbg.MaxLength {
> return xerrors.Errorf("Slice value in field t.DropClaims was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.DropClaims))); err != nil {
> return err
> }
> for _, v := range t.DropClaims {
> if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil {
> return err
> }
> }
> return nil
> }
>
> func (t *SectorClaim) UnmarshalCBOR(r io.Reader) error {
> *t = SectorClaim{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 3 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.SectorNumber (abi.SectorNumber) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.SectorNumber = abi.SectorNumber(extra)
>
> }
> // t.MaintainClaims ([]verifreg.ClaimId) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.MaxLength {
> return fmt.Errorf("t.MaintainClaims: array too large (%d)", extra)
> }
>
> if maj != cbg.MajArray {
> return fmt.Errorf("expected cbor array")
> }
>
> if extra > 0 {
> t.MaintainClaims = make([]verifreg.ClaimId, extra)
> }
>
> for i := 0; i < int(extra); i++ {
>
> maj, val, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return xerrors.Errorf("failed to read uint64 for t.MaintainClaims slice: %w", err)
> }
>
> if maj != cbg.MajUnsignedInt {
> return xerrors.Errorf("value read for array t.MaintainClaims was not a uint, instead got %d", maj)
> }
>
> t.MaintainClaims[i] = verifreg.ClaimId(val)
> }
>
> // t.DropClaims ([]verifreg.ClaimId) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.MaxLength {
> return fmt.Errorf("t.DropClaims: array too large (%d)", extra)
> }
>
> if maj != cbg.MajArray {
> return fmt.Errorf("expected cbor array")
> }
>
> if extra > 0 {
> t.DropClaims = make([]verifreg.ClaimId, extra)
> }
>
> for i := 0; i < int(extra); i++ {
>
> maj, val, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return xerrors.Errorf("failed to read uint64 for t.DropClaims slice: %w", err)
> }
>
> if maj != cbg.MajUnsignedInt {
> return xerrors.Errorf("value read for array t.DropClaims was not a uint, instead got %d", maj)
> }
>
> t.DropClaims[i] = verifreg.ClaimId(val)
> }
>
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/deadline_state.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/deadline_state.go
66c66
< SectorsSnapshot cid.Cid
---
> SectorsSnapshot cid.Cid // Array, AMT[SectorNumber]SectorOnChainInfo (sparse)
98a99,137
> // Deadline (singular)
> //
>
> func ConstructDeadline(store adt.Store) (*Deadline, error) {
> emptyPartitionsArrayCid, err := adt.StoreEmptyArray(store, DeadlinePartitionsAmtBitwidth)
> if err != nil {
> return nil, xerrors.Errorf("failed to construct empty partitions array: %w", err)
> }
> emptyDeadlineExpirationArrayCid, err := adt.StoreEmptyArray(store, DeadlineExpirationAmtBitwidth)
> if err != nil {
> return nil, xerrors.Errorf("failed to construct empty deadline expiration array: %w", err)
> }
>
> emptySectorsSnapshotArrayCid, err := adt.StoreEmptyArray(store, SectorsAmtBitwidth)
> if err != nil {
> return nil, xerrors.Errorf("failed to construct empty sectors snapshot array: %w", err)
> }
>
> emptyPoStSubmissionsArrayCid, err := adt.StoreEmptyArray(store, DeadlineOptimisticPoStSubmissionsAmtBitwidth)
> if err != nil {
> return nil, xerrors.Errorf("failed to construct empty proofs array: %w", err)
> }
>
> return &Deadline{
> Partitions: emptyPartitionsArrayCid,
> ExpirationsEpochs: emptyDeadlineExpirationArrayCid,
> EarlyTerminations: bitfield.New(),
> LiveSectors: 0,
> TotalSectors: 0,
> FaultyPower: NewPowerPairZero(),
> PartitionsPoSted: bitfield.New(),
> OptimisticPoStSubmissions: emptyPoStSubmissionsArrayCid,
> PartitionsSnapshot: emptyPartitionsArrayCid,
> SectorsSnapshot: emptySectorsSnapshotArrayCid,
> OptimisticPoStSubmissionsSnapshot: emptyPoStSubmissionsArrayCid,
> }, nil
> }
>
> //
100a140,147
>
> func ConstructDeadlines(emptyDeadlineCid cid.Cid) *Deadlines {
> d := new(Deadlines)
> for i := range d.Due {
> d.Due[i] = emptyDeadlineCid
> }
> return d
> }
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner: invariants.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/methods.go
5a6
>
6a8
> "github.com/filecoin-project/go-state-types/builtin"
10,41c12,47
< var Methods = []interface{}{
< 1: *new(func(interface{}, *power.MinerConstructorParams) *abi.EmptyValue), // Constructor
< 2: *new(func(interface{}, *abi.EmptyValue) *GetControlAddressesReturn), // ControlAddresses
< 3: *new(func(interface{}, *ChangeWorkerAddressParams) *abi.EmptyValue), // ChangeWorkerAddress
< 4: *new(func(interface{}, *ChangePeerIDParams) *abi.EmptyValue), // ChangePeerID
< 5: *new(func(interface{}, *SubmitWindowedPoStParams) *abi.EmptyValue), // SubmitWindowedPoSt
< 6: *new(func(interface{}, *PreCommitSectorParams) *abi.EmptyValue), // PreCommitSector
< 7: *new(func(interface{}, *ProveCommitSectorParams) *abi.EmptyValue), // ProveCommitSector
< 8: *new(func(interface{}, *ExtendSectorExpirationParams) *abi.EmptyValue), // ExtendSectorExpiration
< 9: *new(func(interface{}, *TerminateSectorsParams) *TerminateSectorsReturn), // TerminateSectors
< 10: *new(func(interface{}, *DeclareFaultsParams) *abi.EmptyValue), // DeclareFaults
< 11: *new(func(interface{}, *DeclareFaultsRecoveredParams) *abi.EmptyValue), // DeclareFaultsRecovered
< 12: *new(func(interface{}, *DeferredCronEventParams) *abi.EmptyValue), // OnDeferredCronEvent
< 13: *new(func(interface{}, *CheckSectorProvenParams) *abi.EmptyValue), // CheckSectorProven
< 14: *new(func(interface{}, *ApplyRewardParams) *abi.EmptyValue), // ApplyRewards
< 15: *new(func(interface{}, *ReportConsensusFaultParams) *abi.EmptyValue), // ReportConsensusFault
< 16: *new(func(interface{}, *WithdrawBalanceParams) *abi.TokenAmount), // WithdrawBalance
< 17: *new(func(interface{}, *ConfirmSectorProofsParams) *abi.EmptyValue), // ConfirmSectorProofsValid
< 18: *new(func(interface{}, *ChangeMultiaddrsParams) *abi.EmptyValue), // ChangeMultiaddrs
< 19: *new(func(interface{}, *CompactPartitionsParams) *abi.EmptyValue), // CompactPartitions
< 20: *new(func(interface{}, *CompactSectorNumbersParams) *abi.EmptyValue), // CompactSectorNumbers
< 21: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // ConfirmUpdateWorkerKey
< 22: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // RepayDebt
< 23: *new(func(interface{}, *address.Address) *abi.EmptyValue), // ChangeOwnerAddress
< 24: *new(func(interface{}, *DisputeWindowedPoStParams) *abi.EmptyValue), // DisputeWindowedPoSt
< 25: *new(func(interface{}, *PreCommitSectorBatchParams) *abi.EmptyValue), // PreCommitSectorBatch
< 26: *new(func(interface{}, *ProveCommitAggregateParams) *abi.EmptyValue), // ProveCommitAggregate
< 27: *new(func(interface{}, *ProveReplicaUpdatesParams) *bitfield.BitField), // ProveReplicaUpdates
< 28: *new(func(interface{}, *PreCommitSectorBatchParams2) *abi.EmptyValue), // PreCommitSectorBatch2
< 29: *new(func(interface{}, *ProveReplicaUpdatesParams2) *bitfield.BitField), // ProveReplicaUpdates2
< 30: *new(func(interface{}, *ChangeBeneficiaryParams) *abi.EmptyValue), // ChangeBeneficiary
< 31: *new(func(interface{}, *abi.EmptyValue) *GetBeneficiaryReturn), // GetBeneficiary
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*power.MinerConstructorParams) *abi.EmptyValue)}, // Constructor
> 2: {"ControlAddresses", *new(func(*abi.EmptyValue) *GetControlAddressesReturn)}, // ControlAddresses
> 3: {"ChangeWorkerAddress", *new(func(*ChangeWorkerAddressParams) *abi.EmptyValue)}, // ChangeWorkerAddress
> 4: {"ChangePeerID", *new(func(*ChangePeerIDParams) *abi.EmptyValue)}, // ChangePeerID
> 5: {"SubmitWindowedPoSt", *new(func(*SubmitWindowedPoStParams) *abi.EmptyValue)}, // SubmitWindowedPoSt
> 6: {"PreCommitSector", *new(func(*PreCommitSectorParams) *abi.EmptyValue)}, // PreCommitSector
> 7: {"ProveCommitSector", *new(func(*ProveCommitSectorParams) *abi.EmptyValue)}, // ProveCommitSector
> 8: {"ExtendSectorExpiration", *new(func(*ExtendSectorExpirationParams) *abi.EmptyValue)}, // ExtendSectorExpiration
> 9: {"TerminateSectors", *new(func(*TerminateSectorsParams) *TerminateSectorsReturn)}, // TerminateSectors
> 10: {"DeclareFaults", *new(func(*DeclareFaultsParams) *abi.EmptyValue)}, // DeclareFaults
> 11: {"DeclareFaultsRecovered", *new(func(*DeclareFaultsRecoveredParams) *abi.EmptyValue)}, // DeclareFaultsRecovered
> 12: {"OnDeferredCronEvent", *new(func(*DeferredCronEventParams) *abi.EmptyValue)}, // OnDeferredCronEvent
> 13: {"CheckSectorProven", *new(func(*CheckSectorProvenParams) *abi.EmptyValue)}, // CheckSectorProven
> 14: {"ApplyRewards", *new(func(*ApplyRewardParams) *abi.EmptyValue)}, // ApplyRewards
> 15: {"ReportConsensusFault", *new(func(*ReportConsensusFaultParams) *abi.EmptyValue)}, // ReportConsensusFault
> 16: {"WithdrawBalance", *new(func(*WithdrawBalanceParams) *abi.TokenAmount)}, // WithdrawBalance
> 17: {"ConfirmSectorProofsValid", *new(func(*ConfirmSectorProofsParams) *abi.EmptyValue)}, // ConfirmSectorProofsValid
> 18: {"ChangeMultiaddrs", *new(func(*ChangeMultiaddrsParams) *abi.EmptyValue)}, // ChangeMultiaddrs
> 19: {"CompactPartitions", *new(func(*CompactPartitionsParams) *abi.EmptyValue)}, // CompactPartitions
> 20: {"CompactSectorNumbers", *new(func(*CompactSectorNumbersParams) *abi.EmptyValue)}, // CompactSectorNumbers
> 21: {"ConfirmUpdateWorkerKey", *new(func(*abi.EmptyValue) *abi.EmptyValue)}, // ConfirmUpdateWorkerKey
> 22: {"RepayDebt", *new(func(*abi.EmptyValue) *abi.EmptyValue)}, // RepayDebt
> 23: {"ChangeOwnerAddress", *new(func(*address.Address) *abi.EmptyValue)}, // ChangeOwnerAddress
> 24: {"DisputeWindowedPoSt", *new(func(*DisputeWindowedPoStParams) *abi.EmptyValue)}, // DisputeWindowedPoSt
> 25: {"PreCommitSectorBatch", *new(func(*PreCommitSectorBatchParams) *abi.EmptyValue)}, // PreCommitSectorBatch
> 26: {"ProveCommitAggregate", *new(func(*ProveCommitAggregateParams) *abi.EmptyValue)}, // ProveCommitAggregate
> 27: {"ProveReplicaUpdates", *new(func(*ProveReplicaUpdatesParams) *bitfield.BitField)}, // ProveReplicaUpdates
> // NB: the name of this method must not change across actor/network versions
> 28: {"PreCommitSectorBatch2", *new(func(*PreCommitSectorBatchParams2) *abi.EmptyValue)}, // PreCommitSectorBatch2
> // NB: the name of this method must not change across actor/network versions
> 29: {"ProveReplicaUpdates2", *new(func(*ProveReplicaUpdatesParams2) *bitfield.BitField)}, // ProveReplicaUpdates2
> 30: {"ChangeBeneficiary", *new(func(*ChangeBeneficiaryParams) *abi.EmptyValue)}, // ChangeBeneficiary
> 31: {"GetBeneficiary", *new(func(*abi.EmptyValue) *GetBeneficiaryReturn)}, // GetBeneficiary
> // NB: the name of this method must not change across actor/network versions
> 32: {"ExtendSectorExpiration2", *new(func(*ExtendSectorExpiration2Params) *abi.EmptyValue)}, // ExtendSectorExpiration2
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/miner_state.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/miner_state.go
175a176
> SimpleQAPower bool // Flag for QA power mechanism introduced in FIP-0045
319a321,332
> }
>
> // pre-commit clean up
> func (st *State) QuantSpecEveryDeadline() builtin.QuantSpec {
> return builtin.NewQuantSpec(WPoStChallengeWindow, st.ProvingPeriodStart)
> }
>
> // Return true when the miner actor needs to continue scheduling deadline crons
> func (st *State) ContinueDeadlineCron() bool {
> return !st.PreCommitDeposits.IsZero() ||
> !st.InitialPledge.IsZero() ||
> !st.LockedFunds.IsZero()
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/miner_types.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/miner_types.go
10a11
> "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
351a353,370
> }
>
> type ExtendSectorExpiration2Params struct {
> Extensions []ExpirationExtension2
> }
>
> type ExpirationExtension2 struct {
> Deadline uint64
> Partition uint64
> Sectors bitfield.BitField
> SectorsWithClaims []SectorClaim
> NewExpiration abi.ChainEpoch
> }
>
> type SectorClaim struct {
> SectorNumber abi.SectorNumber
> MaintainClaims []verifreg.ClaimId
> DropClaims []verifreg.ClaimId
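
These are the types behind ExtendSectorExpiration2 (FIP-0045): each extension targets one deadline/partition, a bitfield of sectors, and, for sectors carrying verified claims, which claims to maintain or drop. An illustrative construction plus a CBOR round-trip through the generated marshaller from the cbor_gen.go hunks above (all values hypothetical):

package main

import (
	"bytes"
	"fmt"

	"github.com/filecoin-project/go-bitfield"
	"github.com/filecoin-project/go-state-types/abi"
	miner9 "github.com/filecoin-project/go-state-types/builtin/v9/miner"
	verifreg9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
)

func main() {
	// Extend two sectors in deadline 3 / partition 0, keeping verified
	// claim 42 on sector 10; values are illustrative only.
	params := miner9.ExtendSectorExpiration2Params{
		Extensions: []miner9.ExpirationExtension2{{
			Deadline:  3,
			Partition: 0,
			Sectors:   bitfield.NewFromSet([]uint64{10, 11}),
			SectorsWithClaims: []miner9.SectorClaim{{
				SectorNumber:   10,
				MaintainClaims: []verifreg9.ClaimId{42},
			}},
			NewExpiration: abi.ChainEpoch(3000000),
		}},
	}

	var buf bytes.Buffer
	if err := params.MarshalCBOR(&buf); err != nil { // generated marshaller above
		panic(err)
	}
	fmt.Printf("%d CBOR bytes\n", buf.Len())
}
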
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/partition_state.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/partition_state.go
6a7
> "github.com/filecoin-project/go-state-types/builtin/v9/util/adt"
91a93,100
> func (d *Deadline) PartitionsSnapshotArray(store adt.Store) (*adt.Array, error) {
> arr, err := adt.AsArray(store, d.PartitionsSnapshot, DeadlinePartitionsAmtBitwidth)
> if err != nil {
> return nil, xerrors.Errorf("failed to load partitions snapshot: %w", err)
> }
> return arr, nil
> }
>
115a125,137
> }
>
> func (pp *PowerPair) Equals(other PowerPair) bool {
> return pp.Raw.Equals(other.Raw) && pp.QA.Equals(other.QA)
> }
>
> func (pp PowerPair) IsZero() bool {
> return pp.Raw.IsZero() && pp.QA.IsZero()
> }
>
> // Active power is power of non-faulty sectors.
> func (p *Partition) ActivePower() PowerPair {
> return p.LivePower.Sub(p.FaultyPower).Sub(p.UnprovenPower)
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/policy.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/policy.go
165a166,171
> // The quality-adjusted power for a sector.
> func QAPowerForSector(size abi.SectorSize, sector *SectorOnChainInfo) abi.StoragePower {
> duration := sector.Expiration - sector.Activation
> return QAPowerForWeight(size, duration, sector.DealWeight, sector.VerifiedDealWeight)
> }
>
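
QAPowerForSector derives duration as Expiration - Activation and defers to QAPowerForWeight; with zero deal weights the quality multiplier is the base multiplier, so QA power equals raw byte power (verified deal weight would scale it up). A worked sketch:

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	miner9 "github.com/filecoin-project/go-state-types/builtin/v9/miner"
)

func main() {
	// A hypothetical 32 GiB sector with no deal weight: QA power == raw size.
	info := &miner9.SectorOnChainInfo{
		Activation:         0,
		Expiration:         abi.ChainEpoch(1000000),
		DealWeight:         big.Zero(),
		VerifiedDealWeight: big.Zero(),
	}
	qap := miner9.QAPowerForSector(abi.SectorSize(32<<30), info)
	fmt.Println(qap) // 34359738368
}
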
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/multisig: invariants.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/multisig/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/multisig/methods.go
4a5
> "github.com/filecoin-project/go-state-types/builtin"
7,16c8,18
< var Methods = []interface{}{
< 1: *new(func(interface{}, *ConstructorParams) *abi.EmptyValue), // Constructor
< 2: *new(func(interface{}, *ProposeParams) *ProposeReturn), // Propose
< 3: *new(func(interface{}, *TxnIDParams) *ApproveReturn), // Approve
< 4: *new(func(interface{}, *TxnIDParams) *abi.EmptyValue), // Cancel
< 5: *new(func(interface{}, *AddSignerParams) *abi.EmptyValue), // AddSigner
< 6: *new(func(interface{}, *RemoveSignerParams) *abi.EmptyValue), // RemoveSigner
< 7: *new(func(interface{}, *SwapSignerParams) *abi.EmptyValue), // SwapSigner
< 8: *new(func(interface{}, *ChangeNumApprovalsThresholdParams) *abi.EmptyValue), // ChangeNumApprovalsThreshold
< 9: *new(func(interface{}, *LockBalanceParams) *abi.EmptyValue), // LockBalance
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*ConstructorParams) *abi.EmptyValue)}, // Constructor
> 2: {"Propose", *new(func(*ProposeParams) *ProposeReturn)}, // Propose
> 3: {"Approve", *new(func(*TxnIDParams) *ApproveReturn)}, // Approve
> 4: {"Cancel", *new(func(*TxnIDParams) *abi.EmptyValue)}, // Cancel
> 5: {"AddSigner", *new(func(*AddSignerParams) *abi.EmptyValue)}, // AddSigner
> 6: {"RemoveSigner", *new(func(*RemoveSignerParams) *abi.EmptyValue)}, // RemoveSigner
> 7: {"SwapSigner", *new(func(*SwapSignerParams) *abi.EmptyValue)}, // SwapSigner
> 8: {"ChangeNumApprovalsThreshold", *new(func(*ChangeNumApprovalsThresholdParams) *abi.EmptyValue)}, // ChangeNumApprovalsThreshold
> 9: {"LockBalance", *new(func(*LockBalanceParams) *abi.EmptyValue)}, // LockBalance
> uint64(builtin.UniversalReceiverHookMethodNum): {"UniversalReceiverHook", *new(func(*[]byte) *abi.EmptyValue)}, // UniversalReceiverHook
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/multisig: policy.go
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/paych: invariants.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/paych/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/paych/methods.go
4a5
> "github.com/filecoin-project/go-state-types/builtin"
7,11c8,12
< var Methods = []interface{}{
< 1: *new(func(interface{}, *ConstructorParams) *abi.EmptyValue), // Constructor
< 2: *new(func(interface{}, *UpdateChannelStateParams) *abi.EmptyValue), // UpdateChannelState
< 3: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // Settle
< 4: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // Collect
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*ConstructorParams) *abi.EmptyValue)}, // Constructor
> 2: {"UpdateChannelState", *new(func(*UpdateChannelStateParams) *abi.EmptyValue)}, // UpdateChannelState
> 3: {"Settle", *new(func(*abi.EmptyValue) *abi.EmptyValue)}, // Settle
> 4: {"Collect", *new(func(*abi.EmptyValue) *abi.EmptyValue)}, // Collect
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/power/cbor_gen.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/power/cbor_gen.go
765a766,968
> var lengthBufCreateMinerParams = []byte{133}
>
> func (t *CreateMinerParams) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufCreateMinerParams); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.Owner (address.Address) (struct)
> if err := t.Owner.MarshalCBOR(w); err != nil {
> return err
> }
>
> // t.Worker (address.Address) (struct)
> if err := t.Worker.MarshalCBOR(w); err != nil {
> return err
> }
>
> // t.WindowPoStProofType (abi.RegisteredPoStProof) (int64)
> if t.WindowPoStProofType >= 0 {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.WindowPoStProofType)); err != nil {
> return err
> }
> } else {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.WindowPoStProofType-1)); err != nil {
> return err
> }
> }
>
> // t.Peer ([]uint8) (slice)
> if len(t.Peer) > cbg.ByteArrayMaxLen {
> return xerrors.Errorf("Byte array in field t.Peer was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Peer))); err != nil {
> return err
> }
>
> if _, err := w.Write(t.Peer[:]); err != nil {
> return err
> }
>
> // t.Multiaddrs ([][]uint8) (slice)
> if len(t.Multiaddrs) > cbg.MaxLength {
> return xerrors.Errorf("Slice value in field t.Multiaddrs was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Multiaddrs))); err != nil {
> return err
> }
> for _, v := range t.Multiaddrs {
> if len(v) > cbg.ByteArrayMaxLen {
> return xerrors.Errorf("Byte array in field v was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(v))); err != nil {
> return err
> }
>
> if _, err := w.Write(v[:]); err != nil {
> return err
> }
> }
> return nil
> }
>
> func (t *CreateMinerParams) UnmarshalCBOR(r io.Reader) error {
> *t = CreateMinerParams{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 5 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.Owner (address.Address) (struct)
>
> {
>
> if err := t.Owner.UnmarshalCBOR(br); err != nil {
> return xerrors.Errorf("unmarshaling t.Owner: %w", err)
> }
>
> }
> // t.Worker (address.Address) (struct)
>
> {
>
> if err := t.Worker.UnmarshalCBOR(br); err != nil {
> return xerrors.Errorf("unmarshaling t.Worker: %w", err)
> }
>
> }
> // t.WindowPoStProofType (abi.RegisteredPoStProof) (int64)
> {
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> var extraI int64
> if err != nil {
> return err
> }
> switch maj {
> case cbg.MajUnsignedInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 positive overflow")
> }
> case cbg.MajNegativeInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 negative oveflow")
> }
> extraI = -1 - extraI
> default:
> return fmt.Errorf("wrong type for int64 field: %d", maj)
> }
>
> t.WindowPoStProofType = abi.RegisteredPoStProof(extraI)
> }
> // t.Peer ([]uint8) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.ByteArrayMaxLen {
> return fmt.Errorf("t.Peer: byte array too large (%d)", extra)
> }
> if maj != cbg.MajByteString {
> return fmt.Errorf("expected byte array")
> }
>
> if extra > 0 {
> t.Peer = make([]uint8, extra)
> }
>
> if _, err := io.ReadFull(br, t.Peer[:]); err != nil {
> return err
> }
> // t.Multiaddrs ([][]uint8) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.MaxLength {
> return fmt.Errorf("t.Multiaddrs: array too large (%d)", extra)
> }
>
> if maj != cbg.MajArray {
> return fmt.Errorf("expected cbor array")
> }
>
> if extra > 0 {
> t.Multiaddrs = make([][]uint8, extra)
> }
>
> for i := 0; i < int(extra); i++ {
> {
> var maj byte
> var extra uint64
> var err error
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.ByteArrayMaxLen {
> return fmt.Errorf("t.Multiaddrs[i]: byte array too large (%d)", extra)
> }
> if maj != cbg.MajByteString {
> return fmt.Errorf("expected byte array")
> }
>
> if extra > 0 {
> t.Multiaddrs[i] = make([]uint8, extra)
> }
>
> if _, err := io.ReadFull(br, t.Multiaddrs[i][:]); err != nil {
> return err
> }
> }
> }
>
> return nil
> }
>
1018a1222,1305
> return err
> }
> return nil
> }
>
> var lengthBufCronEvent = []byte{130}
>
> func (t *CronEvent) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufCronEvent); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.MinerAddr (address.Address) (struct)
> if err := t.MinerAddr.MarshalCBOR(w); err != nil {
> return err
> }
>
> // t.CallbackPayload ([]uint8) (slice)
> if len(t.CallbackPayload) > cbg.ByteArrayMaxLen {
> return xerrors.Errorf("Byte array in field t.CallbackPayload was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.CallbackPayload))); err != nil {
> return err
> }
>
> if _, err := w.Write(t.CallbackPayload[:]); err != nil {
> return err
> }
> return nil
> }
>
> func (t *CronEvent) UnmarshalCBOR(r io.Reader) error {
> *t = CronEvent{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 2 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.MinerAddr (address.Address) (struct)
>
> {
>
> if err := t.MinerAddr.UnmarshalCBOR(br); err != nil {
> return xerrors.Errorf("unmarshaling t.MinerAddr: %w", err)
> }
>
> }
> // t.CallbackPayload ([]uint8) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.ByteArrayMaxLen {
> return fmt.Errorf("t.CallbackPayload: byte array too large (%d)", extra)
> }
> if maj != cbg.MajByteString {
> return fmt.Errorf("expected byte array")
> }
>
> if extra > 0 {
> t.CallbackPayload = make([]uint8, extra)
> }
>
> if _, err := io.ReadFull(br, t.CallbackPayload[:]); err != nil {
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/power: invariants.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/power/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/power/methods.go
4a5
> "github.com/filecoin-project/go-state-types/builtin"
8,17c9,18
< var Methods = []interface{}{
< 1: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // Constructor
< 2: *new(func(interface{}, *CreateMinerParams) *CreateMinerReturn), // CreateMiner
< 3: *new(func(interface{}, *UpdateClaimedPowerParams) *abi.EmptyValue), // UpdateClaimedPower
< 4: *new(func(interface{}, *EnrollCronEventParams) *abi.EmptyValue), // EnrollCronEvent
< 5: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // CronTick
< 6: *new(func(interface{}, *abi.TokenAmount) *abi.EmptyValue), // UpdatePledgeTotal
< 7: nil,
< 8: *new(func(interface{}, *proof.SealVerifyInfo) *abi.EmptyValue), // SubmitPoRepForBulkVerify
< 9: *new(func(interface{}, *abi.EmptyValue) *CurrentTotalPowerReturn), // CurrentTotalPower
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*abi.EmptyValue) *abi.EmptyValue)}, // Constructor
> 2: {"CreateMiner", *new(func(*CreateMinerParams) *CreateMinerReturn)}, // CreateMiner
> 3: {"UpdateClaimedPower", *new(func(*UpdateClaimedPowerParams) *abi.EmptyValue)}, // UpdateClaimedPower
> 4: {"EnrollCronEvent", *new(func(*EnrollCronEventParams) *abi.EmptyValue)}, // EnrollCronEvent
> 5: {"CronTick", *new(func(*abi.EmptyValue) *abi.EmptyValue)}, // CronTick
> 6: {"UpdatePledgeTotal", *new(func(*abi.TokenAmount) *abi.EmptyValue)}, // UpdatePledgeTotal
> 7: {"OnConsensusFault", nil}, // deprecated
> 8: {"SubmitPoRepForBulkVerify", *new(func(*proof.SealVerifyInfo) *abi.EmptyValue)}, // SubmitPoRepForBulkVerify
> 9: {"CurrentTotalPower", *new(func(*abi.EmptyValue) *CurrentTotalPowerReturn)}, // CurrentTotalPower
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/power/power_state.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/power/power_state.go
36a37,42
> // PARAM_SPEC// Maximum number of prove-commits each miner can submit in one epoch.
> //
> // This limits the number of proof partitions we may need to load in the cron call path.
> // Onboarding 1EiB/year requires at least 32 prove-commits per epoch.
> const MaxMinerProveCommitsPerEpoch = 200 // PARAM_SPEC
>
106a113,117
> }
>
> type CronEvent struct {
> MinerAddr addr.Address
> CallbackPayload []byte
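
power/v9 now exports CronEvent, the record the power actor stores for each miner's enrolled cron callback, alongside the MaxMinerProveCommitsPerEpoch bound on cron-path proof verification noted above. A round-trip through the generated marshallers, with an illustrative payload:

package main

import (
	"bytes"
	"fmt"

	addr "github.com/filecoin-project/go-address"
	power9 "github.com/filecoin-project/go-state-types/builtin/v9/power"
)

func main() {
	miner, err := addr.NewIDAddress(1000)
	if err != nil {
		panic(err)
	}
	// The miner actor packs its own callback payload here; 0x01 is illustrative.
	ev := power9.CronEvent{MinerAddr: miner, CallbackPayload: []byte{0x01}}

	var buf bytes.Buffer
	if err := ev.MarshalCBOR(&buf); err != nil {
		panic(err)
	}
	var out power9.CronEvent
	if err := out.UnmarshalCBOR(&buf); err != nil {
		panic(err)
	}
	fmt.Println(out.MinerAddr, out.CallbackPayload)
}
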
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/reward: invariants.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/reward/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/reward/methods.go
4a5
> "github.com/filecoin-project/go-state-types/builtin"
7,11c8,12
< var Methods = []interface{}{
< 1: *new(func(interface{}, *abi.StoragePower) *abi.EmptyValue), // Constructor
< 2: *new(func(interface{}, *AwardBlockRewardParams) *abi.EmptyValue), // AwardBlockReward
< 3: *new(func(interface{}, *abi.EmptyValue) *ThisEpochRewardReturn), // ThisEpochReward
< 4: *new(func(interface{}, *abi.StoragePower) *abi.EmptyValue), // UpdateNetworkKPI
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*abi.StoragePower) *abi.EmptyValue)}, // Constructor
> 2: {"AwardBlockReward", *new(func(*AwardBlockRewardParams) *abi.EmptyValue)}, // AwardBlockReward
> 3: {"ThisEpochReward", *new(func(*abi.EmptyValue) *ThisEpochRewardReturn)}, // ThisEpochReward
> 4: {"UpdateNetworkKPI", *new(func(*abi.StoragePower) *abi.EmptyValue)}, // UpdateNetworkKPI
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/system/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/system/methods.go
4a5
> "github.com/filecoin-project/go-state-types/builtin"
7,8c8,9
< var Methods = []interface{}{
< 1: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // Constructor
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*abi.EmptyValue) *abi.EmptyValue)}, // Constructor
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt/balancetable.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt/balancetable.go
40a41,51
>
> // Returns the total balance held by this BalanceTable
> func (t *BalanceTable) Total() (abi.TokenAmount, error) {
> total := big.Zero()
> var cur abi.TokenAmount
> err := (*Map)(t).ForEach(&cur, func(key string) error {
> total = big.Add(total, cur)
> return nil
> })
> return total, err
> }
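
The new Total helper sums the table by walking every entry with ForEach, so it is O(n) in table size. A usage sketch, assuming the package's AsBalanceTable constructor and a table root:

package sketch

import (
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	adt9 "github.com/filecoin-project/go-state-types/builtin/v9/util/adt"
	"github.com/ipfs/go-cid"
)

// tableTotal loads a balance table from root and sums it with Total.
func tableTotal(store adt9.Store, root cid.Cid) (abi.TokenAmount, error) {
	bt, err := adt9.AsBalanceTable(store, root)
	if err != nil {
		return big.Zero(), err
	}
	return bt.Total()
}
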
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt/map.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt/map.go
5a6
> "io"
28a30,39
> }
>
> func (m *Map) MarshalCBOR(w io.Writer) error {
> rootCid, err := m.Root()
> if err != nil {
> return xerrors.Errorf("failed to flush map: %w", err)
> }
>
> cborCid := cbg.CborCid(rootCid)
> return cborCid.MarshalCBOR(w)
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt/multimap.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt/multimap.go
3c3,6
< import "github.com/ipfs/go-cid"
---
> import (
> "github.com/ipfs/go-cid"
> cbg "github.com/whyrusleeping/cbor-gen"
> )
11a15,25
> // Interprets a store as a HAMT-based map of AMTs with root `r`.
> // The outer map is interpreted with a branching factor of 2^bitwidth.
> func AsMultimap(s Store, r cid.Cid, outerBitwidth, innerBitwidth int) (*Multimap, error) {
> m, err := AsMap(s, r, outerBitwidth)
> if err != nil {
> return nil, err
> }
>
> return &Multimap{m, innerBitwidth}, nil
> }
>
33a48,63
> }
>
> func (mm *Multimap) ForAll(fn func(k string, arr *Array) error) error {
> var arrRoot cbg.CborCid
> if err := mm.mp.ForEach(&arrRoot, func(k string) error {
> arr, err := AsArray(mm.mp.store, cid.Cid(arrRoot), mm.innerBitwidth)
> if err != nil {
> return err
> }
>
> return fn(k, arr)
> }); err != nil {
> return err
> }
>
> return nil
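
AsMultimap and ForAll round out the v9 multimap (a HAMT of AMTs): the outer map stores each inner array's root CID as a CborCid, and ForAll reloads each inner AMT before invoking the callback. A usage sketch; store, root and the inner bitwidth of 3 are assumptions:

package sketch

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/builtin"
	adt9 "github.com/filecoin-project/go-state-types/builtin/v9/util/adt"
	"github.com/ipfs/go-cid"
)

// dumpMultimap walks every key and inner AMT of a multimap at root.
func dumpMultimap(store adt9.Store, root cid.Cid) error {
	mm, err := adt9.AsMultimap(store, root, builtin.DefaultHamtBitwidth, 3)
	if err != nil {
		return err
	}
	return mm.ForAll(func(k string, arr *adt9.Array) error {
		fmt.Printf("key %x -> %d entries\n", k, arr.Length())
		return nil
	})
}
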
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt: set.go
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util: bitfield.go
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util: bitfield_queue.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/verifreg/cbor_gen.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/verifreg/cbor_gen.go
8a9,10
> abi "github.com/filecoin-project/go-state-types/abi"
> exitcode "github.com/filecoin-project/go-state-types/exitcode"
15c17
< var lengthBufState = []byte{132}
---
> var lengthBufState = []byte{134}
39,44d40
< // t.VerifiedClients (cid.Cid) (struct)
<
< if err := cbg.WriteCidBuf(scratch, w, t.VerifiedClients); err != nil {
< return xerrors.Errorf("failed to write cid field t.VerifiedClients: %w", err)
< }
<
50a47,64
> // t.Allocations (cid.Cid) (struct)
>
> if err := cbg.WriteCidBuf(scratch, w, t.Allocations); err != nil {
> return xerrors.Errorf("failed to write cid field t.Allocations: %w", err)
> }
>
> // t.NextAllocationId (verifreg.AllocationId) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.NextAllocationId)); err != nil {
> return err
> }
>
> // t.Claims (cid.Cid) (struct)
>
> if err := cbg.WriteCidBuf(scratch, w, t.Claims); err != nil {
> return xerrors.Errorf("failed to write cid field t.Claims: %w", err)
> }
>
68c82
< if extra != 4 {
---
> if extra != 6 {
93c107
< // t.VerifiedClients (cid.Cid) (struct)
---
> // t.RemoveDataCapProposalIDs (cid.Cid) (struct)
99c113
< return xerrors.Errorf("failed to read cid field t.VerifiedClients: %w", err)
---
> return xerrors.Errorf("failed to read cid field t.RemoveDataCapProposalIDs: %w", err)
102c116
< t.VerifiedClients = c
---
> t.RemoveDataCapProposalIDs = c
105c119
< // t.RemoveDataCapProposalIDs (cid.Cid) (struct)
---
> // t.Allocations (cid.Cid) (struct)
111c125
< return xerrors.Errorf("failed to read cid field t.RemoveDataCapProposalIDs: %w", err)
---
> return xerrors.Errorf("failed to read cid field t.Allocations: %w", err)
114c128,154
< t.RemoveDataCapProposalIDs = c
---
> t.Allocations = c
>
> }
> // t.NextAllocationId (verifreg.AllocationId) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.NextAllocationId = AllocationId(extra)
>
> }
> // t.Claims (cid.Cid) (struct)
>
> {
>
> c, err := cbg.ReadCid(br)
> if err != nil {
> return xerrors.Errorf("failed to read cid field t.Claims: %w", err)
> }
>
> t.Claims = c
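
Putting the State hunks above together: the v8 struct had four fields (lengthBuf 132) and v9 has six (134); VerifiedClients is dropped, since client datacap balances move to the new datacap actor, and three FIP-0045 fields are appended. A condensed restatement, not the canonical definition; the two unchanged leading fields are assumed from the v8 layout:

package sketch

import (
	"github.com/filecoin-project/go-address"
	"github.com/ipfs/go-cid"
)

// stateV9 mirrors the marshal order seen in the generated code above.
type stateV9 struct {
	RootKey                  address.Address // unchanged from v8
	Verifiers                cid.Cid         // unchanged from v8
	RemoveDataCapProposalIDs cid.Cid         // VerifiedClients used to precede this
	Allocations              cid.Cid         // new: HAMT of datacap allocations
	NextAllocationId         uint64          // new: verifreg.AllocationId counter
	Claims                   cid.Cid         // new: HAMT of provider claims
}
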
519a560,1810
> var lengthBufRemoveExpiredAllocationsParams = []byte{130}
>
> func (t *RemoveExpiredAllocationsParams) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufRemoveExpiredAllocationsParams); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.Client (abi.ActorID) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Client)); err != nil {
> return err
> }
>
> // t.AllocationIds ([]verifreg.AllocationId) (slice)
> if len(t.AllocationIds) > cbg.MaxLength {
> return xerrors.Errorf("Slice value in field t.AllocationIds was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.AllocationIds))); err != nil {
> return err
> }
> for _, v := range t.AllocationIds {
> if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil {
> return err
> }
> }
> return nil
> }
>
> func (t *RemoveExpiredAllocationsParams) UnmarshalCBOR(r io.Reader) error {
> *t = RemoveExpiredAllocationsParams{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 2 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.Client (abi.ActorID) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.Client = abi.ActorID(extra)
>
> }
> // t.AllocationIds ([]verifreg.AllocationId) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.MaxLength {
> return fmt.Errorf("t.AllocationIds: array too large (%d)", extra)
> }
>
> if maj != cbg.MajArray {
> return fmt.Errorf("expected cbor array")
> }
>
> if extra > 0 {
> t.AllocationIds = make([]AllocationId, extra)
> }
>
> for i := 0; i < int(extra); i++ {
>
> maj, val, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return xerrors.Errorf("failed to read uint64 for t.AllocationIds slice: %w", err)
> }
>
> if maj != cbg.MajUnsignedInt {
> return xerrors.Errorf("value read for array t.AllocationIds was not a uint, instead got %d", maj)
> }
>
> t.AllocationIds[i] = AllocationId(val)
> }
>
> return nil
> }
>
> var lengthBufRemoveExpiredAllocationsReturn = []byte{131}
>
> func (t *RemoveExpiredAllocationsReturn) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufRemoveExpiredAllocationsReturn); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.Considered ([]verifreg.AllocationId) (slice)
> if len(t.Considered) > cbg.MaxLength {
> return xerrors.Errorf("Slice value in field t.Considered was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Considered))); err != nil {
> return err
> }
> for _, v := range t.Considered {
> if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil {
> return err
> }
> }
>
> // t.Results (verifreg.BatchReturn) (struct)
> if err := t.Results.MarshalCBOR(w); err != nil {
> return err
> }
>
> // t.DataCapRecovered (big.Int) (struct)
> if err := t.DataCapRecovered.MarshalCBOR(w); err != nil {
> return err
> }
> return nil
> }
>
> func (t *RemoveExpiredAllocationsReturn) UnmarshalCBOR(r io.Reader) error {
> *t = RemoveExpiredAllocationsReturn{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 3 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.Considered ([]verifreg.AllocationId) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.MaxLength {
> return fmt.Errorf("t.Considered: array too large (%d)", extra)
> }
>
> if maj != cbg.MajArray {
> return fmt.Errorf("expected cbor array")
> }
>
> if extra > 0 {
> t.Considered = make([]AllocationId, extra)
> }
>
> for i := 0; i < int(extra); i++ {
>
> maj, val, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return xerrors.Errorf("failed to read uint64 for t.Considered slice: %w", err)
> }
>
> if maj != cbg.MajUnsignedInt {
> return xerrors.Errorf("value read for array t.Considered was not a uint, instead got %d", maj)
> }
>
> t.Considered[i] = AllocationId(val)
> }
>
> // t.Results (verifreg.BatchReturn) (struct)
>
> {
>
> if err := t.Results.UnmarshalCBOR(br); err != nil {
> return xerrors.Errorf("unmarshaling t.Results: %w", err)
> }
>
> }
> // t.DataCapRecovered (big.Int) (struct)
>
> {
>
> if err := t.DataCapRecovered.UnmarshalCBOR(br); err != nil {
> return xerrors.Errorf("unmarshaling t.DataCapRecovered: %w", err)
> }
>
> }
> return nil
> }
>
> var lengthBufBatchReturn = []byte{130}
>
> func (t *BatchReturn) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufBatchReturn); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.SuccessCount (uint64) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SuccessCount)); err != nil {
> return err
> }
>
> // t.FailCodes ([]verifreg.FailCode) (slice)
> if len(t.FailCodes) > cbg.MaxLength {
> return xerrors.Errorf("Slice value in field t.FailCodes was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.FailCodes))); err != nil {
> return err
> }
> for _, v := range t.FailCodes {
> if err := v.MarshalCBOR(w); err != nil {
> return err
> }
> }
> return nil
> }
>
> func (t *BatchReturn) UnmarshalCBOR(r io.Reader) error {
> *t = BatchReturn{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 2 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.SuccessCount (uint64) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.SuccessCount = uint64(extra)
>
> }
> // t.FailCodes ([]verifreg.FailCode) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.MaxLength {
> return fmt.Errorf("t.FailCodes: array too large (%d)", extra)
> }
>
> if maj != cbg.MajArray {
> return fmt.Errorf("expected cbor array")
> }
>
> if extra > 0 {
> t.FailCodes = make([]FailCode, extra)
> }
>
> for i := 0; i < int(extra); i++ {
>
> var v FailCode
> if err := v.UnmarshalCBOR(br); err != nil {
> return err
> }
>
> t.FailCodes[i] = v
> }
>
> return nil
> }
>
> var lengthBufClaimAllocationsParams = []byte{130}
>
> func (t *ClaimAllocationsParams) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufClaimAllocationsParams); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.Sectors ([]verifreg.SectorAllocationClaim) (slice)
> if len(t.Sectors) > cbg.MaxLength {
> return xerrors.Errorf("Slice value in field t.Sectors was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Sectors))); err != nil {
> return err
> }
> for _, v := range t.Sectors {
> if err := v.MarshalCBOR(w); err != nil {
> return err
> }
> }
>
> // t.AllOrNothing (bool) (bool)
> if err := cbg.WriteBool(w, t.AllOrNothing); err != nil {
> return err
> }
> return nil
> }
>
> func (t *ClaimAllocationsParams) UnmarshalCBOR(r io.Reader) error {
> *t = ClaimAllocationsParams{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 2 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.Sectors ([]verifreg.SectorAllocationClaim) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.MaxLength {
> return fmt.Errorf("t.Sectors: array too large (%d)", extra)
> }
>
> if maj != cbg.MajArray {
> return fmt.Errorf("expected cbor array")
> }
>
> if extra > 0 {
> t.Sectors = make([]SectorAllocationClaim, extra)
> }
>
> for i := 0; i < int(extra); i++ {
>
> var v SectorAllocationClaim
> if err := v.UnmarshalCBOR(br); err != nil {
> return err
> }
>
> t.Sectors[i] = v
> }
>
> // t.AllOrNothing (bool) (bool)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajOther {
> return fmt.Errorf("booleans must be major type 7")
> }
> switch extra {
> case 20:
> t.AllOrNothing = false
> case 21:
> t.AllOrNothing = true
> default:
> return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
> }
> return nil
> }
>
> var lengthBufClaimAllocationsReturn = []byte{130}
>
> func (t *ClaimAllocationsReturn) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufClaimAllocationsReturn); err != nil {
> return err
> }
>
> // t.BatchInfo (verifreg.BatchReturn) (struct)
> if err := t.BatchInfo.MarshalCBOR(w); err != nil {
> return err
> }
>
> // t.ClaimedSpace (big.Int) (struct)
> if err := t.ClaimedSpace.MarshalCBOR(w); err != nil {
> return err
> }
> return nil
> }
>
> func (t *ClaimAllocationsReturn) UnmarshalCBOR(r io.Reader) error {
> *t = ClaimAllocationsReturn{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 2 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.BatchInfo (verifreg.BatchReturn) (struct)
>
> {
>
> if err := t.BatchInfo.UnmarshalCBOR(br); err != nil {
> return xerrors.Errorf("unmarshaling t.BatchInfo: %w", err)
> }
>
> }
> // t.ClaimedSpace (big.Int) (struct)
>
> {
>
> if err := t.ClaimedSpace.UnmarshalCBOR(br); err != nil {
> return xerrors.Errorf("unmarshaling t.ClaimedSpace: %w", err)
> }
>
> }
> return nil
> }
>
> var lengthBufGetClaimsParams = []byte{130}
>
> func (t *GetClaimsParams) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufGetClaimsParams); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.Provider (abi.ActorID) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Provider)); err != nil {
> return err
> }
>
> // t.ClaimIds ([]verifreg.ClaimId) (slice)
> if len(t.ClaimIds) > cbg.MaxLength {
> return xerrors.Errorf("Slice value in field t.ClaimIds was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.ClaimIds))); err != nil {
> return err
> }
> for _, v := range t.ClaimIds {
> if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil {
> return err
> }
> }
> return nil
> }
>
> func (t *GetClaimsParams) UnmarshalCBOR(r io.Reader) error {
> *t = GetClaimsParams{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 2 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.Provider (abi.ActorID) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.Provider = abi.ActorID(extra)
>
> }
> // t.ClaimIds ([]verifreg.ClaimId) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.MaxLength {
> return fmt.Errorf("t.ClaimIds: array too large (%d)", extra)
> }
>
> if maj != cbg.MajArray {
> return fmt.Errorf("expected cbor array")
> }
>
> if extra > 0 {
> t.ClaimIds = make([]ClaimId, extra)
> }
>
> for i := 0; i < int(extra); i++ {
>
> maj, val, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return xerrors.Errorf("failed to read uint64 for t.ClaimIds slice: %w", err)
> }
>
> if maj != cbg.MajUnsignedInt {
> return xerrors.Errorf("value read for array t.ClaimIds was not a uint, instead got %d", maj)
> }
>
> t.ClaimIds[i] = ClaimId(val)
> }
>
> return nil
> }
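
The ClaimIds decode above shows the defensive pattern this generated code applies to every slice: read the announced length, reject anything over cbg.MaxLength before allocating, then read exactly that many elements. A distilled sketch of the same order of checks, with uvarint framing standing in for CBOR headers (all names here are hypothetical):

package main

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
)

const maxLength = 8192 // stand-in for cbg.MaxLength

// readUvarintSlice reads a length-prefixed slice of uvarints, refusing
// lengths above maxLength before allocating — the same sequencing as the
// generated UnmarshalCBOR above.
func readUvarintSlice(r *bufio.Reader) ([]uint64, error) {
	n, err := binary.ReadUvarint(r)
	if err != nil {
		return nil, err
	}
	if n > maxLength {
		return nil, fmt.Errorf("array too large (%d)", n)
	}
	var out []uint64
	if n > 0 {
		out = make([]uint64, n)
	}
	for i := range out {
		v, err := binary.ReadUvarint(r)
		if err != nil {
			return nil, fmt.Errorf("failed to read uint64 at index %d: %w", i, err)
		}
		out[i] = v
	}
	return out, nil
}

func main() {
	var buf bytes.Buffer
	tmp := make([]byte, binary.MaxVarintLen64)
	for _, v := range []uint64{3, 10, 20, 30} { // length 3, then elements
		buf.Write(tmp[:binary.PutUvarint(tmp, v)])
	}
	fmt.Println(readUvarintSlice(bufio.NewReader(&buf)))
}
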
>
> var lengthBufGetClaimsReturn = []byte{130}
>
> func (t *GetClaimsReturn) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufGetClaimsReturn); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.BatchInfo (verifreg.BatchReturn) (struct)
> if err := t.BatchInfo.MarshalCBOR(w); err != nil {
> return err
> }
>
> // t.Claims ([]verifreg.Claim) (slice)
> if len(t.Claims) > cbg.MaxLength {
> return xerrors.Errorf("Slice value in field t.Claims was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Claims))); err != nil {
> return err
> }
> for _, v := range t.Claims {
> if err := v.MarshalCBOR(w); err != nil {
> return err
> }
> }
> return nil
> }
>
> func (t *GetClaimsReturn) UnmarshalCBOR(r io.Reader) error {
> *t = GetClaimsReturn{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 2 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.BatchInfo (verifreg.BatchReturn) (struct)
>
> {
>
> if err := t.BatchInfo.UnmarshalCBOR(br); err != nil {
> return xerrors.Errorf("unmarshaling t.BatchInfo: %w", err)
> }
>
> }
> // t.Claims ([]verifreg.Claim) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.MaxLength {
> return fmt.Errorf("t.Claims: array too large (%d)", extra)
> }
>
> if maj != cbg.MajArray {
> return fmt.Errorf("expected cbor array")
> }
>
> if extra > 0 {
> t.Claims = make([]Claim, extra)
> }
>
> for i := 0; i < int(extra); i++ {
>
> var v Claim
> if err := v.UnmarshalCBOR(br); err != nil {
> return err
> }
>
> t.Claims[i] = v
> }
>
> return nil
> }
>
> var lengthBufUniversalReceiverParams = []byte{130}
>
> func (t *UniversalReceiverParams) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufUniversalReceiverParams); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.Type_ (verifreg.ReceiverType) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Type_)); err != nil {
> return err
> }
>
> // t.Payload ([]uint8) (slice)
> if len(t.Payload) > cbg.ByteArrayMaxLen {
> return xerrors.Errorf("Byte array in field t.Payload was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Payload))); err != nil {
> return err
> }
>
> if _, err := w.Write(t.Payload[:]); err != nil {
> return err
> }
> return nil
> }
>
> func (t *UniversalReceiverParams) UnmarshalCBOR(r io.Reader) error {
> *t = UniversalReceiverParams{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 2 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.Type_ (verifreg.ReceiverType) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.Type_ = ReceiverType(extra)
>
> }
> // t.Payload ([]uint8) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.ByteArrayMaxLen {
> return fmt.Errorf("t.Payload: byte array too large (%d)", extra)
> }
> if maj != cbg.MajByteString {
> return fmt.Errorf("expected byte array")
> }
>
> if extra > 0 {
> t.Payload = make([]uint8, extra)
> }
>
> if _, err := io.ReadFull(br, t.Payload[:]); err != nil {
> return err
> }
> return nil
> }
>
> var lengthBufAllocationsResponse = []byte{131}
>
> func (t *AllocationsResponse) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufAllocationsResponse); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.AllocationResults (verifreg.BatchReturn) (struct)
> if err := t.AllocationResults.MarshalCBOR(w); err != nil {
> return err
> }
>
> // t.ExtensionResults (verifreg.BatchReturn) (struct)
> if err := t.ExtensionResults.MarshalCBOR(w); err != nil {
> return err
> }
>
> // t.NewAllocations ([]verifreg.AllocationId) (slice)
> if len(t.NewAllocations) > cbg.MaxLength {
> return xerrors.Errorf("Slice value in field t.NewAllocations was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.NewAllocations))); err != nil {
> return err
> }
> for _, v := range t.NewAllocations {
> if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil {
> return err
> }
> }
> return nil
> }
>
> func (t *AllocationsResponse) UnmarshalCBOR(r io.Reader) error {
> *t = AllocationsResponse{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 3 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.AllocationResults (verifreg.BatchReturn) (struct)
>
> {
>
> if err := t.AllocationResults.UnmarshalCBOR(br); err != nil {
> return xerrors.Errorf("unmarshaling t.AllocationResults: %w", err)
> }
>
> }
> // t.ExtensionResults (verifreg.BatchReturn) (struct)
>
> {
>
> if err := t.ExtensionResults.UnmarshalCBOR(br); err != nil {
> return xerrors.Errorf("unmarshaling t.ExtensionResults: %w", err)
> }
>
> }
> // t.NewAllocations ([]verifreg.AllocationId) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.MaxLength {
> return fmt.Errorf("t.NewAllocations: array too large (%d)", extra)
> }
>
> if maj != cbg.MajArray {
> return fmt.Errorf("expected cbor array")
> }
>
> if extra > 0 {
> t.NewAllocations = make([]AllocationId, extra)
> }
>
> for i := 0; i < int(extra); i++ {
>
> maj, val, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return xerrors.Errorf("failed to read uint64 for t.NewAllocations slice: %w", err)
> }
>
> if maj != cbg.MajUnsignedInt {
> return xerrors.Errorf("value read for array t.NewAllocations was not a uint, instead got %d", maj)
> }
>
> t.NewAllocations[i] = AllocationId(val)
> }
>
> return nil
> }
>
> var lengthBufExtendClaimTermsParams = []byte{129}
>
> func (t *ExtendClaimTermsParams) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufExtendClaimTermsParams); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.Terms ([]verifreg.ClaimTerm) (slice)
> if len(t.Terms) > cbg.MaxLength {
> return xerrors.Errorf("Slice value in field t.Terms was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Terms))); err != nil {
> return err
> }
> for _, v := range t.Terms {
> if err := v.MarshalCBOR(w); err != nil {
> return err
> }
> }
> return nil
> }
>
> func (t *ExtendClaimTermsParams) UnmarshalCBOR(r io.Reader) error {
> *t = ExtendClaimTermsParams{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 1 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.Terms ([]verifreg.ClaimTerm) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.MaxLength {
> return fmt.Errorf("t.Terms: array too large (%d)", extra)
> }
>
> if maj != cbg.MajArray {
> return fmt.Errorf("expected cbor array")
> }
>
> if extra > 0 {
> t.Terms = make([]ClaimTerm, extra)
> }
>
> for i := 0; i < int(extra); i++ {
>
> var v ClaimTerm
> if err := v.UnmarshalCBOR(br); err != nil {
> return err
> }
>
> t.Terms[i] = v
> }
>
> return nil
> }
>
> var lengthBufExtendClaimTermsReturn = []byte{130}
>
> func (t *ExtendClaimTermsReturn) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufExtendClaimTermsReturn); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.SuccessCount (uint64) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SuccessCount)); err != nil {
> return err
> }
>
> // t.FailCodes ([]verifreg.FailCode) (slice)
> if len(t.FailCodes) > cbg.MaxLength {
> return xerrors.Errorf("Slice value in field t.FailCodes was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.FailCodes))); err != nil {
> return err
> }
> for _, v := range t.FailCodes {
> if err := v.MarshalCBOR(w); err != nil {
> return err
> }
> }
> return nil
> }
>
> func (t *ExtendClaimTermsReturn) UnmarshalCBOR(r io.Reader) error {
> *t = ExtendClaimTermsReturn{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 2 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.SuccessCount (uint64) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.SuccessCount = uint64(extra)
>
> }
> // t.FailCodes ([]verifreg.FailCode) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.MaxLength {
> return fmt.Errorf("t.FailCodes: array too large (%d)", extra)
> }
>
> if maj != cbg.MajArray {
> return fmt.Errorf("expected cbor array")
> }
>
> if extra > 0 {
> t.FailCodes = make([]FailCode, extra)
> }
>
> for i := 0; i < int(extra); i++ {
>
> var v FailCode
> if err := v.UnmarshalCBOR(br); err != nil {
> return err
> }
>
> t.FailCodes[i] = v
> }
>
> return nil
> }
>
> var lengthBufRemoveExpiredClaimsParams = []byte{130}
>
> func (t *RemoveExpiredClaimsParams) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufRemoveExpiredClaimsParams); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.Provider (abi.ActorID) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Provider)); err != nil {
> return err
> }
>
> // t.ClaimIds ([]verifreg.ClaimId) (slice)
> if len(t.ClaimIds) > cbg.MaxLength {
> return xerrors.Errorf("Slice value in field t.ClaimIds was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.ClaimIds))); err != nil {
> return err
> }
> for _, v := range t.ClaimIds {
> if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil {
> return err
> }
> }
> return nil
> }
>
> func (t *RemoveExpiredClaimsParams) UnmarshalCBOR(r io.Reader) error {
> *t = RemoveExpiredClaimsParams{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 2 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.Provider (abi.ActorID) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.Provider = abi.ActorID(extra)
>
> }
> // t.ClaimIds ([]verifreg.ClaimId) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.MaxLength {
> return fmt.Errorf("t.ClaimIds: array too large (%d)", extra)
> }
>
> if maj != cbg.MajArray {
> return fmt.Errorf("expected cbor array")
> }
>
> if extra > 0 {
> t.ClaimIds = make([]ClaimId, extra)
> }
>
> for i := 0; i < int(extra); i++ {
>
> maj, val, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return xerrors.Errorf("failed to read uint64 for t.ClaimIds slice: %w", err)
> }
>
> if maj != cbg.MajUnsignedInt {
> return xerrors.Errorf("value read for array t.ClaimIds was not a uint, instead got %d", maj)
> }
>
> t.ClaimIds[i] = ClaimId(val)
> }
>
> return nil
> }
>
> var lengthBufRemoveExpiredClaimsReturn = []byte{130}
>
> func (t *RemoveExpiredClaimsReturn) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufRemoveExpiredClaimsReturn); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.Considered ([]verifreg.AllocationId) (slice)
> if len(t.Considered) > cbg.MaxLength {
> return xerrors.Errorf("Slice value in field t.Considered was too long")
> }
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Considered))); err != nil {
> return err
> }
> for _, v := range t.Considered {
> if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil {
> return err
> }
> }
>
> // t.Results (verifreg.BatchReturn) (struct)
> if err := t.Results.MarshalCBOR(w); err != nil {
> return err
> }
> return nil
> }
>
> func (t *RemoveExpiredClaimsReturn) UnmarshalCBOR(r io.Reader) error {
> *t = RemoveExpiredClaimsReturn{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 2 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.Considered ([]verifreg.AllocationId) (slice)
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
>
> if extra > cbg.MaxLength {
> return fmt.Errorf("t.Considered: array too large (%d)", extra)
> }
>
> if maj != cbg.MajArray {
> return fmt.Errorf("expected cbor array")
> }
>
> if extra > 0 {
> t.Considered = make([]AllocationId, extra)
> }
>
> for i := 0; i < int(extra); i++ {
>
> maj, val, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return xerrors.Errorf("failed to read uint64 for t.Considered slice: %w", err)
> }
>
> if maj != cbg.MajUnsignedInt {
> return xerrors.Errorf("value read for array t.Considered was not a uint, instead got %d", maj)
> }
>
> t.Considered[i] = AllocationId(val)
> }
>
> // t.Results (verifreg.BatchReturn) (struct)
>
> {
>
> if err := t.Results.UnmarshalCBOR(br); err != nil {
> return xerrors.Errorf("unmarshaling t.Results: %w", err)
> }
>
> }
> return nil
> }
>
710a2002,2840
> }
> return nil
> }
>
> var lengthBufFailCode = []byte{130}
>
> func (t *FailCode) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufFailCode); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.Idx (uint64) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Idx)); err != nil {
> return err
> }
>
> // t.Code (exitcode.ExitCode) (int64)
> if t.Code >= 0 {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Code)); err != nil {
> return err
> }
> } else {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Code-1)); err != nil {
> return err
> }
> }
> return nil
> }
>
> func (t *FailCode) UnmarshalCBOR(r io.Reader) error {
> *t = FailCode{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 2 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.Idx (uint64) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.Idx = uint64(extra)
>
> }
> // t.Code (exitcode.ExitCode) (int64)
> {
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> var extraI int64
> if err != nil {
> return err
> }
> switch maj {
> case cbg.MajUnsignedInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 positive overflow")
> }
> case cbg.MajNegativeInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 negative oveflow")
> }
> extraI = -1 - extraI
> default:
> return fmt.Errorf("wrong type for int64 field: %d", maj)
> }
>
> t.Code = exitcode.ExitCode(extraI)
> }
> return nil
> }
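
The int64 handling above mirrors CBOR's split signed-integer representation: a non-negative n travels under major type 0 as n, a negative n under major type 1 as -1-n (so -1 encodes with argument 0). A tiny round-trip sketch of that mapping:

package main

import "fmt"

// encodeInt64 maps an int64 onto CBOR's (major type, argument) pair the same
// way the generated MarshalCBOR above does: major type 0 carries n for n >= 0,
// major type 1 carries -1-n for n < 0.
func encodeInt64(v int64) (maj uint8, arg uint64) {
	if v >= 0 {
		return 0, uint64(v)
	}
	return 1, uint64(-v - 1)
}

// decodeInt64 inverts the mapping, as the switch in UnmarshalCBOR does.
func decodeInt64(maj uint8, arg uint64) int64 {
	if maj == 0 {
		return int64(arg)
	}
	return -1 - int64(arg)
}

func main() {
	for _, v := range []int64{0, 1, -1, -100} {
		maj, arg := encodeInt64(v)
		fmt.Printf("%d -> (maj=%d, arg=%d) -> %d\n", v, maj, arg, decodeInt64(maj, arg))
	}
}
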
>
> var lengthBufSectorAllocationClaim = []byte{134}
>
> func (t *SectorAllocationClaim) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufSectorAllocationClaim); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.Client (abi.ActorID) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Client)); err != nil {
> return err
> }
>
> // t.AllocationId (verifreg.AllocationId) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.AllocationId)); err != nil {
> return err
> }
>
> // t.Data (cid.Cid) (struct)
>
> if err := cbg.WriteCidBuf(scratch, w, t.Data); err != nil {
> return xerrors.Errorf("failed to write cid field t.Data: %w", err)
> }
>
> // t.Size (abi.PaddedPieceSize) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Size)); err != nil {
> return err
> }
>
> // t.Sector (abi.SectorNumber) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Sector)); err != nil {
> return err
> }
>
> // t.SectorExpiry (abi.ChainEpoch) (int64)
> if t.SectorExpiry >= 0 {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorExpiry)); err != nil {
> return err
> }
> } else {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SectorExpiry-1)); err != nil {
> return err
> }
> }
> return nil
> }
>
> func (t *SectorAllocationClaim) UnmarshalCBOR(r io.Reader) error {
> *t = SectorAllocationClaim{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 6 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.Client (abi.ActorID) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.Client = abi.ActorID(extra)
>
> }
> // t.AllocationId (verifreg.AllocationId) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.AllocationId = AllocationId(extra)
>
> }
> // t.Data (cid.Cid) (struct)
>
> {
>
> c, err := cbg.ReadCid(br)
> if err != nil {
> return xerrors.Errorf("failed to read cid field t.Data: %w", err)
> }
>
> t.Data = c
>
> }
> // t.Size (abi.PaddedPieceSize) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.Size = abi.PaddedPieceSize(extra)
>
> }
> // t.Sector (abi.SectorNumber) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.Sector = abi.SectorNumber(extra)
>
> }
> // t.SectorExpiry (abi.ChainEpoch) (int64)
> {
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> var extraI int64
> if err != nil {
> return err
> }
> switch maj {
> case cbg.MajUnsignedInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 positive overflow")
> }
> case cbg.MajNegativeInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 negative oveflow")
> }
> extraI = -1 - extraI
> default:
> return fmt.Errorf("wrong type for int64 field: %d", maj)
> }
>
> t.SectorExpiry = abi.ChainEpoch(extraI)
> }
> return nil
> }
>
> var lengthBufClaim = []byte{136}
>
> func (t *Claim) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufClaim); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.Provider (abi.ActorID) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Provider)); err != nil {
> return err
> }
>
> // t.Client (abi.ActorID) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Client)); err != nil {
> return err
> }
>
> // t.Data (cid.Cid) (struct)
>
> if err := cbg.WriteCidBuf(scratch, w, t.Data); err != nil {
> return xerrors.Errorf("failed to write cid field t.Data: %w", err)
> }
>
> // t.Size (abi.PaddedPieceSize) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Size)); err != nil {
> return err
> }
>
> // t.TermMin (abi.ChainEpoch) (int64)
> if t.TermMin >= 0 {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TermMin)); err != nil {
> return err
> }
> } else {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.TermMin-1)); err != nil {
> return err
> }
> }
>
> // t.TermMax (abi.ChainEpoch) (int64)
> if t.TermMax >= 0 {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TermMax)); err != nil {
> return err
> }
> } else {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.TermMax-1)); err != nil {
> return err
> }
> }
>
> // t.TermStart (abi.ChainEpoch) (int64)
> if t.TermStart >= 0 {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TermStart)); err != nil {
> return err
> }
> } else {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.TermStart-1)); err != nil {
> return err
> }
> }
>
> // t.Sector (abi.SectorNumber) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Sector)); err != nil {
> return err
> }
>
> return nil
> }
>
> func (t *Claim) UnmarshalCBOR(r io.Reader) error {
> *t = Claim{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 8 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.Provider (abi.ActorID) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.Provider = abi.ActorID(extra)
>
> }
> // t.Client (abi.ActorID) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.Client = abi.ActorID(extra)
>
> }
> // t.Data (cid.Cid) (struct)
>
> {
>
> c, err := cbg.ReadCid(br)
> if err != nil {
> return xerrors.Errorf("failed to read cid field t.Data: %w", err)
> }
>
> t.Data = c
>
> }
> // t.Size (abi.PaddedPieceSize) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.Size = abi.PaddedPieceSize(extra)
>
> }
> // t.TermMin (abi.ChainEpoch) (int64)
> {
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> var extraI int64
> if err != nil {
> return err
> }
> switch maj {
> case cbg.MajUnsignedInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 positive overflow")
> }
> case cbg.MajNegativeInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 negative oveflow")
> }
> extraI = -1 - extraI
> default:
> return fmt.Errorf("wrong type for int64 field: %d", maj)
> }
>
> t.TermMin = abi.ChainEpoch(extraI)
> }
> // t.TermMax (abi.ChainEpoch) (int64)
> {
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> var extraI int64
> if err != nil {
> return err
> }
> switch maj {
> case cbg.MajUnsignedInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 positive overflow")
> }
> case cbg.MajNegativeInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 negative oveflow")
> }
> extraI = -1 - extraI
> default:
> return fmt.Errorf("wrong type for int64 field: %d", maj)
> }
>
> t.TermMax = abi.ChainEpoch(extraI)
> }
> // t.TermStart (abi.ChainEpoch) (int64)
> {
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> var extraI int64
> if err != nil {
> return err
> }
> switch maj {
> case cbg.MajUnsignedInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 positive overflow")
> }
> case cbg.MajNegativeInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 negative oveflow")
> }
> extraI = -1 - extraI
> default:
> return fmt.Errorf("wrong type for int64 field: %d", maj)
> }
>
> t.TermStart = abi.ChainEpoch(extraI)
> }
> // t.Sector (abi.SectorNumber) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.Sector = abi.SectorNumber(extra)
>
> }
> return nil
> }
>
> var lengthBufClaimTerm = []byte{131}
>
> func (t *ClaimTerm) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufClaimTerm); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.Provider (abi.ActorID) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Provider)); err != nil {
> return err
> }
>
> // t.ClaimId (verifreg.ClaimId) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ClaimId)); err != nil {
> return err
> }
>
> // t.TermMax (abi.ChainEpoch) (int64)
> if t.TermMax >= 0 {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TermMax)); err != nil {
> return err
> }
> } else {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.TermMax-1)); err != nil {
> return err
> }
> }
> return nil
> }
>
> func (t *ClaimTerm) UnmarshalCBOR(r io.Reader) error {
> *t = ClaimTerm{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 3 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.Provider (abi.ActorID) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.Provider = abi.ActorID(extra)
>
> }
> // t.ClaimId (verifreg.ClaimId) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.ClaimId = ClaimId(extra)
>
> }
> // t.TermMax (abi.ChainEpoch) (int64)
> {
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> var extraI int64
> if err != nil {
> return err
> }
> switch maj {
> case cbg.MajUnsignedInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 positive overflow")
> }
> case cbg.MajNegativeInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 negative oveflow")
> }
> extraI = -1 - extraI
> default:
> return fmt.Errorf("wrong type for int64 field: %d", maj)
> }
>
> t.TermMax = abi.ChainEpoch(extraI)
> }
> return nil
> }
>
> var lengthBufAllocation = []byte{135}
>
> func (t *Allocation) MarshalCBOR(w io.Writer) error {
> if t == nil {
> _, err := w.Write(cbg.CborNull)
> return err
> }
> if _, err := w.Write(lengthBufAllocation); err != nil {
> return err
> }
>
> scratch := make([]byte, 9)
>
> // t.Client (abi.ActorID) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Client)); err != nil {
> return err
> }
>
> // t.Provider (abi.ActorID) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Provider)); err != nil {
> return err
> }
>
> // t.Data (cid.Cid) (struct)
>
> if err := cbg.WriteCidBuf(scratch, w, t.Data); err != nil {
> return xerrors.Errorf("failed to write cid field t.Data: %w", err)
> }
>
> // t.Size (abi.PaddedPieceSize) (uint64)
>
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Size)); err != nil {
> return err
> }
>
> // t.TermMin (abi.ChainEpoch) (int64)
> if t.TermMin >= 0 {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TermMin)); err != nil {
> return err
> }
> } else {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.TermMin-1)); err != nil {
> return err
> }
> }
>
> // t.TermMax (abi.ChainEpoch) (int64)
> if t.TermMax >= 0 {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TermMax)); err != nil {
> return err
> }
> } else {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.TermMax-1)); err != nil {
> return err
> }
> }
>
> // t.Expiration (abi.ChainEpoch) (int64)
> if t.Expiration >= 0 {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Expiration)); err != nil {
> return err
> }
> } else {
> if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Expiration-1)); err != nil {
> return err
> }
> }
> return nil
> }
>
> func (t *Allocation) UnmarshalCBOR(r io.Reader) error {
> *t = Allocation{}
>
> br := cbg.GetPeeker(r)
> scratch := make([]byte, 8)
>
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajArray {
> return fmt.Errorf("cbor input should be of type array")
> }
>
> if extra != 7 {
> return fmt.Errorf("cbor input had wrong number of fields")
> }
>
> // t.Client (abi.ActorID) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.Client = abi.ActorID(extra)
>
> }
> // t.Provider (abi.ActorID) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.Provider = abi.ActorID(extra)
>
> }
> // t.Data (cid.Cid) (struct)
>
> {
>
> c, err := cbg.ReadCid(br)
> if err != nil {
> return xerrors.Errorf("failed to read cid field t.Data: %w", err)
> }
>
> t.Data = c
>
> }
> // t.Size (abi.PaddedPieceSize) (uint64)
>
> {
>
> maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
> if err != nil {
> return err
> }
> if maj != cbg.MajUnsignedInt {
> return fmt.Errorf("wrong type for uint64 field")
> }
> t.Size = abi.PaddedPieceSize(extra)
>
> }
> // t.TermMin (abi.ChainEpoch) (int64)
> {
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> var extraI int64
> if err != nil {
> return err
> }
> switch maj {
> case cbg.MajUnsignedInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 positive overflow")
> }
> case cbg.MajNegativeInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 negative oveflow")
> }
> extraI = -1 - extraI
> default:
> return fmt.Errorf("wrong type for int64 field: %d", maj)
> }
>
> t.TermMin = abi.ChainEpoch(extraI)
> }
> // t.TermMax (abi.ChainEpoch) (int64)
> {
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> var extraI int64
> if err != nil {
> return err
> }
> switch maj {
> case cbg.MajUnsignedInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 positive overflow")
> }
> case cbg.MajNegativeInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 negative oveflow")
> }
> extraI = -1 - extraI
> default:
> return fmt.Errorf("wrong type for int64 field: %d", maj)
> }
>
> t.TermMax = abi.ChainEpoch(extraI)
> }
> // t.Expiration (abi.ChainEpoch) (int64)
> {
> maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
> var extraI int64
> if err != nil {
> return err
> }
> switch maj {
> case cbg.MajUnsignedInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 positive overflow")
> }
> case cbg.MajNegativeInt:
> extraI = int64(extra)
> if extraI < 0 {
> return fmt.Errorf("int64 negative oveflow")
> }
> extraI = -1 - extraI
> default:
> return fmt.Errorf("wrong type for int64 field: %d", maj)
> }
>
> t.Expiration = abi.ChainEpoch(extraI)
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/verifreg: invariants.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/verifreg/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/verifreg/methods.go
4a5
>
5a7
> "github.com/filecoin-project/go-state-types/builtin"
8,15c10,23
< var Methods = []interface{}{
< 1: *new(func(interface{}, *address.Address) *abi.EmptyValue), // Constructor
< 2: *new(func(interface{}, *AddVerifierParams) *abi.EmptyValue), // AddVerifier
< 3: *new(func(interface{}, *address.Address) *abi.EmptyValue), // RemoveVerifier
< 4: *new(func(interface{}, *AddVerifiedClientParams) *abi.EmptyValue), // AddVerifiedClient
< 5: *new(func(interface{}, *UseBytesParams) *abi.EmptyValue), // UseBytes
< 6: *new(func(interface{}, *RestoreBytesParams) *abi.EmptyValue), // RestoreBytes
< 7: *new(func(interface{}, *RemoveDataCapParams) *RemoveDataCapReturn), // RemoveVerifiedClientDataCap
---
> var Methods = map[uint64]builtin.MethodMeta{
> 1: {"Constructor", *new(func(*address.Address) *abi.EmptyValue)}, // Constructor
> 2: {"AddVerifier", *new(func(*AddVerifierParams) *abi.EmptyValue)}, // AddVerifier
> 3: {"RemoveVerifier", *new(func(*address.Address) *abi.EmptyValue)}, // RemoveVerifier
> 4: {"AddVerifiedClient", *new(func(*AddVerifiedClientParams) *abi.EmptyValue)}, // AddVerifiedClient
> 5: {"UseBytes", nil}, // deprecated
> 6: {"RestoreBytes", nil}, // deprecated
> 7: {"RemoveVerifiedClientDataCap", *new(func(*RemoveDataCapParams) *RemoveDataCapReturn)}, // RemoveVerifiedClientDataCap
> 8: {"RemoveExpiredAllocations", *new(func(*RemoveExpiredAllocationsParams) *RemoveExpiredAllocationsReturn)}, // RemoveExpiredAllocations
> 9: {"ClaimAllocations", *new(func(*ClaimAllocationsParams) *ClaimAllocationsReturn)}, // ClaimAllocations
> 10: {"GetClaims", *new(func(*GetClaimsParams) *GetClaimsReturn)}, // GetClaims
> 11: {"ExtendClaimTerms", *new(func(*ExtendClaimTermsParams) *ExtendClaimTermsReturn)}, // ExtendClaimTerms
> 12: {"RemoveExpiredClaims", *new(func(*RemoveExpiredClaimsParams) *RemoveExpiredClaimsReturn)}, // RemoveExpiredClaims
> uint64(builtin.UniversalReceiverHookMethodNum): {"UniversalReceiverHook", *new(func(*UniversalReceiverParams) *AllocationsResponse)}, // UniversalReceiverHook
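
The reworked Methods table swaps the positional []interface{} for a map from method number to a name-plus-typed-stub pair, so callers can resolve human-readable method names. A sketch of such a lookup, with a locally defined struct mirroring the two-field shape of builtin.MethodMeta as used in the literals above (the local type is an assumption, not the real API):

package main

import "fmt"

// methodMeta mirrors the two-field shape used in the new Methods map above:
// a human-readable name plus a typed stub describing params and return.
// It is a local stand-in, not the real builtin.MethodMeta definition.
type methodMeta struct {
	Name   string
	Method interface{}
}

var methods = map[uint64]methodMeta{
	1: {"Constructor", nil},
	9: {"ClaimAllocations", nil},
}

// methodName resolves a method number to a name, something the old
// positional table could not express.
func methodName(num uint64) string {
	if m, ok := methods[num]; ok {
		return m.Name
	}
	return fmt.Sprintf("unknown method %d", num)
}

func main() {
	fmt.Println(methodName(9))  // ClaimAllocations
	fmt.Println(methodName(42)) // unknown method 42
}
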
Only in b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/verifreg: policy.go
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/verifreg/verified_registry_state.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/verifreg/verified_registry_state.go
5a6
> cbg "github.com/whyrusleeping/cbor-gen"
7c8
< addr "github.com/filecoin-project/go-address"
---
> "github.com/filecoin-project/go-address"
9d9
< "github.com/filecoin-project/go-state-types/crypto"
17a18,19
> var DataCapGranularity = builtin.TokenPrecision
>
27c29
< RootKey addr.Address
---
> RootKey address.Address
33,35d34
< // VerifiedClients can add VerifiedClientData, up to DataCap.
< VerifiedClients cid.Cid // HAMT[addr.Address]DataCap
<
39a39,48
>
> // Maps client IDs to allocations made by that client.
> Allocations cid.Cid // HAMT[ActorID]HAMT[AllocationID]Allocation
>
> // Next allocation identifier to use.
> // The value 0 is reserved to mean "no allocation".
> NextAllocationId AllocationId
>
> // Maps provider IDs to allocations claimed by that provider.
> Claims cid.Cid // HAMT[ActorID]HAMT[ClaimID]Claim
45c54
< func ConstructState(store adt.Store, rootKeyAddress addr.Address) (*State, error) {
---
> func ConstructState(store adt.Store, rootKeyAddress address.Address) (*State, error) {
54d62
< VerifiedClients: emptyMapCid,
55a64,66
> Allocations: emptyMapCid,
> NextAllocationId: 1,
> Claims: emptyMapCid,
59,76c70,289
< // A verifier who wants to send/agree to a RemoveDataCapRequest should sign a RemoveDataCapProposal and send the signed proposal to the root key holder.
< type RemoveDataCapProposal struct {
< // VerifiedClient is the client address to remove the DataCap from
< // The address must be an ID address
< VerifiedClient addr.Address
< // DataCapAmount is the amount of DataCap to be removed from the VerifiedClient address
< DataCapAmount DataCap
< // RemovalProposalID is the counter of the proposal sent by the Verifier for the VerifiedClient
< RemovalProposalID RmDcProposalID
< }
<
< // A verifier who wants to submit a request should send their RemoveDataCapRequest to the RKH.
< type RemoveDataCapRequest struct {
< // Verifier is the verifier address used for VerifierSignature.
< // The address can be address.SECP256K1 or address.BLS
< Verifier addr.Address
< // VerifierSignature is the Verifier's signature over a RemoveDataCapProposal
< VerifierSignature crypto.Signature
---
> func (st *State) FindAllocation(store adt.Store, clientIdAddr address.Address, allocationId AllocationId) (*Allocation, bool, error) {
> if clientIdAddr.Protocol() != address.ID {
> return nil, false, xerrors.Errorf("can only look up ID addresses")
> }
>
> innerHamtCid, err := getInnerHamtCid(store, abi.IdAddrKey(clientIdAddr), st.Allocations)
> if err != nil {
> return nil, false, err
> }
>
> idToAllocationMap, err := adt.AsMap(store, innerHamtCid, builtin.DefaultHamtBitwidth)
> if err != nil {
> return nil, false, xerrors.Errorf("couldn't get inner map: %x", err)
> }
>
> var allocation Allocation
> if found, err := idToAllocationMap.Get(allocationId, &allocation); err != nil {
> return nil, false, xerrors.Errorf("looking up allocation ID: %d: %w", allocationId, err)
> } else if !found {
> return nil, false, nil
> }
>
> clientId, err := address.IDFromAddress(clientIdAddr)
> if err != nil {
> return nil, false, xerrors.Errorf("couldn't get ID from clientIdAddr: %s", clientIdAddr)
> }
>
> if uint64(allocation.Client) != clientId {
> return nil, false, xerrors.Errorf("clientId: %d did not match client in allocation: %d", clientId, allocation.Client)
> }
>
> return &allocation, true, nil
> }
>
> func (st *State) FindClaim(store adt.Store, providerIdAddr address.Address, claimId ClaimId) (*Claim, bool, error) {
> if providerIdAddr.Protocol() != address.ID {
> return nil, false, xerrors.Errorf("can only look up ID addresses")
> }
>
> innerHamtCid, err := getInnerHamtCid(store, abi.IdAddrKey(providerIdAddr), st.Claims)
> if err != nil {
> return nil, false, err
> }
>
> idToClaimsMap, err := adt.AsMap(store, innerHamtCid, builtin.DefaultHamtBitwidth)
> if err != nil {
> return nil, false, xerrors.Errorf("couldn't get inner map: %x", err)
> }
>
> var claim Claim
> if found, err := idToClaimsMap.Get(claimId, &claim); err != nil {
> return nil, false, xerrors.Errorf("looking up allocation ID: %d: %w", claimId, err)
> } else if !found {
> return nil, false, nil
> }
>
> providerId, err := address.IDFromAddress(providerIdAddr)
> if err != nil {
> return nil, false, xerrors.Errorf("couldn't get ID from providerIdAddr: %s", providerIdAddr)
> }
>
> if uint64(claim.Provider) != providerId {
> return nil, false, xerrors.Errorf("providerId: %d did not match provider in claim: %d", providerId, claim.Provider)
> }
>
> return &claim, true, nil
> }
>
> func getInnerHamtCid(store adt.Store, key abi.Keyer, mapCid cid.Cid) (cid.Cid, error) {
> actorToHamtMap, err := adt.AsMap(store, mapCid, builtin.DefaultHamtBitwidth)
> if err != nil {
> return cid.Undef, xerrors.Errorf("couldn't get outer map: %w", err)
> }
>
> var innerHamtCid cbg.CborCid
> if found, err := actorToHamtMap.Get(key, &innerHamtCid); err != nil {
> return cid.Undef, xerrors.Errorf("looking up key: %s: %w", key, err)
> } else if !found {
> return cid.Undef, xerrors.Errorf("did not find key: %s", key)
> }
>
> return cid.Cid(innerHamtCid), nil
> }
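
FindAllocation and FindClaim above share one shape: resolve the outer HAMT by actor ID via getInnerHamtCid, resolve the inner HAMT by allocation/claim ID, then sanity-check that the record's own actor field matches the outer key. A toy model of that two-level lookup with plain Go maps (no IPLD/HAMT machinery; names are illustrative):

package main

import "fmt"

type allocation struct {
	Client uint64
	Size   uint64
}

// find mirrors FindAllocation's structure: outer lookup, inner lookup, then a
// consistency check that the record's Client matches the outer key.
func find(outer map[uint64]map[uint64]allocation, client, allocID uint64) (*allocation, bool, error) {
	inner, ok := outer[client]
	if !ok {
		return nil, false, fmt.Errorf("did not find key: %d", client)
	}
	a, ok := inner[allocID]
	if !ok {
		return nil, false, nil
	}
	if a.Client != client {
		return nil, false, fmt.Errorf("client %d did not match client in allocation: %d", client, a.Client)
	}
	return &a, true, nil
}

func main() {
	outer := map[uint64]map[uint64]allocation{
		1000: {7: {Client: 1000, Size: 2048}},
	}
	fmt.Println(find(outer, 1000, 7))
}
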
>
> func (st *State) LoadAllocationsToMap(store adt.Store, clientIdAddr address.Address) (map[AllocationId]Allocation, error) {
> if clientIdAddr.Protocol() != address.ID {
> return nil, xerrors.Errorf("can only look up ID addresses")
> }
>
> innerHamtCid, err := getInnerHamtCid(store, abi.IdAddrKey(clientIdAddr), st.Allocations)
> if err != nil {
> return nil, err
> }
>
> adtMap, err := adt.AsMap(store, innerHamtCid, builtin.DefaultHamtBitwidth)
> if err != nil {
> return nil, xerrors.Errorf("couldn't get map: %x", err)
> }
>
> var allocIdToAlloc = make(map[AllocationId]Allocation)
> var out Allocation
> err = adtMap.ForEach(&out, func(key string) error {
> uintKey, err := abi.ParseUIntKey(key)
> if err != nil {
> return xerrors.Errorf("couldn't parse key to uint: %x", err)
> }
> allocIdToAlloc[AllocationId(uintKey)] = out
> return nil
> })
> if err != nil {
> return nil, err
> }
>
> return allocIdToAlloc, nil
> }
>
> func (st *State) LoadClaimsToMap(store adt.Store, providerIdAddr address.Address) (map[ClaimId]Claim, error) {
> if providerIdAddr.Protocol() != address.ID {
> return nil, xerrors.Errorf("can only look up ID addresses")
> }
>
> innerHamtCid, err := getInnerHamtCid(store, abi.IdAddrKey(providerIdAddr), st.Claims)
> if err != nil {
> return nil, err
> }
>
> adtMap, err := adt.AsMap(store, innerHamtCid, builtin.DefaultHamtBitwidth)
> if err != nil {
> return nil, xerrors.Errorf("couldn't get map: %x", err)
> }
>
> var claimIdToClaim = make(map[ClaimId]Claim)
> var out Claim
> err = adtMap.ForEach(&out, func(key string) error {
> uintKey, err := abi.ParseUIntKey(key)
> if err != nil {
> return xerrors.Errorf("couldn't parse key to uint: %w", err)
> }
> claimIdToClaim[ClaimId(uintKey)] = out
> return nil
> })
> if err != nil {
> return nil, err
> }
>
> return claimIdToClaim, nil
> }
>
> func (st *State) GetAllClaims(store adt.Store) (map[ClaimId]Claim, error) {
> allClaims := make(map[ClaimId]Claim)
>
> actorToHamtMap, err := adt.AsMap(store, st.Claims, builtin.DefaultHamtBitwidth)
> if err != nil {
> return nil, xerrors.Errorf("couldn't get outer map: %x", err)
> }
>
> var innerHamtCid cbg.CborCid
> err = actorToHamtMap.ForEach(&innerHamtCid, func(idKey string) error {
> innerMap, err := adt.AsMap(store, cid.Cid(innerHamtCid), builtin.DefaultHamtBitwidth)
> if err != nil {
> return xerrors.Errorf("couldn't get inner map: %x", err)
> }
>
> var out Claim
> err = innerMap.ForEach(&out, func(key string) error {
> uintKey, err := abi.ParseUIntKey(key)
> if err != nil {
> return xerrors.Errorf("couldn't parse idKey to uint: %w", err)
> }
> allClaims[ClaimId(uintKey)] = out
> return nil
> })
> if err != nil {
> return err
> }
>
> return nil
> })
> if err != nil {
> return nil, err
> }
>
> return allClaims, nil
> }
>
> func (st *State) GetAllAllocations(store adt.Store) (map[AllocationId]Allocation, error) {
> allAllocations := make(map[AllocationId]Allocation)
>
> actorToHamtMap, err := adt.AsMap(store, st.Allocations, builtin.DefaultHamtBitwidth)
> if err != nil {
> return nil, xerrors.Errorf("couldn't get outer map: %x", err)
> }
>
> var innerHamtCid cbg.CborCid
> err = actorToHamtMap.ForEach(&innerHamtCid, func(idKey string) error {
> innerMap, err := adt.AsMap(store, cid.Cid(innerHamtCid), builtin.DefaultHamtBitwidth)
> if err != nil {
> return xerrors.Errorf("couldn't get inner map: %x", err)
> }
>
> var out Allocation
> err = innerMap.ForEach(&out, func(key string) error {
> uintKey, err := abi.ParseUIntKey(key)
> if err != nil {
> return xerrors.Errorf("couldn't parse idKey to uint: %w", err)
> }
> allAllocations[AllocationId(uintKey)] = out
> return nil
> })
> if err != nil {
> return err
> }
>
> return nil
> })
> if err != nil {
> return nil, err
> }
>
> return allAllocations, nil
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/verifreg/verifreg_types.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/verifreg/verifreg_types.go
5a6,10
> "github.com/filecoin-project/go-state-types/big"
> "github.com/filecoin-project/go-state-types/crypto"
> "github.com/filecoin-project/go-state-types/exitcode"
> "github.com/ipfs/go-cid"
> "github.com/multiformats/go-varint"
7a13,32
> // RemoveDataCapProposal A verifier who wants to send/agree to a RemoveDataCapRequest should sign a RemoveDataCapProposal and send the signed proposal to the root key holder.
> type RemoveDataCapProposal struct {
> // VerifiedClient is the client address to remove the DataCap from
> // The address must be an ID address
> VerifiedClient addr.Address
> // DataCapAmount is the amount of DataCap to be removed from the VerifiedClient address
> DataCapAmount DataCap
> // RemovalProposalID is the counter of the proposal sent by the Verifier for the VerifiedClient
> RemovalProposalID RmDcProposalID
> }
>
> // RemoveDataCapRequest A verifier who wants to submit a request should send their RemoveDataCapRequest to the RKH.
> type RemoveDataCapRequest struct {
> // Verifier is the verifier address used for VerifierSignature.
> // The address can be address.SECP256K1 or address.BLS
> Verifier addr.Address
> // VerifierSignature is the Verifier's signature over a RemoveDataCapProposal
> VerifierSignature crypto.Signature
> }
>
19,20c44,47
< Address addr.Address // Address of verified client.
< DealSize abi.StoragePower // Number of bytes to use.
---
> // Address of verified client.
> Address addr.Address
> // Number of bytes to use.
> DealSize abi.StoragePower
37a65,227
> }
>
> type RemoveExpiredAllocationsParams struct {
> Client abi.ActorID
> AllocationIds []AllocationId
> }
>
> type RemoveExpiredAllocationsReturn struct {
> Considered []AllocationId
> Results BatchReturn
> DataCapRecovered DataCap
> }
>
> type BatchReturn struct {
> SuccessCount uint64
> FailCodes []FailCode
> }
>
> type FailCode struct {
> Idx uint64
> Code exitcode.ExitCode
> }
>
> type AllocationId uint64
>
> func (a AllocationId) Key() string {
> return string(varint.ToUvarint(uint64(a)))
> }
>
> type ClaimId uint64
>
> func (a ClaimId) Key() string {
> return string(varint.ToUvarint(uint64(a)))
> }
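
Key() above turns the numeric ID into an unsigned varint and uses the raw bytes as the HAMT key; abi.ParseUIntKey reverses it in the Load*/GetAll* helpers. The standard library's binary.PutUvarint produces the same LEB128-style bytes as go-varint's ToUvarint for these values, so the encoding can be sketched without the dependency:

package main

import (
	"encoding/binary"
	"fmt"
)

// key encodes an ID the way AllocationId.Key and ClaimId.Key above do:
// an unsigned LEB128 varint whose raw bytes become the map key.
func key(id uint64) string {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, id)
	return string(buf[:n])
}

// parseKey inverts it, as abi.ParseUIntKey does in the helpers above.
func parseKey(k string) (uint64, error) {
	v, n := binary.Uvarint([]byte(k))
	if n <= 0 {
		return 0, fmt.Errorf("invalid varint key")
	}
	return v, nil
}

func main() {
	fmt.Printf("%x\n", key(1))   // 01
	fmt.Printf("%x\n", key(300)) // ac02
	fmt.Println(parseKey(key(300)))
}
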
>
> type ClaimAllocationsParams struct {
> Sectors []SectorAllocationClaim
> AllOrNothing bool
> }
>
> type SectorAllocationClaim struct {
> Client abi.ActorID
> AllocationId AllocationId
> Data cid.Cid
> Size abi.PaddedPieceSize
> Sector abi.SectorNumber
> SectorExpiry abi.ChainEpoch
> }
>
> type ClaimAllocationsReturn struct {
> BatchInfo BatchReturn
> ClaimedSpace big.Int
> }
>
> type GetClaimsParams struct {
> Provider abi.ActorID
> ClaimIds []ClaimId
> }
>
> type GetClaimsReturn struct {
> BatchInfo BatchReturn
> Claims []Claim
> }
>
> type Claim struct {
> // The provider storing the data (from allocation).
> Provider abi.ActorID
> // The client which allocated the DataCap (from allocation).
> Client abi.ActorID
> // Identifier of the data committed (from allocation).
> Data cid.Cid
> // The (padded) size of data (from allocation).
> Size abi.PaddedPieceSize
> // The min period which the provider must commit to storing data
> TermMin abi.ChainEpoch
> // The max period for which provider can earn QA-power for the data
> TermMax abi.ChainEpoch
> // The epoch at which the (first range of the) piece was committed.
> TermStart abi.ChainEpoch
> // ID of the provider's sector in which the data is committed.
> Sector abi.SectorNumber
> }
>
> type Allocation struct {
> // The verified client which allocated the DataCap.
> Client abi.ActorID
> // The provider (miner actor) which may claim the allocation.
> Provider abi.ActorID
> // Identifier of the data to be committed.
> Data cid.Cid
> // The (padded) size of data.
> Size abi.PaddedPieceSize
> // The minimum duration which the provider must commit to storing the piece to avoid
> // early-termination penalties (epochs).
> TermMin abi.ChainEpoch
> // The maximum period for which a provider can earn quality-adjusted power
> // for the piece (epochs).
> TermMax abi.ChainEpoch
> // The latest epoch by which a provider must commit data before the allocation expires.
> Expiration abi.ChainEpoch
> }
>
> type UniversalReceiverParams struct {
> Type_ ReceiverType
> Payload []byte
> }
>
> type ReceiverType uint64
>
> type AllocationsResponse struct {
> AllocationResults BatchReturn
> ExtensionResults BatchReturn
> NewAllocations []AllocationId
> }
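
UniversalReceiverParams is a deliberately generic envelope: Type_ tags what the Payload bytes contain, and the receiving actor decodes Payload into the structure that tag promises (allocation requests, in the verifreg case). A schematic of that tag-then-decode dispatch; the tag constant is an illustrative assumption and JSON stands in for the CBOR used on-chain:

package main

import (
	"encoding/json" // stand-in for the CBOR encoding used on-chain
	"fmt"
)

type receiverType uint64

// tokenReceived is an illustrative tag value, not the real type number.
const tokenReceived receiverType = 1

type universalReceiverParams struct {
	Type    receiverType
	Payload []byte
}

type allocationRequest struct {
	Provider uint64
	Size     uint64
}

// handle shows the envelope pattern: dispatch on the tag, then decode the
// opaque payload accordingly.
func handle(p universalReceiverParams) error {
	switch p.Type {
	case tokenReceived:
		var reqs []allocationRequest
		if err := json.Unmarshal(p.Payload, &reqs); err != nil {
			return err
		}
		fmt.Printf("received %d allocation request(s)\n", len(reqs))
		return nil
	default:
		return fmt.Errorf("unexpected receiver type %d", p.Type)
	}
}

func main() {
	payload, _ := json.Marshal([]allocationRequest{{Provider: 1000, Size: 2048}})
	if err := handle(universalReceiverParams{Type: tokenReceived, Payload: payload}); err != nil {
		fmt.Println(err)
	}
}
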
>
> type ExtendClaimTermsParams struct {
> Terms []ClaimTerm
> }
>
> type ClaimTerm struct {
> Provider abi.ActorID
> ClaimId ClaimId
> TermMax abi.ChainEpoch
> }
>
> type ExtendClaimTermsReturn BatchReturn
>
> type RemoveExpiredClaimsParams struct {
> Provider abi.ActorID
> ClaimIds []ClaimId
> }
>
> type RemoveExpiredClaimsReturn struct {
> Considered []AllocationId
> Results BatchReturn
> }
>
> type AllocationRequest struct {
> // The provider (miner actor) which may claim the allocation.
> Provider abi.ActorID
> // Identifier of the data to be committed.
> Data cid.Cid
> // The (padded) size of data.
> Size abi.PaddedPieceSize
> // The minimum duration which the provider must commit to storing the piece to avoid
> // early-termination penalties (epochs).
> TermMin abi.ChainEpoch
> // The maximum period for which a provider can earn quality-adjusted power
> // for the piece (epochs).
> TermMax abi.ChainEpoch
> // The latest epoch by which a provider must commit data before the allocation expires.
> Expiration abi.ChainEpoch
> }
>
> type ClaimExtensionRequest struct {
> Provider addr.Address
> Claim ClaimId
> TermMax abi.ChainEpoch
> }
>
> type AllocationRequests struct {
> Allocations []AllocationRequest
> Extensions []ClaimExtensionRequest
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/crypto/signature.go b/vendor/github.com/filecoin-project/go-state-types/crypto/signature.go
18a19
> SigTypeDelegated
28a30,31
> case SigTypeDelegated:
> return "delegated", nil
92a96,97
> case SigTypeDelegated:
> s.Type = SigTypeDelegated
121a127,128
> case SigTypeDelegated:
> s.Type = SigTypeDelegated
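
The signature diff extends the SigType enum with SigTypeDelegated and threads the new case through the name lookup and both (un)marshal switches. A reduced model of the enum-plus-switch pattern (the local type and iota values are illustrative, mirroring the ordering implied above):

package main

import "fmt"

type sigType byte

const (
	sigSecp256k1 sigType = iota + 1
	sigBLS
	sigDelegated // the newly appended member, as in the diff above
)

// name mirrors the extended switch in signature.go.
func (t sigType) name() (string, error) {
	switch t {
	case sigSecp256k1:
		return "secp256k1", nil
	case sigBLS:
		return "bls", nil
	case sigDelegated:
		return "delegated", nil
	default:
		return "", fmt.Errorf("invalid signature type: %d", t)
	}
}

func main() {
	n, _ := sigDelegated.name()
	fmt.Println(n) // delegated
}
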
diff -r --color a/vendor/github.com/filecoin-project/go-state-types/manifest/manifest.go b/vendor/github.com/filecoin-project/go-state-types/manifest/manifest.go
7a8,9
> actorstypes "github.com/filecoin-project/go-state-types/actors"
>
13a16,50
>
> const (
> AccountKey = "account"
> CronKey = "cron"
> DataCapKey = "datacap"
> InitKey = "init"
> MarketKey = "storagemarket"
> MinerKey = "storageminer"
> MultisigKey = "multisig"
> PaychKey = "paymentchannel"
> PowerKey = "storagepower"
> RewardKey = "reward"
> SystemKey = "system"
> VerifregKey = "verifiedregistry"
> )
>
> func GetBuiltinActorsKeys(av actorstypes.Version) []string {
> keys := []string{
> AccountKey,
> CronKey,
> InitKey,
> MarketKey,
> MinerKey,
> MultisigKey,
> PaychKey,
> PowerKey,
> RewardKey,
> SystemKey,
> VerifregKey,
> }
> if av >= 9 {
> keys = append(keys, DataCapKey)
> }
> return keys
> }
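
GetBuiltinActorsKeys gates the datacap actor key on actors version 9 and later, while the other eleven keys are unconditional. A quick usage-style sketch of the same check with plain ints (local names are assumptions):

package main

import "fmt"

// builtinActorKeys mirrors GetBuiltinActorsKeys above: a fixed base list,
// plus "datacap" from actors version 9 onward.
func builtinActorKeys(av int) []string {
	keys := []string{
		"account", "cron", "init", "storagemarket", "storageminer",
		"multisig", "paymentchannel", "storagepower", "reward",
		"system", "verifiedregistry",
	}
	if av >= 9 {
		keys = append(keys, "datacap")
	}
	return keys
}

func main() {
	fmt.Println(len(builtinActorKeys(8))) // 11
	fmt.Println(len(builtinActorKeys(9))) // 12
}
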
diff -r --color a/vendor/github.com/filecoin-project/index-provider/Dockerfile b/vendor/github.com/filecoin-project/index-provider/Dockerfile
1c1
< FROM golang:1.17.9-buster as build
---
> FROM golang:1.18 as build
8,12c8
< RUN go build -o /go/bin/provider
<
< # TODO consider auto initialization flag as part of `daemon` command
< ARG INIT_PROVIDER='true'
< RUN if test "${INIT_PROVIDER}" = 'false'; then /go/bin/provider init; else echo 'skipping provider initialization.'; fi
---
> RUN CGO_ENABLED=0 go build -o /go/bin/provider ./cmd/provider
16d11
< COPY --from=build /root/.index-provider* /root/.index-provider
diff -r --color a/vendor/github.com/filecoin-project/index-provider/engine/chunker/cached_chunker.go b/vendor/github.com/filecoin-project/index-provider/engine/chunker/cached_chunker.go
13d12
< "github.com/filecoin-project/storetheindex/api/v0/ingest/schema"
19a19
> "github.com/ipld/go-ipld-prime/datamodel"
22d21
< "github.com/multiformats/go-multihash"
33,54c32,35
< // CachedEntriesChunker is an EntriesChunker that caches the generated chunks using an LRU cache.
< // The chunks within a chain are guaranteed to either be fully cached or not at all.
< // If the chains overlap, the smaller overlapping portion is not evicted unless all the chains that
< // reference to it are evicted.
< //
< // The number of chains cached will be at most equal to the given capacity. The capacity is
< // immutable. Chains are evicted as needed if the capacity is reached.
< //
< // This cache restores previously cached values from the datastore upon instantiation. If the
< // capacity is smaller than the number of chains persisted, the surplus chains will be evicted in no
< // particular order.
< //
< // See: NewCachedEntriesChunker.
< type CachedEntriesChunker struct {
< // ds is the backing storage for the cached etry chunks and the caching metadata.
< ds datastore.Batching
< // lsys is used to store the IPLD representation of cached entry chunks.
< lsys ipld.LinkSystem
< // chunkSize is the maximum number of mulithashes to include within a schema.EntryChunk.
< chunkSize int
< // cache is the LRU cache used to determine the chains to keep and the chains to evict from the
< // backing datastore in order of least recently used.
---
> type (
> // CachedEntriesChunker is an EntriesChunker that caches the generated chunks using an LRU cache.
> // The chunks can be formatted as any DAG with two current implementations: HamtChunker and
> // ChainChunker.
56,61c37,38
< // The cache uses link to root of a chain as key and a slice of links that make up the chain as
< // value. The rationale behind setting the list of chain links as value is to avoid having to
< // traverse the chain to learn what to delete should the chain be evicted. This makes eviction
< // faster in exchange for slightly larger memory footprint. Only cache keys are persisted in the
< // datastore. During restore, the chain is indeed traversed to populate cache values. See
< // CachedEntriesChunker.restoreCache.
---
> // The DAGs are guaranteed to either be fully cached or not at all. If DAGs overlap, the smaller
> // overlapping portion is not evicted unless all the DAGs that link to it are evicted.
63,75c40,80
< // Note that all operations on cache must be performed via CachedEntriesChunker.performOnCache
< // to insure context is set in case of an eviction and any errors during eviction are returned
< // gracefully.
< cache *lru.Cache
< // onEvictedErr is used to signal any errors that occur during cache eviction by operations
< // performed via CachedEntriesChunker.performOnCache.
< onEvictedErr error
< // onEvictedCtx is used to set the context to be used during cache eviction by operations
< // performed via CachedEntriesChunker.performOnCache.
< onEvictedCtx context.Context
< // lock syncronizes the chunking, clearing the cache and reading the number of cached chains.
< lock sync.Mutex
< }
---
> // The number of DAGs cached will be at most equal to the given capacity. The capacity is
> // immutable. DAGs are evicted as needed if the capacity is reached.
> //
> // See: NewCachedEntriesChunker.
> CachedEntriesChunker struct {
> // ds is the backing storage for the cached entry chunks and the caching metadata.
> ds datastore.Batching
> // lsys is used to store the IPLD representation of cached entry chunks.
> lsys ipld.LinkSystem
> // cache is the LRU cache used to determine the chains to keep and the chains to evict from the
> // backing datastore in order of least recently used.
> //
> // The cache uses link to root of a chain as key and a slice of links that make up the chain as
> // value. The rationale behind setting the list of chain links as value is to avoid having to
> // traverse the chain to learn what to delete should the chain be evicted. This makes eviction
> // faster in exchange for slightly larger memory footprint. Only cache keys are persisted in the
> // datastore. During restore, the chain is indeed traversed to populate cache values. See
> // CachedEntriesChunker.restoreCache.
> //
> // Note that all operations on cache must be performed via CachedEntriesChunker.performOnCache
> // to ensure context is set in case of an eviction and any errors during eviction are returned
> // gracefully.
> cache *lru.Cache
> // onEvictedErr is used to signal any errors that occur during cache eviction by operations
> // performed via CachedEntriesChunker.performOnCache.
> onEvictedErr error
> // onEvictedCtx is used to set the context to be used during cache eviction by operations
> // performed via CachedEntriesChunker.performOnCache.
> onEvictedCtx context.Context
> // lock synchronizes the chunking, clearing the cache and reading the number of cached chains.
> // Any function that performs Store on the linksystem should also grab this lock. See inline
> // comments in Chunk.
> lock sync.Mutex
> // chunker is the underlying chunker that generates a DAG from a provider.MultihashIterator.
> chunker EntriesChunker
> }
>
> // NewChunkerFunc instantiates the core EntriesChunker to use for generating advertisement
> // entries DAG.
> NewChunkerFunc func(ls *ipld.LinkSystem) (EntriesChunker, error)
> )
79,81c84,85
< // The chunks are generated with the given maximum chunkSize and are stored in an LRU cache. Once
< // stored, the individual chunks that make up the entries chain are retrievable in their raw binary
< // form via CachedEntriesChunker.GetRawCachedChunk.
---
> // The DAGs are generated with the given newChunker and are stored in an LRU cache. Once
> // stored, the individual DAGs that make up the entries chain are retrievable in their raw binary
83,87c87
< // The growth of LRU cache is limited by the given capacity. The capacity specifies the number of
< // complete chains that are cached, not the chunks within each chain. The actual storage consumed by
< // the cache is a factor of: 1) maximum chunk size, 2) multihash length and 3) capacity. For
< // example, a fully populated cache with chunk size of 16384, for multihashes of length 128-bit and
< // capacity of 1024 will consume 256MiB of space, i.e. (16384 * 1024 * 128b).
---
> // form via CachedEntriesChunker.GetRawCachedChunk.
89,91c89,90
< // This struct guarantees that for any given chain of entries, either the entire chain is cached, or
< // it is not cached at all. When chains overlap, the overlapping portion of the chain is not evicted
< // until the larger chain is evicted.
---
> // The shape of the DAGs is dictated by the underlying chunking logic that is instantiated once via
> // newChunker function. See: NewHamtChunkerFunc, NewChainChunkerFunc.
93,95c92,110
< // Upon instantiation, the chunker will restore its state from the datastore, and prunes the
< // datastore as needed. For example, if the given capacity is smaller than the number of chains
< // present in the datastore it will evict chains to respect the given capacity.
---
> // The growth of LRU cache is limited by the given capacity. The capacity specifies the number of
> // complete DAGs that are cached, not the DAGs within each chain. The actual storage consumed by
> // the cache is a factor of: 1) the DAG shape determined by the underlying chunker, 2) multihash
> // length and 3) capacity. For example, a fully populated cache with chunk size of 16384, for
> // multihashes of length 128-bit and capacity of 1024 will consume 256MiB of space, i.e.
> // (16384 * 1024 * 128b).
> //
> // This implementation guarantees that for any given chain of entries, either the entire chain is
> // cached, or it is not cached at all. When chains overlap, the overlapping portion of the chain is
> // not evicted until the larger chain is evicted.
> //
> // Unless purge is set to true, upon instantiation, the chunker will restore its state from the
> // datastore, and prune the datastore as needed. For example, if the given capacity is smaller than
> // the number of chains present in the datastore, it will evict chains to respect the given capacity
> // in no particular order.
> //
> // The purge flag specifies whether any existing cache should be cleared on startup. If set, any
> // existing cached chunks will be deleted from the datastore. Otherwise, the previously cached
> // entries are restored.
103,108c118,119
< // The purge flag specifies whether any existing cache should be cleared on startup. If set, any
< // existing cached chunks will be deleted from the datastore. Otherwise, the previously cached
< // entries are restored.
< //
< // See CachedEntriesChunker.Chunk, CachedEntriesChunker.GetRawCachedChunk
< func NewCachedEntriesChunker(ctx context.Context, ds datastore.Batching, chunkSize, capacity int, purge bool) (*CachedEntriesChunker, error) {
---
> // See: CachedEntriesChunker.Chunk, CachedEntriesChunker.GetRawCachedChunk.
> func NewCachedEntriesChunker(ctx context.Context, ds datastore.Batching, capacity int, newChunker NewChunkerFunc, purge bool) (*CachedEntriesChunker, error) {
110,113c121,123
< ds: ds,
< lsys: cidlink.DefaultLinkSystem(),
< cache: lru.New(capacity),
< chunkSize: chunkSize,
---
> ds: ds,
> lsys: cidlink.DefaultLinkSystem(),
> cache: lru.New(capacity),
119a130,135
> chunker, err := newChunker(&ls.lsys)
> if err != nil {
> return nil, err
> }
> ls.chunker = chunker
>
222,223c238
< // Chunk chunks the multihashes supplied by the given mhi into a chain of schema.EntryChunk instances
< // and stores them.
---
> // Chunk chunks the multihashes supplied by the given mhi into a DAG and returns the link to root.
226,237c241,255
< defer ls.lock.Unlock()
<
< mhs := make([]multihash.Multihash, 0, ls.chunkSize)
< var chunkLinks []ipld.Link
< var chunkLinksEnc []byte
< var next ipld.Link
< var mhCount, chunkCount int
< for {
< mh, err := mhi.Next()
< if err == io.EOF {
< break
< }
---
> defer func() {
> ls.lsys.StorageWriteOpener = ls.storageWriteOpener
> ls.lock.Unlock()
> }()
>
> var links []ipld.Link
> var linksEnc []byte
> // Intercept the links that are being stored.
> // It is safe to swap the StorageWriteOpener, because:
> // - Chunk is the only place we expect to write to the linksystem, and
> // - calls to Chunk are synchronized using a lock.
> // It is also an efficient way of collecting all the links without having to traverse the dag
> // from the root link, or make the EntriesChunker interface more complex.
> ls.lsys.StorageWriteOpener = func(ctx linking.LinkContext) (io.Writer, linking.BlockWriteCommitter, error) {
> opener, committer, err := ls.storageWriteOpener(ctx)
239,256c257
< return nil, err
< }
< mhs = append(mhs, mh)
< mhCount++
< if len(mhs) >= ls.chunkSize {
< cNode, err := newEntriesChunkNode(mhs, next)
< if err != nil {
< return nil, err
< }
< next, err = ls.lsys.Store(ipld.LinkContext{Ctx: ctx}, schema.Linkproto, cNode)
< if err != nil {
< return nil, err
< }
< chunkLinks = append(chunkLinks, next)
< chunkLinksEnc = append(chunkLinksEnc, next.(cidlink.Link).Cid.Bytes()...)
< chunkCount++
< // NewLinkedListOfMhs makes it own copy, so safe to reuse mhs
< mhs = mhs[:0]
---
> return nil, nil, err
258,270c259,263
< }
< if len(mhs) != 0 {
< cNode, err := newEntriesChunkNode(mhs, next)
< if err != nil {
< return nil, err
< }
< next, err = ls.lsys.Store(ipld.LinkContext{Ctx: ctx}, schema.Linkproto, cNode)
< if err != nil {
< return nil, err
< }
< chunkLinks = append(chunkLinks, next)
< chunkLinksEnc = append(chunkLinksEnc, next.(cidlink.Link).Cid.Bytes()...)
< chunkCount++
---
> return opener, func(link datamodel.Link) error {
> links = append(links, link)
> linksEnc = append(linksEnc, link.(cidlink.Link).Cid.Bytes()...)
> return committer(link)
> }, nil
273c266,267
< err := ls.performOnCache(ctx, func(cache *lru.Cache) { cache.Add(next, chunkLinks) })
---
> // Store the multihashes in mhi as a DAG and get the root link.
> root, err := ls.chunker.Chunk(ctx, mhi)
277c271,273
< err = ls.ds.Put(ctx, ls.dsRootPrefixedKey(next), chunkLinksEnc)
---
>
> // Store internal mappings for caching purposes.
> err = ls.performOnCache(ctx, func(cache *lru.Cache) { cache.Add(root, links) })
281,290c277,279
< log.Infow("Generated linked chunks of multihashes", "totalMhCount", mhCount, "chunkCount", chunkCount)
< return next, ls.sync(ctx)
< }
<
< func newEntriesChunkNode(mhs []multihash.Multihash, next ipld.Link) (ipld.Node, error) {
< chunk := schema.EntryChunk{
< Entries: mhs,
< }
< if next != nil {
< chunk.Next = &next
---
> err = ls.ds.Put(ctx, ls.dsRootPrefixedKey(root), linksEnc)
> if err != nil {
> return nil, err
292c281
< return chunk.ToNode()
---
> return root, ls.sync(ctx)
299,300c288
< // GetRawCachedChunk gets the raw cached entry chunk for the given link, or nil if no such caching
< // exists.
---
> // GetRawCachedChunk gets the raw cached entry chunk for the given link, or nil if no such caching exists.
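The StorageWriteOpener swap that Chunk performs above can be shown in isolation. A minimal sketch against go-ipld-prime, with memstore standing in for the real datastore-backed storage; the codec and hash choices in the link prototype are illustrative:

```go
package example

import (
	"io"

	"github.com/ipfs/go-cid"
	_ "github.com/ipld/go-ipld-prime/codec/dagcbor" // registers the dag-cbor codec
	"github.com/ipld/go-ipld-prime/datamodel"
	"github.com/ipld/go-ipld-prime/linking"
	cidlink "github.com/ipld/go-ipld-prime/linking/cid"
	"github.com/ipld/go-ipld-prime/node/basicnode"
	"github.com/ipld/go-ipld-prime/storage/memstore"
)

// collectLinks stores a node and records every link committed through the
// link system, the same interception pattern Chunk uses above.
func collectLinks() ([]datamodel.Link, error) {
	lsys := cidlink.DefaultLinkSystem()
	lsys.SetWriteStorage(&memstore.Store{}) // zero value is ready to use

	var links []datamodel.Link
	inner := lsys.StorageWriteOpener
	lsys.StorageWriteOpener = func(lctx linking.LinkContext) (io.Writer, linking.BlockWriteCommitter, error) {
		w, commit, err := inner(lctx)
		if err != nil {
			return nil, nil, err
		}
		return w, func(lnk datamodel.Link) error {
			links = append(links, lnk) // record before delegating
			return commit(lnk)
		}, nil
	}

	lp := cidlink.LinkPrototype{Prefix: cid.Prefix{
		Version: 1, Codec: 0x71, MhType: 0x12, MhLength: 32, // dag-cbor, sha2-256
	}}
	if _, err := lsys.Store(linking.LinkContext{}, lp, basicnode.NewString("hello")); err != nil {
		return nil, err
	}
	return links, nil
}
```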
Only in b/vendor/github.com/filecoin-project/index-provider/engine/chunker: chain_chunker.go
diff -r --color a/vendor/github.com/filecoin-project/index-provider/engine/chunker/doc.go b/vendor/github.com/filecoin-project/index-provider/engine/chunker/doc.go
1,3c1,6
< // Package chunker provides functionality for chunking entries chain generated from
< // provider.MultihashIterator, represented as EntriesChunker interface. The package provides a
< // default implementation of this interface, CachedEntriesChunker.
---
> // Package chunker provides functionality for chunking ad entries generated from
> // provider.MultihashIterator into an IPLD DAG. The interface given a multihash iterator an
> // EntriesChunker drains it, restructures the multihashes in an IPLD DAG and returns the root link
> // to that DAG. Two DAG datastructures are currently implemented: ChainChunker, and HamtChunker.
> // Additionally, CachedEntriesChunker can use either of the chunkers and provide an LRU caching
> // functionality for the generated DAGs.
5,12c8
< // CachedEntriesChunker stores a cache of generated entries chains with configurable capacity and
< // maximum chunk size. This cache guarantees that a cached chain of entries is either fully cached
< // or not at all. This includes chains that may have overlapping section. In this case, the
< // overlapping section is not evicted from the cache until the larger chain it is overlapping with
< // is evicted. The CachedEntriesChunker also supports restoring previously cached entries upon
< // instantiation.
< //
< // See: CachedEntriesChunker, NewCachedEntriesChunker
---
> // See: CachedEntriesChunker, ChainChunker, HamtChunker
Only in b/vendor/github.com/filecoin-project/index-provider/engine/chunker: hamt_chunker.go
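Putting the new constructor signature together with the chunkers named above, a hedged sketch of instantiating the cache; the in-memory datastore and sizes are illustrative, and NewChainChunkerFunc comes from the new chain_chunker.go:

```go
package example

import (
	"context"

	"github.com/filecoin-project/index-provider/engine/chunker"
	"github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
)

func newChunker(ctx context.Context) (*chunker.CachedEntriesChunker, error) {
	ds := dssync.MutexWrap(datastore.NewMapDatastore())
	// Cache up to 1024 complete DAGs, shaped as chains of EntryChunk with at
	// most 16384 multihashes per chunk, restoring any prior cache (purge=false).
	// Swap in chunker.NewHamtChunkerFunc(...) to cache HAMT-shaped entries instead.
	return chunker.NewCachedEntriesChunker(ctx, ds, 1024, chunker.NewChainChunkerFunc(16384), false)
}
```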
diff -r --color a/vendor/github.com/filecoin-project/index-provider/engine/doc.go b/vendor/github.com/filecoin-project/index-provider/engine/doc.go
11c11
< // - https://github.com/filecoin-project/storetheindex/blob/main/api/v0/ingest/schema/schema.ipldsch
---
> //   - https://github.com/filecoin-project/storetheindex/blob/main/api/v0/ingest/schema/schema.ipldsch
13,14c13,14
< // The engine internally uses "go-legs" to sync the IPLD DAG of advertisements.
< // See: https://github.com/filecoin-project/go-legs
---
> // The engine internally uses "storetheindex/dagsync" to sync the IPLD DAG of advertisements.
> // See: https://github.com/filecoin-project/storetheindex/tree/main/dagsync
diff -r --color a/vendor/github.com/filecoin-project/index-provider/engine/engine.go b/vendor/github.com/filecoin-project/index-provider/engine/engine.go
5a6
> "encoding/json"
6a8
> "net/url"
9,11d10
< "github.com/filecoin-project/go-legs"
< "github.com/filecoin-project/go-legs/dtsync"
< "github.com/filecoin-project/go-legs/httpsync"
14a14
> httpclient "github.com/filecoin-project/storetheindex/api/v0/ingest/client/http"
15a16,18
> "github.com/filecoin-project/storetheindex/dagsync"
> "github.com/filecoin-project/storetheindex/dagsync/dtsync"
> "github.com/filecoin-project/storetheindex/dagsync/httpsync"
22a26,27
> "github.com/libp2p/go-libp2p/core/peer"
> "github.com/multiformats/go-multiaddr"
26,30c31,36
< keyToCidMapPrefix = "map/keyCid/"
< cidToKeyMapPrefix = "map/cidKey/"
< keyToMetadataMapPrefix = "map/keyMD/"
< latestAdvKey = "sync/adv/"
< linksCachePath = "/cache/links"
---
> keyToCidMapPrefix = "map/keyCid/"
> cidToKeyMapPrefix = "map/cidKey/"
> cidToProviderAndKeyMapPrefix = "map/cidProvAndKey/"
> keyToMetadataMapPrefix = "map/keyMD/"
> latestAdvKey = "sync/adv/"
> linksCachePath = "/cache/links"
39c45
< // Engine is an implementation of the core reference provider interface
---
> // Engine is an implementation of the core reference provider interface.
46c52
< publisher legs.Publisher
---
> publisher dagsync.Publisher
54,69c60,79
< // New creates a new index provider Engine as the default implementation of provider.Interface.
< // It provides the ability to advertise the availability of a list of multihashes associated to
< // a context ID as a chain of linked advertisements as defined by the indexer node protocol implemented by "storetheindex".
< // Engine internally uses "go-legs", a protocol for propagating and synchronizing changes an IPLD DAG, to publish advertisements.
< // See:
< // - https://github.com/filecoin-project/storetheindex
< // - https://github.com/filecoin-project/go-legs
< //
< // Published advertisements are signed using the given private key.
< // The retAddrs corresponds to the endpoints at which the data block associated to the advertised
< // multihashes can be retrieved.
< // Note that if no retAddrs is specified the listen addresses of the given libp2p host are used.
< //
< // The engine also provides the ability to generate advertisements via Engine.NotifyPut and
< // Engine.NotifyRemove as long as a provider.MultihashLister is registered.
< // See: provider.MultihashLister, Engine.RegisterMultihashLister.
---
> // New creates a new index provider Engine as the default implementation of
> // provider.Interface. It provides the ability to advertise the availability of
> // a list of multihashes associated to a context ID as a chain of linked
> // advertisements as defined by the indexer node protocol implemented by
> // "storetheindex".
> //
> // Engine internally uses "storetheindex/dagsync", a protocol for propagating and
> // synchronizing changes to an IPLD DAG, to publish advertisements. See:
> //
> //   - https://github.com/filecoin-project/storetheindex/tree/main/dagsync
> //
> // Published advertisements are signed using the given private key. The
> // retAddrs corresponds to the endpoints at which the data block associated to
> // the advertised multihashes can be retrieved. If no retAddrs are specified,
> // then use the listen addresses of the given libp2p host.
> //
> // The engine also provides the ability to generate advertisements via
> // Engine.NotifyPut and Engine.NotifyRemove as long as a
> // provider.MultihashLister is registered. See: provider.MultihashLister,
> // Engine.RegisterMultihashLister.
71,72c81,82
< // The engine must be started via Engine.Start before use and discarded via Engine.Shutdown when no longer needed.
< // See: Engine.Start, Engine.Shutdown.
---
> // The engine must be started via Engine.Start before use and discarded via
> // Engine.Shutdown when no longer needed.
88,89c98,99
< // Start starts the engine by instantiating the internal storage and joins the configured gossipsub
< // topic used for publishing advertisements.
---
> // Start starts the engine by instantiating the internal storage and joining
> // the configured gossipsub topic used for publishing advertisements.
91,93c101,103
< // The context is used to instantiate the internal LRU cache storage.
< //
< // See: Engine.Shutdown, chunker.NewCachedEntriesChunker, dtsync.NewPublisherFromExisting.
---
> // The context is used to instantiate the internal LRU cache storage. See:
> // Engine.Shutdown, chunker.NewCachedEntriesChunker,
> // dtsync.NewPublisherFromExisting
96c106
< // Create datastore entriesChunker
---
> // Create datastore entriesChunker.
98c108
< e.entriesChunker, err = chunker.NewCachedEntriesChunker(ctx, entriesCacheDs, e.entChunkSize, e.entCacheCap, e.purgeCache)
---
> e.entriesChunker, err = chunker.NewCachedEntriesChunker(ctx, entriesCacheDs, e.entCacheCap, e.chunker, e.purgeCache)
105c115
< log.Errorw("Failed to instantiate legs publisher", "err", err, "kind", e.pubKind)
---
> log.Errorw("Failed to instantiate dagsync publisher", "err", err, "kind", e.pubKind)
125c135
< func (e *Engine) newPublisher() (legs.Publisher, error) {
---
> func (e *Engine) newPublisher() (dagsync.Publisher, error) {
140c150
< ds := dsn.Wrap(e.ds, datastore.NewKey("/legs/dtsync/pub"))
---
> ds := dsn.Wrap(e.ds, datastore.NewKey("/dagsync/dtsync/pub"))
149,150c159,160
< // PublishLocal stores the advertisement in the local link system and marks it locally as the latest
< // advertisement.
---
> // PublishLocal stores the advertisement in the local link system and marks it
> // locally as the latest advertisement.
152c162,163
< // The context is used for storing internal mapping information onto the datastore.
---
> // The context is used for storing internal mapping information onto the
> // datastore.
156d166
<
174c184
< if err := e.putLatestAdv(ctx, c.Bytes()); err != nil {
---
> if err = e.putLatestAdv(ctx, c.Bytes()); err != nil {
182,184c192,194
< // Publish stores the given advertisement locally via Engine.PublishLocal first, then publishes
< // a message onto the gossipsub to signal the change in the latest advertisement by the provider to
< // indexer nodes.
---
> // Publish stores the given advertisement locally via Engine.PublishLocal
> // first, then publishes a message onto the gossipsub to signal the change in
> // the latest advertisement by the provider to indexer nodes.
186,187c196,197
< // The publication mechanism uses legs.Publisher internally.
< // See: https://github.com/filecoin-project/go-legs
---
> // The publication mechanism uses dagsync.Publisher internally.
> // See: https://github.com/filecoin-project/storetheindex/tree/main/dagsync
198c208
< log.Info("Publishing advertisement in pubsub channel")
---
> log.Info("Announcing advertisement in pubsub channel")
203a214,219
>
> err = e.httpAnnounce(ctx, c, e.announceURLs)
> if err != nil {
> log.Errorw("Failed to announce advertisement via http", "err", err)
> return cid.Undef, err
> }
204a221
>
208,209c225
< // PublishLatest re-publishes the latest existing advertisement to pubsub.
< func (e *Engine) PublishLatest(ctx context.Context) error {
---
> func (e *Engine) latestAdToPublish(ctx context.Context) (cid.Cid, error) {
213c229
< return nil
---
> return cid.Undef, nil
218c234
< return fmt.Errorf("failed to get latest advertisement cid: %w", err)
---
> return cid.Undef, fmt.Errorf("failed to get latest advertisement cid: %w", err)
222a239,290
> return cid.Undef, nil
> }
>
> return adCid, nil
> }
>
> // PublishLatest re-publishes the latest existing advertisement to pubsub.
> func (e *Engine) PublishLatest(ctx context.Context) (cid.Cid, error) {
> adCid, err := e.latestAdToPublish(ctx)
> if err != nil {
> return cid.Undef, err
> }
> log.Infow("Publishing latest advertisement", "cid", adCid)
>
> err = e.publisher.UpdateRoot(ctx, adCid)
> if err != nil {
> return cid.Undef, err
> }
>
> return adCid, nil
> }
>
> // PublishLatestHTTP publishes the latest existing advertisement to the
> // specific indexers.
> func (e *Engine) PublishLatestHTTP(ctx context.Context, announceURLs ...*url.URL) (cid.Cid, error) {
> adCid, err := e.latestAdToPublish(ctx)
> if err != nil {
> return cid.Undef, err
> }
>
> err = e.httpAnnounce(ctx, adCid, announceURLs)
> if err != nil {
> return cid.Undef, err
> }
>
> return adCid, nil
> }
>
> func (e *Engine) httpAnnounce(ctx context.Context, adCid cid.Cid, announceURLs []*url.URL) error {
> if ctx.Err() != nil {
> return ctx.Err()
> }
>
> ai := &peer.AddrInfo{
> ID: e.h.ID(),
> }
>
> // The publisher kind determines what addresses to put into the announce
> // message.
> switch e.pubKind {
> case NoPublisher:
> log.Info("Remote announcements disabled")
223a292,320
> case DataTransferPublisher:
> ai.Addrs = e.h.Addrs()
> case HttpPublisher:
> maddr, err := hostToMultiaddr(e.pubHttpListenAddr)
> if err != nil {
> return err
> }
> proto, _ := multiaddr.NewMultiaddr("/http")
> ai.Addrs = append(ai.Addrs, multiaddr.Join(maddr, proto))
> }
>
> errChan := make(chan error)
> for _, u := range announceURLs {
> // Send HTTP announce to indexers concurrently. If context is canceled,
> // then Announce requests will be canceled.
> go func(announceURL *url.URL) {
> log.Infow("Announcing advertisement over HTTP", "url", announceURL)
> cl, err := httpclient.New(announceURL.String())
> if err != nil {
> errChan <- fmt.Errorf("failed to create http client for indexer %s: %w", announceURL, err)
> return
> }
> err = cl.Announce(ctx, ai, adCid)
> if err != nil {
> errChan <- fmt.Errorf("failed to send http announce to indexer %s: %w", announceURL, err)
> return
> }
> errChan <- nil
> }(u)
225d321
< log.Infow("Republishing latest advertisement", "cid", adCid)
227c323,330
< return e.publisher.UpdateRoot(ctx, adCid)
---
> var errs error
> for i := 0; i < len(announceURLs); i++ {
> err := <-errChan
> if err != nil {
> errs = multierror.Append(errs, err)
> }
> }
> return errs
230,232c333,336
< // RegisterMultihashLister registers a provider.MultihashLister that is used to look up the
< // list of multihashes associated to a context ID. At least one such registration
< // must be registered before calls to Engine.NotifyPut and Engine.NotifyRemove.
---
> // RegisterMultihashLister registers a provider.MultihashLister that is used to
> // look up the list of multihashes associated to a context ID. At least one
> // such registration must be registered before calls to Engine.NotifyPut and
> // Engine.NotifyRemove.
234,235c338,339
< // Note that successive calls to this function will replace the previous registration.
< // Only a single registration is supported.
---
> // Note that successive calls to this function will replace the previous
> // registration. Only a single registration is supported.
247,248c351,352
< // given metadata. A provider.MultihashLister is required, and is used to look up the
< // list of multihashes associated to a context ID.
---
> // given metadata. A provider.MultihashLister is required, and is used to look
> // up the list of multihashes associated to a context ID.
250c354,355
< // Note that prior to calling this function a provider.MultihashLister must be registered.
---
> // Note that prior to calling this function a provider.MultihashLister must be
> // registered.
253,256c358,367
< func (e *Engine) NotifyPut(ctx context.Context, contextID []byte, md metadata.Metadata) (cid.Cid, error) {
< // The multihash lister must have been registered for the linkSystem to know how to
< // go from contextID to list of CIDs.
< return e.publishAdvForIndex(ctx, contextID, md, false)
---
> func (e *Engine) NotifyPut(ctx context.Context, provider *peer.AddrInfo, contextID []byte, md metadata.Metadata) (cid.Cid, error) {
> // The multihash lister must have been registered for the linkSystem to
> // know how to go from contextID to list of CIDs.
> pID := e.options.provider.ID
> addrs := e.options.provider.Addrs
> if provider != nil {
> pID = provider.ID
> addrs = provider.Addrs
> }
> return e.publishAdvForIndex(ctx, pID, addrs, contextID, md, false)
259,260c370,371
< // NotifyRemove publishes an advertisement that signals the list of multihashes associated to the given
< // contextID is no longer available by this provider.
---
> // NotifyRemove publishes an advertisement that signals the list of multihashes
> // associated to the given contextID is no longer available by this provider.
262c373,374
< // Note that prior to calling this function a provider.MultihashLister must be registered.
---
> // Note that prior to calling this function a provider.MultihashLister must be
> // registered.
265,266c377,382
< func (e *Engine) NotifyRemove(ctx context.Context, contextID []byte) (cid.Cid, error) {
< return e.publishAdvForIndex(ctx, contextID, metadata.Metadata{}, true)
---
> func (e *Engine) NotifyRemove(ctx context.Context, provider peer.ID, contextID []byte) (cid.Cid, error) {
> // TODO: add support for "delete all" for provider
> if provider == "" {
> provider = e.options.provider.ID
> }
> return e.publishAdvForIndex(ctx, provider, nil, contextID, metadata.Metadata{}, true)
269,270c385,386
< // Shutdown shuts down the engine and discards all resources opened by the engine.
< // The engine is no longer usable after the call to this function.
---
> // Shutdown shuts down the engine and discards all resources opened by the
> // engine. The engine is no longer usable after the call to this function.
284,285c400,401
< // GetAdv gets the advertisement associated to the given cid c.
< // The context is not used.
---
> // GetAdv gets the advertisement associated to the given cid c. The context is
> // not used.
298,299c414,415
< // GetLatestAdv gets the latest advertisement by the provider. If there are
< // not previously published advertisements, then cid.Undef is returned as the
---
> // GetLatestAdv gets the latest advertisement by the provider. If there are no
> // previously published advertisements, then cid.Undef is returned as the
318c434
< func (e *Engine) publishAdvForIndex(ctx context.Context, contextID []byte, md metadata.Metadata, isRm bool) (cid.Cid, error) {
---
> func (e *Engine) publishAdvForIndex(ctx context.Context, p peer.ID, addrs []multiaddr.Multiaddr, contextID []byte, md metadata.Metadata, isRm bool) (cid.Cid, error) {
322c438
< log := log.With("contextID", base64.StdEncoding.EncodeToString(contextID))
---
> log := log.With("providerID", p).With("contextID", base64.StdEncoding.EncodeToString(contextID))
324c440
< c, err := e.getKeyCidMap(ctx, contextID)
---
> c, err := e.getKeyCidMap(ctx, p, contextID)
327c443
< return cid.Undef, fmt.Errorf("cound not not get entries cid by context id: %s", err)
---
> return cid.Undef, fmt.Errorf("cound not not get entries cid by provider + context id: %s", err)
331,332c447,449
< // If we are not removing, we need to generate the link for the list
< // of CIDs from the contextID using the multihash lister, and store the relationship
---
> // If not removing, then generate the link for the list of
> // CIDs from the contextID using the multihash lister, and store the
> // relationship.
339c456
< // If no lister registered return error
---
> // If no lister registered return error.
344,345c461,462
< // Call the lister
< mhIter, err := e.mhLister(ctx, contextID)
---
> // Call the lister.
> mhIter, err := e.mhLister(ctx, p, contextID)
357,359c474,476
< // Store the relationship between contextID and CID of the advertised
< // list of Cids.
< err = e.putKeyCidMap(ctx, contextID, cidsLnk.Cid)
---
> // Store the relationship between providerID, contextID and CID of the
> // advertised list of Cids.
> err = e.putKeyCidMap(ctx, p, contextID, cidsLnk.Cid)
361c478
< return cid.Undef, fmt.Errorf("failed to write context id to entries cid mapping: %s", err)
---
> return cid.Undef, fmt.Errorf("failed to write provider + context id to entries cid mapping: %s", err)
364,365c481,482
< // Lookup metadata for this contextID.
< prevMetadata, err := e.getKeyMetadataMap(ctx, contextID)
---
> // Lookup metadata for this providerID and contextID.
> prevMetadata, err := e.getKeyMetadataMap(ctx, p, contextID)
368c485
< return cid.Undef, fmt.Errorf("could not get metadata for context id: %s", err)
---
> return cid.Undef, fmt.Errorf("could not get metadata for provider + context id: %s", err)
370c487
< log.Warn("No metadata for existing context ID, generating new advertisement")
---
> log.Warn("No metadata for existing provider + context ID, generating new advertisement")
374c491,492
< // Metadata is the same; no change, no need for new advertisement.
---
> // Metadata is the same; no change, no need for new
> // advertisement.
383,384c501,502
< if err = e.putKeyMetadataMap(ctx, contextID, &md); err != nil {
< return cid.Undef, fmt.Errorf("failed to write context id to metadata mapping: %s", err)
---
> if err = e.putKeyMetadataMap(ctx, p, contextID, &md); err != nil {
> return cid.Undef, fmt.Errorf("failed to write provider + context id to metadata mapping: %s", err)
393,395c511,513
< // And if we are removing it means we probably do not have the list of
< // CIDs anymore, so we can remove the entry from the datastore.
< err = e.deleteKeyCidMap(ctx, contextID)
---
> // If removing by context ID, it means the list of CIDs is not needed
> // anymore, so we can remove the entry from the datastore.
> err = e.deleteKeyCidMap(ctx, p, contextID)
397c515
< return cid.Undef, fmt.Errorf("failed to delete context id to entries cid mapping: %s", err)
---
> return cid.Undef, fmt.Errorf("failed to delete provider + context id to entries cid mapping: %s", err)
401c519
< return cid.Undef, fmt.Errorf("failed to delete entries cid to context id mapping: %s", err)
---
> return cid.Undef, fmt.Errorf("failed to delete entries cid to provider + context id mapping: %s", err)
403c521
< err = e.deleteKeyMetadataMap(ctx, contextID)
---
> err = e.deleteKeyMetadataMap(ctx, p, contextID)
405c523
< return cid.Undef, fmt.Errorf("failed to delete context id to metadata mapping: %s", err)
---
> return cid.Undef, fmt.Errorf("failed to delete provider + context id to metadata mapping: %s", err)
413,414c531,532
< // metadata is not used for removal. Create a valid empty metadata.
< md = metadata.New(metadata.Bitswap{})
---
> // metadata is not used for removal. Create a valid empty metadata.
> md = metadata.Default.New()
421a540,544
> var stringAddrs []string
> for _, addr := range addrs {
> stringAddrs = append(stringAddrs, addr.String())
> }
>
423,424c546,547
< Provider: e.options.provider.ID.String(),
< Addresses: e.retrievalAddrsAsString(),
---
> Provider: p.String(),
> Addresses: stringAddrs,
431c554
< // Get the previous advertisement that was generated
---
> // Get the previous advertisement that was generated.
442c565
< adv.PreviousID = &prev
---
> adv.PreviousID = prev
452,455c575,617
< func (e *Engine) putKeyCidMap(ctx context.Context, contextID []byte, c cid.Cid) error {
< // We need to store the map Key-Cid to know what CidLink to put
< // in advertisement when we notify a removal.
< err := e.ds.Put(ctx, datastore.NewKey(keyToCidMapPrefix+string(contextID)), c.Bytes())
---
> func (e *Engine) keyToCidKey(provider peer.ID, contextID []byte) datastore.Key {
> switch provider {
> case e.provider.ID:
> return datastore.NewKey(keyToCidMapPrefix + string(contextID))
> default:
> return datastore.NewKey(keyToCidMapPrefix + provider.String() + "/" + string(contextID))
> }
> }
>
> func (e *Engine) cidToKeyKey(c cid.Cid) datastore.Key {
> return datastore.NewKey(cidToKeyMapPrefix + c.String())
> }
>
> func (e *Engine) cidToProviderAndKeyKey(c cid.Cid) datastore.Key {
> return datastore.NewKey(cidToProviderAndKeyMapPrefix + c.String())
> }
>
> func (e *Engine) keyToMetadataKey(provider peer.ID, contextID []byte) datastore.Key {
> switch provider {
> case e.provider.ID:
> return datastore.NewKey(keyToMetadataMapPrefix + string(contextID))
> default:
> return datastore.NewKey(keyToMetadataMapPrefix + provider.String() + "/" + string(contextID))
> }
> }
>
> func (e *Engine) putKeyCidMap(ctx context.Context, provider peer.ID, contextID []byte, c cid.Cid) error {
> // Store the map Key-Cid to know what CidLink to put in advertisement when
> // notifying about a removal.
>
> err := e.ds.Put(ctx, e.keyToCidKey(provider, contextID), c.Bytes())
> if err != nil {
> return err
> }
> // And the other way around when graphsync is making a request, so the
> // lister in the linksystem knows to what contextID the CID refers.
> // It is enough to store just a single mapping of CID to provider and context in order to generate chunks.
>
> pB, err := provider.Marshal()
> if err != nil {
> return err
> }
> m, err := json.Marshal(&providerAndContext{Provider: pB, ContextID: contextID})
459,461c621
< // And the other way around when graphsync ios making a request,
< // so the lister in the linksystem knows to what contextID we are referring.
< return e.ds.Put(ctx, datastore.NewKey(cidToKeyMapPrefix+c.String()), contextID)
---
> return e.ds.Put(ctx, e.cidToProviderAndKeyKey(c), m)
464,465c624,625
< func (e *Engine) getKeyCidMap(ctx context.Context, contextID []byte) (cid.Cid, error) {
< b, err := e.ds.Get(ctx, datastore.NewKey(keyToCidMapPrefix+string(contextID)))
---
> func (e *Engine) getKeyCidMap(ctx context.Context, provider peer.ID, contextID []byte) (cid.Cid, error) {
> b, err := e.ds.Get(ctx, e.keyToCidKey(provider, contextID))
473,474c633,634
< func (e *Engine) deleteKeyCidMap(ctx context.Context, contextID []byte) error {
< return e.ds.Delete(ctx, datastore.NewKey(keyToCidMapPrefix+string(contextID)))
---
> func (e *Engine) deleteKeyCidMap(ctx context.Context, provider peer.ID, contextID []byte) error {
> return e.ds.Delete(ctx, e.keyToCidKey(provider, contextID))
478c638,642
< return e.ds.Delete(ctx, datastore.NewKey(cidToKeyMapPrefix+c.String()))
---
> err := e.ds.Delete(ctx, e.cidToProviderAndKeyKey(c))
> if err != nil {
> return err
> }
> return e.ds.Delete(ctx, e.cidToKeyKey(c))
481,482c645,670
< func (e *Engine) getCidKeyMap(ctx context.Context, c cid.Cid) ([]byte, error) {
< return e.ds.Get(ctx, datastore.NewKey(cidToKeyMapPrefix+c.String()))
---
> type providerAndContext struct {
> Provider []byte `json:"p"`
> ContextID []byte `json:"c"`
> }
>
> func (e *Engine) getCidKeyMap(ctx context.Context, c cid.Cid) (*providerAndContext, error) {
> // First, see whether the mapping exists in the legacy index.
> val, err := e.ds.Get(ctx, e.cidToKeyKey(c))
> if err == nil {
> return &providerAndContext{ContextID: val}, nil
> }
> if err != datastore.ErrNotFound {
> return nil, err
> }
> // Otherwise, try to fetch this mapping from the new index.
> val, err = e.ds.Get(ctx, e.cidToProviderAndKeyKey(c))
> if err != nil {
> return nil, err
> }
>
> var pAndC providerAndContext
> err = json.Unmarshal(val, &pAndC)
> if err != nil {
> return nil, err
> }
> return &pAndC, nil
485c673
< func (e *Engine) putKeyMetadataMap(ctx context.Context, contextID []byte, metadata *metadata.Metadata) error {
---
> func (e *Engine) putKeyMetadataMap(ctx context.Context, provider peer.ID, contextID []byte, metadata *metadata.Metadata) error {
490c678
< return e.ds.Put(ctx, datastore.NewKey(keyToMetadataMapPrefix+string(contextID)), data)
---
> return e.ds.Put(ctx, e.keyToMetadataKey(provider, contextID), data)
493,494c681,683
< func (e *Engine) getKeyMetadataMap(ctx context.Context, contextID []byte) (metadata.Metadata, error) {
< data, err := e.ds.Get(ctx, datastore.NewKey(keyToMetadataMapPrefix+string(contextID)))
---
> func (e *Engine) getKeyMetadataMap(ctx context.Context, provider peer.ID, contextID []byte) (metadata.Metadata, error) {
> md := metadata.Default.New()
> data, err := e.ds.Get(ctx, e.keyToMetadataKey(provider, contextID))
496c685
< return metadata.Metadata{}, err
---
> return md, err
498d686
< var md metadata.Metadata
500c688
< return metadata.Metadata{}, err
---
> return md, err
505,506c693,694
< func (e *Engine) deleteKeyMetadataMap(ctx context.Context, contextID []byte) error {
< return e.ds.Delete(ctx, datastore.NewKey(keyToMetadataMapPrefix+string(contextID)))
---
> func (e *Engine) deleteKeyMetadataMap(ctx context.Context, provider peer.ID, contextID []byte) error {
> return e.ds.Delete(ctx, e.keyToMetadataKey(provider, contextID))
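A hedged sketch of driving the new HTTP announcement path from outside the engine, assuming a started *engine.Engine; the indexer URL is illustrative:

```go
package example

import (
	"context"
	"fmt"
	"net/url"

	"github.com/filecoin-project/index-provider/engine"
)

// reannounce pushes the latest existing advertisement to one specific indexer
// over HTTP, using the new PublishLatestHTTP method.
func reannounce(ctx context.Context, eng *engine.Engine) error {
	u, err := url.Parse("https://indexer.example.org")
	if err != nil {
		return err
	}
	adCid, err := eng.PublishLatestHTTP(ctx, u)
	if err != nil {
		return err
	}
	fmt.Println("announced latest ad:", adCid)
	return nil
}
```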
Only in b/vendor/github.com/filecoin-project/index-provider/engine: host_to_maddr.go
diff -r --color a/vendor/github.com/filecoin-project/index-provider/engine/linksystem.go b/vendor/github.com/filecoin-project/index-provider/engine/linksystem.go
14a15
> "github.com/libp2p/go-libp2p/core/peer"
86c87,88
< // regenerate the list of CIDs.
---
> // regenerate the list of CIDs. It's enough to fetch *any* provider's mapping
> // as same entries from different providers would result into the same chunks
98c100,104
< mhIter, err := e.mhLister(ctx, key)
---
> provider, err := peer.IDFromBytes(key.Provider)
> if err != nil {
> return nil, err
> }
> mhIter, err := e.mhLister(ctx, provider, key.ContextID)
diff -r --color a/vendor/github.com/filecoin-project/index-provider/engine/options.go b/vendor/github.com/filecoin-project/index-provider/engine/options.go
4a5
> "net/url"
6a8
> "github.com/filecoin-project/index-provider/engine/chunker"
11,13d12
< "github.com/libp2p/go-libp2p-core/crypto"
< "github.com/libp2p/go-libp2p-core/host"
< "github.com/libp2p/go-libp2p-core/peer"
14a14,16
> "github.com/libp2p/go-libp2p/core/crypto"
> "github.com/libp2p/go-libp2p/core/host"
> "github.com/libp2p/go-libp2p/core/peer"
15a18
> "github.com/multiformats/go-multicodec"
43a47,51
>
> // announceURLs is the list of indexer URLs to send direct HTTP
> // announce messages to.
> announceURLs []*url.URL
>
49a58,60
> // It is important not to change this parameter when running against existing datastores, in order to maintain backward compatibility.
> // Older records from previous library versions aren't indexed by provider ID, as there could only have been one provider in previous versions.
> // The provider host and retrieval addresses can be overridden from the NotifyPut and NotifyRemove methods; otherwise the default configured provider is assumed.
59,61c70,72
< entCacheCap int
< entChunkSize int
< purgeCache bool
---
> entCacheCap int
> purgeCache bool
> chunker chunker.NewChunkerFunc
72c83,84
< // Keep 1024 chunks in cache; keeps 256MiB if chunks are 0.25MiB.
---
> // Keep 1024 ad entry DAGs in cache; note that the size on disk depends on DAG format and
> // multihash code.
74,76c86,89
< // Multihashes are 128 bytes so 16384 results in 0.25MiB chunk when full.
< entChunkSize: 16384,
< purgeCache: false,
---
> // By default use chained Entry Chunk as the format of advertisement entries, with maximum
> // 16384 multihashes per chunk.
> chunker: chunker.NewChainChunkerFunc(16384),
> purgeCache: false,
144,145c157,161
< // WithEntriesChunkSize sets the maximum number of multihashes to include in a single entries chunk.
< // If unset, the default size of 16384 is used.
---
> // WithChainedEntries sets format of advertisement entries to chained Entry Chunk with the
> // given chunkSize as the maximum number of multihashes per chunk.
> //
> // If unset, advertisement entries are formatted as chained Entry Chunk with default maximum of
> // 16384 multihashes per chunk.
147,148c163,165
< // See: WithEntriesCacheCapacity, chunker.CachedEntriesChunker
< func WithEntriesChunkSize(s int) Option {
---
> // To use HAMT as the advertisement entries format, see: WithHamtEntries.
> // For caching configuration: WithEntriesCacheCapacity, chunker.CachedEntriesChunker
> func WithChainedEntries(chunkSize int) Option {
150c167
< o.entChunkSize = s
---
> o.chunker = chunker.NewChainChunkerFunc(chunkSize)
155,156c172,197
< // WithEntriesCacheCapacity sets the maximum number of advertisement entries chains to cache.
< // If unset, the default capacity of 1024 is used.
---
> // WithHamtEntries sets format of advertisement entries to HAMT with the given hash algorithm,
> // bit-width and bucket size.
> //
> // If unset, advertisement entries are formatted as chained Entry Chunk with default maximum of
> // 16384 multihashes per chunk.
> //
> // Only multicodec.Identity, multicodec.Sha2_256 and multicodec.Murmur3X64_64 are supported as hash
> // algorithm.
> // The bit-width and bucket size must be at least 3 and 1 respectively.
> // For more information on HAMT data structure, see:
> // - https://ipld.io/specs/advanced-data-layouts/hamt/spec
> // - https://github.com/ipld/go-ipld-adl-hamt
> //
> // For caching configuration: WithEntriesCacheCapacity, chunker.CachedEntriesChunker
> func WithHamtEntries(hashAlg multicodec.Code, bitWidth, bucketSize int) Option {
> return func(o *options) error {
> o.chunker = chunker.NewHamtChunkerFunc(hashAlg, bitWidth, bucketSize)
> return nil
> }
> }
>
> // WithEntriesCacheCapacity sets the maximum number of advertisement entries DAGs to cache. The
> // cached DAG may be in chained Entry Chunk or HAMT format. See WithChainedEntries and
> // WithHamtEntries to select the ad entries DAG format.
> //
> // If unset, the default capacity of 1024 is used. This means at most 1024 DAGs will be cached.
164,165d204
< //
< // See: WithEntriesChunkSize, chunker.CachedEntriesChunker.
287a327,340
> }
> return nil
> }
> }
>
> // WithDirectAnnounce sets indexer URLs to send direct HTTP announcements to.
> func WithDirectAnnounce(announceURLs ...string) Option {
> return func(o *options) error {
> for _, urlStr := range announceURLs {
> u, err := url.Parse(urlStr)
> if err != nil {
> return err
> }
> o.announceURLs = append(o.announceURLs, u)
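Assuming engine.New accepts these options variadically, a sketch combining the new entry-format and direct-announce options; the values are illustrative and respect the documented bit-width >= 3 and bucket size >= 1 bounds:

```go
package example

import (
	"github.com/filecoin-project/index-provider/engine"
	"github.com/multiformats/go-multicodec"
)

// newEngine configures HAMT-shaped ad entries and direct HTTP announcements.
func newEngine() (*engine.Engine, error) {
	return engine.New(
		engine.WithHamtEntries(multicodec.Sha2_256, 8, 3), // hash alg, bit-width, bucket size
		engine.WithEntriesCacheCapacity(1024),
		engine.WithDirectAnnounce("https://indexer.example.org"),
	)
}
```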
diff -r --color a/vendor/github.com/filecoin-project/index-provider/engine/policy/policy.go b/vendor/github.com/filecoin-project/index-provider/engine/policy/policy.go
8c8
< "github.com/libp2p/go-libp2p-core/peer"
---
> "github.com/libp2p/go-libp2p/core/peer"
diff -r --color a/vendor/github.com/filecoin-project/index-provider/interface.go b/vendor/github.com/filecoin-project/index-provider/interface.go
5c5
< // - https://github.com/filecoin-project/storetheindex/blob/main/api/v0/ingest/schema/schema.ipldsch
---
> //   - https://github.com/filecoin-project/storetheindex/blob/main/api/v0/ingest/schema/schema.ipldsch
15a16
> "github.com/libp2p/go-libp2p/core/peer"
43,46c44,48
< // the given contextID is available. The given contextID is then used to
< // look up the list of multihashes via MultihashLister. An advertisement is then
< // generated, appended to the chain of advertisements and published onto
< // the gossip pubsub channel.
---
> // the given provider and contextID are available. The given
> // provider and contextID are then used to look up the list of multihashes via MultihashLister.
> // An advertisement is then generated, appended to the chain of advertisements and published onto
> // the gossip pubsub channel. Advertisements for different provider IDs are placed onto the same chain.
> // Use a nil provider for the default configured provider.
52c54
< // is protocol dependant. The metadata must at least specify a protocol
---
> // is protocol dependent. The metadata must at least specify a protocol
55c57
< // If both the contextID and metadata are the same as a previous call to
---
> // If provider, contextID and metadata are the same as a previous call to
57a60,61
> // If provider is nil then the default configured provider will be assumed.
> //
59c63
< NotifyPut(ctx context.Context, contextID []byte, md metadata.Metadata) (cid.Cid, error)
---
> NotifyPut(ctx context.Context, provider *peer.AddrInfo, contextID []byte, md metadata.Metadata) (cid.Cid, error)
62c66
< // corresponded to the given contextID are no longer available. An advertisement
---
> // corresponded to the given provider and contextID are no longer available. An advertisement
65c69
< // The given contextID must have previously been put via NotifyPut.
---
> // The given provider and contextID tuple must have previously been put via NotifyPut.
67a72,73
> // If providerID is empty then the default configured provider will be assumed.
> //
69c75
< NotifyRemove(ctx context.Context, contextID []byte) (cid.Cid, error)
---
> NotifyRemove(ctx context.Context, providerID peer.ID, contextID []byte) (cid.Cid, error)
95c101
< // MultihashLister lists the multihashes that correspond to a given contextID.
---
> // MultihashLister lists the multihashes that correspond to a given provider and contextID.
97c103
< // order for the same context ID.
---
> // order for the same (provider, contextID) tuple.
98a105
> // An empty provider means falling back to the default.
100c107
< type MultihashLister func(ctx context.Context, contextID []byte) (MultihashIterator, error)
---
> type MultihashLister func(ctx context.Context, provider peer.ID, contextID []byte) (MultihashIterator, error)
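A hedged sketch of the provider-aware callbacks defined above; lookupIterator is a hypothetical resolver for a (provider, contextID) tuple, and passing nil to NotifyPut keeps the default configured provider:

```go
package example

import (
	"context"

	provider "github.com/filecoin-project/index-provider"
	"github.com/filecoin-project/index-provider/metadata"
	"github.com/libp2p/go-libp2p/core/peer"
)

// advertise registers a provider-aware lister, then publishes a put ad for
// contextID under the default configured provider.
func advertise(ctx context.Context, eng provider.Interface, contextID []byte,
	lookupIterator func(peer.ID, []byte) (provider.MultihashIterator, error)) error {

	eng.RegisterMultihashLister(func(ctx context.Context, p peer.ID, contextID []byte) (provider.MultihashIterator, error) {
		return lookupIterator(p, contextID)
	})
	_, err := eng.NotifyPut(ctx, nil, contextID, metadata.Default.New(&metadata.Bitswap{}))
	return err
}
```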
Only in b/vendor/github.com/filecoin-project/index-provider/metadata: http.go
diff -r --color a/vendor/github.com/filecoin-project/index-provider/metadata/metadata.go b/vendor/github.com/filecoin-project/index-provider/metadata/metadata.go
32a33
> mc *metadataContext
46a48,81
> // metadataContext holds context for metadata serialization and deserialization.
> type metadataContext struct {
> protocols map[multicodec.Code]func() Protocol
> }
>
> // MetadataContext holds context for metadata serialization and deserialization.
> type MetadataContext interface {
> WithProtocol(id multicodec.Code, factory func() Protocol) MetadataContext
> New(t ...Protocol) Metadata
> }
>
> var Default MetadataContext
>
> func init() {
> d := metadataContext{
> protocols: make(map[multicodec.Code]func() Protocol),
> }
> d.protocols[multicodec.TransportBitswap] = func() Protocol { return &Bitswap{} }
> d.protocols[multicodec.TransportGraphsyncFilecoinv1] = func() Protocol { return &GraphsyncFilecoinV1{} }
> Default = &d
> }
>
> // WithProtocol derives a new MetadataContext including the additional protocol mapping.
> func (mc *metadataContext) WithProtocol(id multicodec.Code, factory func() Protocol) MetadataContext {
> derived := metadataContext{
> protocols: make(map[multicodec.Code]func() Protocol),
> }
> for k, v := range mc.protocols {
> derived.protocols[k] = v
> }
> derived.protocols[id] = factory
> return &derived
> }
>
48c83
< func New(t ...Protocol) Metadata {
---
> func (mc *metadataContext) New(t ...Protocol) Metadata {
49a85
> mc: mc,
131c167
< t, err := newTransport(id)
---
> t, err := m.mc.newTransport(id)
178,185c214,216
< func newTransport(id multicodec.Code) (Protocol, error) {
< switch id {
< case multicodec.TransportBitswap:
< return &Bitswap{}, nil
< case multicodec.TransportGraphsyncFilecoinv1:
< return &GraphsyncFilecoinV1{}, nil
< default:
< return nil, fmt.Errorf("unknwon transport id: %s", id.String())
---
> func (mc *metadataContext) newTransport(id multicodec.Code) (Protocol, error) {
> if factory, ok := mc.protocols[id]; ok {
> return factory(), nil
186a218,219
>
> return nil, fmt.Errorf("unknown transport id: %s", id.String())
Only in b/vendor/github.com/filecoin-project/index-provider/metadata: unknown.go
diff -r --color a/vendor/github.com/filecoin-project/index-provider/multihash_iterator.go b/vendor/github.com/filecoin-project/index-provider/multihash_iterator.go
3a4
> "context"
7a9
> "github.com/filecoin-project/storetheindex/api/v0/ingest/schema"
8a11,12
> hamt "github.com/ipld/go-ipld-adl-hamt"
> "github.com/ipld/go-ipld-prime"
11a16,17
> var _ MultihashIterator = (*sliceMhIterator)(nil)
>
50d55
<
67a73,154
> }
>
> var _ MultihashIterator = (*ipldMapMhIter)(nil)
>
> type ipldMapMhIter struct {
> mi ipld.MapIterator
> }
>
> func (i *ipldMapMhIter) Next() (multihash.Multihash, error) {
> if i.mi.Done() {
> return nil, io.EOF
> }
> k, _, err := i.mi.Next()
> if err != nil {
> return nil, err
> }
>
> // Note that the IPLD HAMT implementation currently writes map keys as strings.
> ks, err := k.AsString()
> if err != nil {
> return nil, err
> }
> return []byte(ks), nil
> }
>
> // HamtMultihashIterator constructs a MultihashIterator backed by the given root HAMT.
> // The links from root are dynamically loaded as needed using the given link system.
> func HamtMultihashIterator(root *hamt.HashMapRoot, ls ipld.LinkSystem) MultihashIterator {
> n := hamt.Node{
> HashMapRoot: *root,
> }.WithLinking(ls, schema.Linkproto)
> return &ipldMapMhIter{n.MapIterator()}
> }
>
> var _ MultihashIterator = (*linksysEntryChunkMhIter)(nil)
>
> type linksysEntryChunkMhIter struct {
> ls ipld.LinkSystem
> ec *schema.EntryChunk
> offset int
> }
>
> func (l *linksysEntryChunkMhIter) Next() (multihash.Multihash, error) {
> // Sanity check that entry chunk is set.
> if l.ec == nil {
> return nil, io.EOF
> }
> if l.offset >= len(l.ec.Entries) {
> if l.ec.Next == nil {
> return nil, io.EOF
> }
> lctx := ipld.LinkContext{Ctx: context.TODO()}
> n, err := l.ls.Load(lctx, l.ec.Next, schema.EntryChunkPrototype)
> if err != nil {
> return nil, err
> }
> if l.ec, err = schema.UnwrapEntryChunk(n); err != nil {
> return nil, err
> }
> l.offset = 0
> }
> next := l.ec.Entries[l.offset]
> l.offset++
> return next, nil
> }
>
> // EntryChunkMultihashIterator constructs a MultihashIterator that iterates over the global list of
> // chained multihashes starting from the given link. It dynamically loads the next EntryChunk from
> // the given ipld.LinkSystem as needed.
> func EntryChunkMultihashIterator(l ipld.Link, ls ipld.LinkSystem) (MultihashIterator, error) {
> n, err := ls.Load(ipld.LinkContext{Ctx: context.TODO()}, l, schema.EntryChunkPrototype)
> if err != nil {
> return nil, err
> }
> ec, err := schema.UnwrapEntryChunk(n)
> if err != nil {
> return nil, err
> }
> return &linksysEntryChunkMhIter{
> ls: ls,
> ec: ec,
> }, nil
diff -r --color a/vendor/github.com/filecoin-project/index-provider/README.md b/vendor/github.com/filecoin-project/index-provider/README.md
68c68
< go install github.com/filecoin-project/index-provider@latest
---
> go install github.com/filecoin-project/index-provider/cmd/provider@latest
107a108,123
> #### Exposing a Reframe server from the provider (experimental)
>
> The provider can expose a Reframe server. [Reframe](https://github.com/ipfs/specs/blob/main/reframe/REFRAME_PROTOCOL.md) is a protocol
> that allows IPFS nodes to advertise their contents to indexers alongside the DHT. The Reframe server is off by default.
> To enable it, add the following configuration block to the provider config file (the listen address below is an example).
>
> ```
> {
> ...
> "Reframe": {
> "ListenMultiaddr": "/ip4/0.0.0.0/tcp/50617"
> }
> ...
> }
> ```
>
139a156,182
> #### Publishing ads with extended providers
>
> [Extended providers](https://github.com/filecoin-project/storetheindex/blob/main/doc/ingest.md#extendedprovider)
> field allows for specification of provider families, in cases where a provider operates multiple PeerIDs, perhaps
> with different transport protocols between them, but over the same database of content.
>
> Such ads can be composed manually or by using the convenience builder `ExtendedProvidersAdBuilder`.
> ```
>
> adv, err := ep.NewExtendedProviderAdBuilder(providerID, priv, addrs). // the main ad's providerID, private key and addresses
> WithContextID(contextID). // optional context id
> WithMetadata(metadata). // optional metadata
> WithOverride(override). // override flag, false by default
> WithExtendedProviders(extendedProviders). // one or more extended providers to be included in the ad, represented by ExtendedProviderInfo struct
> WithLastAdID(lastAdId). // cid of the last published ad, which is cid.Undef by default
> BuildAndSign()
>
> if err != nil {
> //...
> }
>
> engine.Publish(ctx, *adv)
>
> ```
>
> > Identity of the main provider will be added to the extended providers list automatically and should not be passed in explicitly
>
229a273
> * [`storetheindex` documentation](https://github.com/filecoin-project/storetheindex/blob/main/doc/)
diff -r --color a/vendor/github.com/filecoin-project/index-provider/version.json b/vendor/github.com/filecoin-project/index-provider/version.json
2c2
< "version": "v0.8.1"
---
> "version": "v0.9.1"
Only in b/vendor/github.com/filecoin-project/storetheindex: announce
Only in b/vendor/github.com/filecoin-project/storetheindex/api/v0: common.go
Only in b/vendor/github.com/filecoin-project/storetheindex/api/v0: doc.go
Only in b/vendor/github.com/filecoin-project/storetheindex/api/v0: error.go
Only in b/vendor/github.com/filecoin-project/storetheindex/api/v0: httpclient
Only in b/vendor/github.com/filecoin-project/storetheindex/api/v0/ingest: client
Only in b/vendor/github.com/filecoin-project/storetheindex/api/v0/ingest: model
diff -r --color a/vendor/github.com/filecoin-project/storetheindex/api/v0/ingest/schema/doc.go b/vendor/github.com/filecoin-project/storetheindex/api/v0/ingest/schema/doc.go
6c6,7
< // IsRm Entries Action
---
> // IsRm Entries Action
> //
diff -r --color a/vendor/github.com/filecoin-project/storetheindex/api/v0/ingest/schema/envelope.go b/vendor/github.com/filecoin-project/storetheindex/api/v0/ingest/schema/envelope.go
11,13c11,13
< "github.com/libp2p/go-libp2p-core/crypto"
< "github.com/libp2p/go-libp2p-core/peer"
< "github.com/libp2p/go-libp2p-core/record"
---
> "github.com/libp2p/go-libp2p/core/crypto"
> "github.com/libp2p/go-libp2p/core/peer"
> "github.com/libp2p/go-libp2p/core/record"
21a22
> epSignatureCodec = "/indexer/ingest/extendedProviderSignature"
52a54,82
> type epSignatureRecord struct {
> domain *string
> codec []byte
> payload []byte
> }
>
> func (r *epSignatureRecord) Domain() string {
> if r.domain != nil {
> return *r.domain
> }
> return adSignatureDomain
> }
>
> func (r *epSignatureRecord) Codec() []byte {
> if r.codec != nil {
> return r.codec
> }
> return []byte(epSignatureCodec)
> }
>
> func (r *epSignatureRecord) MarshalRecord() ([]byte, error) {
> return r.payload, nil
> }
>
> func (r *epSignatureRecord) UnmarshalRecord(buf []byte) error {
> r.payload = buf
> return nil
> }
>
57c87
< bindex = (*ad.PreviousID).(cidlink.Link).Cid.Bytes()
---
> bindex = ad.PreviousID.(cidlink.Link).Cid.Bytes()
91a122,167
> // extendedProviderSignaturePayload generates the data payload used to compute the signature for a provider from the ExtendedProviders list.
> // This payload *doesn't* contain enough information to sign the ad itself.
> func extendedProviderSignaturePayload(ad *Advertisement, p *Provider) ([]byte, error) {
> if ad.IsRm {
> return nil, fmt.Errorf("rm ads are not supported for extended provider signatures")
> }
>
> bindex := cid.Undef.Bytes()
> if ad.PreviousID != nil {
> bindex = ad.PreviousID.(cidlink.Link).Cid.Bytes()
> }
> ent := ad.Entries.(cidlink.Link).Cid.Bytes()
>
> // An extended signature is an authorisation for the publisher of the main ad to publish on behalf of the signee.
> // For more details see https://github.com/filecoin-project/storetheindex/pull/804/files
> // The signature must contain the following fields:
> // - the main provider's identity
> // - the signee's identity, addrs and metadata, so that they can't be misrepresented
> // - contextID and override flag as they change the behaviour of how extended providers are interpreted (see the spec)
>
> var sigBuf bytes.Buffer
>
> var addrsLen int
> for _, addr := range p.Addresses {
> addrsLen += len(addr)
> }
>
> sigBuf.Grow(len(bindex) + len(ent) + len(ad.Provider) + len(ad.ContextID) + len(p.ID) + addrsLen + len(p.Metadata) + 1)
> sigBuf.Write(bindex)
> sigBuf.Write(ent)
> sigBuf.WriteString(ad.Provider)
> sigBuf.Write(ad.ContextID)
> sigBuf.WriteString(p.ID)
> for _, addr := range p.Addresses {
> sigBuf.WriteString(addr)
> }
> sigBuf.Write(p.Metadata)
> if ad.ExtendedProvider.Override {
> sigBuf.WriteByte(1)
> } else {
> sigBuf.WriteByte(0)
> }
>
> return multihash.Sum(sigBuf.Bytes(), multihash.SHA2_256, -1)
> }
>
92a169
> // This function will return an error if used to sign an ad with extended providers.
93a171,177
> if ad.ExtendedProvider != nil {
> return fmt.Errorf("the ad can not be signed because it has extended providers")
> }
> return ad.signAd(key)
> }
>
> func (ad *Advertisement) signAd(key crypto.PrivKey) error {
110a195,248
> // SignWithExtendedProviders signs an advertisement by the main provider as well as by all extended providers if they are present.
> func (ad *Advertisement) SignWithExtendedProviders(key crypto.PrivKey, extendedProviderKeyFetcher func(string) (crypto.PrivKey, error)) error {
> err := ad.signAd(key)
> if err != nil {
> return err
> }
>
> if ad.ExtendedProvider == nil {
> return nil
> }
>
> seenTopLevelProvider := false
>
> for i := range ad.ExtendedProvider.Providers {
> p := &ad.ExtendedProvider.Providers[i]
> if p.ID == ad.Provider {
> seenTopLevelProvider = true
> }
>
> payload, err := extendedProviderSignaturePayload(ad, p)
> if err != nil {
> return err
> }
>
> var privKey crypto.PrivKey
> if p.ID == ad.Provider {
> privKey = key
> } else {
> privKey, err = extendedProviderKeyFetcher(p.ID)
> if err != nil {
> return err
> }
> }
>
> envelope, err := record.Seal(&epSignatureRecord{payload: payload}, privKey)
> if err != nil {
> return err
> }
>
> sig, err := envelope.Marshal()
> if err != nil {
> return err
> }
>
> p.Signature = sig
> }
>
> if !seenTopLevelProvider && len(ad.ExtendedProvider.Providers) > 0 {
> return fmt.Errorf("extended providers must contain provider from the encapsulating advertisement")
> }
>
> return nil
> }
>
116a255,256
> //
> // Extended provider signatures are also verified.
150a291,322
> }
>
> if ad.ExtendedProvider != nil {
> rec := &epSignatureRecord{}
> // The top level provider must appear in the list of extended providers otherwise the ad is considered invalid
> seenTopLevelProv := false
> for _, p := range ad.ExtendedProvider.Providers {
>
> _, err = record.ConsumeTypedEnvelope(p.Signature, rec)
> if err != nil {
> return "", err
> }
>
> // Calculate our signature payload
> genPayload, err := extendedProviderSignaturePayload(ad, &p)
> if err != nil {
> return "", err
> }
>
> // Check that our own hash is equal to the hash from the signature.
> if !bytes.Equal(genPayload, rec.payload) {
> return "", errors.New("invalid signature")
> }
>
> if p.ID == ad.Provider {
> seenTopLevelProv = true
> }
> }
>
> if !seenTopLevelProv && len(ad.ExtendedProvider.Providers) > 0 {
> return "", fmt.Errorf("extended providers must contain provider from the encapsulating advertisement")
> }
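For context, a minimal sketch of calling the new signing entry point shown above. It assumes `ad` is a *schema.Advertisement with ExtendedProvider populated, `mainKey` is the publisher's crypto.PrivKey, and `keyFor` is a hypothetical lookup that returns the private key for an extended provider's peer ID; none of these names come from the diff itself:

    // Sign the ad and every entry in ExtendedProvider.Providers.
    err := ad.SignWithExtendedProviders(mainKey, func(peerID string) (crypto.PrivKey, error) {
        return keyFor(peerID) // hypothetical key lookup for extended providers
    })
    if err != nil {
        return err // e.g. the top-level provider is missing from Providers
    }
    // Note: plain ad.Sign(key) now returns an error whenever ExtendedProvider
    // is set, so this is the required path for such ads.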
diff -r --color a/vendor/github.com/filecoin-project/storetheindex/api/v0/ingest/schema/schema.ipldsch b/vendor/github.com/filecoin-project/storetheindex/api/v0/ingest/schema/schema.ipldsch
9a10,37
> # ExtendedProvider specifies an additional set of providers where the ad entries are available from
> type ExtendedProvider struct {
> # Providers is an additional list of providers where the ad entries are available from
> Providers [Provider]
> # Override defines mechanics for extending chain-level extended providers in the following way:
> # * If Override is set on an ExtendedProvider entry on an advertisement with a ContextID, it indicates that any specified chain-level
> 	#   set of providers should not be returned for that context ID. Providers will be returned instead.
> # * If Override is not set on an entry for an advertisement with a ContextID, it will be combined as a union with any chain-level ExtendedProviders (Addresses, Metadata).
> # * If Override is set on ExtendedProvider for an advertisement without a ContextID, the entry is invalid and should be ignored.
> Override Bool
> }
>
> # Provider contains details of a peer where ad entries are available from
> type Provider struct {
> # ID is a peer ID of the Provider
> ID String
> # Addresses is a list of multiaddresses of the Provider
> Addresses [String]
> # Metadata captures contextual information about how to retrieve the advertised content.
> Metadata Bytes
> # Signature is created by each provider with their corresponding private key
> # * The full advertisement object is serialized, with all instances of Signature replaced with an empty array of bytes.
> # * This serialization is then hashed, and the hash is then signed.
> # * The Provider from the encapsulating advertisement must be present in the Providers of the ExtendedProvider object,
> # and must sign in this way as well. It may omit Metadata and Addresses if they match the values already set at the encapsulating advertisement. However, Signature must be present.
> Signature Bytes
> }
>
21c49,57
< # Entries is a link to the chained EntryChunk instances that contain the multihashes advertised.
---
> # Entries is a link to a data structure that contains the advertised multihashes.
> # The data structure can either be:
> # * an interlinked chain of EntryChunk nodes, or
> # * an IPLD HAMT ADL, where the keys in the map represent the multihashes and the values are
> # simply set to true.
> #
> # See:
> # * https://ipld.io/specs/advanced-data-layouts/hamt/spec
> # * https://ipld.io/specs/advanced-data-layouts/hamt/spec/#use-as-a-set
23a60,61
> # If a Provider listing is written with no ContextID and IsRm=false, peers from ExtendedProvider
> # will be returned for all advertisements published by the publisher.
28a67,69
> # ExtendedProvider might optionally specify a set of providers where the ad entries are available from.
> # See: https://github.com/filecoin-project/storetheindex/blob/main/doc/ingest.md#extendedprovider
> ExtendedProvider optional ExtendedProvider
diff -r --color a/vendor/github.com/filecoin-project/storetheindex/api/v0/ingest/schema/types.go b/vendor/github.com/filecoin-project/storetheindex/api/v0/ingest/schema/types.go
12a13,24
> ExtendedProvider struct {
> Providers []Provider
> Override bool
> }
>
> Provider struct {
> ID string
> Addresses []string
> Metadata []byte
> Signature []byte
> }
>
14,21c26,34
< PreviousID *ipld.Link
< Provider string
< Addresses []string
< Signature []byte
< Entries ipld.Link
< ContextID []byte
< Metadata []byte
< IsRm bool
---
> PreviousID ipld.Link
> Provider string
> Addresses []string
> Signature []byte
> Entries ipld.Link
> ContextID []byte
> Metadata []byte
> IsRm bool
> ExtendedProvider *ExtendedProvider
25c38
< Next *ipld.Link
---
> Next ipld.Link
51c64
< // - linksystem in sti is passed into other libraries, like go-legs, and
---
> // - linksystem in sti is passed into other libraries, and
92c105
< // - linksystem in sti is passed into other libraries, like go-legs, and
---
> // - linksystem in sti is passed into other libraries, and
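A sketch of how the reworked types compose (field values here are illustrative assumptions; note that PreviousID and Next are now ipld.Link rather than *ipld.Link):

    ad := schema.Advertisement{
        Provider:  providerID, // peer ID string of the main provider
        Addresses: []string{"/ip4/127.0.0.1/tcp/3104"},
        Entries:   entriesLink, // ipld.Link to the entries structure
        ContextID: []byte("ctx-1"),
        ExtendedProvider: &schema.ExtendedProvider{
            Override: false,
            Providers: []schema.Provider{{
                ID:        providerID, // must include the top-level provider
                Addresses: []string{"/ip4/127.0.0.1/tcp/3104"},
            }},
        },
    }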
Only in b/vendor/github.com/filecoin-project/storetheindex/api/v0: protocol_ids.go
Only in b/vendor/github.com/filecoin-project/storetheindex: dagsync
Only in b/vendor/github.com/filecoin-project/storetheindex: mautil
diff -r --color a/vendor/github.com/filecoin-project/storetheindex/peerutil/policy.go b/vendor/github.com/filecoin-project/storetheindex/peerutil/policy.go
6c6
< "github.com/libp2p/go-libp2p-core/peer"
---
> "github.com/libp2p/go-libp2p/core/peer"
diff -r --color a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go
16,30c16,30
< // • When the default behavior of equality does not suit the needs of the test,
< // custom equality functions can override the equality operation.
< // For example, an equality function may report floats as equal so long as they
< // are within some tolerance of each other.
< //
< // • Types that have an Equal method may use that method to determine equality.
< // This allows package authors to determine the equality operation for the types
< // that they define.
< //
< // • If no custom equality functions are used and no Equal method is defined,
< // equality is determined by recursively comparing the primitive kinds on both
< // values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported
< // fields are not compared by default; they result in panics unless suppressed
< // by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly
< // compared using the Exporter option.
---
> // - When the default behavior of equality does not suit the test's needs,
> // custom equality functions can override the equality operation.
> // For example, an equality function may report floats as equal so long as
> // they are within some tolerance of each other.
> //
> // - Types with an Equal method may use that method to determine equality.
> // This allows package authors to determine the equality operation
> // for the types that they define.
> //
> // - If no custom equality functions are used and no Equal method is defined,
> // equality is determined by recursively comparing the primitive kinds on
> // both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual,
> // unexported fields are not compared by default; they result in panics
> // unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported)
> // or explicitly compared using the Exporter option.
48,66c48,66
< // • Let S be the set of all Ignore, Transformer, and Comparer options that
< // remain after applying all path filters, value filters, and type filters.
< // If at least one Ignore exists in S, then the comparison is ignored.
< // If the number of Transformer and Comparer options in S is greater than one,
< // then Equal panics because it is ambiguous which option to use.
< // If S contains a single Transformer, then use that to transform the current
< // values and recursively call Equal on the output values.
< // If S contains a single Comparer, then use that to compare the current values.
< // Otherwise, evaluation proceeds to the next rule.
< //
< // • If the values have an Equal method of the form "(T) Equal(T) bool" or
< // "(T) Equal(I) bool" where T is assignable to I, then use the result of
< // x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
< // evaluation proceeds to the next rule.
< //
< // • Lastly, try to compare x and y based on their basic kinds.
< // Simple kinds like booleans, integers, floats, complex numbers, strings, and
< // channels are compared using the equivalent of the == operator in Go.
< // Functions are only equal if they are both nil, otherwise they are unequal.
---
> // - Let S be the set of all Ignore, Transformer, and Comparer options that
> // remain after applying all path filters, value filters, and type filters.
> // If at least one Ignore exists in S, then the comparison is ignored.
> //     If the number of Transformer and Comparer options in S is greater than one,
> // then Equal panics because it is ambiguous which option to use.
> // If S contains a single Transformer, then use that to transform
> // the current values and recursively call Equal on the output values.
> // If S contains a single Comparer, then use that to compare the current values.
> // Otherwise, evaluation proceeds to the next rule.
> //
> // - If the values have an Equal method of the form "(T) Equal(T) bool" or
> // "(T) Equal(I) bool" where T is assignable to I, then use the result of
> // x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
> // evaluation proceeds to the next rule.
> //
> // - Lastly, try to compare x and y based on their basic kinds.
> // Simple kinds like booleans, integers, floats, complex numbers, strings,
> // and channels are compared using the equivalent of the == operator in Go.
> // Functions are only equal if they are both nil, otherwise they are unequal.
147c147
< t = reflect.TypeOf((*interface{})(nil)).Elem()
---
> t = anyType
641a642
> //
642a644
> //
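To make the documented precedence concrete, a small self-contained sketch where a single Comparer in S takes over from the default kind-based comparison:

    package main

    import (
        "fmt"
        "math"

        "github.com/google/go-cmp/cmp"
    )

    func main() {
        approx := cmp.Comparer(func(x, y float64) bool {
            return math.Abs(x-y) < 1e-9 // tolerance-based equality
        })
        fmt.Println(cmp.Equal(0.1+0.2, 0.3))         // false: == on floats
        fmt.Println(cmp.Equal(0.1+0.2, 0.3, approx)) // true: the Comparer wins
    }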
diff -r --color a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
130,132c130,132
< // • eq == (es.Dist()==0)
< // • nx == es.LenX()
< // • ny == es.LenY()
---
> // - eq == (es.Dist()==0)
> // - nx == es.LenX()
> // - ny == es.LenY()
172,173c172,173
< // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
< // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
---
> // - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
> // - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
176,177c176,178
< // • fwdFrontier.X < revFrontier.X
< // • fwdFrontier.Y < revFrontier.Y
---
> // - fwdFrontier.X < revFrontier.X
> // - fwdFrontier.Y < revFrontier.Y
> //
198,210c199,213
< // • Searching for differences switches back-and-forth between
< // a search that starts at the beginning (the top-left corner), and
< // a search that starts at the end (the bottom-right corner). The goal of
< // the search is connect with the search from the opposite corner.
< // • As we search, we build a path in a greedy manner, where the first
< // match seen is added to the path (this is sub-optimal, but provides a
< // decent result in practice). When matches are found, we try the next pair
< // of symbols in the lists and follow all matches as far as possible.
< // • When searching for matches, we search along a diagonal going through
< // through the "frontier" point. If no matches are found, we advance the
< // frontier towards the opposite corner.
< // • This algorithm terminates when either the X coordinates or the
< // Y coordinates of the forward and reverse frontier points ever intersect.
---
> // - Searching for differences switches back-and-forth between
> // a search that starts at the beginning (the top-left corner), and
> // a search that starts at the end (the bottom-right corner).
> //     The goal of the search is to connect with the search
> // from the opposite corner.
> // - As we search, we build a path in a greedy manner,
> // where the first match seen is added to the path (this is sub-optimal,
> // but provides a decent result in practice). When matches are found,
> // we try the next pair of symbols in the lists and follow all matches
> // as far as possible.
> // - When searching for matches, we search along a diagonal going through
> // through the "frontier" point. If no matches are found,
> // we advance the frontier towards the opposite corner.
> // - This algorithm terminates when either the X coordinates or the
> // Y coordinates of the forward and reverse frontier points ever intersect.
391a395
> //
Only in a/vendor/github.com/google/go-cmp/cmp/internal/value: zero.go
diff -r --color a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go
35a36
> //
45a47
> //
339,341c341,343
< // • Symmetric: equal(x, y) == equal(y, x)
< // • Deterministic: equal(x, y) == equal(x, y)
< // • Pure: equal(x, y) does not modify x or y
---
> // - Symmetric: equal(x, y) == equal(y, x)
> // - Deterministic: equal(x, y) == equal(x, y)
> // - Pure: equal(x, y) does not modify x or y
433c435
< // is provided by cmp when calling Result (see Reporter).
---
> // is provided by cmp when calling Report (see Reporter).
diff -r --color a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go
44,50c44,50
< // • For StructField, both are not interface-able if the current field
< // is unexported and the struct type is not explicitly permitted by
< // an Exporter to traverse unexported fields.
< // • For SliceIndex, one may be invalid if an element is missing from
< // either the x or y slice.
< // • For MapIndex, one may be invalid if an entry is missing from
< // either the x or y map.
---
> // - For StructField, both are not interface-able if the current field
> // is unexported and the struct type is not explicitly permitted by
> // an Exporter to traverse unexported fields.
> // - For SliceIndex, one may be invalid if an element is missing from
> // either the x or y slice.
> // - For MapIndex, one may be invalid if an entry is missing from
> // either the x or y map.
96a97
> //
110a112
> //
162c164
< s := ps.typ.String()
---
> s := value.TypeString(ps.typ, false)
285c287
< func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) }
---
> func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) }
diff -r --color a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go
10,11d9
<
< "github.com/google/go-cmp/cmp/internal/value"
120c118
< isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == reflect.TypeOf(byte(0))
---
> isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType
251c249
< isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY)
---
> isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero()
253c251
< isZero = value.IsZero(r.Value.ValueX)
---
> isZero = r.Value.ValueX.IsZero()
255c253
< isZero = value.IsZero(r.Value.ValueY)
---
> isZero = r.Value.ValueY.IsZero()
diff -r --color a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
18a19,25
> var (
> anyType = reflect.TypeOf((*interface{})(nil)).Elem()
> stringType = reflect.TypeOf((*string)(nil)).Elem()
> bytesType = reflect.TypeOf((*[]byte)(nil)).Elem()
> byteType = reflect.TypeOf((*byte)(nil)).Elem()
> )
>
187c194
< if value.IsZero(vv) {
---
> if vv.IsZero() {
208c215
< if t.Elem() == reflect.TypeOf(byte(0)) {
---
> if t.Elem() == byteType {
diff -r --color a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go
107c107
< case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)):
---
> case t.Kind() == reflect.Slice && t.Elem() == byteType:
150c150,153
< isPureLinedText = efficiencyLines < 4*efficiencyBytes
---
> quotedLength := len(strconv.Quote(sx + sy))
> unquotedLength := len(sx) + len(sy)
> escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength)
> isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1
174,177c177,180
< // • A line starts with `"""`
< // • A line starts with "..."
< // • A line contains non-printable characters
< // • Adjacent different lines differ only by whitespace
---
> // - A line starts with `"""`
> // - A line starts with "..."
> // - A line contains non-printable characters
> // - Adjacent different lines differ only by whitespace
179a183
> //
234c238
< if t != reflect.TypeOf(string("")) {
---
> if t != stringType {
329c333
< if t != reflect.TypeOf(string("")) {
---
> if t != stringType {
334c338
< if t != reflect.TypeOf([]byte(nil)) {
---
> if t != bytesType {
449d452
< //
506d508
< //
551d552
< //
diff -r --color a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go
395a396
> //
diff -r --color a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go
438,439c438
< // after the handler returns, unless the KeepContext option is set on the
< // Router.
---
> // after the handler returns.
diff -r --color a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go
327a328,333
> if v.host.wildcardHostPort {
> // Don't be strict on the port match
> if i := strings.Index(host, ":"); i != -1 {
> host = host[:i]
> }
> }
Only in b/vendor/github.com/hashicorp: go-hclog
Only in b/vendor/github.com/hashicorp: go-immutable-radix
Only in b/vendor/github.com/hashicorp: go-msgpack
Only in b/vendor/github.com/hashicorp: raft
Only in b/vendor/github.com/hashicorp: raft-boltdb
diff -r --color a/vendor/github.com/ipfs/go-cid/cid_fuzz.go b/vendor/github.com/ipfs/go-cid/cid_fuzz.go
2d1
< // +build gofuzz
diff -r --color a/vendor/github.com/ipfs/go-cid/cid.go b/vendor/github.com/ipfs/go-cid/cid.go
13c13
< // <cidv1> ::= <multibase-prefix><cid-version><multicodec-packed-content-type><multihash-content-address>
---
> // <cidv1> ::= <multibase-prefix><cid-version><multicodec-packed-content-type><multihash-content-address>
183a184,192
> // MustParse calls Parse but will panic on error.
> func MustParse(v interface{}) Cid {
> c, err := Parse(v)
> if err != nil {
> panic(err)
> }
> return c
> }
>
187c196
< // <multibase-type-code><base-encoded-string>
---
> // <multibase-type-code><base-encoded-string>
243c252
< // <version><codec-type><multihash>
---
> // <version><codec-type><multihash>
371a381,383
> //
> // If c.Defined() == false, it returns a nil slice and may not
> // be parsable with Cast().
372a385,387
> if !c.Defined() {
> return nil
> }
453c468
< // { "/": "<cid-string>" }
---
> // { "/": "<cid-string>" }
510c525,526
< // use the V0Builder or V1Builder structures instead
---
> //
> // use the V0Builder or V1Builder structures instead
549c565
< // <version><codec><mh-type><mh-length>
---
> // <version><codec><mh-type><mh-length>
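The MustParse added above is a panic-on-error convenience over Parse; a small sketch:

    package main

    import (
        "fmt"

        "github.com/ipfs/go-cid"
    )

    func main() {
        // Panics on malformed input, which suits tests and package-level vars.
        c := cid.MustParse("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi")
        fmt.Println(c.Version(), c)
    }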
diff -r --color a/vendor/github.com/ipfs/go-cid/version.json b/vendor/github.com/ipfs/go-cid/version.json
2c2
< "version": "v0.2.0"
---
> "version": "v0.3.2"
diff -r --color a/vendor/github.com/ipfs/go-datastore/basic_ds.go b/vendor/github.com/ipfs/go-datastore/basic_ds.go
92,147d91
< // NullDatastore stores nothing, but conforms to the API.
< // Useful to test with.
< type NullDatastore struct {
< }
<
< var _ Datastore = (*NullDatastore)(nil)
< var _ Batching = (*NullDatastore)(nil)
<
< // NewNullDatastore constructs a null datastoe
< func NewNullDatastore() *NullDatastore {
< return &NullDatastore{}
< }
<
< // Put implements Datastore.Put
< func (d *NullDatastore) Put(ctx context.Context, key Key, value []byte) (err error) {
< return nil
< }
<
< // Sync implements Datastore.Sync
< func (d *NullDatastore) Sync(ctx context.Context, prefix Key) error {
< return nil
< }
<
< // Get implements Datastore.Get
< func (d *NullDatastore) Get(ctx context.Context, key Key) (value []byte, err error) {
< return nil, ErrNotFound
< }
<
< // Has implements Datastore.Has
< func (d *NullDatastore) Has(ctx context.Context, key Key) (exists bool, err error) {
< return false, nil
< }
<
< // Has implements Datastore.GetSize
< func (d *NullDatastore) GetSize(ctx context.Context, key Key) (size int, err error) {
< return -1, ErrNotFound
< }
<
< // Delete implements Datastore.Delete
< func (d *NullDatastore) Delete(ctx context.Context, key Key) (err error) {
< return nil
< }
<
< // Query implements Datastore.Query
< func (d *NullDatastore) Query(ctx context.Context, q dsq.Query) (dsq.Results, error) {
< return dsq.ResultsWithEntries(q, nil), nil
< }
<
< func (d *NullDatastore) Batch(ctx context.Context) (Batch, error) {
< return NewBasicBatch(d), nil
< }
<
< func (d *NullDatastore) Close() error {
< return nil
< }
<
diff -r --color a/vendor/github.com/ipfs/go-datastore/datastore.go b/vendor/github.com/ipfs/go-datastore/datastore.go
7d6
< "time"
106,107c105
<
< Batch(ctx context.Context) (Batch, error)
---
> BatchingFeature
118,119c116
<
< Check(ctx context.Context) error
---
> CheckedFeature
127,128c124
<
< Scrub(ctx context.Context) error
---
> ScrubbedFeature
135,136c131
<
< CollectGarbage(ctx context.Context) error
---
> GCFeature
143,145c138
<
< // DiskUsage returns the space used by a datastore, in bytes.
< DiskUsage(ctx context.Context) (uint64, error)
---
> PersistentFeature
166,172d158
< // TTL encapulates the methods that deal with entries with time-to-live.
< type TTL interface {
< PutWithTTL(ctx context.Context, key Key, value []byte, ttl time.Duration) error
< SetTTL(ctx context.Context, key Key, ttl time.Duration) error
< GetExpiration(ctx context.Context, key Key) (time.Time, error)
< }
<
197,198c183
<
< NewTransaction(ctx context.Context, readOnly bool) (Txn, error)
---
> TxnFeature
Only in b/vendor/github.com/ipfs/go-datastore: features.go
Only in b/vendor/github.com/ipfs/go-datastore: null_ds.go
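The Batching/Checked/Scrubbed/GC/Persistent/Txn interfaces now embed the corresponding *Feature interfaces from the new features.go, so caller-side feature detection is unchanged; a sketch against the public API only:

    package main

    import (
        "context"

        datastore "github.com/ipfs/go-datastore"
    )

    func main() {
        ctx := context.Background()
        // NullDatastore moved to null_ds.go but keeps its behaviour.
        var ds datastore.Datastore = datastore.NewNullDatastore()
        if b, ok := ds.(datastore.Batching); ok { // satisfied via BatchingFeature
            if batch, err := b.Batch(ctx); err == nil {
                _ = batch.Put(ctx, datastore.NewKey("/k"), []byte("v"))
                _ = batch.Commit(ctx)
            }
        }
    }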
diff -r --color a/vendor/github.com/ipfs/go-datastore/version.json b/vendor/github.com/ipfs/go-datastore/version.json
2c2
< "version": "v0.5.1"
---
> "version": "v0.6.0"
diff -r --color a/vendor/github.com/ipfs/go-graphsync/CHANGELOG.md b/vendor/github.com/ipfs/go-graphsync/CHANGELOG.md
2a3,27
> # go-graphsync v0.13.2
>
> Minor stability and dependency upgrade prior to future breaking library changes
>
> ### Changelog
>
> - github.com/ipfs/go-graphsync:
> - chore(deps): upgrade libp2p & ipld-prime (#389) ([ipfs/go-graphsync#389](https://github.com/ipfs/go-graphsync/pull/389))
> - Add .github/workflows/stale.yml
> - chore(ipld): switch to using top-level ipld-prime codec helpers (#383) ([ipfs/go-graphsync#383](https://github.com/ipfs/go-graphsync/pull/383))
> - feat(requestmanager): read request from context (#381) ([ipfs/go-graphsync#381](https://github.com/ipfs/go-graphsync/pull/381))
> - fix: minor typo in error msg
> - fix(panics): lift panic recovery up to top of network handling
> - feat: expand use of panic handler to cover network and codec interaction
> - feat(panics): capture panics from selector execution
>
> ### Contributors
>
> | Contributor | Commits | Lines ± | Files Changed |
> |-------------|---------|---------|---------------|
> | Rod Vagg | 4 | +463/-445 | 50 |
> | Hannah Howard | 2 | +246/-108 | 10 |
> | hannahhoward | 1 | +116/-38 | 10 |
> | ipfs-mgmt-read-write[bot] | 1 | +26/-0 | 1 |
>
diff -r --color a/vendor/github.com/ipfs/go-graphsync/graphsync.go b/vendor/github.com/ipfs/go-graphsync/graphsync.go
440a441,444
> // RequestIDContextKey is used to the desired request id in context when
> // initializing a request
> type RequestIDContextKey struct{}
>
diff -r --color a/vendor/github.com/ipfs/go-graphsync/impl/graphsync.go b/vendor/github.com/ipfs/go-graphsync/impl/graphsync.go
21a22
> "github.com/ipfs/go-graphsync/panics"
87a89
> panicCallback panics.CallBackFn
190a193,202
> // PanicCallback allows calling code to receive information about panics that
> // Graphsync recovers from. Graphsync recovers panics that occur during
> // per-request execution in order to keep the overall system running, although
> // they are still treated as standard errors in normal execution flow.
> func PanicCallback(callbackFn panics.CallBackFn) Option {
> return func(gs *graphsyncConfigOptions) {
> gs.panicCallback = callbackFn
> }
> }
>
204a217
> panicCallback: nil,
233c246
< requestManager := requestmanager.New(ctx, persistenceOptions, linkSystem, outgoingRequestHooks, incomingResponseHooks, networkErrorListeners, outgoingRequestProcessingListeners, requestQueue, network.ConnectionManager(), gsConfig.maxLinksPerOutgoingRequest)
---
> requestManager := requestmanager.New(ctx, persistenceOptions, linkSystem, outgoingRequestHooks, incomingResponseHooks, networkErrorListeners, outgoingRequestProcessingListeners, requestQueue, network.ConnectionManager(), gsConfig.maxLinksPerOutgoingRequest, gsConfig.panicCallback)
253a267
> gsConfig.panicCallback,
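A hedged sketch of pinning the outgoing request ID via context, which the request manager now reads (see the requestmanager/client.go hunk below); `exchange`, `p`, `rootLink` and `selectorNode` are assumed to exist:

    id := graphsync.NewRequestID()
    ctx = context.WithValue(ctx, graphsync.RequestIDContextKey{}, id)
    respCh, errCh := exchange.Request(ctx, p, rootLink, selectorNode)

A panics.CallBackFn can likewise be installed at construction time via the PanicCallback options above to observe recovered panics, which otherwise only surface as ordinary request errors.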
Only in a/vendor/github.com/ipfs/go-graphsync/ipldutil: ipldutil.go
diff -r --color a/vendor/github.com/ipfs/go-graphsync/ipldutil/traverser.go b/vendor/github.com/ipfs/go-graphsync/ipldutil/traverser.go
14a15,16
>
> "github.com/ipfs/go-graphsync/panics"
43,48c45,51
< Root ipld.Link
< Selector ipld.Node
< Visitor traversal.AdvVisitFn
< LinkSystem ipld.LinkSystem
< Chooser traversal.LinkTargetNodePrototypeChooser
< Budget *traversal.Budget
---
> Root ipld.Link
> Selector ipld.Node
> Visitor traversal.AdvVisitFn
> LinkSystem ipld.LinkSystem
> Chooser traversal.LinkTargetNodePrototypeChooser
> Budget *traversal.Budget
> PanicCallback panics.CallBackFn
93,100c96,104
< ctx: ctx,
< cancel: cancel,
< root: tb.Root,
< selector: tb.Selector,
< linkSystem: tb.LinkSystem,
< budget: tb.Budget,
< responses: make(chan nextResponse),
< stopped: make(chan struct{}),
---
> ctx: ctx,
> cancel: cancel,
> root: tb.Root,
> selector: tb.Selector,
> linkSystem: tb.LinkSystem,
> budget: tb.Budget,
> responses: make(chan nextResponse),
> stopped: make(chan struct{}),
> panicHandler: panics.MakeHandler(tb.PanicCallback),
129,137c133,142
< blocksCount int
< ctx context.Context
< cancel context.CancelFunc
< root ipld.Link
< selector ipld.Node
< visitor traversal.AdvVisitFn
< linkSystem ipld.LinkSystem
< chooser traversal.LinkTargetNodePrototypeChooser
< budget *traversal.Budget
---
> blocksCount int
> ctx context.Context
> cancel context.CancelFunc
> root ipld.Link
> selector ipld.Node
> visitor traversal.AdvVisitFn
> linkSystem ipld.LinkSystem
> chooser traversal.LinkTargetNodePrototypeChooser
> budget *traversal.Budget
> panicHandler panics.PanicHandler
199c204,210
< defer close(t.stopped)
---
> defer func() {
> // catch panics that occur in selector traversal, treat as an errored traversal
> if err := t.panicHandler(recover()); err != nil {
> t.writeDone(err)
> }
> close(t.stopped)
> }()
Only in a/vendor/github.com/ipfs/go-graphsync/message/ipldbind: util.go
diff -r --color a/vendor/github.com/ipfs/go-graphsync/message/message.go b/vendor/github.com/ipfs/go-graphsync/message/message.go
4d3
< "bytes"
50,52c49,50
< var buf bytes.Buffer
< dagjson.Encode(gsr.selector, &buf)
< sel = buf.String()
---
> byts, _ := ipld.Encode(gsr.selector, dagjson.Encode)
> sel = string(byts)
diff -r --color a/vendor/github.com/ipfs/go-graphsync/message/v1/message.go b/vendor/github.com/ipfs/go-graphsync/message/v1/message.go
11a12,13
> "github.com/ipld/go-ipld-prime"
> "github.com/ipld/go-ipld-prime/codec/dagcbor"
12a15
> "github.com/ipld/go-ipld-prime/node/basicnode"
20d22
< "github.com/ipfs/go-graphsync/ipldutil"
109c111
< selector, err = ipldutil.EncodeNode(request.Selector())
---
> selector, err = ipld.Encode(request.Selector(), dagcbor.Encode)
205c207
< selector, err := ipldutil.DecodeNode(req.Selector)
---
> selector, err := ipld.DecodeUsingPrototype(req.Selector, dagcbor.Decode, basicnode.Prototype.Any)
266c268
< byts, err := ipldutil.EncodeNode(data)
---
> byts, err := ipld.Encode(data, dagcbor.Encode)
278,282c280,281
< mdNode, err := metadata.EncodeMetadata(md)
< if err != nil {
< return nil, err
< }
< mdByts, err := ipldutil.EncodeNode(mdNode)
---
> mdNode := metadata.EncodeMetadata(md)
> mdByts, err := ipld.Encode(mdNode, dagcbor.Encode)
301c300
< node, err = ipldutil.DecodeNode(data)
---
> node, err = ipld.DecodeUsingPrototype(data, dagcbor.Decode, basicnode.Prototype.Any)
diff -r --color a/vendor/github.com/ipfs/go-graphsync/message/v1/metadata/metadata.go b/vendor/github.com/ipfs/go-graphsync/message/v1/metadata/metadata.go
5a6
> "github.com/ipld/go-ipld-prime/node/bindnode"
9d9
< "github.com/ipfs/go-graphsync/message/ipldbind"
48,51c48
< metadata, err := ipldbind.SafeUnwrap(builder.Build())
< if err != nil {
< return nil, err
< }
---
> metadata := bindnode.Unwrap(builder.Build())
56,57c53,54
< func EncodeMetadata(entries Metadata) (datamodel.Node, error) {
< return ipldbind.SafeWrap(&entries, Prototype.Metadata.Type())
---
> func EncodeMetadata(entries Metadata) datamodel.Node {
> return bindnode.Wrap(&entries, Prototype.Metadata.Type())
diff -r --color a/vendor/github.com/ipfs/go-graphsync/message/v2/message.go b/vendor/github.com/ipfs/go-graphsync/message/v2/message.go
10a11
> "github.com/ipld/go-ipld-prime"
12a14
> "github.com/ipld/go-ipld-prime/node/bindnode"
45,46c47
< builder := ipldbind.Prototype.Message.Representation().NewBuilder()
< err = dagcbor.Decode(builder, bytes.NewReader(msg))
---
> node, err := ipld.DecodeUsingPrototype(msg, dagcbor.Decode, ipldbind.Prototype.Message.Representation())
50,51c51
< node := builder.Build()
< ipldGSM, err := ipldbind.SafeUnwrap(node)
---
> ipldGSM := bindnode.Unwrap(node)
143,147c143,144
< node, err := ipldbind.SafeWrap(msg, ipldbind.Prototype.Message.Type())
< if err != nil {
< return err
< }
< err = dagcbor.Encode(node.Representation(), buf)
---
> node := bindnode.Wrap(msg, ipldbind.Prototype.Message.Type())
> err = ipld.EncodeStreaming(buf, node.Representation(), dagcbor.Encode)
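The codecs now use go-ipld-prime's top-level helpers in place of the removed ipldutil/ipldbind wrappers; a self-contained round-trip sketch with those helpers:

    package main

    import (
        "fmt"

        "github.com/ipld/go-ipld-prime"
        "github.com/ipld/go-ipld-prime/codec/dagcbor"
        "github.com/ipld/go-ipld-prime/fluent"
        "github.com/ipld/go-ipld-prime/node/basicnode"
    )

    func main() {
        n := fluent.MustBuildMap(basicnode.Prototype.Map, 1, func(ma fluent.MapAssembler) {
            ma.AssembleEntry("hello").AssignString("world")
        })
        byts, err := ipld.Encode(n, dagcbor.Encode) // replaces ipldutil.EncodeNode
        if err != nil {
            panic(err)
        }
        back, err := ipld.DecodeUsingPrototype(byts, dagcbor.Decode, basicnode.Prototype.Any)
        if err != nil {
            panic(err)
        }
        fmt.Println(back.Kind()) // map
    }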
diff -r --color a/vendor/github.com/ipfs/go-graphsync/network/libp2p_impl.go b/vendor/github.com/ipfs/go-graphsync/network/libp2p_impl.go
19a20
> "github.com/ipfs/go-graphsync/panics"
36a38,47
> // PanicCallback allows calling code to receive information about panics that
> // Graphsync recovers from. Graphsync recovers panics that occur during
> // message handling in order to keep the overall system running, although
> // they are still treated as standard errors in normal execution flow.
> func PanicCallback(callbackFn panics.CallBackFn) Option {
> return func(gsnet *libp2pGraphSyncNetwork) {
> gsnet.panicCallback = callbackFn
> }
> }
>
39,42d49
< messageHandlerSelector := messageHandlerSelector{
< v1MessageHandler: gsmsgv1.NewMessageHandler(),
< v2MessageHandler: gsmsgv2.NewMessageHandler(),
< }
44,46c51,52
< host: host,
< messageHandlerSelector: &messageHandlerSelector,
< protocols: []protocol.ID{ProtocolGraphsync_2_0_0, ProtocolGraphsync_1_0_0},
---
> host: host,
> protocols: []protocol.ID{ProtocolGraphsync_2_0_0, ProtocolGraphsync_1_0_0},
52a59,66
> graphSyncNetwork.panicHandler = panics.MakeHandler(graphSyncNetwork.panicCallback)
>
> graphSyncNetwork.messageHandlerSelector = &messageHandlerSelector{
> v1MessageHandler: gsmsgv1.NewMessageHandler(),
> v2MessageHandler: gsmsgv2.NewMessageHandler(),
> panicHandler: graphSyncNetwork.panicHandler,
> }
>
75a90,91
>
> panicHandler panics.PanicHandler
96a113,114
> panicCallback panics.CallBackFn
> panicHandler panics.PanicHandler
117c135,142
< func msgToStream(ctx context.Context, s network.Stream, mh *messageHandlerSelector, msg gsmsg.GraphSyncMessage, timeout time.Duration) error {
---
> func msgToStream(ctx context.Context, s network.Stream, mh *messageHandlerSelector, msg gsmsg.GraphSyncMessage, timeout time.Duration) (err error) {
> defer func() {
> if rerr := mh.panicHandler(recover()); rerr != nil {
> 			log.Warnf("recovered panic handling message: %s", rerr)
> err = rerr
> }
> }()
>
137c162,163
< return nil
---
>
> return err
188a215,216
> var p peer.ID
>
189a218,224
> defer func() {
> if rerr := gsnet.panicHandler(recover()); rerr != nil {
> log.Debugf("graphsync net handleNewStream recovered error from %s error: %s", s.Conn().RemotePeer(), rerr)
> _ = s.Reset()
> go gsnet.receiver.ReceiveError(p, rerr)
> }
> }()
197a233
> p = s.Conn().RemotePeer()
199d234
< p := s.Conn().RemotePeer()
Only in b/vendor/github.com/ipfs/go-graphsync: panics
diff -r --color a/vendor/github.com/ipfs/go-graphsync/persistenceoptions/persistenceoptions.go b/vendor/github.com/ipfs/go-graphsync/persistenceoptions/persistenceoptions.go
29c29
< return errors.New("persistence option alreayd registered")
---
> return errors.New("persistence option already registered")
diff -r --color a/vendor/github.com/ipfs/go-graphsync/requestmanager/client.go b/vendor/github.com/ipfs/go-graphsync/requestmanager/client.go
27a28
> "github.com/ipfs/go-graphsync/panics"
91a93
> panicCallback panics.CallBackFn
126a129
> panicCallback panics.CallBackFn,
144a148
> panicCallback: panicCallback,
176a181,186
> requestID := graphsync.NewRequestID()
> idFromContext := ctx.Value(graphsync.RequestIDContextKey{})
> if existingRequestID, ok := idFromContext.(graphsync.RequestID); ok {
> requestID = existingRequestID
> }
>
179c189
< rm.send(&newRequestMessage{span, p, root, selectorNode, extensions, inProgressRequestChan}, ctx.Done())
---
> rm.send(&newRequestMessage{requestID, span, p, root, selectorNode, extensions, inProgressRequestChan}, ctx.Done())
diff -r --color a/vendor/github.com/ipfs/go-graphsync/requestmanager/messages.go b/vendor/github.com/ipfs/go-graphsync/requestmanager/messages.go
106a107
> requestID graphsync.RequestID
118c119
< ipr.request, ipr.incoming, ipr.incomingError = rm.newRequest(nrm.span, nrm.p, nrm.root, nrm.selector, nrm.extensions)
---
> ipr.request, ipr.incoming, ipr.incomingError = rm.newRequest(nrm.requestID, nrm.span, nrm.p, nrm.root, nrm.selector, nrm.extensions)
diff -r --color a/vendor/github.com/ipfs/go-graphsync/requestmanager/server.go b/vendor/github.com/ipfs/go-graphsync/requestmanager/server.go
16a17
> "github.com/ipld/go-ipld-prime/codec/dagcbor"
63,64c64
< func (rm *RequestManager) newRequest(parentSpan trace.Span, p peer.ID, root ipld.Link, selector ipld.Node, extensions []graphsync.ExtensionData) (gsmsg.GraphSyncRequest, chan graphsync.ResponseProgress, chan error) {
< requestID := graphsync.NewRequestID()
---
> func (rm *RequestManager) newRequest(requestID graphsync.RequestID, parentSpan trace.Span, p peer.ID, root ipld.Link, selector ipld.Node, extensions []graphsync.ExtensionData) (gsmsg.GraphSyncRequest, chan graphsync.ResponseProgress, chan error) {
160,162c160,163
< Chooser: ipr.nodeStyleChooser,
< LinkSystem: rm.linkSystem,
< Budget: budget,
---
> Chooser: ipr.nodeStyleChooser,
> LinkSystem: rm.linkSystem,
> Budget: budget,
> PanicCallback: rm.panicCallback,
374c375
< _, err := ipldutil.EncodeNode(selectorSpec)
---
> _, err := selector.ParseSelector(selectorSpec)
378c379
< _, err = selector.ParseSelector(selectorSpec)
---
> _, err = ipld.Encode(selectorSpec, dagcbor.Encode)
diff -r --color a/vendor/github.com/ipfs/go-graphsync/responsemanager/client.go b/vendor/github.com/ipfs/go-graphsync/responsemanager/client.go
18a19
> "github.com/ipfs/go-graphsync/panics"
109a111
> panicCallback panics.CallBackFn
125a128
> panicCallback panics.CallBackFn,
146a150
> panicCallback: panicCallback,
diff -r --color a/vendor/github.com/ipfs/go-graphsync/responsemanager/querypreparer.go b/vendor/github.com/ipfs/go-graphsync/responsemanager/querypreparer.go
21a22
> "github.com/ipfs/go-graphsync/panics"
38a40
> panicCallback panics.CallBackFn
90,94c92,97
< Root: rootLink,
< Selector: request.Selector(),
< LinkSystem: linkSystem,
< Chooser: result.CustomChooser,
< Budget: budget,
---
> Root: rootLink,
> Selector: request.Selector(),
> LinkSystem: linkSystem,
> Chooser: result.CustomChooser,
> Budget: budget,
> PanicCallback: qe.panicCallback,
diff -r --color a/vendor/github.com/ipfs/go-graphsync/responsemanager/server.go b/vendor/github.com/ipfs/go-graphsync/responsemanager/server.go
268c268
< loader, traverser, isPaused, err := (&queryPreparer{rm.requestHooks, rm.linkSystem, rm.maxLinksPerRequest}).prepareQuery(response.ctx, response.peer, response.request, response.responseStream, response.signals)
---
> loader, traverser, isPaused, err := (&queryPreparer{rm.requestHooks, rm.linkSystem, rm.maxLinksPerRequest, rm.panicCallback}).prepareQuery(response.ctx, response.peer, response.request, response.responseStream, response.signals)
diff -r --color a/vendor/github.com/ipfs/go-graphsync/version.json b/vendor/github.com/ipfs/go-graphsync/version.json
2c2
< "version": "v0.13.1"
---
> "version": "v0.13.2"
diff -r --color a/vendor/github.com/ipfs/go-ipns/errors.go b/vendor/github.com/ipfs/go-ipns/errors.go
37a38,44
>
> // 10 KiB limit defined in https://github.com/ipfs/specs/pull/319
> const MaxRecordSize int = 10 << (10 * 1)
>
> // ErrRecordSize should be returned when an ipns record is
> // invalid due to being too big
> var ErrRecordSize = errors.New("record exceeds allowed size limit")
diff -r --color a/vendor/github.com/ipfs/go-ipns/ipns.go b/vendor/github.com/ipfs/go-ipns/ipns.go
55a56,59
> // For now we still create V1 signatures. These are deprecated, and not
> // used during verification anymore (Validate func requires SignatureV2),
> // but setting it here allows legacy nodes (e.g., go-ipfs < v0.9.0) to
> // still resolve IPNS published by modern nodes.
130a135,139
> // Make sure max size is respected
> if entry.Size() > MaxRecordSize {
> return ErrRecordSize
> }
>
131a141,144
> if entry.GetSignatureV2() == nil {
> // always error if no valid signature could be found
> return ErrSignature
> }
133,141c146,152
< // Check v2 signature if it's available, otherwise use the v1 signature
< if entry.GetSignatureV2() != nil {
< sig2Data, err := ipnsEntryDataForSigV2(entry)
< if err != nil {
< return fmt.Errorf("could not compute signature data: %w", err)
< }
< if ok, err := pk.Verify(sig2Data, entry.GetSignatureV2()); err != nil || !ok {
< return ErrSignature
< }
---
> sig2Data, err := ipnsEntryDataForSigV2(entry)
> if err != nil {
> return fmt.Errorf("could not compute signature data: %w", err)
> }
> if ok, err := pk.Verify(sig2Data, entry.GetSignatureV2()); err != nil || !ok {
> return ErrSignature
> }
143,152c154,159
< // TODO: If we switch from pb.IpnsEntry to a more generic IpnsRecord type then perhaps we should only check
< // this if there is no v1 signature. In the meanwhile this helps avoid some potential rough edges around people
< // checking the entry fields instead of doing CBOR decoding everywhere.
< if err := validateCborDataMatchesPbData(entry); err != nil {
< return err
< }
< } else {
< if ok, err := pk.Verify(ipnsEntryDataForSigV1(entry), entry.GetSignatureV1()); err != nil || !ok {
< return ErrSignature
< }
---
> // TODO: If we switch from pb.IpnsEntry to a more generic IpnsRecord type then perhaps we should only check
> // this if there is no v1 signature. In the meanwhile this helps avoid some potential rough edges around people
> // checking the entry fields instead of doing CBOR decoding everywhere.
> // See https://github.com/ipfs/go-ipns/pull/42 for next steps here
> if err := validateCborDataMatchesPbData(entry); err != nil {
> return err
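A sketch of the stricter validation path (`sk`/`pk` are an assumed libp2p key pair, `value` an /ipfs/ path as bytes; the Create signature is assumed to be as vendored here, with "time" imported):

    entry, err := ipns.Create(sk, value, 1, time.Now().Add(24*time.Hour), time.Minute)
    if err != nil {
        return err
    }
    // Validate now insists on SignatureV2 and rejects records larger than
    // MaxRecordSize (10 KiB), returning ErrSignature or ErrRecordSize.
    if err := ipns.Validate(pk, entry); err != nil {
        return err
    }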
diff -r --color a/vendor/github.com/ipfs/go-ipns/README.md b/vendor/github.com/ipfs/go-ipns/README.md
3,5c3,4
< [![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://protocol.ai)
< [![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/)
< [![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs)
---
> [![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai)
> [![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](https://ipfs.tech/)
11c10
< This package contains all of the components necessary to create, understand, and validate IPNS records. It does *not* publish or resolve those records. [`go-ipfs`](https://github.com/ipfs/go-ipfs) uses this package internally to manipulate records.
---
> This package contains all of the components necessary to create, understand, and validate IPNS records. It does *not* publish or resolve those records. [Kubo](https://github.com/ipfs/kubo) uses this package internally to manipulate records.
diff -r --color a/vendor/github.com/ipfs/go-ipns/version.json b/vendor/github.com/ipfs/go-ipns/version.json
2c2
< "version": "v0.2.0"
---
> "version": "v0.3.0"
diff -r --color a/vendor/github.com/ipfs/go-merkledag/coding.go b/vendor/github.com/ipfs/go-merkledag/coding.go
25a26,33
> // pbLinkSlice is a slice of pb.PBLink, similar to LinkSlice but for sorting the
> // PB form
> type pbLinkSlice []*pb.PBLink
>
> func (pbls pbLinkSlice) Len() int { return len(pbls) }
> func (pbls pbLinkSlice) Swap(a, b int) { pbls[a], pbls[b] = pbls[b], pbls[a] }
> func (pbls pbLinkSlice) Less(a, b int) bool { return *pbls[a].Name < *pbls[b].Name }
>
43a52,54
> // links may not be sorted after deserialization, but we don't change
> // them until we mutate this node since we're representing the current,
> // as-serialized state
52,53c63
< c := cid.Undef
< c = next.FieldHash().Link().(cidlink.Link).Cid
---
> c := next.FieldHash().Link().(cidlink.Link).Cid
63a74,75
> // we don't set n.linksDirty because the order of the links list from
> // serialized form needs to be stable, until we start mutating the ProtoNode
66a79
> links := n.Links()
68,71c81,87
< qp.MapEntry(ma, "Links", qp.List(int64(len(n.links)), func(la ipld.ListAssembler) {
< for _, link := range n.links {
< qp.ListEntry(la, qp.Map(3, func(ma ipld.MapAssembler) {
< if link.Cid.Defined() {
---
> qp.MapEntry(ma, "Links", qp.List(int64(len(links)), func(la ipld.ListAssembler) {
> for _, link := range links {
> // it shouldn't be possible to get here with an undefined CID, but in
> // case it is we're going to drop this link from the encoded form
> // entirely
> if link.Cid.Defined() {
> qp.ListEntry(la, qp.Map(3, func(ma ipld.MapAssembler) {
73,76c89,96
< }
< qp.MapEntry(ma, "Name", qp.String(link.Name))
< qp.MapEntry(ma, "Tsize", qp.Int(int64(link.Size)))
< }))
---
> qp.MapEntry(ma, "Name", qp.String(link.Name))
> sz := int64(link.Size)
> if sz < 0 { // overflow, >MaxInt64 is almost certainly an error
> sz = 0
> }
> qp.MapEntry(ma, "Tsize", qp.Int(sz))
> }))
> }
117d136
< sort.Stable(LinkSlice(n.links)) // keep links sorted
126a146,150
> // Ensure links are sorted prior to encode, regardless of `linksDirty`. They
> // may not have come sorted if we deserialized a badly encoded form that
> // didn't have links already sorted.
> sort.Stable(pbLinkSlice(pbn.Links))
>
136,137c160,166
< sort.Stable(LinkSlice(n.links)) // keep links sorted
< if n.encoded == nil || force {
---
> if n.encoded == nil || n.linksDirty || force {
> if n.linksDirty {
> // there was a mutation involving links, make sure we sort before we build
> // and cache a `Node` form that captures the current state
> sort.Stable(LinkSlice(n.links))
> n.linksDirty = false
> }
diff -r --color a/vendor/github.com/ipfs/go-merkledag/merkledag.go b/vendor/github.com/ipfs/go-merkledag/merkledag.go
49c49,50
< // able to free some of them when vm pressure is high
---
> //
> // able to free some of them when vm pressure is high
diff -r --color a/vendor/github.com/ipfs/go-merkledag/node.go b/vendor/github.com/ipfs/go-merkledag/node.go
5a6
> "errors"
6a8,9
> "math"
> "sort"
14a18
> mhcore "github.com/multiformats/go-multihash/core"
44,45c48,50
< links []*format.Link
< data []byte
---
> links []*format.Link
> linksDirty bool
> data []byte
47c52,53
< // cache encoded/marshaled value
---
> // cache encoded/marshaled value, kept to make the go-ipld-prime Node interface
> // work (see prime.go), and to provide a cached []byte encoded form available
96,97c102,104
< // is reset to the default value
< func (n *ProtoNode) SetCidBuilder(builder cid.Builder) {
---
> // is reset to the default value. An error will be returned if the builder
> // is not usable.
> func (n *ProtoNode) SetCidBuilder(builder cid.Builder) error {
100,102c107,117
< } else {
< n.builder = builder.WithCodec(cid.DagProtobuf)
< n.cached = cid.Undef
---
> return nil
> }
> if p, ok := builder.(*cid.Prefix); ok {
> mhLen := p.MhLength
> if mhLen <= 0 {
> mhLen = -1
> }
> _, err := mhcore.GetVariableHasher(p.MhType, mhLen)
> if err != nil {
> return err
> }
103a119,121
> n.builder = builder.WithCodec(cid.DagProtobuf)
> n.cached = cid.Undef
> return nil
118c136,144
< // AddNodeLink adds a link to another node.
---
> // AddNodeLink adds a link to another node. The link will be added in
> // sorted order.
> //
> // If sorting has not already been applied to this node (because
> // it was deserialized from a form that did not have sorted links), the links
> // list will be sorted. If a ProtoNode was deserialized from a badly encoded
> // form that did not already have its links sorted, calling AddNodeLink and then
> // RemoveNodeLink for the same link, will not result in an identically encoded
> // form as the links will have been sorted.
132c158,166
< // AddRawLink adds a copy of a link to this node
---
> // AddRawLink adds a copy of a link to this node. The link will be added in
> // sorted order.
> //
> // If sorting has not already been applied to this node (because
> // it was deserialized from a form that did not have sorted links), the links
> // list will be sorted. If a ProtoNode was deserialized from a badly encoded
> // form that did not already have its links sorted, calling AddRawLink and then
> // RemoveNodeLink for the same link, will not result in an identically encoded
> // form as the links will have been sorted.
134,135c168
< n.encoded = nil
< n.links = append(n.links, &format.Link{
---
> lnk := &format.Link{
139,140c172,178
< })
<
---
> }
> if err := checkLink(lnk); err != nil {
> return err
> }
> n.links = append(n.links, lnk)
> n.linksDirty = true // needs a sort
> n.encoded = nil
144c182,184
< // RemoveNodeLink removes a link on this node by the given name.
---
> // RemoveNodeLink removes a link on this node by the given name. If there are
> // no links with this name, ErrLinkNotFound will be returned. If there are more
> // than one link with this name, they will all be removed.
146,147d185
< n.encoded = nil
<
163a202,206
> // Even though a removal won't change sorting, this node may have come from
> // a deserialized state with badly sorted links. Now that we are mutating,
> // we need to ensure the resulting link list is sorted when it gets consumed.
> n.linksDirty = true
> n.encoded = nil
207,208c250,253
< // Copy returns a copy of the node.
< // NOTE: Does not make copies of Node objects in the links.
---
> // Copy returns a copy of the node. The resulting node will have a properly
> // sorted Links list regardless of whether the original came from a badly
> // serialized form that didn't have a sorted list.
> // NOTE: This does not make copies of Node objects in the links.
217,218c262,266
< nnode.links = make([]*format.Link, len(n.links))
< copy(nnode.links, n.links)
---
> nnode.links = append([]*format.Link(nil), n.links...)
> // Sort links regardless of linksDirty state, this may have come from a
> // serialized form that had badly sorted links, in which case linksDirty
> // will not be true.
> sort.Stable(LinkSlice(nnode.links))
225a274,278
> // RawData returns the encoded byte form of this node.
> //
> // Note that this method can panic if a new encode is required and there is an
> // error performing the encode. To avoid a panic, use node.EncodeProtobuf(false)
> // instead (or prior to calling RawData) and check for its returned error value.
247c300,305
< // that. If a link of the same name existed, it is removed.
---
> // that. The link will be added in sorted order. If a link of the same name
> // existed, it is removed.
> //
> // If sorting has not already been applied to this node (because
> // it was deserialized from a form that did not have sorted links), the links
> // list will be sorted in the returned copy.
311a370,372
> // Links may not be sorted after deserialization, but we don't change
> // them until we mutate this node since we're representing the current,
> // as-serialized state. So n.linksDirty is not set here.
312a374,390
> for _, lnk := range s.Links {
> if err := checkLink(lnk); err != nil {
> return err
> }
> }
>
> n.encoded = nil
> return nil
> }
>
> func checkLink(lnk *format.Link) error {
> if lnk.Size > math.MaxInt64 {
> return fmt.Errorf("value of Tsize is too large: %d", lnk.Size)
> }
> if !lnk.Cid.Defined() {
> return errors.New("link must have a value Cid value")
> }
317a396,402
> if n.linksDirty {
> // there was a mutation involving links, make sure we sort
> sort.Stable(LinkSlice(n.links))
> n.linksDirty = false
> n.encoded = nil
> }
>
327a413,417
> //
> // Note that this method can panic if a new encode is required and there is an
> // error performing the encode. To avoid a panic, call
> // node.EncodeProtobuf(false) prior to calling Cid and check for its returned
> // error value.
329,336c419,420
< if n.encoded != nil && n.cached.Defined() {
< return n.cached
< }
<
< c, err := n.CidBuilder().Sum(n.RawData())
< if err != nil {
< // programmer error
< err = fmt.Errorf("invalid CID of length %d: %x: %v", len(n.RawData()), n.RawData(), err)
---
> // re-encode if necessary and we'll get a new cached CID
> if _, err := n.EncodeProtobuf(false); err != nil {
339,341c423
<
< n.cached = c
< return c
---
> return n.cached
344a427,431
> //
> // Note that this method can panic if a new encode is required and there is an
> // error performing the encode. To avoid a panic, call
> // node.EncodeProtobuf(false) prior to calling String and check for its returned
> // error value.
349a437,441
> //
> // Note that this method can panic if a new encode is required and there is an
> // error performing the encode. To avoid a panic, call
> // node.EncodeProtobuf(false) prior to calling Multihash and check for its
> // returned error value.
351,358c443
< // NOTE: EncodeProtobuf generates the hash and puts it in n.cached.
< _, err := n.EncodeProtobuf(false)
< if err != nil {
< // Note: no possibility exists for an error to be returned through here
< panic(err)
< }
<
< return n.cached.Hash()
---
> return n.Cid().Hash()
361c446
< // Links returns the node links.
---
> // Links returns a copy of the node's links.
363c448,454
< return n.links
---
> if n.linksDirty {
> // there was a mutation involving links, make sure we sort
> sort.Stable(LinkSlice(n.links))
> n.linksDirty = false
> n.encoded = nil
> }
> return append([]*format.Link(nil), n.links...)
366,368c457,468
< // SetLinks replaces the node links with the given ones.
< func (n *ProtoNode) SetLinks(links []*format.Link) {
< n.links = links
---
> // SetLinks replaces the node links with a copy of the provided links. Sorting
> // will be applied to the list.
> func (n *ProtoNode) SetLinks(links []*format.Link) error {
> for _, lnk := range links {
> if err := checkLink(lnk); err != nil {
> return err
> }
> }
> n.links = append([]*format.Link(nil), links...)
> n.linksDirty = true // needs a sort
> n.encoded = nil
> return nil
397a498,504
> }
>
> if n.linksDirty {
> // there was a mutation involving links, make sure we sort
> sort.Stable(LinkSlice(n.links))
> n.linksDirty = false
> n.encoded = nil
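A self-contained sketch of the mutation API changes above: SetCidBuilder now reports unusable hashers, AddNodeLink validates links, and Links() hands back a sorted copy:

    package main

    import (
        "fmt"

        "github.com/ipfs/go-cid"
        merkledag "github.com/ipfs/go-merkledag"
        mh "github.com/multiformats/go-multihash"
    )

    func main() {
        parent := merkledag.NodeWithData(nil)
        if err := parent.SetCidBuilder(cid.V1Builder{Codec: cid.DagProtobuf, MhType: mh.SHA2_256}); err != nil {
            panic(err) // e.g. an unsupported multihash would error here
        }
        for _, name := range []string{"b", "a"} { // deliberately out of order
            if err := parent.AddNodeLink(name, merkledag.NodeWithData([]byte(name))); err != nil {
                panic(err)
            }
        }
        for _, l := range parent.Links() { // sorted copy: prints a, then b
            fmt.Println(l.Name)
        }
    }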
diff -r --color a/vendor/github.com/ipfs/go-merkledag/version.json b/vendor/github.com/ipfs/go-merkledag/version.json
2c2
< "version": "v0.6.0"
---
> "version": "v0.8.0"
diff -r --color a/vendor/github.com/ipfs/go-peertaskqueue/peertask/peertask.go b/vendor/github.com/ipfs/go-peertaskqueue/peertask/peertask.go
7c7
< peer "github.com/libp2p/go-libp2p-core/peer"
---
> "github.com/libp2p/go-libp2p/core/peer"
diff -r --color a/vendor/github.com/ipfs/go-peertaskqueue/peertaskqueue.go b/vendor/github.com/ipfs/go-peertaskqueue/peertaskqueue.go
9c9
< "github.com/libp2p/go-libp2p-core/peer"
---
> "github.com/libp2p/go-libp2p/core/peer"
230,233c230,234
< // - Peers with the most "active" work are deprioritized.
< // This heuristic is for fairness, we try to keep all peers "busy".
< // - Peers with the most "pending" work are prioritized.
< // This heuristic is so that peers with a lot to do get asked for work first.
---
> // - Peers with the most "active" work are deprioritized.
> // This heuristic is for fairness, we try to keep all peers "busy".
> // - Peers with the most "pending" work are prioritized.
> // This heuristic is so that peers with a lot to do get asked for work first.
> //
diff -r --color a/vendor/github.com/ipfs/go-peertaskqueue/peertracker/peertracker.go b/vendor/github.com/ipfs/go-peertaskqueue/peertracker/peertracker.go
9c9
< peer "github.com/libp2p/go-libp2p-core/peer"
---
> "github.com/libp2p/go-libp2p/core/peer"
Only in b/vendor/github.com/ipfs/go-peertaskqueue: version.json
diff -r --color a/vendor/github.com/ipfs/go-unixfs/hamt/hamt.go b/vendor/github.com/ipfs/go-unixfs/hamt/hamt.go
231a232,251
> // SetLink sets 'name' = lnk in the HAMT, using directly the information in the
> // given link. This avoids writing the node, then reading it back to make
> // a link out of it.
> func (ds *Shard) SetLink(ctx context.Context, name string, lnk *ipld.Link) error {
> hv := newHashBits(name)
>
> newLink := ipld.Link{
> Name: lnk.Name,
> Size: lnk.Size,
> Cid: lnk.Cid,
> }
>
> // FIXME: We don't need to set the name here, it will get overwritten.
> // This is confusing, confirm and remove this line.
> newLink.Name = ds.linkNamePrefix(0) + name
>
> _, err := ds.swapValue(ctx, hv, name, &newLink)
> return err
> }
>
diff -r --color a/vendor/github.com/ipfs/go-unixfs/io/directory.go b/vendor/github.com/ipfs/go-unixfs/io/directory.go
337,342c337
< node, err := d.dserv.Get(ctx, lnk.Cid)
< if err != nil {
< return nil, err
< }
<
< err = hamtDir.shard.Set(ctx, lnk.Name, node)
---
> err = hamtDir.shard.SetLink(ctx, lnk.Name, lnk)
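A hedged sketch of the new shard API in isolation (`shard` and `lnk` assumed to be in hand; `ipld` here is go-ipld-format, matching the import above):

    // Link the child directly from its existing *ipld.Link, avoiding a
    // dserv.Get round-trip just to rebuild the link.
    if err := shard.SetLink(ctx, lnk.Name, lnk); err != nil {
        return err
    }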
diff -r --color a/vendor/github.com/ipfs/go-unixfs/version.json b/vendor/github.com/ipfs/go-unixfs/version.json
2c2
< "version": "v0.3.1"
---
> "version": "v0.4.0"
Only in b/vendor/github.com/ipfs/go-verifcid: COPYRIGHT
Only in b/vendor/github.com/ipfs/go-verifcid: LICENSE-APACHE
Only in b/vendor/github.com/ipfs/go-verifcid: LICENSE-MIT
Only in a/vendor/github.com/ipfs/go-verifcid: .travis.yml
diff -r --color a/vendor/github.com/ipfs/go-verifcid/validate.go b/vendor/github.com/ipfs/go-verifcid/validate.go
5d4
<
11c10,11
< var ErrBelowMinimumHashLength = fmt.Errorf("hashes must be at %d least bytes long", minimumHashLength)
---
> var ErrBelowMinimumHashLength = fmt.Errorf("hashes must be at least %d bytes long", minimumHashLength)
> var ErrAboveMaximumHashLength = fmt.Errorf("hashes must be at most %d bytes long", maximumHashLength)
13a14
> const maximumHashLength = 128
28c29,30
< mh.ID: true,
---
> mh.BLAKE3: true,
> mh.IDENTITY: true,
57c59
< if pref.MhType != mh.ID && pref.MhLength < minimumHashLength {
---
> if pref.MhType != mh.IDENTITY && pref.MhLength < minimumHashLength {
58a61,64
> }
>
> if pref.MhType != mh.IDENTITY && pref.MhLength > maximumHashLength {
> return ErrAboveMaximumHashLength
Only in b/vendor/github.com/ipfs/go-verifcid: version.json
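Condensed into one place, the updated go-verifcid length rule reads roughly as below. This is a sketch only: the allowed-codec check is omitted, and minimumHashLength is 20 in go-verifcid at the time of this diff.

package example

import (
	"fmt"

	"github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

const (
	minimumHashLength = 20  // as defined in go-verifcid
	maximumHashLength = 128 // new upper bound added above
)

var (
	ErrBelowMinimumHashLength = fmt.Errorf("hashes must be at least %d bytes long", minimumHashLength)
	ErrAboveMaximumHashLength = fmt.Errorf("hashes must be at most %d bytes long", maximumHashLength)
)

// checkHashLength applies the bounds above; identity ("inline") multihashes
// are exempt because their digest is the data itself, not a hash.
func checkHashLength(c cid.Cid) error {
	pref := c.Prefix()
	if pref.MhType != mh.IDENTITY && pref.MhLength < minimumHashLength {
		return ErrBelowMinimumHashLength
	}
	if pref.MhType != mh.IDENTITY && pref.MhLength > maximumHashLength {
		return ErrAboveMaximumHashLength
	}
	return nil
}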
diff -r --color a/vendor/github.com/ipld/go-car/v2/blockstore/doc.go b/vendor/github.com/ipld/go-car/v2/blockstore/doc.go
6,9c6,9
< // * ReadOnly.NewReadOnly can be used to instantiate a new read-only blockstore for a given CARv1
< // or CARv2 data payload with an optional index override.
< // * ReadOnly.OpenReadOnly can be used to instantiate a new read-only blockstore for a given CARv1
< // or CARv2 file with automatic index generation if the index is not present.
---
> // - ReadOnly.NewReadOnly can be used to instantiate a new read-only blockstore for a given CARv1
> // or CARv2 data payload with an optional index override.
> // - ReadOnly.OpenReadOnly can be used to instantiate a new read-only blockstore for a given CARv1
> // or CARv2 file with automatic index generation if the index is not present.
diff -r --color a/vendor/github.com/ipld/go-car/v2/blockstore/readonly.go b/vendor/github.com/ipld/go-car/v2/blockstore/readonly.go
220c220,226
< // This function always returns true for any given key with multihash.IDENTITY code.
---
> // This function always returns true for any given key with multihash.IDENTITY
> // code unless the StoreIdentityCIDs option is on, in which case it will defer
> // to the index to check for the existence of the block; the index may or may
> // not contain identity CIDs included in this CAR, depending on whether
> // StoreIdentityCIDs was on when the index was created. If the CAR is a CARv1
> // and StoreIdentityCIDs is on, then the index will contain identity CIDs and
> // this will always return true.
222,227c228,236
< // Check if the given CID has multihash.IDENTITY code
< // Note, we do this without locking, since there is no shared information to lock for in order to perform the check.
< if _, ok, err := isIdentity(key); err != nil {
< return false, err
< } else if ok {
< return true, nil
---
> if !b.opts.StoreIdentityCIDs {
> // If we don't store identity CIDs then we can return them straight away as if they are here,
> // otherwise we need to check for their existence.
> // Note, we do this without locking, since there is no shared information to lock for in order to perform the check.
> if _, ok, err := isIdentity(key); err != nil {
> return false, err
> } else if ok {
> return true, nil
> }
272c281,287
< // This API will always return true if the given key has multihash.IDENTITY code.
---
> // This function always returns the block for any given key with
> // multihash.IDENTITY code unless the StoreIdentityCIDs option is on, in which
> // case it will defer to the index to check for the existence of the block; the
> // index may or may not contain identity CIDs included in this CAR, depending on
> // whether StoreIdentityCIDs was on when the index was created. If the CAR is a
> // CARv1 and StoreIdentityCIDs is on, then the index will contain identity CIDs
> // and this will always return true.
274,279c289,297
< // Check if the given CID has multihash.IDENTITY code
< // Note, we do this without locking, since there is no shared information to lock for in order to perform the check.
< if digest, ok, err := isIdentity(key); err != nil {
< return nil, err
< } else if ok {
< return blocks.NewBlockWithCid(digest, key)
---
> if !b.opts.StoreIdentityCIDs {
> // If we don't store identity CIDs then we can return them straight away as if they are here,
> // otherwise we need to check for their existence.
> // Note, we do this without locking, since there is no shared information to lock for in order to perform the check.
> if digest, ok, err := isIdentity(key); err != nil {
> return nil, err
> } else if ok {
> return blocks.NewBlockWithCid(digest, key)
> }
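The identity short-circuit both branches rely on is small enough to sketch: an IDENTITY multihash carries the block bytes inline as its digest, so presence and content can be answered from the CID alone. A sketch mirroring the isIdentity helper referenced above (assumes go-cid and go-multihash):

package example

import (
	"github.com/ipfs/go-cid"
	"github.com/multiformats/go-multihash"
)

// isIdentity reports whether key uses the IDENTITY multihash and, if so,
// returns the digest, which is the block data itself; no index lookup needed.
func isIdentity(key cid.Cid) (digest []byte, ok bool, err error) {
	dmh, err := multihash.Decode(key.Hash())
	if err != nil {
		return nil, false, err
	}
	return dmh.Digest, dmh.Code == multihash.IDENTITY, nil
}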
diff -r --color a/vendor/github.com/ipld/go-car/v2/blockstore/readwrite.go b/vendor/github.com/ipld/go-car/v2/blockstore/readwrite.go
12a13,14
> "github.com/multiformats/go-varint"
>
18d19
< "github.com/multiformats/go-varint"
105a107,118
> rwbs, err := OpenReadWriteFile(f, roots, opts...)
> if err != nil {
> return nil, err
> }
> // close the file when finalizing
> rwbs.ronly.carv2Closer = rwbs.f
> return rwbs, nil
> }
>
> // OpenReadWriteFile is similar to OpenReadWrite but lets you control the file lifecycle.
> // You are responsible for closing the given file.
> func OpenReadWriteFile(f *os.File, roots []cid.Cid, opts ...carv2.Option) (*ReadWrite, error) {
148d160
< rwbs.ronly.carv2Closer = rwbs.f
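Usage-wise the split is about file ownership: OpenReadWrite closes the file when finalizing, while OpenReadWriteFile leaves that to the caller. A sketch under those assumptions (path, flags, and error handling illustrative):

package example

import (
	"os"

	"github.com/ipfs/go-cid"
	carv2 "github.com/ipld/go-car/v2"
	"github.com/ipld/go-car/v2/blockstore"
)

// writeCARToOwnedFile keeps control of the *os.File: Finalize still flushes
// the header and index, but closing f remains our job.
func writeCARToOwnedFile(path string, roots []cid.Cid, opts ...carv2.Option) error {
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0o666)
	if err != nil {
		return err
	}
	defer f.Close()

	bs, err := blockstore.OpenReadWriteFile(f, roots, opts...)
	if err != nil {
		return err
	}
	// ... Put blocks into bs here ...
	return bs.Finalize()
}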
diff -r --color a/vendor/github.com/ipld/go-car/v2/index/doc.go b/vendor/github.com/ipld/go-car/v2/index/doc.go
6d5
< //
diff -r --color a/vendor/github.com/ipld/go-car/v2/index_gen.go b/vendor/github.com/ipld/go-car/v2/index_gen.go
36a37,40
> // If the StoreIdentityCIDs option is set when calling LoadIndex, identity
> // CIDs will be included in the index. By default this option is off, and
> // identity CIDs will not be included in the index.
> //
diff -r --color a/vendor/github.com/ipld/go-car/v2/internal/carv1/car.go b/vendor/github.com/ipld/go-car/v2/internal/carv1/car.go
232,233c232,234
< // 1. They have the same version number, and
< // 2. They contain the same root CIDs in any order.
---
> // 1. They have the same version number, and
> // 2. They contain the same root CIDs in any order.
> //
diff -r --color a/vendor/github.com/ipld/go-car/v2/internal/io/converter.go b/vendor/github.com/ipld/go-car/v2/internal/io/converter.go
5d4
< "io/ioutil"
100c99
< _, err := io.CopyN(ioutil.Discard, drsb, n)
---
> _, err := io.CopyN(io.Discard, drsb, n)
103c102
< _, err := io.CopyN(ioutil.Discard, drsb, offset)
---
> _, err := io.CopyN(io.Discard, drsb, offset)
diff -r --color a/vendor/github.com/ipld/go-car/v2/internal/loader/writing_loader.go b/vendor/github.com/ipld/go-car/v2/internal/loader/writing_loader.go
90c90,92
< // included in the `.Size()` of the IndexTracker.
---
> //
> // included in the `.Size()` of the IndexTracker.
> //
diff -r --color a/vendor/github.com/ipld/go-car/v2/options.go b/vendor/github.com/ipld/go-car/v2/options.go
130,131c130,138
< // When writing CAR files with this option,
< // Characteristics.IsFullyIndexed will be set.
---
> // When writing CAR files with this option, Characteristics.IsFullyIndexed will
> // be set.
> //
> // By default, the blockstore interface will always return true for Has() called
> // with identity CIDs, but when this option is turned on, it will defer to the
> // index.
> //
> // When creating an index (or loading a CARv1 as a blockstore), when this option
> // is on, identity CIDs will be included in the index.
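In blockstore terms, the option trades the inline shortcut for real index entries. A sketch of opening a fully indexed read-write blockstore (assuming the carv2.StoreIdentityCIDs option constructor this doc comment belongs to):

package example

import (
	"github.com/ipfs/go-cid"
	carv2 "github.com/ipld/go-car/v2"
	"github.com/ipld/go-car/v2/blockstore"
)

// openFullyIndexed records identity CIDs in the index as real entries, so
// Has()/Get() consult the index instead of synthesizing answers from the CID.
func openFullyIndexed(path string, roots []cid.Cid) (*blockstore.ReadWrite, error) {
	return blockstore.OpenReadWrite(path, roots, carv2.StoreIdentityCIDs(true))
}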
diff -r --color a/vendor/github.com/ipld/go-car/v2/reader.go b/vendor/github.com/ipld/go-car/v2/reader.go
173,175c173,175
< // * Bad indexes, including incorrect offsets, duplicate entries, or other
< // faulty data. Indexes should be re-generated, regardless, if you need to use
< // them and have any reason to not trust the source.
---
> // - Bad indexes, including incorrect offsets, duplicate entries, or other
> // faulty data. Indexes should be re-generated, regardless, if you need to use
> // them and have any reason to not trust the source.
177,179c177,179
< // * Blocks use codecs that your system doesn't have access to—which may mean
< // you can't traverse a DAG or use the contained data. Stats.CodecCounts
< // contains a list of codecs found in the CAR so this can be checked.
---
> // - Blocks use codecs that your system doesn't have access to—which may mean
> // you can't traverse a DAG or use the contained data. Stats.CodecCounts
> // contains a list of codecs found in the CAR so this can be checked.
181,184c181,184
< // * CIDs use multihashes that your system doesn't have access to—which will
< // mean you can't validate block hashes are correct (using validateBlockHash
< // in this case will result in a failure). Stats.MhTypeCounts contains a
< // list of multihashes found in the CAR so this can be checked.
---
> // - CIDs use multihashes that your system doesn't have access to—which will
> // mean you can't validate block hashes are correct (using validateBlockHash
> // in this case will result in a failure). Stats.MhTypeCounts contains a
> // list of multihashes found in the CAR so this can be checked.
186,188c186,188
< // * The presence of IDENTITY CIDs, which may not be supported (or desired) by
< // the consumer of the CAR. Stats.CodecCounts can determine the presence
< // of IDENTITY CIDs.
---
> // - The presence of IDENTITY CIDs, which may not be supported (or desired) by
> // the consumer of the CAR. Stats.CodecCounts can determine the presence
> // of IDENTITY CIDs.
190,192c190,192
< // * Roots: the number of roots, duplicates, and whether they are related to the
< // blocks contained within the CAR. Stats contains a list of Roots and a
< // RootsPresent bool so further checks can be performed.
---
> // - Roots: the number of roots, duplicates, and whether they are related to the
> // blocks contained within the CAR. Stats contains a list of Roots and a
> // RootsPresent bool so further checks can be performed.
194,195c194,195
< // * DAG completeness is not checked. Any properties relating to the DAG, or
< // DAGs contained within a CAR are the responsibility of the user to check.
---
> // - DAG completeness is not checked. Any properties relating to the DAG, or
> // DAGs contained within a CAR are the responsibility of the user to check.
342c342
< idx, err := index.ReadFrom(idxr)
---
> stats.IndexCodec, err = index.ReadCodec(idxr)
346d345
< stats.IndexCodec = idx.Codec()
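The checklist above documents Reader.Inspect, which reports these conditions through Stats rather than failing on them. A minimal pass over a CAR file might look like the sketch below (Stats field names as of this version; treat as an illustration, not the canonical usage):

package example

import (
	"fmt"

	carv2 "github.com/ipld/go-car/v2"
)

// inspectCAR walks the CAR once; with validateBlockHash=true every block is
// re-hashed against its CID. Everything else is left for the caller to judge.
func inspectCAR(path string) error {
	rd, err := carv2.OpenReader(path)
	if err != nil {
		return err
	}
	defer rd.Close()

	stats, err := rd.Inspect(true)
	if err != nil {
		return err
	}
	fmt.Printf("version=%d blocks=%d codecs=%v rootsPresent=%v index=%v\n",
		stats.Version, stats.BlockCount, stats.CodecCounts, stats.RootsPresent, stats.IndexCodec)
	return nil
}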
diff -r --color a/vendor/github.com/ipld/go-car/v2/selective.go b/vendor/github.com/ipld/go-car/v2/selective.go
7d6
< "io/ioutil"
279c278
< _, err = io.Copy(ioutil.Discard, s)
---
> _, err = io.Copy(io.Discard, s)
diff -r --color a/vendor/github.com/ipld/go-codec-dagpb/ipldsch_types.go b/vendor/github.com/ipld/go-codec-dagpb/ipldsch_types.go
14c14
< // dagpb.Type.YourTypeName.NewBuilder().BeginMap() //...
---
> // dagpb.Type.YourTypeName.NewBuilder().BeginMap() //...
18,19c18
< // dagpb.Type.OtherTypeName.NewBuilder().AssignString("x") // ...
< //
---
> // dagpb.Type.OtherTypeName.NewBuilder().AssignString("x") // ...
diff -r --color a/vendor/github.com/ipld/go-codec-dagpb/marshal.go b/vendor/github.com/ipld/go-codec-dagpb/marshal.go
78,80d77
< if err != nil {
< return enc, err
< }
diff -r --color a/vendor/github.com/ipld/go-codec-dagpb/unmarshal.go b/vendor/github.com/ipld/go-codec-dagpb/unmarshal.go
6d5
< "io/ioutil"
30c29
< src, err = ioutil.ReadAll(in)
---
> src, err = io.ReadAll(in)
Only in b/vendor/github.com/ipld/go-codec-dagpb: version.json
Only in b/vendor/github.com/ipld: go-ipld-adl-hamt
diff -r --color a/vendor/github.com/ipld/go-ipld-prime/CHANGELOG.md b/vendor/github.com/ipld/go-ipld-prime/CHANGELOG.md
22,24c22,23
< - **Bindnode improvements**: Candidates for merge and release include:
< - Custom Go type to IPLD kind conversion `Option`s: https://github.com/ipld/go-ipld-prime/pull/414
< - Type registry for easier local maintenance of type to `Type` & `TypedPrototype` mappings and utilities for dealing with them: https://github.com/ipld/go-ipld-prime/pull/437
---
> - **IPLD Amend**: likely to land soon; it implements a more efficient underlying architecture to support IPLD Patch and related features. IPLD Amend adds an interface for making incremental changes to `Node`s in an efficient way, whereas IPLD Patch is a protocol for expressing changes. We're still working out exactly where it fits in the stack and making sure it won't be disruptive, but early benchmarks are very promising for both Patch and traversal-based transforms. See https://github.com/ipld/go-ipld-prime/pull/445 for more.
> - **Layered `Node` implementation optimizations**: When layering different implementations of `Node` builders or consumers, having to defer through basicnode types can lead to large inefficiencies of memory and speed. We are looking at ways to improve this situation, including ways to *assemble* layered assemblers. See https://github.com/ipld/go-ipld-prime/issues/443 for discussion and some initial plans.
29a29,46
>
> ### v0.18.0
>
> _2022 August 01_
>
> go-ipld-prime's release policy says that:
>
> > even numbers should be easy upgrades; odd numbers may change things
>
> So, as an even number, this v0.18.0 release should be a smooth ride for upgraders from v0.17.0. We have 3 major feature additions, all focused on [Bindnode](https://pkg.go.dev/github.com/ipld/go-ipld-prime/node/bindnode).
>
> #### 🔦 Highlights
>
> * **Bindnode**: [Custom Go type converters](https://github.com/ipld/go-ipld-prime/pull/414) - Bindnode performs bidirectional mapping of Go types to the IPLD Data Model, and in doing so, it assumes a straightforward mapping of values to their encoded forms. But there are common cases where a Go type doesn't have a straightforward path to serialization, either because the encoded form needs a custom layout, or because bindnode doesn't have enough information to infer a serialization pattern. Custom Go type converters for bindnode allow a user to supply a pair of converter functions for a Go type that dictate how to map that type to an IPLD Data Model kind. See the **[bindnode documentation](https://pkg.go.dev/github.com/ipld/go-ipld-prime/node/bindnode)** for more information.
> * **Bindnode**: [Type registry](https://github.com/ipld/go-ipld-prime/pull/437) - Setting up Go type mappings with Bindnode involves some boilerplate. A basic type registry is now available that takes some of this boilerplate away, giving you a single place to register Go types and to convert between them, Data Model (`Node`) forms, and serialized forms. See the **[bindnode/registry documentation](https://pkg.go.dev/github.com/ipld/go-ipld-prime/node/bindnode/registry)** for more information.
> * **Bindnode**: [Full `uint64` support](https://github.com/ipld/go-ipld-prime/pull/414/commits/87211682cb963ef1c98fa63909f67a8b02d1108c) - the `uint64` support introduced in go-ipld-prime v0.17.0 has been wired into Bindnode. The Data Model (`Node`) forms expose integers as `int64` values, which is lossy for unsigned 64-bit integers. Bindnode Go types using `uint64` values are now lossless in round-trips through serialization to codecs that support the full range (DAG-CBOR most notably).
>
> You can see all of these new features in action using Filecoin Go types, allowing a mapping between Go types, Data Model (`Node`) forms, and their DAG-CBOR serialized forms with [data-transfer vouchers](https://github.com/filecoin-project/go-fil-markets/pull/713). These features also allow us to interact with the original Go types, without modification, including `big.Int` serialization to `Bytes`, Filecoin `Signature` serialization to a byte-prefix discriminated `Bytes` and more. Since the Go types are unchanged, they can also simultaneously support [cbor-gen](https://github.com/whyrusleeping/cbor-gen) serialization, allowing an easier migration path.
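The big.Int case above makes a concrete sketch of the converter option: pair a from-Bytes and a to-Bytes function for the Go type and hand them to the prototype. Schema, type names, and converter bodies here are illustrative (note big.Int's sign is not preserved by Bytes()); the Option API itself appears in the api.go diff further down:

package example

import (
	"math/big"

	"github.com/ipld/go-ipld-prime"
	"github.com/ipld/go-ipld-prime/node/bindnode"
)

type Deal struct {
	Price *big.Int // serialized as IPLD Bytes rather than Int
}

var dealSchema = []byte(`
type Deal struct {
	Price Bytes
}
`)

func dealPrototype() (ipld.NodePrototype, error) {
	ts, err := ipld.LoadSchemaBytes(dealSchema)
	if err != nil {
		return nil, err
	}
	priceConverter := bindnode.TypedBytesConverter(
		(*big.Int)(nil),
		func(b []byte) (interface{}, error) { return new(big.Int).SetBytes(b), nil },
		func(v interface{}) ([]byte, error) { return v.(*big.Int).Bytes(), nil },
	)
	return bindnode.Prototype((*Deal)(nil), ts.TypeByName("Deal"), priceConverter), nil
}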
Only in b/vendor/github.com/ipld/go-ipld-prime: HACKME_releases.md
diff -r --color a/vendor/github.com/ipld/go-ipld-prime/node/bindnode/api.go b/vendor/github.com/ipld/go-ipld-prime/node/bindnode/api.go
9a10
> "github.com/ipfs/go-cid"
30c31
< func Prototype(ptrType interface{}, schemaType schema.Type) schema.TypedPrototype {
---
> func Prototype(ptrType interface{}, schemaType schema.Type, options ...Option) schema.TypedPrototype {
34a36,37
> cfg := applyOptions(options...)
>
53c56
< verifyCompatibility(make(map[seenEntry]bool), goType, schemaType)
---
> verifyCompatibility(cfg, make(map[seenEntry]bool), goType, schemaType)
57c60,266
< return &_prototype{schemaType: schemaType, goType: goType}
---
> return &_prototype{cfg: cfg, schemaType: schemaType, goType: goType}
> }
>
> type converter struct {
> kind schema.TypeKind
>
> customFromBool func(bool) (interface{}, error)
> customToBool func(interface{}) (bool, error)
>
> customFromInt func(int64) (interface{}, error)
> customToInt func(interface{}) (int64, error)
>
> customFromFloat func(float64) (interface{}, error)
> customToFloat func(interface{}) (float64, error)
>
> customFromString func(string) (interface{}, error)
> customToString func(interface{}) (string, error)
>
> customFromBytes func([]byte) (interface{}, error)
> customToBytes func(interface{}) ([]byte, error)
>
> customFromLink func(cid.Cid) (interface{}, error)
> customToLink func(interface{}) (cid.Cid, error)
>
> customFromAny func(datamodel.Node) (interface{}, error)
> customToAny func(interface{}) (datamodel.Node, error)
> }
>
> type config map[reflect.Type]*converter
>
> // this mainly exists to short-circuit the nonPtrType() call; the `Type()` variant
> // exists for completeness
> func (c config) converterFor(val reflect.Value) *converter {
> if len(c) == 0 {
> return nil
> }
> return c[nonPtrType(val)]
> }
>
> func (c config) converterForType(typ reflect.Type) *converter {
> if len(c) == 0 {
> return nil
> }
> return c[typ]
> }
>
> // Option is able to apply custom options to the bindnode API
> type Option func(config)
>
> // TypedBoolConverter adds custom converter functions for a particular
> // type as identified by a pointer in the first argument.
> // The fromFunc is of the form: func(bool) (interface{}, error)
> // and toFunc is of the form: func(interface{}) (bool, error)
> // where interface{} is a pointer form of the type we are converting.
> //
> // TypedBoolConverter is an EXPERIMENTAL API and may be removed or
> // changed in a future release.
> func TypedBoolConverter(ptrVal interface{}, from func(bool) (interface{}, error), to func(interface{}) (bool, error)) Option {
> customType := nonPtrType(reflect.ValueOf(ptrVal))
> converter := &converter{
> kind: schema.TypeKind_Bool,
> customFromBool: from,
> customToBool: to,
> }
> return func(cfg config) {
> cfg[customType] = converter
> }
> }
>
> // TypedIntConverter adds custom converter functions for a particular
> // type as identified by a pointer in the first argument.
> // The fromFunc is of the form: func(int64) (interface{}, error)
> // and toFunc is of the form: func(interface{}) (int64, error)
> // where interface{} is a pointer form of the type we are converting.
> //
> // TypedIntConverter is an EXPERIMENTAL API and may be removed or
> // changed in a future release.
> func TypedIntConverter(ptrVal interface{}, from func(int64) (interface{}, error), to func(interface{}) (int64, error)) Option {
> customType := nonPtrType(reflect.ValueOf(ptrVal))
> converter := &converter{
> kind: schema.TypeKind_Int,
> customFromInt: from,
> customToInt: to,
> }
> return func(cfg config) {
> cfg[customType] = converter
> }
> }
>
> // TypedFloatConverter adds custom converter functions for a particular
> // type as identified by a pointer in the first argument.
> // The fromFunc is of the form: func(float64) (interface{}, error)
> // and toFunc is of the form: func(interface{}) (float64, error)
> // where interface{} is a pointer form of the type we are converting.
> //
> // TypedFloatConverter is an EXPERIMENTAL API and may be removed or
> // changed in a future release.
> func TypedFloatConverter(ptrVal interface{}, from func(float64) (interface{}, error), to func(interface{}) (float64, error)) Option {
> customType := nonPtrType(reflect.ValueOf(ptrVal))
> converter := &converter{
> kind: schema.TypeKind_Float,
> customFromFloat: from,
> customToFloat: to,
> }
> return func(cfg config) {
> cfg[customType] = converter
> }
> }
>
> // TypedStringConverter adds custom converter functions for a particular
> // type as identified by a pointer in the first argument.
> // The fromFunc is of the form: func(string) (interface{}, error)
> // and toFunc is of the form: func(interface{}) (string, error)
> // where interface{} is a pointer form of the type we are converting.
> //
> // TypedStringConverter is an EXPERIMENTAL API and may be removed or
> // changed in a future release.
> func TypedStringConverter(ptrVal interface{}, from func(string) (interface{}, error), to func(interface{}) (string, error)) Option {
> customType := nonPtrType(reflect.ValueOf(ptrVal))
> converter := &converter{
> kind: schema.TypeKind_String,
> customFromString: from,
> customToString: to,
> }
> return func(cfg config) {
> cfg[customType] = converter
> }
> }
>
> // TypedBytesConverter adds custom converter functions for a particular
> // type as identified by a pointer in the first argument.
> // The fromFunc is of the form: func([]byte) (interface{}, error)
> // and toFunc is of the form: func(interface{}) ([]byte, error)
> // where interface{} is a pointer form of the type we are converting.
> //
> // TypedBytesConverter is an EXPERIMENTAL API and may be removed or
> // changed in a future release.
> func TypedBytesConverter(ptrVal interface{}, from func([]byte) (interface{}, error), to func(interface{}) ([]byte, error)) Option {
> customType := nonPtrType(reflect.ValueOf(ptrVal))
> converter := &converter{
> kind: schema.TypeKind_Bytes,
> customFromBytes: from,
> customToBytes: to,
> }
> return func(cfg config) {
> cfg[customType] = converter
> }
> }
>
> // TypedLinkConverter adds custom converter functions for a particular
> // type as identified by a pointer in the first argument.
> // The fromFunc is of the form: func(cid.Cid) (interface{}, error)
> // and toFunc is of the form: func(interface{}) (cid.Cid, error)
> // where interface{} is a pointer form of the type we are converting.
> //
> // Beware that this API is only compatible with cidlink.Link types in the data
> // model and may result in errors if attempting to convert from other
> // datamodel.Link types.
> //
> // TypedLinkConverter is an EXPERIMENTAL API and may be removed or
> // changed in a future release.
> func TypedLinkConverter(ptrVal interface{}, from func(cid.Cid) (interface{}, error), to func(interface{}) (cid.Cid, error)) Option {
> customType := nonPtrType(reflect.ValueOf(ptrVal))
> converter := &converter{
> kind: schema.TypeKind_Link,
> customFromLink: from,
> customToLink: to,
> }
> return func(cfg config) {
> cfg[customType] = converter
> }
> }
>
> // TypedAnyConverter adds custom converter functions for a particular
> // type as identified by a pointer in the first argument.
> // The fromFunc is of the form: func(datamodel.Node) (interface{}, error)
> // and toFunc is of the form: func(interface{}) (datamodel.Node, error)
> // where interface{} is a pointer form of the type we are converting.
> //
> // This method should be able to deal with all forms of Any and return an error
> // if the data forms encountered don't match what is expected.
> //
> // TypedAnyConverter is an EXPERIMENTAL API and may be removed or
> // changed in a future release.
> func TypedAnyConverter(ptrVal interface{}, from func(datamodel.Node) (interface{}, error), to func(interface{}) (datamodel.Node, error)) Option {
> customType := nonPtrType(reflect.ValueOf(ptrVal))
> converter := &converter{
> kind: schema.TypeKind_Any,
> customFromAny: from,
> customToAny: to,
> }
> return func(cfg config) {
> cfg[customType] = converter
> }
> }
>
> func applyOptions(opt ...Option) config {
> if len(opt) == 0 {
> // no need to allocate, we access it via converterFor and converterForType
> // which are safe for nil maps
> return nil
> }
> cfg := make(map[reflect.Type]*converter)
> for _, o := range opt {
> o(cfg)
> }
> return cfg
68c277
< func Wrap(ptrVal interface{}, schemaType schema.Type) schema.TypedNode {
---
> func Wrap(ptrVal interface{}, schemaType schema.Type, options ...Option) schema.TypedNode {
79a289
> cfg := applyOptions(options...)
87c297,302
< verifyCompatibility(make(map[seenEntry]bool), goVal.Type(), schemaType)
---
> // TODO(rvagg): explore ways to make this skippable by caching in the schema.Type
> // passed in to this function; e.g. if you call Prototype(), then you've gone through
> // this already, then calling .Type() on that could return a bindnode version of
> // schema.Type that has the config cached and can be assumed to have been checked or
> // inferred.
> verifyCompatibility(cfg, make(map[seenEntry]bool), goVal.Type(), schemaType)
89c304
< return &_node{val: goVal, schemaType: schemaType}
---
> return newNode(cfg, schemaType, goVal)
diff -r --color a/vendor/github.com/ipld/go-ipld-prime/node/bindnode/infer.go b/vendor/github.com/ipld/go-ipld-prime/node/bindnode/infer.go
42c42,47
< func verifyCompatibility(seen map[seenEntry]bool, goType reflect.Type, schemaType schema.Type) {
---
> // verifyCompatibility is the primary way we check that the schema type(s)
> // matches the Go type(s); so we do this before we can proceed operating on it.
> // verifyCompatibility doesn't return an error; it panics. The errors here are
> // not runtime errors, they're programmer errors: your schema doesn't
> // match your Go type
> func verifyCompatibility(cfg config, seen map[seenEntry]bool, goType reflect.Type, schemaType schema.Type) {
69c74,78
< if goType.Kind() != reflect.Bool {
---
> if customConverter := cfg.converterForType(goType); customConverter != nil {
> if customConverter.kind != schema.TypeKind_Bool {
> doPanic("kind mismatch; custom converter for type is not for Bool")
> }
> } else if goType.Kind() != reflect.Bool {
73c82,86
< if kind := goType.Kind(); !kindInt[kind] && !kindUint[kind] {
---
> if customConverter := cfg.converterForType(goType); customConverter != nil {
> if customConverter.kind != schema.TypeKind_Int {
> doPanic("kind mismatch; custom converter for type is not for Int")
> }
> } else if kind := goType.Kind(); !kindInt[kind] && !kindUint[kind] {
77,80c90,99
< switch goType.Kind() {
< case reflect.Float32, reflect.Float64:
< default:
< doPanic("kind mismatch; need float")
---
> if customConverter := cfg.converterForType(goType); customConverter != nil {
> if customConverter.kind != schema.TypeKind_Float {
> doPanic("kind mismatch; custom converter for type is not for Float")
> }
> } else {
> switch goType.Kind() {
> case reflect.Float32, reflect.Float64:
> default:
> doPanic("kind mismatch; need float")
> }
84c103,107
< if goType.Kind() != reflect.String {
---
> if customConverter := cfg.converterForType(goType); customConverter != nil {
> if customConverter.kind != schema.TypeKind_String {
> doPanic("kind mismatch; custom converter for type is not for String")
> }
> } else if goType.Kind() != reflect.String {
89c112,116
< if goType.Kind() != reflect.Slice {
---
> if customConverter := cfg.converterForType(goType); customConverter != nil {
> if customConverter.kind != schema.TypeKind_Bytes {
> doPanic("kind mismatch; custom converter for type is not for Bytes")
> }
> } else if goType.Kind() != reflect.Slice {
91,92c118
< }
< if goType.Elem().Kind() != reflect.Uint8 {
---
> } else if goType.Elem().Kind() != reflect.Uint8 {
117c143
< verifyCompatibility(seen, goType, schemaType.ValueType())
---
> verifyCompatibility(cfg, seen, goType, schemaType.ValueType())
134c160
< verifyCompatibility(seen, fieldKeys.Type.Elem(), schemaType.KeyType())
---
> verifyCompatibility(cfg, seen, fieldKeys.Type.Elem(), schemaType.KeyType())
141c167
< verifyCompatibility(seen, keyType, schemaType.KeyType())
---
> verifyCompatibility(cfg, seen, keyType, schemaType.KeyType())
151c177
< verifyCompatibility(seen, elemType, schemaType.ValueType())
---
> verifyCompatibility(cfg, seen, elemType, schemaType.ValueType())
168a195
> // TODO: deal with custom converters in this case
185c212,214
< doPanic("nullable fields must be nilable")
---
> if customConverter := cfg.converterForType(goType); customConverter == nil {
> doPanic("nullable fields must be nilable")
> }
190c219
< verifyCompatibility(seen, goType, schemaType)
---
> verifyCompatibility(cfg, seen, goType, schemaType)
209c238
< verifyCompatibility(seen, goType, schemaType)
---
> verifyCompatibility(cfg, seen, goType, schemaType)
212c241,245
< if goType != goTypeLink && goType != goTypeCidLink && goType != goTypeCid {
---
> if customConverter := cfg.converterForType(goType); customConverter != nil {
> if customConverter.kind != schema.TypeKind_Link {
> doPanic("kind mismatch; custom converter for type is not for Link")
> }
> } else if goType != goTypeLink && goType != goTypeCidLink && goType != goTypeCid {
216,217c249,253
< // TODO: support some other option for Any, such as deferred decode
< if goType != goTypeNode {
---
> if customConverter := cfg.converterForType(goType); customConverter != nil {
> if customConverter.kind != schema.TypeKind_Any {
> doPanic("kind mismatch; custom converter for type is not for Any")
> }
> } else if goType != goTypeNode {
249a286
> // inferGoType can build a Go type given a schema
378a416
> // inferSchema can build a schema from a Go type
diff -r --color a/vendor/github.com/ipld/go-ipld-prime/node/bindnode/node.go b/vendor/github.com/ipld/go-ipld-prime/node/bindnode/node.go
4a5
> "math"
12a14
> "github.com/ipld/go-ipld-prime/node/mixins"
26a29,34
> _ datamodel.Node = (*_uintNode)(nil)
> _ schema.TypedNode = (*_uintNode)(nil)
> _ datamodel.UintNode = (*_uintNode)(nil)
> _ datamodel.Node = (*_uintNodeRepr)(nil)
> _ datamodel.UintNode = (*_uintNodeRepr)(nil)
>
48a57
> cfg config
54a64
> cfg: w.cfg,
68a79
> cfg config
78a90,103
> func newNode(cfg config, schemaType schema.Type, val reflect.Value) schema.TypedNode {
> if schemaType.TypeKind() == schema.TypeKind_Int && nonPtrVal(val).Kind() == reflect.Uint64 {
> // special case for uint64 values so we can handle the >int64 range
> // we give this treatment to all uint64s, regardless of current value
> // because we have no guarantees the value won't change underneath us
> return &_uintNode{
> cfg: cfg,
> schemaType: schemaType,
> val: val,
> }
> }
> return &_node{cfg, schemaType, val}
> }
>
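The practical effect: a uint64 field above math.MaxInt64 is still readable through the Node interface, via the datamodel.UintNode escape hatch rather than AsInt. A sketch (schema inline, value illustrative; assumes the datamodel.UintNode interface from the ipld-prime datamodel package):

package example

import (
	"fmt"

	"github.com/ipld/go-ipld-prime"
	"github.com/ipld/go-ipld-prime/datamodel"
	"github.com/ipld/go-ipld-prime/node/bindnode"
)

type Counter struct {
	Hits uint64
}

func readHits() error {
	ts, err := ipld.LoadSchemaBytes([]byte(`
type Counter struct {
	Hits Int
}
`))
	if err != nil {
		return err
	}
	n := bindnode.Wrap(&Counter{Hits: 1 << 63}, ts.TypeByName("Counter"))
	hits, err := n.LookupByString("Hits")
	if err != nil {
		return err
	}
	// AsInt() would fail with the integer-overflow error added later in this
	// diff; the UintNode interface keeps the full unsigned value.
	if u, ok := hits.(datamodel.UintNode); ok {
		v, err := u.AsUint()
		fmt.Println(v, err) // 9223372036854775808 <nil>
	}
	return nil
}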
90a116,117
> // matching schema level types to data model kinds, since our Node and Builder
> // interfaces operate on kinds
96c123
< actual := actualKind(sch)
---
> actual := actualKind(sch) // ActsLike data model
99a127,128
>
> // Error
108d136
<
133a162,186
> func ptrVal(val reflect.Value) reflect.Value {
> if val.Kind() == reflect.Ptr {
> return val
> }
> return val.Addr()
> }
>
> func nonPtrType(val reflect.Value) reflect.Type {
> typ := val.Type()
> if typ.Kind() == reflect.Ptr {
> return typ.Elem()
> }
> return typ
> }
>
> // where we need to call Set(), ensure the Value we're setting is a pointer or
> // not, depending on the field we're setting into.
> func matchSettable(val interface{}, to reflect.Value) reflect.Value {
> setVal := nonPtrVal(reflect.ValueOf(val))
> if !setVal.Type().AssignableTo(to.Type()) && setVal.Type().ConvertibleTo(to.Type()) {
> setVal = setVal.Convert(to.Type())
> }
> return setVal
> }
>
164a218,222
> if customConverter := w.cfg.converterFor(fval); customConverter != nil {
> // field is an Any and we have a custom type converter for the type
> return customConverter.customToAny(ptrVal(fval).Interface())
> }
> // field is an Any, safely assume a Node in fval
167,171c225
< node := &_node{
< schemaType: field.Type(),
< val: fval,
< }
< return node, nil
---
> return newNode(w.cfg, field.Type(), fval), nil
172a227
> // maps can only be structs with a Values map
176a232
> // plain String keys, so safely use the map key as is
178a235,237
> // key is something other than a string that we need to assemble via
> // the string representation form; use _assemblerRepr to reverse from
> // string to the type that indexes the map
179a239
> cfg: w.cfg,
202a263,267
> if customConverter := w.cfg.converterFor(fval); customConverter != nil {
> // value is an Any and we have a custom type converter for the type
> return customConverter.customToAny(ptrVal(fval).Interface())
> }
> // value is an Any, safely assume a Node in fval
205,209c270
< node := &_node{
< schemaType: typ.ValueType(),
< val: fval,
< }
< return node, nil
---
> return newNode(w.cfg, typ.ValueType(), fval), nil
210a272,273
> // treat a union similar to a struct, but we have the member names more
> // easily accessible to match to 'key'
228,232c291
< node := &_node{
< schemaType: mtyp,
< val: mval,
< }
< return node, nil
---
> return newNode(w.cfg, mtyp, mval), nil
271a331
> // we should be able to assume that val is something we can Len() and Index()
275a336,343
> _, isAny := typ.ValueType().(*schema.TypeAny)
> if isAny {
> if customConverter := w.cfg.converterFor(val); customConverter != nil {
> // values are Any and we have a converter for this type that will give us
> // a datamodel.Node
> return customConverter.customToAny(ptrVal(val).Interface())
> }
> }
279a348
> // nullable elements are assumed to be pointers
282c351,352
< if _, ok := typ.ValueType().(*schema.TypeAny); ok {
---
> if isAny {
> // Any always yields a plain datamodel.Node
285c355
< return &_node{schemaType: typ.ValueType(), val: val}, nil
---
> return newNode(w.cfg, typ.ValueType(), val), nil
338a409,411
> // structs, unions and maps can all iterate but they each have different
> // access semantics for the underlying type, so we need a different iterator
> // for each
341a415
> cfg: w.cfg,
347a422
> cfg: w.cfg,
352a428
> // we can assume a: struct{Keys []string, Values map[x]y}
353a430
> cfg: w.cfg,
366c443
< return &_listIterator{schemaType: typ, val: val}
---
> return &_listIterator{cfg: w.cfg, schemaType: typ, val: val}
397a475,479
> // The AsX methods are matter of fetching the non-pointer form of the underlying
> // value and returning the appropriate Go type. The user may have registered
> // custom converters for the kind being converted, in which case the underlying
> // type may not be the type we need, but the converter will supply it for us.
>
401a484,487
> if customConverter := w.cfg.converterFor(w.val); customConverter != nil {
> // user has registered a converter that takes the underlying type and returns a bool
> return customConverter.customToBool(ptrVal(w.val).Interface())
> }
408a495,498
> if customConverter := w.cfg.converterFor(w.val); customConverter != nil {
> // user has registered a converter that takes the underlying type and returns an int
> return customConverter.customToInt(ptrVal(w.val).Interface())
> }
411,412c501,505
< // TODO: check for overflow
< return int64(val.Uint()), nil
---
> u := val.Uint()
> if u > math.MaxInt64 {
> return 0, fmt.Errorf("bindnode: integer overflow, %d is too large for an int64", u)
> }
> return int64(u), nil
420a514,517
> if customConverter := w.cfg.converterFor(w.val); customConverter != nil {
> // user has registered a converter that takes the underlying type and returns a float
> return customConverter.customToFloat(ptrVal(w.val).Interface())
> }
427a525,528
> if customConverter := w.cfg.converterFor(w.val); customConverter != nil {
> // user has registered a converter that takes the underlying type and returns a string
> return customConverter.customToString(ptrVal(w.val).Interface())
> }
434a536,539
> if customConverter := w.cfg.converterFor(w.val); customConverter != nil {
> // user has registered a converter that takes the underlying type and returns a []byte
> return customConverter.customToBytes(ptrVal(w.val).Interface())
> }
441a547,554
> if customConverter := w.cfg.converterFor(w.val); customConverter != nil {
> // user has registered a converter that takes the underlying type and returns a cid.Cid
> cid, err := customConverter.customToLink(ptrVal(w.val).Interface())
> if err != nil {
> return nil, err
> }
> return cidlink.Link{Cid: cid}, nil
> }
453c566
< return &_prototype{schemaType: w.schemaType, goType: w.val.Type()}
---
> return &_prototype{cfg: w.cfg, schemaType: w.schemaType, goType: w.val.Type()}
462c575
< return &_node{schemaType: w.schemaType, val: w.val}
---
> return newNode(w.cfg, w.schemaType, w.val)
469a583
> cfg config
481a596
> // createNonPtrVal is used for Set() operations on the underlying value
503a619,620
> // basicMapAssembler is for assembling basicnode values; its only use is for
> // Any fields that end up needing a BeginMap()
507,508c624,626
< builder datamodel.NodeBuilder
< parent *_assembler
---
> builder datamodel.NodeBuilder
> parent *_assembler
> converter *converter
516c634,645
< w.parent.createNonPtrVal().Set(reflect.ValueOf(basicNode))
---
> if w.converter != nil {
> // we can assume an Any converter because basicMapAssembler is only for Any.
> // The user has registered the ability to convert a datamodel.Node to the
> // underlying Go type, which may not be a datamodel.Node
> typ, err := w.converter.customFromAny(basicNode)
> if err != nil {
> return err
> }
> w.parent.createNonPtrVal().Set(matchSettable(typ, reflect.ValueOf(basicNode)))
> } else {
> w.parent.createNonPtrVal().Set(reflect.ValueOf(basicNode))
> }
533c662,663
< return &basicMapAssembler{MapAssembler: mapAsm, builder: basicBuilder, parent: w}, nil
---
> converter := w.cfg.converterFor(w.val)
> return &basicMapAssembler{MapAssembler: mapAsm, builder: basicBuilder, parent: w, converter: converter}, nil
535a666,669
> // _structAssembler walks through the fields in order as the entries are
> // assembled, verifyCompatibility() should mean it's safe to assume that
> // they match the schema, but we need to keep track of the fields that are
> // set in case of premature Finish()
537a672
> cfg: w.cfg,
543a679,680
> // assume a struct{Keys []string, Values map[x]y} that we can fill with
> // _mapAssembler
550a688
> cfg: w.cfg,
556a695,696
> // we can use _unionAssembler to assemble a union as if it were a map with
> // a single entry
558a699
> cfg: w.cfg,
571a713,714
> // basicListAssembler is for assembling basicnode values; its only use is for
> // Any fields that end up needing a BeginList()
575,576c718,720
< builder datamodel.NodeBuilder
< parent *_assembler
---
> builder datamodel.NodeBuilder
> parent *_assembler
> converter *converter
584c728,739
< w.parent.createNonPtrVal().Set(reflect.ValueOf(basicNode))
---
> if w.converter != nil {
> // we can assume an Any converter because basicListAssembler is only for Any.
> // The user has registered the ability to convert a datamodel.Node to the
> // underlying Go type, which may not be a datamodel.Node
> typ, err := w.converter.customFromAny(basicNode)
> if err != nil {
> return err
> }
> w.parent.createNonPtrVal().Set(matchSettable(typ, reflect.ValueOf(basicNode)))
> } else {
> w.parent.createNonPtrVal().Set(reflect.ValueOf(basicNode))
> }
601c756,757
< return &basicListAssembler{ListAssembler: listAsm, builder: basicBuilder, parent: w}, nil
---
> converter := w.cfg.converterFor(w.val)
> return &basicListAssembler{ListAssembler: listAsm, builder: basicBuilder, parent: w, converter: converter}, nil
602a759,760
> // we should be able to safely assume we're dealing with a Go slice here,
> // so _listAssembler can append to that
604a763
> cfg: w.cfg,
619,623c778,784
< if !w.nullable {
< return datamodel.ErrWrongKind{
< TypeName: w.schemaType.Name(),
< MethodName: "AssignNull",
< // TODO
---
> _, isAny := w.schemaType.(*schema.TypeAny)
> if customConverter := w.cfg.converterFor(w.val); customConverter != nil && isAny {
> // when an Any field is being assigned a Null, we pass the Null directly to
> // the converter, regardless of whether this field is nullable or not
> typ, err := customConverter.customFromAny(datamodel.Null)
> if err != nil {
> return err
624a786,796
> w.createNonPtrVal().Set(matchSettable(typ, w.val))
> } else {
> if !w.nullable {
> return datamodel.ErrWrongKind{
> TypeName: w.schemaType.Name(),
> MethodName: "AssignNull",
> // TODO
> }
> }
> // set the zero value for the underlying type as a stand-in for Null
> w.val.Set(reflect.Zero(w.val.Type()))
626d797
< w.val.Set(reflect.Zero(w.val.Type()))
639,640c810,828
< if _, ok := w.schemaType.(*schema.TypeAny); ok {
< w.createNonPtrVal().Set(reflect.ValueOf(basicnode.NewBool(b)))
---
> customConverter := w.cfg.converterFor(w.val)
> _, isAny := w.schemaType.(*schema.TypeAny)
> if customConverter != nil {
> var typ interface{}
> var err error
> if isAny {
> // field is an Any, so the converter will be an Any converter that wants
> // a datamodel.Node to convert to whatever the underlying Go type is
> if typ, err = customConverter.customFromAny(basicnode.NewBool(b)); err != nil {
> return err
> }
> } else {
> // field is a Bool, but the user has registered a converter from a bool to
> // whatever the underlying Go type is
> if typ, err = customConverter.customFromBool(b); err != nil {
> return err
> }
> }
> w.createNonPtrVal().Set(matchSettable(typ, w.val))
642c830,864
< w.createNonPtrVal().SetBool(b)
---
> if isAny {
> // Any means the Go type must receive a datamodel.Node
> w.createNonPtrVal().Set(reflect.ValueOf(basicnode.NewBool(b)))
> } else {
> w.createNonPtrVal().SetBool(b)
> }
> }
> if w.finish != nil {
> if err := w.finish(); err != nil {
> return err
> }
> }
> return nil
> }
>
> func (w *_assembler) assignUInt(uin datamodel.UintNode) error {
> if err := compatibleKind(w.schemaType, datamodel.Kind_Int); err != nil {
> return err
> }
> _, isAny := w.schemaType.(*schema.TypeAny)
> // TODO: customConverter for uint??
> if isAny {
> // Any means the Go type must receive a datamodel.Node
> w.createNonPtrVal().Set(reflect.ValueOf(uin))
> } else {
> i, err := uin.AsUint()
> if err != nil {
> return err
> }
> if kindUint[w.val.Kind()] {
> w.createNonPtrVal().SetUint(i)
> } else {
> // TODO: check for overflow
> w.createNonPtrVal().SetInt(int64(i))
> }
657,662c879,895
< if _, ok := w.schemaType.(*schema.TypeAny); ok {
< w.createNonPtrVal().Set(reflect.ValueOf(basicnode.NewInt(i)))
< } else if kindUint[w.val.Kind()] {
< if i < 0 {
< // TODO: write a test
< return fmt.Errorf("bindnode: cannot assign negative integer to %s", w.val.Type())
---
> customConverter := w.cfg.converterFor(w.val)
> _, isAny := w.schemaType.(*schema.TypeAny)
> if customConverter != nil {
> var typ interface{}
> var err error
> if isAny {
> // field is an Any, so the converter will be an Any converter that wants
> // a datamodel.Node to convert to whatever the underlying Go type is
> if typ, err = customConverter.customFromAny(basicnode.NewInt(i)); err != nil {
> return err
> }
> } else {
> // field is an Int, but the user has registered a converter from an int to
> // whatever the underlying Go type is
> if typ, err = customConverter.customFromInt(i); err != nil {
> return err
> }
664c897
< w.createNonPtrVal().SetUint(uint64(i))
---
> w.createNonPtrVal().Set(matchSettable(typ, w.val))
666c899,910
< w.createNonPtrVal().SetInt(i)
---
> if isAny {
> // Any means the Go type must receive a datamodel.Node
> w.createNonPtrVal().Set(reflect.ValueOf(basicnode.NewInt(i)))
> } else if kindUint[w.val.Kind()] {
> if i < 0 {
> // TODO: write a test
> return fmt.Errorf("bindnode: cannot assign negative integer to %s", w.val.Type())
> }
> w.createNonPtrVal().SetUint(uint64(i))
> } else {
> w.createNonPtrVal().SetInt(i)
> }
680,681c924,942
< if _, ok := w.schemaType.(*schema.TypeAny); ok {
< w.createNonPtrVal().Set(reflect.ValueOf(basicnode.NewFloat(f)))
---
> customConverter := w.cfg.converterFor(w.val)
> _, isAny := w.schemaType.(*schema.TypeAny)
> if customConverter != nil {
> var typ interface{}
> var err error
> if isAny {
> // field is an Any, so the converter will be an Any converter that wants
> // a datamodel.Node to convert to whatever the underlying Go type is
> if typ, err = customConverter.customFromAny(basicnode.NewFloat(f)); err != nil {
> return err
> }
> } else {
> // field is a Float, but the user has registered a converter from a float
> // to whatever the underlying Go type is
> if typ, err = customConverter.customFromFloat(f); err != nil {
> return err
> }
> }
> w.createNonPtrVal().Set(matchSettable(typ, w.val))
683c944,949
< w.createNonPtrVal().SetFloat(f)
---
> if isAny {
> // Any means the Go type must receive a datamodel.Node
> w.createNonPtrVal().Set(reflect.ValueOf(basicnode.NewFloat(f)))
> } else {
> w.createNonPtrVal().SetFloat(f)
> }
697,698c963,981
< if _, ok := w.schemaType.(*schema.TypeAny); ok {
< w.createNonPtrVal().Set(reflect.ValueOf(basicnode.NewString(s)))
---
> customConverter := w.cfg.converterFor(w.val)
> _, isAny := w.schemaType.(*schema.TypeAny)
> if customConverter != nil {
> var typ interface{}
> var err error
> if isAny {
> // field is an Any, so the converter will be an Any converter that wants
> // a datamodel.Node to convert to whatever the underlying Go type is
> if typ, err = customConverter.customFromAny(basicnode.NewString(s)); err != nil {
> return err
> }
> } else {
> // field is a String, but the user has registered a converter from a
> // string to whatever the underlying Go type is
> if typ, err = customConverter.customFromString(s); err != nil {
> return err
> }
> }
> w.createNonPtrVal().Set(matchSettable(typ, w.val))
700c983,988
< w.createNonPtrVal().SetString(s)
---
> if isAny {
> // Any means the Go type must receive a datamodel.Node
> w.createNonPtrVal().Set(reflect.ValueOf(basicnode.NewString(s)))
> } else {
> w.createNonPtrVal().SetString(s)
> }
714,715c1002,1020
< if _, ok := w.schemaType.(*schema.TypeAny); ok {
< w.createNonPtrVal().Set(reflect.ValueOf(basicnode.NewBytes(p)))
---
> customConverter := w.cfg.converterFor(w.val)
> _, isAny := w.schemaType.(*schema.TypeAny)
> if customConverter != nil {
> var typ interface{}
> var err error
> if isAny {
> // field is an Any, so the converter will be an Any converter that wants
> // a datamodel.Node to convert to whatever the underlying Go type is
> if typ, err = customConverter.customFromAny(basicnode.NewBytes(p)); err != nil {
> return err
> }
> } else {
> // field is a Bytes, but the user has registered a converter from a []byte
> // to whatever the underlying Go type is
> if typ, err = customConverter.customFromBytes(p); err != nil {
> return err
> }
> }
> w.createNonPtrVal().Set(matchSettable(typ, w.val))
717c1022,1027
< w.createNonPtrVal().SetBytes(p)
---
> if isAny {
> // Any means the Go type must receive a datamodel.Node
> w.createNonPtrVal().Set(reflect.ValueOf(basicnode.NewBytes(p)))
> } else {
> w.createNonPtrVal().SetBytes(p)
> }
729a1040
> customConverter := w.cfg.converterFor(w.val)
731c1042,1065
< val.Set(reflect.ValueOf(basicnode.NewLink(link)))
---
> if customConverter != nil {
> // field is an Any, so the converter will be an Any converter that wants
> // a datamodel.Node to convert to whatever the underlying Go type is
> typ, err := customConverter.customFromAny(basicnode.NewLink(link))
> if err != nil {
> return err
> }
> w.createNonPtrVal().Set(matchSettable(typ, w.val))
> } else {
> // Any means the Go type must receive a datamodel.Node
> val.Set(reflect.ValueOf(basicnode.NewLink(link)))
> }
> } else if customConverter != nil {
> if cl, ok := link.(cidlink.Link); ok {
> // field is a Link, but the user has registered a converter from a cid.Cid
> // to whatever the underlying Go type is
> typ, err := customConverter.customFromLink(cl.Cid)
> if err != nil {
> return err
> }
> w.createNonPtrVal().Set(matchSettable(typ, w.val))
> } else {
> return fmt.Errorf("bindnode: custom converter can only receive a cidlink.Link through AssignLink")
> }
750d1083
< // fmt.Println(newVal.Type().ConvertibleTo(val.Type()))
768a1102,1104
> if uintNode, ok := node.(datamodel.UintNode); ok {
> return w.assignUInt(uintNode)
> }
773c1109
< return &_prototype{schemaType: w.schemaType, goType: w.val.Type()}
---
> return &_prototype{cfg: w.cfg, schemaType: w.schemaType, goType: w.val.Type()}
775a1112
> // _structAssembler is used for Struct assembling via BeginMap()
778a1116,1117
> cfg config
>
798a1138
> cfg: w.cfg,
839a1180
> cfg: w.cfg,
876c1217
< return &_prototype{schemaType: schemaTypeString, goType: goTypeString}
---
> return &_prototype{cfg: w.cfg, schemaType: schemaTypeString, goType: goTypeString}
898a1240,1241
> // used for Maps which we can assume are of type: struct{Keys []string, Values map[x]y},
> // where we have Keys in keysVal and Values in valuesVal
899a1243
> cfg config
911a1256
> cfg: w.cfg,
922,923d1266
< // fmt.Println(kval.Interface(), val.Interface())
<
930a1274
> cfg: w.cfg,
956c1300
< return &_prototype{schemaType: w.schemaType.KeyType(), goType: w.valuesVal.Type().Key()}
---
> return &_prototype{cfg: w.cfg, schemaType: w.schemaType.KeyType(), goType: w.valuesVal.Type().Key()}
960c1304
< return &_prototype{schemaType: w.schemaType.ValueType(), goType: w.valuesVal.Type().Elem()}
---
> return &_prototype{cfg: w.cfg, schemaType: w.schemaType.ValueType(), goType: w.valuesVal.Type().Elem()}
962a1307
> // _listAssembler is for operating directly on slices, which we have in val
963a1309
> cfg config
973a1320
> cfg: w.cfg,
990c1337
< return &_prototype{schemaType: w.schemaType.ValueType(), goType: w.val.Type().Elem()}
---
> return &_prototype{cfg: w.cfg, schemaType: w.schemaType.ValueType(), goType: w.val.Type().Elem()}
992a1340,1341
> // when assembling as a Map but we anticipate a single value, which we need to
> // look up in the union members
993a1343
> cfg config
1004a1355
> cfg: w.cfg,
1033d1383
< // fmt.Println(kval.Interface(), val.Interface())
1037a1388
> cfg: w.cfg,
1052a1404,1405
> // TODO(rvagg): I think this might allow setting multiple members of the union;
> // we need a test for this.
1066c1419
< return &_prototype{schemaType: schemaTypeString, goType: goTypeString}
---
> return &_prototype{cfg: w.cfg, schemaType: schemaTypeString, goType: goTypeString}
1072a1426,1428
> // _structIterator is for iterating over Struct types which operate over Go
> // structs. The iteration order is dictated by Go field declaration order which
> // should match the schema for this type.
1074a1431,1432
> cfg config
>
1099a1458,1469
> _, isAny := field.Type().(*schema.TypeAny)
> if isAny {
> if customConverter := w.cfg.converterFor(val); customConverter != nil {
> // field is an Any and we have an Any converter which takes the underlying
> // struct field value and returns a datamodel.Node
> v, err := customConverter.customToAny(ptrVal(val).Interface())
> if err != nil {
> return nil, nil, err
> }
> return key, v, nil
> }
> }
1108c1478,1479
< if _, ok := field.Type().(*schema.TypeAny); ok {
---
> if isAny {
> // field holds a datamodel.Node
1111,1115c1482
< node := &_node{
< schemaType: field.Type(),
< val: val,
< }
< return key, node, nil
---
> return key, newNode(w.cfg, field.Type(), val), nil
1121a1489,1490
> // _mapIterator is for iterating over a struct{Keys []string, Values map[x]y},
> // where we have the Keys in keysVal and Values in valuesVal
1122a1492
> cfg config
1137,1139c1507,1519
< key = &_node{
< schemaType: w.schemaType.KeyType(),
< val: goKey,
---
> key = newNode(w.cfg, w.schemaType.KeyType(), goKey)
> _, isAny := w.schemaType.ValueType().(*schema.TypeAny)
> if isAny {
> if customConverter := w.cfg.converterFor(val); customConverter != nil {
> // values of this map are Any and we have an Any converter which takes the
> // underlying map value and returns a datamodel.Node
>
> // TODO(rvagg): can't call ptrVal on a map value that's not a pointer
> // so only map[string]*foo will work for the Values map and an Any
> // converter. Should we check in infer.go?
> val, err := customConverter.customToAny(ptrVal(val).Interface())
> return key, val, err
> }
1145c1525
< val = val.Elem()
---
> val = val.Elem() // nullable entries are pointers
1147c1527,1528
< if _, ok := w.schemaType.ValueType().(*schema.TypeAny); ok {
---
> if isAny {
> // Values holds datamodel.Nodes
1150,1154c1531
< node := &_node{
< schemaType: w.schemaType.ValueType(),
< val: val,
< }
< return key, node, nil
---
> return key, newNode(w.cfg, w.schemaType.ValueType(), val), nil
1160a1538
> // _listIterator is for iterating over slices, which is held in val
1161a1540
> cfg config
1178c1557
< val = val.Elem()
---
> val = val.Elem() // nullable values are pointers
1180a1560,1566
> if customConverter := w.cfg.converterFor(val); customConverter != nil {
> // values are Any and we have an Any converter which can take whatever
> // the underlying Go type in this slice is and return a datamodel.Node
> val, err := customConverter.customToAny(ptrVal(val).Interface())
> return idx, val, err
> }
> // values are Any, assume that they are datamodel.Nodes
1183c1569
< return idx, &_node{schemaType: w.schemaType.ValueType(), val: val}, nil
---
> return idx, newNode(w.cfg, w.schemaType.ValueType(), val), nil
1191a1578
> cfg config
1199a1587,1588
> // we can only call this once for a union since a union can only have one
> // entry even though it behaves like a Map
1211,1214c1600
< node := &_node{
< schemaType: mtyp,
< val: mval,
< }
---
> node := newNode(w.cfg, mtyp, mval)
1220a1607,1753
> }
>
> // --- uint64 special case handling
>
> type _uintNode struct {
> cfg config
> schemaType schema.Type
>
> val reflect.Value // non-pointer
> }
>
> func (tu *_uintNode) Type() schema.Type {
> return tu.schemaType
> }
> func (tu *_uintNode) Representation() datamodel.Node {
> return (*_uintNodeRepr)(tu)
> }
> func (_uintNode) Kind() datamodel.Kind {
> return datamodel.Kind_Int
> }
> func (_uintNode) LookupByString(string) (datamodel.Node, error) {
> return mixins.Int{TypeName: "int"}.LookupByString("")
> }
> func (_uintNode) LookupByNode(key datamodel.Node) (datamodel.Node, error) {
> return mixins.Int{TypeName: "int"}.LookupByNode(nil)
> }
> func (_uintNode) LookupByIndex(idx int64) (datamodel.Node, error) {
> return mixins.Int{TypeName: "int"}.LookupByIndex(0)
> }
> func (_uintNode) LookupBySegment(seg datamodel.PathSegment) (datamodel.Node, error) {
> return mixins.Int{TypeName: "int"}.LookupBySegment(seg)
> }
> func (_uintNode) MapIterator() datamodel.MapIterator {
> return nil
> }
> func (_uintNode) ListIterator() datamodel.ListIterator {
> return nil
> }
> func (_uintNode) Length() int64 {
> return -1
> }
> func (_uintNode) IsAbsent() bool {
> return false
> }
> func (_uintNode) IsNull() bool {
> return false
> }
> func (_uintNode) AsBool() (bool, error) {
> return mixins.Int{TypeName: "int"}.AsBool()
> }
> func (tu *_uintNode) AsInt() (int64, error) {
> return (*_uintNodeRepr)(tu).AsInt()
> }
> func (tu *_uintNode) AsUint() (uint64, error) {
> return (*_uintNodeRepr)(tu).AsUint()
> }
> func (_uintNode) AsFloat() (float64, error) {
> return mixins.Int{TypeName: "int"}.AsFloat()
> }
> func (_uintNode) AsString() (string, error) {
> return mixins.Int{TypeName: "int"}.AsString()
> }
> func (_uintNode) AsBytes() ([]byte, error) {
> return mixins.Int{TypeName: "int"}.AsBytes()
> }
> func (_uintNode) AsLink() (datamodel.Link, error) {
> return mixins.Int{TypeName: "int"}.AsLink()
> }
> func (_uintNode) Prototype() datamodel.NodePrototype {
> return basicnode.Prototype__Int{}
> }
>
> // we need this for _uintNode#Representation() so we don't return a TypeNode
> type _uintNodeRepr _uintNode
>
> func (_uintNodeRepr) Kind() datamodel.Kind {
> return datamodel.Kind_Int
> }
> func (_uintNodeRepr) LookupByString(string) (datamodel.Node, error) {
> return mixins.Int{TypeName: "int"}.LookupByString("")
> }
> func (_uintNodeRepr) LookupByNode(key datamodel.Node) (datamodel.Node, error) {
> return mixins.Int{TypeName: "int"}.LookupByNode(nil)
> }
> func (_uintNodeRepr) LookupByIndex(idx int64) (datamodel.Node, error) {
> return mixins.Int{TypeName: "int"}.LookupByIndex(0)
> }
> func (_uintNodeRepr) LookupBySegment(seg datamodel.PathSegment) (datamodel.Node, error) {
> return mixins.Int{TypeName: "int"}.LookupBySegment(seg)
> }
> func (_uintNodeRepr) MapIterator() datamodel.MapIterator {
> return nil
> }
> func (_uintNodeRepr) ListIterator() datamodel.ListIterator {
> return nil
> }
> func (_uintNodeRepr) Length() int64 {
> return -1
> }
> func (_uintNodeRepr) IsAbsent() bool {
> return false
> }
> func (_uintNodeRepr) IsNull() bool {
> return false
> }
> func (_uintNodeRepr) AsBool() (bool, error) {
> return mixins.Int{TypeName: "int"}.AsBool()
> }
> func (tu *_uintNodeRepr) AsInt() (int64, error) {
> if err := compatibleKind(tu.schemaType, datamodel.Kind_Int); err != nil {
> return 0, err
> }
> if customConverter := tu.cfg.converterFor(tu.val); customConverter != nil {
> // user has registered a converter that takes the underlying type and returns an int
> return customConverter.customToInt(ptrVal(tu.val).Interface())
> }
> val := nonPtrVal(tu.val)
> // we can assume it's a uint64 at this point
> u := val.Uint()
> if u > math.MaxInt64 {
> return 0, fmt.Errorf("bindnode: integer overflow, %d is too large for an int64", u)
> }
> return int64(u), nil
> }
> func (tu *_uintNodeRepr) AsUint() (uint64, error) {
> if err := compatibleKind(tu.schemaType, datamodel.Kind_Int); err != nil {
> return 0, err
> }
> // TODO(rvagg): do we want a converter option for uint values? do we combine it
> // with int converters?
> // we can assume it's a uint64 at this point
> return nonPtrVal(tu.val).Uint(), nil
> }
> func (_uintNodeRepr) AsFloat() (float64, error) {
> return mixins.Int{TypeName: "int"}.AsFloat()
> }
> func (_uintNodeRepr) AsString() (string, error) {
> return mixins.Int{TypeName: "int"}.AsString()
> }
> func (_uintNodeRepr) AsBytes() ([]byte, error) {
> return mixins.Int{TypeName: "int"}.AsBytes()
> }
> func (_uintNodeRepr) AsLink() (datamodel.Link, error) {
> return mixins.Int{TypeName: "int"}.AsLink()
> }
> func (_uintNodeRepr) Prototype() datamodel.NodePrototype {
> return basicnode.Prototype__Int{}
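
The interesting hunk above is AsInt: it narrows the wrapped uint64 to int64 only after checking math.MaxInt64, since the upper half of the uint64 range has no int64 representation. A minimal standalone sketch of the same guard (standard library only):

    package main

    import (
        "fmt"
        "math"
    )

    // toInt64 mirrors the guard in _uintNodeRepr.AsInt above: a uint64
    // fits in an int64 only when it does not exceed math.MaxInt64.
    func toInt64(u uint64) (int64, error) {
        if u > math.MaxInt64 {
            return 0, fmt.Errorf("bindnode: integer overflow, %d is too large for an int64", u)
        }
        return int64(u), nil
    }

    func main() {
        fmt.Println(toInt64(42))             // 42 <nil>
        fmt.Println(toInt64(math.MaxUint64)) // 0 bindnode: integer overflow, ...
    }
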
diff -r --color a/vendor/github.com/ipld/go-ipld-prime/node/bindnode/repr.go b/vendor/github.com/ipld/go-ipld-prime/node/bindnode/repr.go
41a42
> cfg: w.cfg,
271c272
< iter := _tupleIteratorRepr{schemaType: typ, fields: typ.Fields(), val: w.val}
---
> iter := _tupleIteratorRepr{cfg: w.cfg, schemaType: typ, fields: typ.Fields(), val: w.val}
309a311
> cfg config
538c540
< return &_node{schemaType: w.schemaType, val: w.val}
---
> return &_node{cfg: w.cfg, schemaType: w.schemaType, val: w.val}
545a548
> cfg config
655a659,673
> func (w *_assemblerRepr) assignUInt(uin datamodel.UintNode) error {
> switch stg := reprStrategy(w.schemaType).(type) {
> case schema.UnionRepresentation_Kinded:
> return w.asKinded(stg, datamodel.Kind_Int).(*_assemblerRepr).assignUInt(uin)
> case schema.EnumRepresentation_Int:
> uin, err := uin.AsUint()
> if err != nil {
> return err
> }
> return fmt.Errorf("AssignInt: %d is not a valid member of enum %s", uin, w.schemaType.Name())
> default:
> return (*_assembler)(w).assignUInt(uin)
> }
> }
>
819a838,840
> if uintNode, ok := node.(datamodel.UintNode); ok {
> return w.assignUInt(uintNode)
> }
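
The AssignNode hunk above special-cases nodes that also implement datamodel.UintNode, so values above math.MaxInt64 survive assignment instead of being forced through AsInt. A sketch of that dispatch with stand-in types (only the AsUint method and the type assertion come from the hunk; the rest is scaffolding for illustration):

    package main

    import "fmt"

    type Node interface {
        AsInt() (int64, error)
    }

    // UintNode stands in for the optional interface used in the hunk above.
    type UintNode interface {
        Node
        AsUint() (uint64, error)
    }

    type u64 uint64

    func (x u64) AsInt() (int64, error)   { return int64(x), nil } // lossy above MaxInt64; sketch only
    func (x u64) AsUint() (uint64, error) { return uint64(x), nil }

    // assign prefers the lossless uint path when the node offers one.
    func assign(n Node) error {
        if u, ok := n.(UintNode); ok {
            v, err := u.AsUint()
            if err != nil {
                return err
            }
            fmt.Println("assigned as uint64:", v)
            return nil
        }
        v, err := n.AsInt()
        if err != nil {
            return err
        }
        fmt.Println("assigned as int64:", v)
        return nil
    }

    func main() { _ = assign(u64(1 << 63)) } // assigned as uint64: 9223372036854775808
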
diff -r --color a/vendor/github.com/ipld/go-ipld-prime/version.json b/vendor/github.com/ipld/go-ipld-prime/version.json
2c2
< "version": "v0.17.0"
---
> "version": "v0.18.0"
diff -r --color a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore
25a26,32
>
> # Linux perf files
> perf.data
> perf.data.old
>
> # gdb history
> .gdb_history
Only in a/vendor/github.com/klauspost/compress/huff0: autogen.go
diff -r --color a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
168,172d167
< // peekTopBits(n) is equivalent to peekBitsFast(64 - n)
< func (b *bitReaderShifted) peekTopBits(n uint8) uint16 {
< return uint16(b.value >> n)
< }
<
221,225d215
< }
<
< // finished returns true if all bits have been read from the bit stream.
< func (b *bitReaderShifted) finished() bool {
< return b.off == 0 && b.bitsRead >= 64
diff -r --color a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
8,9d7
< import "fmt"
<
26,33d23
< // addBits16NC will add up to 16 bits.
< // It will not check if there is space for them,
< // so the caller must ensure that it has flushed recently.
< func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
< b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
< b.nBits += bits
< }
<
73,170d62
< // addBits16ZeroNC will add up to 16 bits.
< // It will not check if there is space for them,
< // so the caller must ensure that it has flushed recently.
< // This is fastest if bits can be zero.
< func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
< if bits == 0 {
< return
< }
< value <<= (16 - bits) & 15
< value >>= (16 - bits) & 15
< b.bitContainer |= uint64(value) << (b.nBits & 63)
< b.nBits += bits
< }
<
< // flush will flush all pending full bytes.
< // There will be at least 56 bits available for writing when this has been called.
< // Using flush32 is faster, but leaves less space for writing.
< func (b *bitWriter) flush() {
< v := b.nBits >> 3
< switch v {
< case 0:
< return
< case 1:
< b.out = append(b.out,
< byte(b.bitContainer),
< )
< b.bitContainer >>= 1 << 3
< case 2:
< b.out = append(b.out,
< byte(b.bitContainer),
< byte(b.bitContainer>>8),
< )
< b.bitContainer >>= 2 << 3
< case 3:
< b.out = append(b.out,
< byte(b.bitContainer),
< byte(b.bitContainer>>8),
< byte(b.bitContainer>>16),
< )
< b.bitContainer >>= 3 << 3
< case 4:
< b.out = append(b.out,
< byte(b.bitContainer),
< byte(b.bitContainer>>8),
< byte(b.bitContainer>>16),
< byte(b.bitContainer>>24),
< )
< b.bitContainer >>= 4 << 3
< case 5:
< b.out = append(b.out,
< byte(b.bitContainer),
< byte(b.bitContainer>>8),
< byte(b.bitContainer>>16),
< byte(b.bitContainer>>24),
< byte(b.bitContainer>>32),
< )
< b.bitContainer >>= 5 << 3
< case 6:
< b.out = append(b.out,
< byte(b.bitContainer),
< byte(b.bitContainer>>8),
< byte(b.bitContainer>>16),
< byte(b.bitContainer>>24),
< byte(b.bitContainer>>32),
< byte(b.bitContainer>>40),
< )
< b.bitContainer >>= 6 << 3
< case 7:
< b.out = append(b.out,
< byte(b.bitContainer),
< byte(b.bitContainer>>8),
< byte(b.bitContainer>>16),
< byte(b.bitContainer>>24),
< byte(b.bitContainer>>32),
< byte(b.bitContainer>>40),
< byte(b.bitContainer>>48),
< )
< b.bitContainer >>= 7 << 3
< case 8:
< b.out = append(b.out,
< byte(b.bitContainer),
< byte(b.bitContainer>>8),
< byte(b.bitContainer>>16),
< byte(b.bitContainer>>24),
< byte(b.bitContainer>>32),
< byte(b.bitContainer>>40),
< byte(b.bitContainer>>48),
< byte(b.bitContainer>>56),
< )
< b.bitContainer = 0
< b.nBits = 0
< return
< default:
< panic(fmt.Errorf("bits (%d) > 64", b.nBits))
< }
< b.nBits &= 7
< }
<
203,209d94
< }
<
< // reset and continue writing by appending to out.
< func (b *bitWriter) reset(out []byte) {
< b.bitContainer = 0
< b.nBits = 0
< b.out = out
diff -r --color a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go
23,27d22
< // advance the stream b n bytes.
< func (b *byteReader) advance(n uint) {
< b.off += int(n)
< }
<
44,48d38
< }
<
< // unread returns the unread portion of the input.
< func (b byteReader) unread() []byte {
< return b.b[b.off:]
diff -r --color a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
406a407
> //lint:ignore U1000 used for debugging
Only in a/vendor/github.com/klauspost/compress/huff0: decompress_8b_amd64.s
Only in a/vendor/github.com/klauspost/compress/huff0: decompress_8b_amd64.s.in
diff -r --color a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
5c5
< // that uses an asm implementation of its main loop.
---
> // and Decoder.Decompress1X that use an asm implementation of their main loops.
10a11,12
>
> "github.com/klauspost/compress/internal/cpuinfo"
15,17c17,19
< // go:noescape
< func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
< peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
---
> //
> //go:noescape
> func decompress4x_main_loop_amd64(ctx *decompress4xContext)
22,24c24,26
< // go:noescape
< func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
< peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
---
> //
> //go:noescape
> func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
28a31,40
> type decompress4xContext struct {
> pbr *[4]bitReaderShifted
> peekBits uint8
> out *byte
> dstEvery int
> tbl *dEntrySingle
> decoded int
> limit *byte
> }
>
44a57
>
74,76d86
< // Use temp table to avoid bound checks/append penalty.
< buf := d.buffer()
< var off uint8
79,88c89,96
< const debug = false
<
< // see: bitReaderShifted.peekBitsFast()
< peekBits := uint8((64 - d.actualTableLog) & 63)
<
< // Decode 2 values from each decoder/loop.
< const bufoff = 256
< for {
< if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
< break
---
> if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) {
> ctx := decompress4xContext{
> pbr: &br,
> peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
> out: &out[0],
> dstEvery: dstEvery,
> tbl: &single[0],
> limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last.
90d97
<
92c99
< off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
---
> decompress4x_8b_main_loop_amd64(&ctx)
94,103c101
< off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
< }
< if debug {
< fmt.Print("DEBUG: ")
< fmt.Printf("off=%d,", off)
< for i := 0; i < 4; i++ {
< fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}",
< i, br[i].bitsRead, br[i].value, br[i].off)
< }
< fmt.Println("")
---
> decompress4x_main_loop_amd64(&ctx)
106,137c104,105
< if off != 0 {
< break
< }
<
< if bufoff > dstEvery {
< d.bufs.Put(buf)
< return nil, errors.New("corruption detected: stream overrun 1")
< }
< copy(out, buf[0][:])
< copy(out[dstEvery:], buf[1][:])
< copy(out[dstEvery*2:], buf[2][:])
< copy(out[dstEvery*3:], buf[3][:])
< out = out[bufoff:]
< decoded += bufoff * 4
< // There must at least be 3 buffers left.
< if len(out) < dstEvery*3 {
< d.bufs.Put(buf)
< return nil, errors.New("corruption detected: stream overrun 2")
< }
< }
< if off > 0 {
< ioff := int(off)
< if len(out) < dstEvery*3+ioff {
< d.bufs.Put(buf)
< return nil, errors.New("corruption detected: stream overrun 3")
< }
< copy(out, buf[0][:off])
< copy(out[dstEvery:], buf[1][:off])
< copy(out[dstEvery*2:], buf[2][:off])
< copy(out[dstEvery*3:], buf[3][:off])
< decoded += int(off) * 4
< out = out[off:]
---
> decoded = ctx.decoded
> out = out[decoded/4:]
153d120
< d.bufs.Put(buf)
167d133
< d.bufs.Put(buf)
176d141
< d.bufs.Put(buf)
180a146,225
> }
>
> // decompress1x_main_loop_amd64 is an x86 assembler implementation
> // of Decompress1X when tablelog > 8.
> //
> //go:noescape
> func decompress1x_main_loop_amd64(ctx *decompress1xContext)
>
> // decompress1x_main_loop_bmi2 is an x86 with BMI2 assembler implementation
> // of Decompress1X when tablelog > 8.
> //
> //go:noescape
> func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
>
> type decompress1xContext struct {
> pbr *bitReaderShifted
> peekBits uint8
> out *byte
> outCap int
> tbl *dEntrySingle
> decoded int
> }
>
> // Error reported by asm implementations
> const error_max_decoded_size_exeeded = -1
>
> // Decompress1X will decompress a 1X encoded stream.
> // The cap of the output buffer will be the maximum decompressed size.
> // The length of the supplied input must match the end of a block exactly.
> func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
> if len(d.dt.single) == 0 {
> return nil, errors.New("no table loaded")
> }
> var br bitReaderShifted
> err := br.init(src)
> if err != nil {
> return dst, err
> }
> maxDecodedSize := cap(dst)
> dst = dst[:maxDecodedSize]
>
> const tlSize = 1 << tableLogMax
> const tlMask = tlSize - 1
>
> if maxDecodedSize >= 4 {
> ctx := decompress1xContext{
> pbr: &br,
> out: &dst[0],
> outCap: maxDecodedSize,
> peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
> tbl: &d.dt.single[0],
> }
>
> if cpuinfo.HasBMI2() {
> decompress1x_main_loop_bmi2(&ctx)
> } else {
> decompress1x_main_loop_amd64(&ctx)
> }
> if ctx.decoded == error_max_decoded_size_exeeded {
> return nil, ErrMaxDecodedSizeExceeded
> }
>
> dst = dst[:ctx.decoded]
> }
>
> // br < 8, so uint8 is fine
> bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
> for bitsLeft > 0 {
> br.fill()
> if len(dst) >= maxDecodedSize {
> br.close()
> return nil, ErrMaxDecodedSizeExceeded
> }
> v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
> nBits := uint8(v.entry)
> br.advance(nBits)
> bitsLeft -= nBits
> dst = append(dst, uint8(v.entry>>8))
> }
> return dst, br.close()
diff -r --color a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
1,47c1
< // +build !appengine
< // +build gc
< // +build !noasm
<
< #include "textflag.h"
< #include "funcdata.h"
< #include "go_asm.h"
<
< #ifdef GOAMD64_v4
< #ifndef GOAMD64_v3
< #define GOAMD64_v3
< #endif
< #endif
<
< #define bufoff 256 // see decompress.go, we're using [4][256]byte table
<
< // func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
< // peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
< TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
< #define off R8
< #define buffer DI
< #define table SI
<
< #define br_bits_read R9
< #define br_value R10
< #define br_offset R11
< #define peek_bits R12
< #define exhausted DX
<
< #define br0 R13
< #define br1 R14
< #define br2 R15
< #define br3 BP
<
< MOVQ BP, 0(SP)
<
< XORQ exhausted, exhausted // exhausted = false
< XORQ off, off // off = 0
<
< MOVBQZX peekBits+32(FP), peek_bits
< MOVQ buf+40(FP), buffer
< MOVQ tbl+48(FP), table
<
< MOVQ pbr0+0(FP), br0
< MOVQ pbr1+8(FP), br1
< MOVQ pbr2+16(FP), br2
< MOVQ pbr3+24(FP), br3
---
> // Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT.
49,71c3
< main_loop:
<
< // const stream = 0
< // br0.fillFast()
< MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
< MOVQ bitReaderShifted_value(br0), br_value
< MOVQ bitReaderShifted_off(br0), br_offset
<
< // We must have at least 2 * max tablelog left
< CMPQ br_bits_read, $64-22
< JBE skip_fill0
<
< SUBQ $32, br_bits_read // b.bitsRead -= 32
< SUBQ $4, br_offset // b.off -= 4
<
< // v := b.in[b.off-4 : b.off]
< // v = v[:4]
< // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
< MOVQ bitReaderShifted_in(br0), AX
<
< // b.value |= uint64(low) << (b.bitsRead & 63)
< #ifdef GOAMD64_v3
< SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
---
> //go:build amd64 && !appengine && !noasm && gc
73,76c5,16
< #else
< MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
< MOVQ br_bits_read, CX
< SHLQ CL, AX
---
> // func decompress4x_main_loop_amd64(ctx *decompress4xContext)
> TEXT ·decompress4x_main_loop_amd64(SB), $0-8
> XORQ DX, DX
>
> // Preload values
> MOVQ ctx+0(FP), AX
> MOVBQZX 8(AX), DI
> MOVQ 16(AX), SI
> MOVQ 48(AX), BX
> MOVQ 24(AX), R9
> MOVQ 32(AX), R10
> MOVQ (AX), R11
78c18,32
< #endif
---
> // Main loop
> main_loop:
> MOVQ SI, R8
> CMPQ R8, BX
> SETGE DL
>
> // br0.fillFast32()
> MOVQ 32(R11), R12
> MOVBQZX 40(R11), R13
> CMPQ R13, $0x20
> JBE skip_fill0
> MOVQ 24(R11), AX
> SUBQ $0x20, R13
> SUBQ $0x04, AX
> MOVQ (R11), R14
80c34,39
< ORQ AX, br_value
---
> // b.value |= uint64(low) << (b.bitsRead & 63)
> MOVL (AX)(R14*1), R14
> MOVQ R13, CX
> SHLQ CL, R14
> MOVQ AX, 24(R11)
> ORQ R14, R12
83,85c42,44
< CMPQ br_offset, $4
< SETLT DL
< ORB DL, DH
---
> CMPQ AX, $0x04
> SETLT AL
> ORB AL, DL
87d45
< // }
89d46
<
91,99c48,50
< #ifdef GOAMD64_v3
< SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
<
< #else
< MOVQ br_value, AX
< MOVQ peek_bits, CX
< SHRQ CL, AX // AX = (value >> peek_bits) & mask
<
< #endif
---
> MOVQ R12, R14
> MOVQ DI, CX
> SHRQ CL, R14
102,115c53
< MOVW 0(table)(AX*2), AX // AX - v0
<
< // br0.advance(uint8(v0.entry))
< MOVB AH, BL // BL = uint8(v0.entry >> 8)
<
< #ifdef GOAMD64_v3
< MOVBQZX AL, CX
< SHLXQ AX, br_value, br_value // value <<= n
<
< #else
< MOVBQZX AL, CX
< SHLQ CL, br_value // value <<= n
<
< #endif
---
> MOVW (R10)(R14*2), CX
117c55,58
< ADDQ CX, br_bits_read // bits_read += n
---
> // br0.advance(uint8(v0.entry))
> MOVB CH, AL
> SHLQ CL, R12
> ADDB CL, R13
119,122d59
< #ifdef GOAMD64_v3
< SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
<
< #else
124,128c61,63
< MOVQ peek_bits, CX
< MOVQ br_value, AX
< SHRQ CL, AX // AX = (value >> peek_bits) & mask
<
< #endif
---
> MOVQ DI, CX
> MOVQ R12, R14
> SHRQ CL, R14
131c66
< MOVW 0(table)(AX*2), AX // AX - v1
---
> MOVW (R10)(R14*2), CX
134,146c69,71
< MOVB AH, BH // BH = uint8(v1.entry >> 8)
<
< #ifdef GOAMD64_v3
< MOVBQZX AL, CX
< SHLXQ AX, br_value, br_value // value <<= n
<
< #else
< MOVBQZX AL, CX
< SHLQ CL, br_value // value <<= n
<
< #endif
<
< ADDQ CX, br_bits_read // bits_read += n
---
> MOVB CH, AH
> SHLQ CL, R12
> ADDB CL, R13
149,174c74,91
< // buf[stream][off] = uint8(v0.entry >> 8)
< // buf[stream][off+1] = uint8(v1.entry >> 8)
< MOVW BX, 0(buffer)(off*1)
<
< // update the bitreader structure
< MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
< MOVQ br_value, bitReaderShifted_value(br0)
< MOVQ br_offset, bitReaderShifted_off(br0)
<
< // const stream = 1
< // br1.fillFast()
< MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
< MOVQ bitReaderShifted_value(br1), br_value
< MOVQ bitReaderShifted_off(br1), br_offset
<
< // We must have at least 2 * max tablelog left
< CMPQ br_bits_read, $64-22
< JBE skip_fill1
<
< SUBQ $32, br_bits_read // b.bitsRead -= 32
< SUBQ $4, br_offset // b.off -= 4
<
< // v := b.in[b.off-4 : b.off]
< // v = v[:4]
< // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
< MOVQ bitReaderShifted_in(br1), AX
---
> // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
> // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
> MOVW AX, (R8)
>
> // update the bitreader structure
> MOVQ R12, 32(R11)
> MOVB R13, 40(R11)
> ADDQ R9, R8
>
> // br1.fillFast32()
> MOVQ 80(R11), R12
> MOVBQZX 88(R11), R13
> CMPQ R13, $0x20
> JBE skip_fill1
> MOVQ 72(R11), AX
> SUBQ $0x20, R13
> SUBQ $0x04, AX
> MOVQ 48(R11), R14
177,187c94,98
< #ifdef GOAMD64_v3
< SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
<
< #else
< MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
< MOVQ br_bits_read, CX
< SHLQ CL, AX
<
< #endif
<
< ORQ AX, br_value
---
> MOVL (AX)(R14*1), R14
> MOVQ R13, CX
> SHLQ CL, R14
> MOVQ AX, 72(R11)
> ORQ R14, R12
190,192c101,103
< CMPQ br_offset, $4
< SETLT DL
< ORB DL, DH
---
> CMPQ AX, $0x04
> SETLT AL
> ORB AL, DL
194d104
< // }
196d105
<
198,206c107,109
< #ifdef GOAMD64_v3
< SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
<
< #else
< MOVQ br_value, AX
< MOVQ peek_bits, CX
< SHRQ CL, AX // AX = (value >> peek_bits) & mask
<
< #endif
---
> MOVQ R12, R14
> MOVQ DI, CX
> SHRQ CL, R14
209c112
< MOVW 0(table)(AX*2), AX // AX - v0
---
> MOVW (R10)(R14*2), CX
211,212c114,117
< // br1.advance(uint8(v0.entry))
< MOVB AH, BL // BL = uint8(v0.entry >> 8)
---
> // br1.advance(uint8(v0.entry))
> MOVB CH, AL
> SHLQ CL, R12
> ADDB CL, R13
214,229d118
< #ifdef GOAMD64_v3
< MOVBQZX AL, CX
< SHLXQ AX, br_value, br_value // value <<= n
<
< #else
< MOVBQZX AL, CX
< SHLQ CL, br_value // value <<= n
<
< #endif
<
< ADDQ CX, br_bits_read // bits_read += n
<
< #ifdef GOAMD64_v3
< SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
<
< #else
231,235c120,122
< MOVQ peek_bits, CX
< MOVQ br_value, AX
< SHRQ CL, AX // AX = (value >> peek_bits) & mask
<
< #endif
---
> MOVQ DI, CX
> MOVQ R12, R14
> SHRQ CL, R14
238c125
< MOVW 0(table)(AX*2), AX // AX - v1
---
> MOVW (R10)(R14*2), CX
241,253c128,130
< MOVB AH, BH // BH = uint8(v1.entry >> 8)
<
< #ifdef GOAMD64_v3
< MOVBQZX AL, CX
< SHLXQ AX, br_value, br_value // value <<= n
<
< #else
< MOVBQZX AL, CX
< SHLQ CL, br_value // value <<= n
<
< #endif
<
< ADDQ CX, br_bits_read // bits_read += n
---
> MOVB CH, AH
> SHLQ CL, R12
> ADDB CL, R13
256,281c133,150
< // buf[stream][off] = uint8(v0.entry >> 8)
< // buf[stream][off+1] = uint8(v1.entry >> 8)
< MOVW BX, 256(buffer)(off*1)
<
< // update the bitreader structure
< MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
< MOVQ br_value, bitReaderShifted_value(br1)
< MOVQ br_offset, bitReaderShifted_off(br1)
<
< // const stream = 2
< // br2.fillFast()
< MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
< MOVQ bitReaderShifted_value(br2), br_value
< MOVQ bitReaderShifted_off(br2), br_offset
<
< // We must have at least 2 * max tablelog left
< CMPQ br_bits_read, $64-22
< JBE skip_fill2
<
< SUBQ $32, br_bits_read // b.bitsRead -= 32
< SUBQ $4, br_offset // b.off -= 4
<
< // v := b.in[b.off-4 : b.off]
< // v = v[:4]
< // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
< MOVQ bitReaderShifted_in(br2), AX
---
> // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
> // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
> MOVW AX, (R8)
>
> // update the bitreader structure
> MOVQ R12, 80(R11)
> MOVB R13, 88(R11)
> ADDQ R9, R8
>
> // br2.fillFast32()
> MOVQ 128(R11), R12
> MOVBQZX 136(R11), R13
> CMPQ R13, $0x20
> JBE skip_fill2
> MOVQ 120(R11), AX
> SUBQ $0x20, R13
> SUBQ $0x04, AX
> MOVQ 96(R11), R14
284,294c153,157
< #ifdef GOAMD64_v3
< SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
<
< #else
< MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
< MOVQ br_bits_read, CX
< SHLQ CL, AX
<
< #endif
<
< ORQ AX, br_value
---
> MOVL (AX)(R14*1), R14
> MOVQ R13, CX
> SHLQ CL, R14
> MOVQ AX, 120(R11)
> ORQ R14, R12
297,299c160,162
< CMPQ br_offset, $4
< SETLT DL
< ORB DL, DH
---
> CMPQ AX, $0x04
> SETLT AL
> ORB AL, DL
301d163
< // }
303d164
<
305,313c166,168
< #ifdef GOAMD64_v3
< SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
<
< #else
< MOVQ br_value, AX
< MOVQ peek_bits, CX
< SHRQ CL, AX // AX = (value >> peek_bits) & mask
<
< #endif
---
> MOVQ R12, R14
> MOVQ DI, CX
> SHRQ CL, R14
316,319c171
< MOVW 0(table)(AX*2), AX // AX - v0
<
< // br2.advance(uint8(v0.entry))
< MOVB AH, BL // BL = uint8(v0.entry >> 8)
---
> MOVW (R10)(R14*2), CX
321,323c173,176
< #ifdef GOAMD64_v3
< MOVBQZX AL, CX
< SHLXQ AX, br_value, br_value // value <<= n
---
> // br2.advance(uint8(v0.entry))
> MOVB CH, AL
> SHLQ CL, R12
> ADDB CL, R13
325,327c178,181
< #else
< MOVBQZX AL, CX
< SHLQ CL, br_value // value <<= n
---
> // val1 := br2.peekTopBits(peekBits)
> MOVQ DI, CX
> MOVQ R12, R14
> SHRQ CL, R14
329c183,184
< #endif
---
> // v1 := table[val1&mask]
> MOVW (R10)(R14*2), CX
331c186,189
< ADDQ CX, br_bits_read // bits_read += n
---
> // br2.advance(uint8(v1.entry))
> MOVB CH, AH
> SHLQ CL, R12
> ADDB CL, R13
333,334c191,209
< #ifdef GOAMD64_v3
< SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
---
> // these two writes get coalesced
> // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
> // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
> MOVW AX, (R8)
>
> // update the bitreader structure
> MOVQ R12, 128(R11)
> MOVB R13, 136(R11)
> ADDQ R9, R8
>
> // br3.fillFast32()
> MOVQ 176(R11), R12
> MOVBQZX 184(R11), R13
> CMPQ R13, $0x20
> JBE skip_fill3
> MOVQ 168(R11), AX
> SUBQ $0x20, R13
> SUBQ $0x04, AX
> MOVQ 144(R11), R14
336,340c211,216
< #else
< // val1 := br2.peekTopBits(peekBits)
< MOVQ peek_bits, CX
< MOVQ br_value, AX
< SHRQ CL, AX // AX = (value >> peek_bits) & mask
---
> // b.value |= uint64(low) << (b.bitsRead & 63)
> MOVL (AX)(R14*1), R14
> MOVQ R13, CX
> SHLQ CL, R14
> MOVQ AX, 168(R11)
> ORQ R14, R12
342c218,221
< #endif
---
> // exhausted = exhausted || (br3.off < 4)
> CMPQ AX, $0x04
> SETLT AL
> ORB AL, DL
344,345c223,227
< // v1 := table[val1&mask]
< MOVW 0(table)(AX*2), AX // AX - v1
---
> skip_fill3:
> // val0 := br3.peekTopBits(peekBits)
> MOVQ R12, R14
> MOVQ DI, CX
> SHRQ CL, R14
347,348c229,230
< // br2.advance(uint8(v1.entry))
< MOVB AH, BH // BH = uint8(v1.entry >> 8)
---
> // v0 := table[val0&mask]
> MOVW (R10)(R14*2), CX
350,352c232,235
< #ifdef GOAMD64_v3
< MOVBQZX AL, CX
< SHLXQ AX, br_value, br_value // value <<= n
---
> // br3.advance(uint8(v0.entry))
> MOVB CH, AL
> SHLQ CL, R12
> ADDB CL, R13
354,356c237,240
< #else
< MOVBQZX AL, CX
< SHLQ CL, br_value // value <<= n
---
> // val1 := br3.peekTopBits(peekBits)
> MOVQ DI, CX
> MOVQ R12, R14
> SHRQ CL, R14
358c242,243
< #endif
---
> // v1 := table[val1&mask]
> MOVW (R10)(R14*2), CX
360c245,248
< ADDQ CX, br_bits_read // bits_read += n
---
> // br3.advance(uint8(v1.entry))
> MOVB CH, AH
> SHLQ CL, R12
> ADDB CL, R13
363,388c251,265
< // buf[stream][off] = uint8(v0.entry >> 8)
< // buf[stream][off+1] = uint8(v1.entry >> 8)
< MOVW BX, 512(buffer)(off*1)
<
< // update the bitreader structure
< MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
< MOVQ br_value, bitReaderShifted_value(br2)
< MOVQ br_offset, bitReaderShifted_off(br2)
<
< // const stream = 3
< // br3.fillFast()
< MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
< MOVQ bitReaderShifted_value(br3), br_value
< MOVQ bitReaderShifted_off(br3), br_offset
<
< // We must have at least 2 * max tablelog left
< CMPQ br_bits_read, $64-22
< JBE skip_fill3
<
< SUBQ $32, br_bits_read // b.bitsRead -= 32
< SUBQ $4, br_offset // b.off -= 4
<
< // v := b.in[b.off-4 : b.off]
< // v = v[:4]
< // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
< MOVQ bitReaderShifted_in(br3), AX
---
> // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
> // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
> MOVW AX, (R8)
>
> // update the bitreader structure
> MOVQ R12, 176(R11)
> MOVB R13, 184(R11)
> ADDQ $0x02, SI
> TESTB DL, DL
> JZ main_loop
> MOVQ ctx+0(FP), AX
> SUBQ 16(AX), SI
> SHLQ $0x02, SI
> MOVQ SI, 40(AX)
> RET
390,392c267,278
< // b.value |= uint64(low) << (b.bitsRead & 63)
< #ifdef GOAMD64_v3
< SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
---
> // func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
> TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8
> XORQ DX, DX
>
> // Preload values
> MOVQ ctx+0(FP), CX
> MOVBQZX 8(CX), DI
> MOVQ 16(CX), BX
> MOVQ 48(CX), SI
> MOVQ 24(CX), R9
> MOVQ 32(CX), R10
> MOVQ (CX), R11
394,397c280,294
< #else
< MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
< MOVQ br_bits_read, CX
< SHLQ CL, AX
---
> // Main loop
> main_loop:
> MOVQ BX, R8
> CMPQ R8, SI
> SETGE DL
>
> // br0.fillFast32()
> MOVQ 32(R11), R12
> MOVBQZX 40(R11), R13
> CMPQ R13, $0x20
> JBE skip_fill0
> MOVQ 24(R11), R14
> SUBQ $0x20, R13
> SUBQ $0x04, R14
> MOVQ (R11), R15
399c296,301
< #endif
---
> // b.value |= uint64(low) << (b.bitsRead & 63)
> MOVL (R14)(R15*1), R15
> MOVQ R13, CX
> SHLQ CL, R15
> MOVQ R14, 24(R11)
> ORQ R15, R12
401c303,306
< ORQ AX, br_value
---
> // exhausted = exhausted || (br0.off < 4)
> CMPQ R14, $0x04
> SETLT AL
> ORB AL, DL
403,406c308,312
< // exhausted = exhausted || (br3.off < 4)
< CMPQ br_offset, $4
< SETLT DL
< ORB DL, DH
---
> skip_fill0:
> // val0 := br0.peekTopBits(peekBits)
> MOVQ R12, R14
> MOVQ DI, CX
> SHRQ CL, R14
408,409c314,315
< // }
< skip_fill3:
---
> // v0 := table[val0&mask]
> MOVW (R10)(R14*2), CX
411,413c317,320
< // val0 := br3.peekTopBits(peekBits)
< #ifdef GOAMD64_v3
< SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
---
> // br0.advance(uint8(v0.entry))
> MOVB CH, AL
> SHLQ CL, R12
> ADDB CL, R13
415,418c322,383
< #else
< MOVQ br_value, AX
< MOVQ peek_bits, CX
< SHRQ CL, AX // AX = (value >> peek_bits) & mask
---
> // val1 := br0.peekTopBits(peekBits)
> MOVQ R12, R14
> MOVQ DI, CX
> SHRQ CL, R14
>
> // v1 := table[val1&mask]
> MOVW (R10)(R14*2), CX
>
> // br0.advance(uint8(v1.entry))
> MOVB CH, AH
> SHLQ CL, R12
> ADDB CL, R13
> BSWAPL AX
>
> // val2 := br0.peekTopBits(peekBits)
> MOVQ R12, R14
> MOVQ DI, CX
> SHRQ CL, R14
>
> // v2 := table[val2&mask]
> MOVW (R10)(R14*2), CX
>
> // br0.advance(uint8(v2.entry))
> MOVB CH, AH
> SHLQ CL, R12
> ADDB CL, R13
>
> // val3 := br0.peekTopBits(peekBits)
> MOVQ R12, R14
> MOVQ DI, CX
> SHRQ CL, R14
>
> // v3 := table[val3&mask]
> MOVW (R10)(R14*2), CX
>
> // br0.advance(uint8(v3.entry))
> MOVB CH, AL
> SHLQ CL, R12
> ADDB CL, R13
> BSWAPL AX
>
> // these four writes get coalesced
> // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
> // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
> // out[id * dstEvery + 2] = uint8(v2.entry >> 8)
> // out[id * dstEvery + 3] = uint8(v3.entry >> 8)
> MOVL AX, (R8)
>
> // update the bitreader structure
> MOVQ R12, 32(R11)
> MOVB R13, 40(R11)
> ADDQ R9, R8
>
> // br1.fillFast32()
> MOVQ 80(R11), R12
> MOVBQZX 88(R11), R13
> CMPQ R13, $0x20
> JBE skip_fill1
> MOVQ 72(R11), R14
> SUBQ $0x20, R13
> SUBQ $0x04, R14
> MOVQ 48(R11), R15
420c385,390
< #endif
---
> // b.value |= uint64(low) << (b.bitsRead & 63)
> MOVL (R14)(R15*1), R15
> MOVQ R13, CX
> SHLQ CL, R15
> MOVQ R14, 72(R11)
> ORQ R15, R12
422,423c392,395
< // v0 := table[val0&mask]
< MOVW 0(table)(AX*2), AX // AX - v0
---
> // exhausted = exhausted || (br1.off < 4)
> CMPQ R14, $0x04
> SETLT AL
> ORB AL, DL
425,426c397,401
< // br3.advance(uint8(v0.entry))
< MOVB AH, BL // BL = uint8(v0.entry >> 8)
---
> skip_fill1:
> // val0 := br1.peekTopBits(peekBits)
> MOVQ R12, R14
> MOVQ DI, CX
> SHRQ CL, R14
428,430c403,404
< #ifdef GOAMD64_v3
< MOVBQZX AL, CX
< SHLXQ AX, br_value, br_value // value <<= n
---
> // v0 := table[val0&mask]
> MOVW (R10)(R14*2), CX
432,434c406,409
< #else
< MOVBQZX AL, CX
< SHLQ CL, br_value // value <<= n
---
> // br1.advance(uint8(v0.entry))
> MOVB CH, AL
> SHLQ CL, R12
> ADDB CL, R13
436c411,472
< #endif
---
> // val1 := br1.peekTopBits(peekBits)
> MOVQ R12, R14
> MOVQ DI, CX
> SHRQ CL, R14
>
> // v1 := table[val1&mask]
> MOVW (R10)(R14*2), CX
>
> // br1.advance(uint8(v1.entry))
> MOVB CH, AH
> SHLQ CL, R12
> ADDB CL, R13
> BSWAPL AX
>
> // val2 := br1.peekTopBits(peekBits)
> MOVQ R12, R14
> MOVQ DI, CX
> SHRQ CL, R14
>
> // v2 := table[val2&mask]
> MOVW (R10)(R14*2), CX
>
> // br1.advance(uint8(v2.entry))
> MOVB CH, AH
> SHLQ CL, R12
> ADDB CL, R13
>
> // val3 := br1.peekTopBits(peekBits)
> MOVQ R12, R14
> MOVQ DI, CX
> SHRQ CL, R14
>
> // v3 := table[val3&mask]
> MOVW (R10)(R14*2), CX
>
> // br1.advance(uint8(v3.entry))
> MOVB CH, AL
> SHLQ CL, R12
> ADDB CL, R13
> BSWAPL AX
>
> // these four writes get coalesced
> // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
> // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
> // out[id * dstEvery + 2] = uint8(v2.entry >> 8)
> // out[id * dstEvery + 3] = uint8(v3.entry >> 8)
> MOVL AX, (R8)
>
> // update the bitreader structure
> MOVQ R12, 80(R11)
> MOVB R13, 88(R11)
> ADDQ R9, R8
>
> // br2.fillFast32()
> MOVQ 128(R11), R12
> MOVBQZX 136(R11), R13
> CMPQ R13, $0x20
> JBE skip_fill2
> MOVQ 120(R11), R14
> SUBQ $0x20, R13
> SUBQ $0x04, R14
> MOVQ 96(R11), R15
438c474,479
< ADDQ CX, br_bits_read // bits_read += n
---
> // b.value |= uint64(low) << (b.bitsRead & 63)
> MOVL (R14)(R15*1), R15
> MOVQ R13, CX
> SHLQ CL, R15
> MOVQ R14, 120(R11)
> ORQ R15, R12
440,441c481,484
< #ifdef GOAMD64_v3
< SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
---
> // exhausted = exhausted || (br2.off < 4)
> CMPQ R14, $0x04
> SETLT AL
> ORB AL, DL
443,447c486,490
< #else
< // val1 := br3.peekTopBits(peekBits)
< MOVQ peek_bits, CX
< MOVQ br_value, AX
< SHRQ CL, AX // AX = (value >> peek_bits) & mask
---
> skip_fill2:
> // val0 := br2.peekTopBits(peekBits)
> MOVQ R12, R14
> MOVQ DI, CX
> SHRQ CL, R14
449c492,493
< #endif
---
> // v0 := table[val0&mask]
> MOVW (R10)(R14*2), CX
451,452c495,498
< // v1 := table[val1&mask]
< MOVW 0(table)(AX*2), AX // AX - v1
---
> // br2.advance(uint8(v0.entry))
> MOVB CH, AL
> SHLQ CL, R12
> ADDB CL, R13
454,455c500,561
< // br3.advance(uint8(v1.entry))
< MOVB AH, BH // BH = uint8(v1.entry >> 8)
---
> // val1 := br2.peekTopBits(peekBits)
> MOVQ R12, R14
> MOVQ DI, CX
> SHRQ CL, R14
>
> // v1 := table[val1&mask]
> MOVW (R10)(R14*2), CX
>
> // br2.advance(uint8(v1.entry))
> MOVB CH, AH
> SHLQ CL, R12
> ADDB CL, R13
> BSWAPL AX
>
> // val2 := br2.peekTopBits(peekBits)
> MOVQ R12, R14
> MOVQ DI, CX
> SHRQ CL, R14
>
> // v2 := table[val2&mask]
> MOVW (R10)(R14*2), CX
>
> // br2.advance(uint8(v2.entry))
> MOVB CH, AH
> SHLQ CL, R12
> ADDB CL, R13
>
> // val3 := br2.peekTopBits(peekBits)
> MOVQ R12, R14
> MOVQ DI, CX
> SHRQ CL, R14
>
> // v3 := table[val3&mask]
> MOVW (R10)(R14*2), CX
>
> // br2.advance(uint8(v3.entry))
> MOVB CH, AL
> SHLQ CL, R12
> ADDB CL, R13
> BSWAPL AX
>
> // these four writes get coalesced
> // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
> // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
> // out[id * dstEvery + 2] = uint8(v2.entry >> 8)
> // out[id * dstEvery + 3] = uint8(v3.entry >> 8)
> MOVL AX, (R8)
>
> // update the bitreader structure
> MOVQ R12, 128(R11)
> MOVB R13, 136(R11)
> ADDQ R9, R8
>
> // br3.fillFast32()
> MOVQ 176(R11), R12
> MOVBQZX 184(R11), R13
> CMPQ R13, $0x20
> JBE skip_fill3
> MOVQ 168(R11), R14
> SUBQ $0x20, R13
> SUBQ $0x04, R14
> MOVQ 144(R11), R15
457,459c563,568
< #ifdef GOAMD64_v3
< MOVBQZX AL, CX
< SHLXQ AX, br_value, br_value // value <<= n
---
> // b.value |= uint64(low) << (b.bitsRead & 63)
> MOVL (R14)(R15*1), R15
> MOVQ R13, CX
> SHLQ CL, R15
> MOVQ R14, 168(R11)
> ORQ R15, R12
461,463c570,573
< #else
< MOVBQZX AL, CX
< SHLQ CL, br_value // value <<= n
---
> // exhausted = exhausted || (br3.off < 4)
> CMPQ R14, $0x04
> SETLT AL
> ORB AL, DL
465c575,579
< #endif
---
> skip_fill3:
> // val0 := br3.peekTopBits(peekBits)
> MOVQ R12, R14
> MOVQ DI, CX
> SHRQ CL, R14
467c581,582
< ADDQ CX, br_bits_read // bits_read += n
---
> // v0 := table[val0&mask]
> MOVW (R10)(R14*2), CX
469,472c584,587
< // these two writes get coalesced
< // buf[stream][off] = uint8(v0.entry >> 8)
< // buf[stream][off+1] = uint8(v1.entry >> 8)
< MOVW BX, 768(buffer)(off*1)
---
> // br3.advance(uint8(v0.entry))
> MOVB CH, AL
> SHLQ CL, R12
> ADDB CL, R13
474,477c589,647
< // update the bitreader structure
< MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
< MOVQ br_value, bitReaderShifted_value(br3)
< MOVQ br_offset, bitReaderShifted_off(br3)
---
> // val1 := br3.peekTopBits(peekBits)
> MOVQ R12, R14
> MOVQ DI, CX
> SHRQ CL, R14
>
> // v1 := table[val1&mask]
> MOVW (R10)(R14*2), CX
>
> // br3.advance(uint8(v1.entry))
> MOVB CH, AH
> SHLQ CL, R12
> ADDB CL, R13
> BSWAPL AX
>
> // val2 := br3.peekTopBits(peekBits)
> MOVQ R12, R14
> MOVQ DI, CX
> SHRQ CL, R14
>
> // v2 := table[val2&mask]
> MOVW (R10)(R14*2), CX
>
> // br3.advance(uint8(v2.entry))
> MOVB CH, AH
> SHLQ CL, R12
> ADDB CL, R13
>
> // val3 := br3.peekTopBits(peekBits)
> MOVQ R12, R14
> MOVQ DI, CX
> SHRQ CL, R14
>
> // v3 := table[val3&mask]
> MOVW (R10)(R14*2), CX
>
> // br3.advance(uint8(v3.entry))
> MOVB CH, AL
> SHLQ CL, R12
> ADDB CL, R13
> BSWAPL AX
>
> // these four writes get coalesced
> // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
> // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
> // out[id * dstEvery + 2] = uint8(v2.entry >> 8)
> // out[id * dstEvery + 3] = uint8(v3.entry >> 8)
> MOVL AX, (R8)
>
> // update the bitreader structure
> MOVQ R12, 176(R11)
> MOVB R13, 184(R11)
> ADDQ $0x04, BX
> TESTB DL, DL
> JZ main_loop
> MOVQ ctx+0(FP), AX
> SUBQ 16(AX), BX
> SHLQ $0x02, BX
> MOVQ BX, 40(AX)
> RET
479c649,664
< ADDQ $2, off // off += 2
---
> // func decompress1x_main_loop_amd64(ctx *decompress1xContext)
> TEXT ·decompress1x_main_loop_amd64(SB), $0-8
> MOVQ ctx+0(FP), CX
> MOVQ 16(CX), DX
> MOVQ 24(CX), BX
> CMPQ BX, $0x04
> JB error_max_decoded_size_exeeded
> LEAQ (DX)(BX*1), BX
> MOVQ (CX), SI
> MOVQ (SI), R8
> MOVQ 24(SI), R9
> MOVQ 32(SI), R10
> MOVBQZX 40(SI), R11
> MOVQ 32(CX), SI
> MOVBQZX 8(CX), DI
> JMP loop_condition
481,482c666,744
< TESTB DH, DH // any br[i].ofs < 4?
< JNZ end
---
> main_loop:
> // Check if we have room for 4 bytes in the output buffer
> LEAQ 4(DX), CX
> CMPQ CX, BX
> JGE error_max_decoded_size_exeeded
>
> // Decode 4 values
> CMPQ R11, $0x20
> JL bitReader_fillFast_1_end
> SUBQ $0x20, R11
> SUBQ $0x04, R9
> MOVL (R8)(R9*1), R12
> MOVQ R11, CX
> SHLQ CL, R12
> ORQ R12, R10
>
> bitReader_fillFast_1_end:
> MOVQ DI, CX
> MOVQ R10, R12
> SHRQ CL, R12
> MOVW (SI)(R12*2), CX
> MOVB CH, AL
> MOVBQZX CL, CX
> ADDQ CX, R11
> SHLQ CL, R10
> MOVQ DI, CX
> MOVQ R10, R12
> SHRQ CL, R12
> MOVW (SI)(R12*2), CX
> MOVB CH, AH
> MOVBQZX CL, CX
> ADDQ CX, R11
> SHLQ CL, R10
> BSWAPL AX
> CMPQ R11, $0x20
> JL bitReader_fillFast_2_end
> SUBQ $0x20, R11
> SUBQ $0x04, R9
> MOVL (R8)(R9*1), R12
> MOVQ R11, CX
> SHLQ CL, R12
> ORQ R12, R10
>
> bitReader_fillFast_2_end:
> MOVQ DI, CX
> MOVQ R10, R12
> SHRQ CL, R12
> MOVW (SI)(R12*2), CX
> MOVB CH, AH
> MOVBQZX CL, CX
> ADDQ CX, R11
> SHLQ CL, R10
> MOVQ DI, CX
> MOVQ R10, R12
> SHRQ CL, R12
> MOVW (SI)(R12*2), CX
> MOVB CH, AL
> MOVBQZX CL, CX
> ADDQ CX, R11
> SHLQ CL, R10
> BSWAPL AX
>
> // Store the decoded values
> MOVL AX, (DX)
> ADDQ $0x04, DX
>
> loop_condition:
> CMPQ R9, $0x08
> JGE main_loop
>
> // Update ctx structure
> MOVQ ctx+0(FP), AX
> SUBQ 16(AX), DX
> MOVQ DX, 40(AX)
> MOVQ (AX), AX
> MOVQ R9, 24(AX)
> MOVQ R10, 32(AX)
> MOVB R11, 40(AX)
> RET
484,485c746,751
< CMPQ off, $bufoff
< JL main_loop
---
> // Report error
> error_max_decoded_size_exeeded:
> MOVQ ctx+0(FP), AX
> MOVQ $-1, CX
> MOVQ CX, 40(AX)
> RET
487,488c753,769
< end:
< MOVQ 0(SP), BP
---
> // func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
> // Requires: BMI2
> TEXT ·decompress1x_main_loop_bmi2(SB), $0-8
> MOVQ ctx+0(FP), CX
> MOVQ 16(CX), DX
> MOVQ 24(CX), BX
> CMPQ BX, $0x04
> JB error_max_decoded_size_exeeded
> LEAQ (DX)(BX*1), BX
> MOVQ (CX), SI
> MOVQ (SI), R8
> MOVQ 24(SI), R9
> MOVQ 32(SI), R10
> MOVBQZX 40(SI), R11
> MOVQ 32(CX), SI
> MOVBQZX 8(CX), DI
> JMP loop_condition
490c771,838
< MOVB off, ret+56(FP)
---
> main_loop:
> // Check if we have room for 4 bytes in the output buffer
> LEAQ 4(DX), CX
> CMPQ CX, BX
> JGE error_max_decoded_size_exeeded
>
> // Decode 4 values
> CMPQ R11, $0x20
> JL bitReader_fillFast_1_end
> SUBQ $0x20, R11
> SUBQ $0x04, R9
> MOVL (R8)(R9*1), CX
> SHLXQ R11, CX, CX
> ORQ CX, R10
>
> bitReader_fillFast_1_end:
> SHRXQ DI, R10, CX
> MOVW (SI)(CX*2), CX
> MOVB CH, AL
> MOVBQZX CL, CX
> ADDQ CX, R11
> SHLXQ CX, R10, R10
> SHRXQ DI, R10, CX
> MOVW (SI)(CX*2), CX
> MOVB CH, AH
> MOVBQZX CL, CX
> ADDQ CX, R11
> SHLXQ CX, R10, R10
> BSWAPL AX
> CMPQ R11, $0x20
> JL bitReader_fillFast_2_end
> SUBQ $0x20, R11
> SUBQ $0x04, R9
> MOVL (R8)(R9*1), CX
> SHLXQ R11, CX, CX
> ORQ CX, R10
>
> bitReader_fillFast_2_end:
> SHRXQ DI, R10, CX
> MOVW (SI)(CX*2), CX
> MOVB CH, AH
> MOVBQZX CL, CX
> ADDQ CX, R11
> SHLXQ CX, R10, R10
> SHRXQ DI, R10, CX
> MOVW (SI)(CX*2), CX
> MOVB CH, AL
> MOVBQZX CL, CX
> ADDQ CX, R11
> SHLXQ CX, R10, R10
> BSWAPL AX
>
> // Store the decoded values
> MOVL AX, (DX)
> ADDQ $0x04, DX
>
> loop_condition:
> CMPQ R9, $0x08
> JGE main_loop
>
> // Update ctx structure
> MOVQ ctx+0(FP), AX
> SUBQ 16(AX), DX
> MOVQ DX, 40(AX)
> MOVQ (AX), AX
> MOVQ R9, 24(AX)
> MOVQ R10, 32(AX)
> MOVB R11, 40(AX)
493,506c841,846
< #undef off
< #undef buffer
< #undef table
<
< #undef br_bits_read
< #undef br_value
< #undef br_offset
< #undef peek_bits
< #undef exhausted
<
< #undef br0
< #undef br1
< #undef br2
< #undef br3
---
> // Report error
> error_max_decoded_size_exeeded:
> MOVQ ctx+0(FP), AX
> MOVQ $-1, CX
> MOVQ CX, 40(AX)
> RET
Only in a/vendor/github.com/klauspost/compress/huff0: decompress_amd64.s.in
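
Every stream in the regenerated loops above repeats one rhythm: refill the bit container, peek the top peekBits bits, look up the table entry, then shift and advance. A Go rendering of the refill step, using the field layout the old asm comments describe (a sketch, not the package's code verbatim; the asm skips the refill while bitsRead <= 32):

    package sketch

    type bitReaderShifted struct {
        in       []byte
        off      uint   // next read position, counting back from the end of in
        value    uint64 // bits are consumed from the top of this container
        bitsRead uint8
    }

    // fillFast32 tops the container back up with 32 bits from the tail of in.
    func (b *bitReaderShifted) fillFast32() {
        if b.bitsRead < 32 {
            return // still holding at least 32 fresh bits
        }
        b.bitsRead -= 32
        b.off -= 4
        // low := little-endian uint32 at b.in[b.off:]
        low := uint32(b.in[b.off]) | uint32(b.in[b.off+1])<<8 |
            uint32(b.in[b.off+2])<<16 | uint32(b.in[b.off+3])<<24
        b.value |= uint64(low) << (b.bitsRead & 63) // same formula as the asm comment
    }
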
diff -r --color a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
125,130d124
< copy(out, buf[0][:])
< copy(out[dstEvery:], buf[1][:])
< copy(out[dstEvery*2:], buf[2][:])
< copy(out[dstEvery*3:], buf[3][:])
< out = out[bufoff:]
< decoded += bufoff * 4
132c126
< if len(out) < dstEvery*3 {
---
> if len(out)-bufoff < dstEvery*3 {
135a130,139
> //copy(out, buf[0][:])
> //copy(out[dstEvery:], buf[1][:])
> //copy(out[dstEvery*2:], buf[2][:])
> //copy(out[dstEvery*3:], buf[3][:])
> *(*[bufoff]byte)(out) = buf[0]
> *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
> *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
> *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
> out = out[bufoff:]
> decoded += bufoff * 4
192a197,298
> }
>
> // Decompress1X will decompress a 1X encoded stream.
> // The cap of the output buffer will be the maximum decompressed size.
> // The length of the supplied input must match the end of a block exactly.
> func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
> if len(d.dt.single) == 0 {
> return nil, errors.New("no table loaded")
> }
> if use8BitTables && d.actualTableLog <= 8 {
> return d.decompress1X8Bit(dst, src)
> }
> var br bitReaderShifted
> err := br.init(src)
> if err != nil {
> return dst, err
> }
> maxDecodedSize := cap(dst)
> dst = dst[:0]
>
> // Avoid bounds check by always having full sized table.
> const tlSize = 1 << tableLogMax
> const tlMask = tlSize - 1
> dt := d.dt.single[:tlSize]
>
> // Use temp table to avoid bound checks/append penalty.
> bufs := d.buffer()
> buf := &bufs[0]
> var off uint8
>
> for br.off >= 8 {
> br.fillFast()
> v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
> br.advance(uint8(v.entry))
> buf[off+0] = uint8(v.entry >> 8)
>
> v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
> br.advance(uint8(v.entry))
> buf[off+1] = uint8(v.entry >> 8)
>
> // Refill
> br.fillFast()
>
> v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
> br.advance(uint8(v.entry))
> buf[off+2] = uint8(v.entry >> 8)
>
> v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
> br.advance(uint8(v.entry))
> buf[off+3] = uint8(v.entry >> 8)
>
> off += 4
> if off == 0 {
> if len(dst)+256 > maxDecodedSize {
> br.close()
> d.bufs.Put(bufs)
> return nil, ErrMaxDecodedSizeExceeded
> }
> dst = append(dst, buf[:]...)
> }
> }
>
> if len(dst)+int(off) > maxDecodedSize {
> d.bufs.Put(bufs)
> br.close()
> return nil, ErrMaxDecodedSizeExceeded
> }
> dst = append(dst, buf[:off]...)
>
> // br < 8, so uint8 is fine
> bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
> for bitsLeft > 0 {
> br.fill()
> if false && br.bitsRead >= 32 {
> if br.off >= 4 {
> v := br.in[br.off-4:]
> v = v[:4]
> low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
> br.value = (br.value << 32) | uint64(low)
> br.bitsRead -= 32
> br.off -= 4
> } else {
> for br.off > 0 {
> br.value = (br.value << 8) | uint64(br.in[br.off-1])
> br.bitsRead -= 8
> br.off--
> }
> }
> }
> if len(dst) >= maxDecodedSize {
> d.bufs.Put(bufs)
> br.close()
> return nil, ErrMaxDecodedSizeExceeded
> }
> v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
> nBits := uint8(v.entry)
> br.advance(nBits)
> bitsLeft -= nBits
> dst = append(dst, uint8(v.entry>>8))
> }
> d.bufs.Put(bufs)
> return dst, br.close()
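
The copy() to *(*[bufoff]byte)(out) rewrite above leans on Go 1.17's slice-to-array-pointer conversion: one length check up front, then a fixed-size 256-byte move the compiler can optimize freely. The idiom in isolation:

    package main

    import "fmt"

    func main() {
        buf := [4]byte{1, 2, 3, 4}
        out := make([]byte, 8)
        // (*[4]byte)(out) panics if len(out) < 4; dereferencing and
        // assigning copies the whole array in one fixed-size move.
        *(*[4]byte)(out) = buf
        fmt.Println(out) // [1 2 3 4 0 0 0 0]
    }
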
diff -r --color a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
14d13
< double []dEntryDouble
22,28d20
< // double-symbols decoding
< type dEntryDouble struct {
< seq [4]byte
< nBits uint8
< len uint8
< }
<
38c30
< s, err = s.prepare(in)
---
> s, err = s.prepare(nil)
239,340d230
< // Decompress1X will decompress a 1X encoded stream.
< // The cap of the output buffer will be the maximum decompressed size.
< // The length of the supplied input must match the end of a block exactly.
< func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
< if len(d.dt.single) == 0 {
< return nil, errors.New("no table loaded")
< }
< if use8BitTables && d.actualTableLog <= 8 {
< return d.decompress1X8Bit(dst, src)
< }
< var br bitReaderShifted
< err := br.init(src)
< if err != nil {
< return dst, err
< }
< maxDecodedSize := cap(dst)
< dst = dst[:0]
<
< // Avoid bounds check by always having full sized table.
< const tlSize = 1 << tableLogMax
< const tlMask = tlSize - 1
< dt := d.dt.single[:tlSize]
<
< // Use temp table to avoid bound checks/append penalty.
< bufs := d.buffer()
< buf := &bufs[0]
< var off uint8
<
< for br.off >= 8 {
< br.fillFast()
< v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
< br.advance(uint8(v.entry))
< buf[off+0] = uint8(v.entry >> 8)
<
< v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
< br.advance(uint8(v.entry))
< buf[off+1] = uint8(v.entry >> 8)
<
< // Refill
< br.fillFast()
<
< v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
< br.advance(uint8(v.entry))
< buf[off+2] = uint8(v.entry >> 8)
<
< v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
< br.advance(uint8(v.entry))
< buf[off+3] = uint8(v.entry >> 8)
<
< off += 4
< if off == 0 {
< if len(dst)+256 > maxDecodedSize {
< br.close()
< d.bufs.Put(bufs)
< return nil, ErrMaxDecodedSizeExceeded
< }
< dst = append(dst, buf[:]...)
< }
< }
<
< if len(dst)+int(off) > maxDecodedSize {
< d.bufs.Put(bufs)
< br.close()
< return nil, ErrMaxDecodedSizeExceeded
< }
< dst = append(dst, buf[:off]...)
<
< // br < 8, so uint8 is fine
< bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
< for bitsLeft > 0 {
< br.fill()
< if false && br.bitsRead >= 32 {
< if br.off >= 4 {
< v := br.in[br.off-4:]
< v = v[:4]
< low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
< br.value = (br.value << 32) | uint64(low)
< br.bitsRead -= 32
< br.off -= 4
< } else {
< for br.off > 0 {
< br.value = (br.value << 8) | uint64(br.in[br.off-1])
< br.bitsRead -= 8
< br.off--
< }
< }
< }
< if len(dst) >= maxDecodedSize {
< d.bufs.Put(bufs)
< br.close()
< return nil, ErrMaxDecodedSizeExceeded
< }
< v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
< nBits := uint8(v.entry)
< br.advance(nBits)
< bitsLeft -= nBits
< dst = append(dst, uint8(v.entry>>8))
< }
< d.bufs.Put(bufs)
< return dst, br.close()
< }
<
876,881d765
< copy(out, buf[0][:])
< copy(out[dstEvery:], buf[1][:])
< copy(out[dstEvery*2:], buf[2][:])
< copy(out[dstEvery*3:], buf[3][:])
< out = out[bufoff:]
< decoded += bufoff * 4
883c767
< if len(out) < dstEvery*3 {
---
> if len(out)-bufoff < dstEvery*3 {
886a771,779
> //copy(out, buf[0][:])
> //copy(out[dstEvery:], buf[1][:])
> //copy(out[dstEvery*2:], buf[2][:])
> *(*[bufoff]byte)(out) = buf[0]
> *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
> *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
> *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
> out = out[bufoff:]
> decoded += bufoff * 4
998d890
< const tlMask = tlSize - 1
1111,1116d1002
< copy(out, buf[0][:])
< copy(out[dstEvery:], buf[1][:])
< copy(out[dstEvery*2:], buf[2][:])
< copy(out[dstEvery*3:], buf[3][:])
< out = out[bufoff:]
< decoded += bufoff * 4
1118c1004
< if len(out) < dstEvery*3 {
---
> if len(out)-bufoff < dstEvery*3 {
1121a1008,1018
>
> //copy(out, buf[0][:])
> //copy(out[dstEvery:], buf[1][:])
> //copy(out[dstEvery*2:], buf[2][:])
> // copy(out[dstEvery*3:], buf[3][:])
> *(*[bufoff]byte)(out) = buf[0]
> *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
> *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
> *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
> out = out[bufoff:]
> decoded += bufoff * 4
Only in b/vendor/github.com/klauspost/compress/internal: cpuinfo
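
The new internal/cpuinfo package is added wholesale, so its contents don't appear in this diff; decompress_amd64.go above calls cpuinfo.HasBMI2() to choose between the two 1X loops. For reference only, an equivalent check with the public golang.org/x/sys/cpu package (the vendored helper itself is not shown here):

    package sketch

    import "golang.org/x/sys/cpu"

    // pickLoop mirrors the dispatch above: BMI2 provides SHLX/SHRX, which
    // the bmi2 loop uses in place of CL-register shifts.
    func pickLoop() string {
        if cpu.X86.HasBMI2 {
            return "decompress1x_main_loop_bmi2"
        }
        return "decompress1x_main_loop_amd64"
    }
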
diff -r --color a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
20a21
> //
44a46
> //
91a94
> //
107a111
> //
109c113
< // minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
---
> // minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
diff -r --color a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
19a20,91
> * July 21, 2022 (v1.15.9)
>
> * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
> * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
> * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643
>
> * July 13, 2022 (v1.15.8)
>
> * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641
> * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638
> * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636
> * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637
> * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634
> * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640
> * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639
>
> * June 29, 2022 (v1.15.7)
>
> * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633
> * zip: Merge upstream https://github.com/klauspost/compress/pull/631
> * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624
> * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598
> * flate: Faster histograms https://github.com/klauspost/compress/pull/620
> * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622
>
> * June 3, 2022 (v1.15.6)
> * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613
> * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611
> * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605
> * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606
> * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608
> * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612
> * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609
> * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607
> * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614
> * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610
>
> * May 25, 2022 (v1.15.5)
> * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602
> * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601
> * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596
> * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588
> * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592
> * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599
> * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593
> * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
> * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
>
>
> * May 11, 2022 (v1.15.4)
> * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577)
> * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581)
> * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583)
> * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580)
>
> * May 5, 2022 (v1.15.3)
> * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572)
> * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575)
>
> * Apr 26, 2022 (v1.15.2)
> * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537)
> * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539)
> * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555)
> * Minimum version is Go 1.16, added CI test on 1.18.
>
> * Mar 11, 2022 (v1.15.1)
> * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
> * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
> * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
> * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
> * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
>
62a135,137
> <details>
> <summary>See changes to v1.13.x</summary>
>
90a166,167
> </details>
>
diff -r --color a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
66,72d65
< func (b *bitReader) get16BitsFast(n uint8) uint16 {
< const regMask = 64 - 1
< v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
< b.bitsRead += n
< return v
< }
<
diff -r --color a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
8,9d7
< import "fmt"
<
74,147d71
< }
<
< // flush will flush all pending full bytes.
< // There will be at least 56 bits available for writing when this has been called.
< // Using flush32 is faster, but leaves less space for writing.
< func (b *bitWriter) flush() {
< v := b.nBits >> 3
< switch v {
< case 0:
< case 1:
< b.out = append(b.out,
< byte(b.bitContainer),
< )
< case 2:
< b.out = append(b.out,
< byte(b.bitContainer),
< byte(b.bitContainer>>8),
< )
< case 3:
< b.out = append(b.out,
< byte(b.bitContainer),
< byte(b.bitContainer>>8),
< byte(b.bitContainer>>16),
< )
< case 4:
< b.out = append(b.out,
< byte(b.bitContainer),
< byte(b.bitContainer>>8),
< byte(b.bitContainer>>16),
< byte(b.bitContainer>>24),
< )
< case 5:
< b.out = append(b.out,
< byte(b.bitContainer),
< byte(b.bitContainer>>8),
< byte(b.bitContainer>>16),
< byte(b.bitContainer>>24),
< byte(b.bitContainer>>32),
< )
< case 6:
< b.out = append(b.out,
< byte(b.bitContainer),
< byte(b.bitContainer>>8),
< byte(b.bitContainer>>16),
< byte(b.bitContainer>>24),
< byte(b.bitContainer>>32),
< byte(b.bitContainer>>40),
< )
< case 7:
< b.out = append(b.out,
< byte(b.bitContainer),
< byte(b.bitContainer>>8),
< byte(b.bitContainer>>16),
< byte(b.bitContainer>>24),
< byte(b.bitContainer>>32),
< byte(b.bitContainer>>40),
< byte(b.bitContainer>>48),
< )
< case 8:
< b.out = append(b.out,
< byte(b.bitContainer),
< byte(b.bitContainer>>8),
< byte(b.bitContainer>>16),
< byte(b.bitContainer>>24),
< byte(b.bitContainer>>32),
< byte(b.bitContainer>>40),
< byte(b.bitContainer>>48),
< byte(b.bitContainer>>56),
< )
< default:
< panic(fmt.Errorf("bits (%d) > 64", b.nBits))
< }
< b.bitContainer >>= v << 3
< b.nBits &= 7
diff -r --color a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
7a8,9
> "bytes"
> "encoding/binary"
10a13,14
> "os"
> "path/filepath"
40a45,47
> compressedBlockOverAlloc = 16
> maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc
>
44,48c51,52
< // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header
< maxCompressedLiteralSize = 1 << 18
< maxRLELiteralSize = 1 << 20
< maxMatchLen = 131074
< maxSequences = 0x7f00 + 0xffff
---
> maxMatchLen = 131074
> maxSequences = 0x7f00 + 0xffff
100d103
< tmp [4]byte
139c142
< maxSize := maxBlockSize
---
> maxSize := maxCompressedBlockSizeAlloc
160c163
< maxSize = maxCompressedBlockSize
---
> maxSize = maxCompressedBlockSizeAlloc
162c165
< maxSize = int(windowSize)
---
> maxSize = int(windowSize) + compressedBlockOverAlloc
193c196
< b.dataStorage = make([]byte, 0, cSize)
---
> b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
195c198
< b.dataStorage = make([]byte, 0, maxCompressedBlockSize)
---
> b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
363c366
< b.literalBuf = make([]byte, litRegenSize)
---
> b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc)
365,370c368
< if litRegenSize > maxCompressedLiteralSize {
< // Exceptional
< b.literalBuf = make([]byte, litRegenSize)
< } else {
< b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize)
< }
---
> b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc)
400c398
< b.literalBuf = make([]byte, 0, litRegenSize)
---
> b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
402c400
< b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
---
> b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
407c405
< huff.MaxDecodedSize = maxCompressedBlockSize
---
> huff.MaxDecodedSize = litRegenSize
432c430
< b.literalBuf = make([]byte, 0, litRegenSize)
---
> b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
434c432
< b.literalBuf = make([]byte, 0, maxCompressedBlockSize)
---
> b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
451c449
< huff.MaxDecodedSize = maxCompressedBlockSize
---
> huff.MaxDecodedSize = litRegenSize
465a464,465
> // Re-cap to get extra size.
> literals = b.literalBuf[:len(literals)]
489c489,490
< err = hist.decoders.decodeSync(hist)
---
> before := len(hist.decoders.out)
> err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:])
492a494,497
> if hist.decoders.maxSyncLen > 0 {
> hist.decoders.maxSyncLen += uint64(before)
> hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out))
> }
634a640,655
> // Extract blocks...
> if false && hist.dict == nil {
> fatalErr := func(err error) {
> if err != nil {
> panic(err)
> }
> }
> fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
> var buf bytes.Buffer
> fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
> fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
> fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
> buf.Write(in)
> os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
> }
>
652a674
>
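The blockdec.go hunks above thread a new `compressedBlockOverAlloc` pad (16 bytes) through every buffer allocation, so decode loops can touch slightly past the logical end of the data without forcing a reallocation. The length stays what callers see; only the capacity hides the padding. A minimal sketch of the pattern:

```go
package main

import "fmt"

// compressedBlockOverAlloc is copied from the hunk above: spare tail
// capacity for wide loads/stores in the decode loops.
const compressedBlockOverAlloc = 16

// allocLiteralBuf mirrors the new allocation pattern.
func allocLiteralBuf(litRegenSize int) []byte {
	return make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc)
}

func main() {
	buf := allocLiteralBuf(1 << 10)
	fmt.Println(len(buf), cap(buf)) // 1024 1040
}
```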
diff -r --color a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
10d9
< "io/ioutil"
26c25
< skipN(n int) error
---
> skipN(n int64) error
55,58d53
< func (b *byteBuf) remain() []byte {
< return *b
< }
<
69c64
< func (b *byteBuf) skipN(n int) error {
---
> func (b *byteBuf) skipN(n int64) error {
71c66,69
< if len(bb) < n {
---
> if n < 0 {
> return fmt.Errorf("negative skip (%d) requested", n)
> }
> if int64(len(bb)) < n {
127,129c125,127
< func (r *readerWrapper) skipN(n int) error {
< n2, err := io.CopyN(ioutil.Discard, r.r, int64(n))
< if n2 != int64(n) {
---
> func (r *readerWrapper) skipN(n int64) error {
> n2, err := io.CopyN(io.Discard, r.r, n)
> if n2 != n {
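bytebuf.go widens `skipN` to `int64`, rejects negative skips, and swaps the deprecated `ioutil.Discard` for `io.Discard`. A self-contained sketch of the stream variant shown in the hunk:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// skipN mirrors the reworked readerWrapper.skipN: an int64 count cannot
// overflow on large skips, and io.Discard replaces ioutil.Discard.
func skipN(r io.Reader, n int64) error {
	n2, err := io.CopyN(io.Discard, r, n)
	if n2 != n {
		return io.ErrUnexpectedEOF
	}
	return err
}

func main() {
	r := bytes.NewReader(make([]byte, 8))
	fmt.Println(skipN(r, 4))  // <nil>
	fmt.Println(skipN(r, 10)) // unexpected EOF: only 4 bytes remained
}
```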
diff -r --color a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go
16,21d15
< // init will initialize the reader and set the input.
< func (b *byteReader) init(in []byte) {
< b.b = in
< b.off = 0
< }
<
diff -r --color a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
314a315
> initialSize := len(dst)
350,352c351,355
<
< if frame.FrameContentSize != fcsUnknown && frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
< return dst, ErrDecoderSizeExceeded
---
> if frame.WindowSize > d.o.maxWindowSize {
> if debugDecoder {
> println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize)
> }
> return dst, ErrWindowSizeExceeded
354,355c357,369
< if frame.FrameContentSize < 1<<30 {
< // Never preallocate more than 1 GB up front.
---
> if frame.FrameContentSize != fcsUnknown {
> if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) {
> if debugDecoder {
> println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst))
> }
> return dst, ErrDecoderSizeExceeded
> }
> if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) {
> if debugDecoder {
> println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst))
> }
> return dst, ErrDecoderSizeExceeded
> }
357c371
< dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
---
> dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc)
362c376,377
< if cap(dst) == 0 {
---
>
> if cap(dst) == 0 && !d.o.limitToCap {
379a395,397
> if uint64(len(dst)-initialSize) > d.o.maxDecodedSize {
> return dst, ErrDecoderSizeExceeded
> }
440c458
< if len(next.b) > 0 {
---
> if !d.o.ignoreChecksum && len(next.b) > 0 {
452c470
< if !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC {
---
> if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) {
536c554,556
< d.frame.crc.Write(d.current.b)
---
> if !d.o.ignoreChecksum {
> d.frame.crc.Write(d.current.b)
> }
538c558,562
< d.current.err = d.frame.checkCRC()
---
> if !d.o.ignoreChecksum {
> d.current.err = d.frame.checkCRC()
> } else {
> d.current.err = d.frame.consumeCRC()
> }
632,636c656,659
< // Spawn 4 go routines.
< // 0: Read frames and decode blocks.
< // 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree.
< // 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets.
< // 3: Wait for stream history, execute sequences, send stream history.
---
> // Spawn 3 go routines.
> // 0: Read frames and decode block literals.
> // 1: Decode sequences.
> // 2: Execute sequences, send to output.
641d663
< var seqPrepare = make(chan *blockDec, d.o.concurrent)
645,685c667
< // Async 1: Prepare blocks...
< go func() {
< var hist history
< var hasErr bool
< for block := range seqPrepare {
< if hasErr {
< if block != nil {
< seqDecode <- block
< }
< continue
< }
< if block.async.newHist != nil {
< if debugDecoder {
< println("Async 1: new history")
< }
< hist.reset()
< if block.async.newHist.dict != nil {
< hist.setDict(block.async.newHist.dict)
< }
< }
< if block.err != nil || block.Type != blockTypeCompressed {
< hasErr = block.err != nil
< seqDecode <- block
< continue
< }
<
< remain, err := block.decodeLiterals(block.data, &hist)
< block.err = err
< hasErr = block.err != nil
< if err == nil {
< block.async.literals = hist.decoders.literals
< block.async.seqData = remain
< } else if debugDecoder {
< println("decodeLiterals error:", err)
< }
< seqDecode <- block
< }
< close(seqDecode)
< }()
<
< // Async 2: Decode sequences...
---
> // Async 1: Decode sequences...
699c681
< println("Async 2: new history, recent:", block.async.newHist.recentOffsets)
---
> println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
753c735
< println("Async 3: new history")
---
> println("Async 2: new history")
839a822,848
> var hist history
> var hasErr bool
>
> decodeBlock := func(block *blockDec) {
> if hasErr {
> if block != nil {
> seqDecode <- block
> }
> return
> }
> if block.err != nil || block.Type != blockTypeCompressed {
> hasErr = block.err != nil
> seqDecode <- block
> return
> }
>
> remain, err := block.decodeLiterals(block.data, &hist)
> block.err = err
> hasErr = block.err != nil
> if err == nil {
> block.async.literals = hist.decoders.literals
> block.async.seqData = remain
> } else if debugDecoder {
> println("decodeLiterals error:", err)
> }
> seqDecode <- block
> }
858a868,871
> if debugDecoder {
> println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize)
> }
>
866c879
< seqPrepare <- dec
---
> decodeBlock(dec)
885a899,902
> hist.reset()
> if h.dict != nil {
> hist.setDict(h.dict)
> }
912c929
< seqPrepare <- dec
---
> decodeBlock(dec)
921c938
< close(seqPrepare)
---
> close(seqDecode)
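decoder.go collapses the async streaming pipeline from four goroutines to three: literal decoding leaves its own goroutine and runs inline in the frame-reading loop via the new `decodeBlock` closure. A toy sketch of the resulting stage layout (channels and stages only, not the real decoder):

```go
package main

import "fmt"

type blockDec struct{ id int }

func main() {
	seqDecode := make(chan *blockDec, 4)  // stage 0 -> stage 1
	seqExecute := make(chan *blockDec, 4) // stage 1 -> stage 2
	done := make(chan struct{})

	// Async 1: decode sequences.
	go func() {
		defer close(seqExecute)
		for b := range seqDecode {
			seqExecute <- b
		}
	}()

	// Async 2: execute sequences, send to output.
	go func() {
		defer close(done)
		for b := range seqExecute {
			fmt.Println("wrote block", b.id)
		}
	}()

	// Stage 0 (the caller's goroutine): read frames, decode block
	// literals inline, then hand each block to the pipeline.
	for i := 0; i < 3; i++ {
		seqDecode <- &blockDec{id: i}
	}
	close(seqDecode)
	<-done
}
```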
diff -r --color a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
21a22,23
> ignoreChecksum bool
> limitToCap bool
34c36
< o.maxDecodedSize = 1 << 63
---
> o.maxDecodedSize = 64 << 30
69c71
< // Maximum and default is 1 << 63 bytes.
---
> // Maximum is 1 << 63 bytes. Default is 64GiB.
111a114,132
> return nil
> }
> }
>
> // WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes,
> // or any size set in WithDecoderMaxMemory.
> // This can be used to limit decoding to a specific maximum output size.
> // Disabled by default.
> func WithDecodeAllCapLimit(b bool) DOption {
> return func(o *decoderOptions) error {
> o.limitToCap = b
> return nil
> }
> }
>
> // IgnoreChecksum allows to forcibly ignore checksum checking.
> func IgnoreChecksum(b bool) DOption {
> return func(o *decoderOptions) error {
> o.ignoreChecksum = b
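decoder_options.go lowers the default `WithDecoderMaxMemory` from 1<<63 to 64 GiB and adds two options, `WithDecodeAllCapLimit` and `IgnoreChecksum`. A usage sketch (the "hello zstd" round trip is purely illustrative; a nil writer/reader is the package's documented way to use EncodeAll/DecodeAll):

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil) // nil writer is fine for EncodeAll
	compressed := enc.EncodeAll([]byte("hello zstd"), nil)
	enc.Close()

	dec, err := zstd.NewReader(nil,
		zstd.WithDecoderMaxMemory(1<<30), // explicit cap; the default dropped to 64 GiB
		zstd.WithDecodeAllCapLimit(true), // DecodeAll may only fill cap(dst)-len(dst)
		zstd.IgnoreChecksum(true),        // frame CRC is consumed but not verified
	)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := dec.DecodeAll(compressed, make([]byte, 0, 1024))
	fmt.Printf("%s %v\n", out, err)
}
```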
diff -r --color a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
159d158
< nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
160a160
> nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
418a419,424
> // Allow some bytes at the beginning to mismatch.
> // Sweet spot is around 3 bytes, but depends on input.
> // The skipped bytes are tested in Extend backwards,
> // and still picked up as part of the match if they do.
> const skipBeginning = 3
>
420c426,427
< cv := load3232(src, s)
---
> s2 := s + skipBeginning
> cv := load3232(src, s2)
422,423c429,430
< coffsetL := candidateL.offset - e.cur - matched
< if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
---
> coffsetL := candidateL.offset - e.cur - matched + skipBeginning
> if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
425c432
< matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
---
> matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
427a435
> s = s2
437,438c445,446
< coffsetL = candidateL.prev - e.cur - matched
< if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
---
> coffsetL = candidateL.prev - e.cur - matched + skipBeginning
> if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
440c448
< matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
---
> matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
442a451
> s = s2
521d529
< nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
522a531
> nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
677d685
< nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
678a687
> nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
1050d1058
< nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
1051a1060
> nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
diff -r --color a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
130d129
< nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
131a131
> nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
442d441
< nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
443a443
> nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
788d787
< nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
789a789
> nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
972c972
< longHash2 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
---
> longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen)
1005d1004
< nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
1006a1006
> nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
1106c1106,1107
< copy(e.longTable[:], e.dictLongTable)
---
> //copy(e.longTable[:], e.dictLongTable)
> e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable)
1117c1118,1120
< copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
---
> // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
> *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:])
>
diff -r --color a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
874c874,875
< copy(e.table[:], e.dictTable)
---
> //copy(e.table[:], e.dictTable)
> e.table = *(*[tableSize]tableEntry)(e.dictTable)
886c887,888
< copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
---
> //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
> *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:])
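enc_dfast.go and enc_fast.go replace `copy` calls over fixed-size table shards with Go 1.17's slice-to-array-pointer conversion (note the old `copy` lines kept as comments above), which lets the compiler emit one fixed-size move. The idiom in isolation:

```go
package main

import "fmt"

func main() {
	dictTable := []uint32{1, 2, 3, 4}
	var table [4]uint32

	// Before: copy(table[:], dictTable)
	// After (Go 1.17+): convert the slice to *[4]uint32 and assign the
	// whole array in one move; this panics if len(dictTable) < 4.
	table = *(*[4]uint32)(dictTable)

	fmt.Println(table) // [1 2 3 4]
}
```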
diff -r --color a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
531,532c531,532
< // Use single segments when above minimum window and below 1MB.
< single := len(src) < 1<<20 && len(src) > MinWindowSize
---
> // Use single segments when above minimum window and below window size.
> single := len(src) <= e.o.windowSize && len(src) > MinWindowSize
554c554
< if len(src) <= maxCompressedBlockSize {
---
> if len(src) <= e.o.blockSize {
diff -r --color a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
286c286
< // If this is not specified, block encodes will automatically choose this based on the input size.
---
> // If this is not specified, block encodes will automatically choose this based on the input size and the window size.
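encoder.go now decides single-segment frames against the configured window size instead of a hard 1 MB cutoff, and block encodes size themselves from `e.o.blockSize` rather than `maxCompressedBlockSize`. A sketch of the new predicate (`MinWindowSize` is exported by the package; 1 KiB at the time of writing):

```go
package main

import "fmt"

const MinWindowSize = 1 << 10 // mirrors the package constant

// singleSegment mirrors the updated decision: one-segment frames are used
// only when the whole input fits inside the configured window.
func singleSegment(srcLen, windowSize int) bool {
	return srcLen <= windowSize && srcLen > MinWindowSize
}

func main() {
	const window = 1 << 20
	fmt.Println(singleSegment(512, window))    // false: under the minimum window
	fmt.Println(singleSegment(64<<10, window)) // true: fits the window
	fmt.Println(singleSegment(4<<20, window))  // false: exceeds the window
}
```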
diff -r --color a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
109c109
< err = br.skipN(int(n))
---
> err = br.skipN(int64(n))
233a234,240
> if d.WindowSize > d.o.maxWindowSize {
> if debugDecoder {
> printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
> }
> return ErrWindowSizeExceeded
> }
>
240,244c247,251
< }
<
< if d.WindowSize > uint64(d.o.maxWindowSize) {
< if debugDecoder {
< printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
---
> if d.WindowSize > d.o.maxDecodedSize {
> if debugDecoder {
> printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
> }
> return ErrDecoderSizeExceeded
246d252
< return ErrWindowSizeExceeded
247a254
>
256c263,264
< if d.o.lowMem && d.history.windowSize < maxBlockSize {
---
> if !d.o.lowMem || d.history.windowSize < maxBlockSize {
> // Alloc 2x window size if not low-mem, or very small window size.
258d265
< // TODO: Maybe use FrameContent size
259a267
> // Alloc with one additional block
293,299d300
< var tmp [4]byte
< got := d.crc.Sum64()
< // Flip to match file order.
< tmp[0] = byte(got >> 0)
< tmp[1] = byte(got >> 8)
< tmp[2] = byte(got >> 16)
< tmp[3] = byte(got >> 24)
308c309,321
< if !bytes.Equal(tmp[:], want) && !ignoreCRC {
---
> if d.o.ignoreChecksum {
> return nil
> }
>
> var tmp [4]byte
> got := d.crc.Sum64()
> // Flip to match file order.
> tmp[0] = byte(got >> 0)
> tmp[1] = byte(got >> 8)
> tmp[2] = byte(got >> 16)
> tmp[3] = byte(got >> 24)
>
> if !bytes.Equal(tmp[:], want) {
319a333,345
> // consumeCRC reads the checksum data if the frame has one.
> func (d *frameDec) consumeCRC() error {
> if d.HasCheckSum {
> _, err := d.rawInput.readSmall(4)
> if err != nil {
> println("CRC missing?", err)
> return err
> }
> }
>
> return nil
> }
>
328a355,378
> d.history.decoders.maxSyncLen = 0
> if d.o.limitToCap {
> d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst))
> }
> if d.FrameContentSize != fcsUnknown {
> if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen {
> d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
> }
> if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
> if debugDecoder {
> println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize)
> }
> return dst, ErrDecoderSizeExceeded
> }
> if debugDecoder {
> println("maxSyncLen:", d.history.decoders.maxSyncLen)
> }
> if !d.o.limitToCap && uint64(cap(dst)-len(dst)) < d.history.decoders.maxSyncLen {
> // Alloc for output
> dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
> copy(dst2, dst)
> dst = dst2
> }
> }
342c392,398
< if uint64(len(d.history.b)) > d.o.maxDecodedSize {
---
> if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize {
> println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize)
> err = ErrDecoderSizeExceeded
> break
> }
> if d.o.limitToCap && len(d.history.b) > cap(dst) {
> println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst))
363,369c419,429
< var n int
< n, err = d.crc.Write(dst[crcStart:])
< if err == nil {
< if n != len(dst)-crcStart {
< err = io.ErrShortWrite
< } else {
< err = d.checkCRC()
---
> if d.o.ignoreChecksum {
> err = d.consumeCRC()
> } else {
> var n int
> n, err = d.crc.Write(dst[crcStart:])
> if err == nil {
> if n != len(dst)-crcStart {
> err = io.ErrShortWrite
> } else {
> err = d.checkCRC()
> }
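framedec.go reorders the window-size checks, adds `consumeCRC` for the ignore-checksum path, and, for the sync path, precomputes an output budget (`maxSyncLen`) from the cap limit and the frame content size before any block is decoded. A condensed sketch of that bookkeeping with simplified types (not the decoder's actual function):

```go
package main

import (
	"errors"
	"fmt"
)

var errSizeExceeded = errors.New("decoder size exceeded")

// syncBudget condenses the logic added above: limitToCap bounds output by
// the destination's spare capacity, and a known frame content size both
// tightens the budget and is checked against the global limit.
func syncBudget(dstLen, dstCap int, fcs uint64, fcsKnown, limitToCap bool, maxDecoded uint64) (uint64, error) {
	var budget uint64
	if limitToCap {
		budget = uint64(dstCap - dstLen)
	}
	if fcsKnown {
		if !limitToCap || fcs+uint64(dstLen) < budget {
			budget = fcs + uint64(dstLen)
		}
		if budget > maxDecoded {
			return 0, errSizeExceeded
		}
	}
	return budget, nil
}

func main() {
	fmt.Println(syncBudget(0, 1<<16, 4096, true, false, 64<<30))  // 4096 <nil>
	fmt.Println(syncBudget(0, 1<<16, 1<<40, true, false, 64<<30)) // 0 decoder size exceeded
}
```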
Only in b/vendor/github.com/klauspost/compress/zstd: fse_decoder_amd64.go
Only in b/vendor/github.com/klauspost/compress/zstd: fse_decoder_amd64.s
Only in b/vendor/github.com/klauspost/compress/zstd: fse_decoder_generic.go
diff -r --color a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
7a8
> "encoding/binary"
9a11
> "io"
181d182
< // println(s.norm[:s.symbolLen], s.symbolLen)
184a186,208
> func (s *fseDecoder) mustReadFrom(r io.Reader) {
> fatalErr := func(err error) {
> if err != nil {
> panic(err)
> }
> }
> // dt [maxTablesize]decSymbol // Decompression table.
> // symbolLen uint16 // Length of active part of the symbol table.
> // actualTableLog uint8 // Selected tablelog.
> // maxBits uint8 // Maximum number of additional bits
> // // used for table creation to avoid allocations.
> // stateTable [256]uint16
> // norm [maxSymbolValue + 1]int16
> // preDefined bool
> fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
> fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
> fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
> fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
> fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
> fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
> fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
> }
>
207,210d230
< func (d decSymbol) baseline() uint32 {
< return uint32(d >> 32)
< }
<
215,218d234
< func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) {
< *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
< }
<
234,238d249
< func (d *decSymbol) setBaseline(baseline uint32) {
< const mask = 0xffffffff
< *d = (*d & mask) | decSymbol(baseline)<<32
< }
<
260,321d270
< // buildDtable will build the decoding table.
< func (s *fseDecoder) buildDtable() error {
< tableSize := uint32(1 << s.actualTableLog)
< highThreshold := tableSize - 1
< symbolNext := s.stateTable[:256]
<
< // Init, lay down lowprob symbols
< {
< for i, v := range s.norm[:s.symbolLen] {
< if v == -1 {
< s.dt[highThreshold].setAddBits(uint8(i))
< highThreshold--
< symbolNext[i] = 1
< } else {
< symbolNext[i] = uint16(v)
< }
< }
< }
< // Spread symbols
< {
< tableMask := tableSize - 1
< step := tableStep(tableSize)
< position := uint32(0)
< for ss, v := range s.norm[:s.symbolLen] {
< for i := 0; i < int(v); i++ {
< s.dt[position].setAddBits(uint8(ss))
< position = (position + step) & tableMask
< for position > highThreshold {
< // lowprob area
< position = (position + step) & tableMask
< }
< }
< }
< if position != 0 {
< // position must reach all cells once, otherwise normalizedCounter is incorrect
< return errors.New("corrupted input (position != 0)")
< }
< }
<
< // Build Decoding table
< {
< tableSize := uint16(1 << s.actualTableLog)
< for u, v := range s.dt[:tableSize] {
< symbol := v.addBits()
< nextState := symbolNext[symbol]
< symbolNext[symbol] = nextState + 1
< nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
< s.dt[u&maxTableMask].setNBits(nBits)
< newState := (nextState << nBits) - tableSize
< if newState > tableSize {
< return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
< }
< if newState == uint16(u) && nBits == 0 {
< // Seems weird that this is possible with nbits > 0.
< return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
< }
< s.dt[u&maxTableMask].setNewState(newState)
< }
< }
< return nil
< }
<
355,372d303
< // next returns the current symbol and sets the next state.
< // At least tablelog bits must be available in the bit reader.
< func (s *fseState) next(br *bitReader) {
< lowBits := uint16(br.getBits(s.state.nbBits()))
< s.state = s.dt[s.state.newState()+lowBits]
< }
<
< // finished returns true if all bits have been read from the bitstream
< // and the next state would require reading bits from the input.
< func (s *fseState) finished(br *bitReader) bool {
< return br.finished() && s.state.nbBits() > 0
< }
<
< // final returns the current state symbol without decoding the next.
< func (s *fseState) final() (int, uint8) {
< return s.state.baselineInt(), s.state.addBits()
< }
<
376,384d306
< }
<
< // nextFast returns the next symbol and sets the next state.
< // This can only be used if no symbols are 0 bits.
< // At least tablelog bits must be available in the bit reader.
< func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
< lowBits := br.get16BitsFast(s.state.nbBits())
< s.state = s.dt[s.state.newState()+lowBits]
< return s.state.baseline(), s.state.addBits()
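fse_decoder.go gains `mustReadFrom`, which restores a decoder's fixed-size fields with successive `binary.Read` calls (paired with the `binary.Write` block-extraction code added to blockdec.go above). The pattern in miniature, using a hypothetical struct:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// tableState is a stand-in with fixed-size fields, like the decoder fields
// listed in the comment block above.
type tableState struct {
	SymbolLen      uint16
	ActualTableLog uint8
	MaxBits        uint8
	PreDefined     bool
}

func main() {
	var buf bytes.Buffer
	in := tableState{SymbolLen: 256, ActualTableLog: 9, MaxBits: 15}
	if err := binary.Write(&buf, binary.LittleEndian, in); err != nil {
		panic(err)
	}
	var out tableState
	if err := binary.Read(&buf, binary.LittleEndian, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // {SymbolLen:256 ActualTableLog:9 MaxBits:15 PreDefined:false}
}
```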
diff -r --color a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
79,93d78
< // prepare will prepare and allocate scratch tables used for both compression and decompression.
< func (s *fseEncoder) prepare() (*fseEncoder, error) {
< if s == nil {
< s = &fseEncoder{}
< }
< s.useRLE = false
< if s.clearCount && s.maxCount == 0 {
< for i := range s.count {
< s.count[i] = 0
< }
< s.clearCount = false
< }
< return s, nil
< }
<
710,717d694
< }
<
< // encode the output symbol provided and write it to the bitstream.
< func (c *cState) encode(symbolTT symbolTransform) {
< nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
< dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState)
< c.bw.addBits16NC(c.state, uint8(nbBitsOut))
< c.state = c.stateTable[dstState]
Only in a/vendor/github.com/klauspost/compress/zstd: fuzz.go
Only in a/vendor/github.com/klauspost/compress/zstd: fuzz_none.go
diff -r --color a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go
36,41d35
<
< // hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits.
< // Preferably h should be a constant and should always be <32.
< func hash3(u uint32, h uint8) uint32 {
< return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31)
< }
diff -r --color a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
14a15,16
> For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go).
>
389,390d390
< These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).
<
392,395d391
<
< ```
< BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op
< BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op
397,398c393,397
< BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op
< BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op
---
> Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
>
> ```
> BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op
> BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op
400c399
< Concurrent performance:
---
> Concurrent blocks, performance:
402,426c401,412
< BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op
<
< BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op
< BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op
---
> BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op
> BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op
> BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op
> BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op
> BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op
> BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op
> BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op
> BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op
> BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op
> BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op
> BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op
> BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op
429c415
< This reflects the performance around May 2020, but this may be out of date.
---
> This reflects the performance around May 2022, but this may be out of date.
Only in b/vendor/github.com/klauspost/compress/zstd: seqdec_amd64.go
Only in b/vendor/github.com/klauspost/compress/zstd: seqdec_amd64.s
Only in b/vendor/github.com/klauspost/compress/zstd: seqdec_generic.go
diff -r --color a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
75a76
> maxSyncLen uint64
101,244d101
< // decode sequences from the stream with the provided history.
< func (s *sequenceDecs) decode(seqs []seqVals) error {
< br := s.br
<
< // Grab full sizes tables, to avoid bounds checks.
< llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
< llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
< s.seqSize = 0
< litRemain := len(s.literals)
< maxBlockSize := maxCompressedBlockSize
< if s.windowSize < maxBlockSize {
< maxBlockSize = s.windowSize
< }
< for i := range seqs {
< var ll, mo, ml int
< if br.off > 4+((maxOffsetBits+16+16)>>3) {
< // inlined function:
< // ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
<
< // Final will not read from stream.
< var llB, mlB, moB uint8
< ll, llB = llState.final()
< ml, mlB = mlState.final()
< mo, moB = ofState.final()
<
< // extra bits are stored in reverse order.
< br.fillFast()
< mo += br.getBits(moB)
< if s.maxBits > 32 {
< br.fillFast()
< }
< ml += br.getBits(mlB)
< ll += br.getBits(llB)
<
< if moB > 1 {
< s.prevOffset[2] = s.prevOffset[1]
< s.prevOffset[1] = s.prevOffset[0]
< s.prevOffset[0] = mo
< } else {
< // mo = s.adjustOffset(mo, ll, moB)
< // Inlined for rather big speedup
< if ll == 0 {
< // There is an exception though, when current sequence's literals_length = 0.
< // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
< // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
< mo++
< }
<
< if mo == 0 {
< mo = s.prevOffset[0]
< } else {
< var temp int
< if mo == 3 {
< temp = s.prevOffset[0] - 1
< } else {
< temp = s.prevOffset[mo]
< }
<
< if temp == 0 {
< // 0 is not valid; input is corrupted; force offset to 1
< println("WARNING: temp was 0")
< temp = 1
< }
<
< if mo != 1 {
< s.prevOffset[2] = s.prevOffset[1]
< }
< s.prevOffset[1] = s.prevOffset[0]
< s.prevOffset[0] = temp
< mo = temp
< }
< }
< br.fillFast()
< } else {
< if br.overread() {
< if debugDecoder {
< printf("reading sequence %d, exceeded available data\n", i)
< }
< return io.ErrUnexpectedEOF
< }
< ll, mo, ml = s.next(br, llState, mlState, ofState)
< br.fill()
< }
<
< if debugSequences {
< println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
< }
< // Evaluate.
< // We might be doing this async, so do it early.
< if mo == 0 && ml > 0 {
< return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
< }
< if ml > maxMatchLen {
< return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
< }
< s.seqSize += ll + ml
< if s.seqSize > maxBlockSize {
< return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
< }
< litRemain -= ll
< if litRemain < 0 {
< return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
< }
< seqs[i] = seqVals{
< ll: ll,
< ml: ml,
< mo: mo,
< }
< if i == len(seqs)-1 {
< // This is the last sequence, so we shouldn't update state.
< break
< }
<
< // Manually inlined, ~ 5-20% faster
< // Update all 3 states at once. Approx 20% faster.
< nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
< if nBits == 0 {
< llState = llTable[llState.newState()&maxTableMask]
< mlState = mlTable[mlState.newState()&maxTableMask]
< ofState = ofTable[ofState.newState()&maxTableMask]
< } else {
< bits := br.get32BitsFast(nBits)
< lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
< llState = llTable[(llState.newState()+lowBits)&maxTableMask]
<
< lowBits = uint16(bits >> (ofState.nbBits() & 31))
< lowBits &= bitMask[mlState.nbBits()&15]
< mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
<
< lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
< ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
< }
< }
< s.seqSize += litRemain
< if s.seqSize > maxBlockSize {
< return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
< }
< err := br.close()
< if err != nil {
< printf("Closing sequences: %v, %+v\n", err, *br)
< }
< return err
< }
<
247a105,108
> if len(s.dict) == 0 {
> return s.executeSimple(seqs, hist)
> }
>
329a191
>
344c206,211
< func (s *sequenceDecs) decodeSync(history *history) error {
---
> func (s *sequenceDecs) decodeSync(hist []byte) error {
> supported, err := s.decodeSyncSimple(hist)
> if supported {
> return err
> }
>
351d217
< hist := history.b[history.ignoreBuffer:]
436c302
< return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
---
> return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
466c332
< return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
---
> return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
472c338
< return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
---
> return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
532a399
>
546,547c413,414
< if len(s.literals)+len(s.out)-startSize > maxBlockSize {
< return fmt.Errorf("output (%d) bigger than max block size (%d)", len(s.out), maxBlockSize)
---
> if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
> return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
555,564d421
< // update states, at least 27 bits must be available.
< func (s *sequenceDecs) update(br *bitReader) {
< // Max 8 bits
< s.litLengths.state.next(br)
< // Max 9 bits
< s.matchLengths.state.next(br)
< // Max 8 bits
< s.offsets.state.next(br)
< }
<
571,651d427
< }
<
< // update states, at least 27 bits must be available.
< func (s *sequenceDecs) updateAlt(br *bitReader) {
< // Update all 3 states at once. Approx 20% faster.
< a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
<
< nBits := a.nbBits() + b.nbBits() + c.nbBits()
< if nBits == 0 {
< s.litLengths.state.state = s.litLengths.state.dt[a.newState()]
< s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()]
< s.offsets.state.state = s.offsets.state.dt[c.newState()]
< return
< }
< bits := br.get32BitsFast(nBits)
< lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31))
< s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits]
<
< lowBits = uint16(bits >> (c.nbBits() & 31))
< lowBits &= bitMask[b.nbBits()&15]
< s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits]
<
< lowBits = uint16(bits) & bitMask[c.nbBits()&15]
< s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits]
< }
<
< // nextFast will return new states when there are at least 4 unused bytes left on the stream when done.
< func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
< // Final will not read from stream.
< ll, llB := llState.final()
< ml, mlB := mlState.final()
< mo, moB := ofState.final()
<
< // extra bits are stored in reverse order.
< br.fillFast()
< mo += br.getBits(moB)
< if s.maxBits > 32 {
< br.fillFast()
< }
< ml += br.getBits(mlB)
< ll += br.getBits(llB)
<
< if moB > 1 {
< s.prevOffset[2] = s.prevOffset[1]
< s.prevOffset[1] = s.prevOffset[0]
< s.prevOffset[0] = mo
< return
< }
< // mo = s.adjustOffset(mo, ll, moB)
< // Inlined for rather big speedup
< if ll == 0 {
< // There is an exception though, when current sequence's literals_length = 0.
< // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
< // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
< mo++
< }
<
< if mo == 0 {
< mo = s.prevOffset[0]
< return
< }
< var temp int
< if mo == 3 {
< temp = s.prevOffset[0] - 1
< } else {
< temp = s.prevOffset[mo]
< }
<
< if temp == 0 {
< // 0 is not valid; input is corrupted; force offset to 1
< println("temp was 0")
< temp = 1
< }
<
< if mo != 1 {
< s.prevOffset[2] = s.prevOffset[1]
< }
< s.prevOffset[1] = s.prevOffset[0]
< s.prevOffset[0] = temp
< mo = temp
< return
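seqdec.go now routes `decodeSync` through `decodeSyncSimple` (the entry point backed by the new seqdec_amd64.s assembly) and only runs the generic pure-Go loop when that path reports itself unsupported. A toy sketch of the dispatch:

```go
package main

import "fmt"

// decodeSyncSimple stands in for the specialized entry point; it reports
// whether it could handle the input (here: pretend only small histories).
func decodeSyncSimple(hist []byte) (supported bool, err error) {
	if len(hist) > 8 {
		return false, nil
	}
	return true, nil
}

// decodeSync mirrors the new dispatch: try the specialized path first,
// fall back to the generic loop otherwise.
func decodeSync(hist []byte) error {
	if supported, err := decodeSyncSimple(hist); supported {
		return err
	}
	fmt.Println("falling back to the generic decode loop")
	return nil
}

func main() {
	_ = decodeSync(make([]byte, 4))  // fast path
	_ = decodeSync(make([]byte, 64)) // generic path
}
```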
diff -r --color a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go
21c21,28
< var zipReaderPool sync.Pool
---
> // zipReaderPool is the default reader pool.
> var zipReaderPool = sync.Pool{New: func() interface{} {
> z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1))
> if err != nil {
> panic(err)
> }
> return z
> }}
24,31c31,49
< func newZipReader(r io.Reader) io.ReadCloser {
< dec, ok := zipReaderPool.Get().(*Decoder)
< if ok {
< dec.Reset(r)
< } else {
< d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
< if err != nil {
< panic(err)
---
> func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser {
> pool := &zipReaderPool
> if len(opts) > 0 {
> opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...)
> // Force concurrency 1
> opts = append(opts, WithDecoderConcurrency(1))
> // Create our own pool
> pool = &sync.Pool{}
> }
> return func(r io.Reader) io.ReadCloser {
> dec, ok := pool.Get().(*Decoder)
> if ok {
> dec.Reset(r)
> } else {
> d, err := NewReader(r, opts...)
> if err != nil {
> panic(err)
> }
> dec = d
33c51
< dec = d
---
> return &pooledZipReader{dec: dec, pool: pool}
35d52
< return &pooledZipReader{dec: dec}
39,40c56,58
< mu sync.Mutex // guards Close and Read
< dec *Decoder
---
> mu sync.Mutex // guards Close and Read
> pool *sync.Pool
> dec *Decoder
51,52c69,70
< err = r.dec.Reset(nil)
< zipReaderPool.Put(r.dec)
---
> r.dec.Reset(nil)
> r.pool.Put(r.dec)
64c82
< zipReaderPool.Put(r.dec)
---
> r.pool.Put(r.dec)
118,119c136,140
< func ZipDecompressor() func(r io.Reader) io.ReadCloser {
< return newZipReader
---
> // Options can be specified. WithDecoderConcurrency(1) is forced,
> // and by default a 128MB maximum decompression window is specified.
> // The window size can be overridden if required.
> func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser {
> return newZipReader(opts...)
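zip.go makes `ZipDecompressor` accept decoder options, forcing single-goroutine decoders and defaulting to a 128 MB maximum window, with a per-option `sync.Pool` behind the scenes. A usage sketch with archive/zip (`ZipMethodWinZip` and `ZipCompressor` are existing exports of the package, not shown in this diff):

```go
package main

import (
	"archive/zip"
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Write a zip entry compressed with zstd.
	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)
	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())
	w, _ := zw.CreateHeader(&zip.FileHeader{Name: "a.txt", Method: zstd.ZipMethodWinZip})
	w.Write([]byte("hello"))
	zw.Close()

	// Read it back; the decompressor now accepts decoder options.
	zr, _ := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor(
		zstd.WithDecoderMaxWindow(64<<20), // override the 128 MB default window
	))
	rc, _ := zr.File[0].Open()
	out := new(bytes.Buffer)
	out.ReadFrom(rc)
	rc.Close()
	fmt.Println(out.String()) // hello
}
```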
diff -r --color a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
113,123d112
< // matchLenFast does matching, but will not match the last up to 7 bytes.
< func matchLenFast(a, b []byte) int {
< endI := len(a) & (math.MaxInt32 - 7)
< for i := 0; i < endI; i += 8 {
< if diff := load64(a, i) ^ load64(b, i); diff != 0 {
< return i + bits.TrailingZeros64(diff)>>3
< }
< }
< return endI
< }
<
diff -r --color a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go
162d161
< SCE // SYSENTER and SYSEXIT instructions
192a192,193
> SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions.
> SYSEE // SYSENTER and SYSEXIT instructions
193a195
> TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX.
255a258
> Stepping int // CPU stepping info
361a365,374
> // AnyOf returns whether the CPU supports one or more of the requested features.
> func (c CPUInfo) AnyOf(ids ...FeatureID) bool {
> for _, id := range ids {
> if c.featureSet.inSet(id) {
> return true
> }
> }
> return false
> }
>
363,366c376,379
< var level1Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2)
< var level2Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3)
< var level3Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE)
< var level4Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL)
---
> var level1Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SYSCALL, SSE, SSE2)
> var level2Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SYSCALL, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3)
> var level3Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SYSCALL, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE)
> var level4Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SYSCALL, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL)
680c693
< fam, _ := familyModel()
---
> fam, _, _ := familyModel()
718c731
< func familyModel() (int, int) {
---
> func familyModel() (family, model, stepping int) {
720c733
< return 0, 0
---
> return 0, 0, 0
723,725c736,751
< family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff)
< model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0)
< return int(family), int(model)
---
> // If BaseFamily[3:0] is less than Fh then ExtendedFamily[7:0] is reserved and Family is equal to BaseFamily[3:0].
> family = int((eax >> 8) & 0xf)
> extFam := family == 0x6 // Intel is 0x6, needs extended model.
> if family == 0xf {
> // Add ExtFamily
> family += int((eax >> 20) & 0xff)
> extFam = true
> }
> // If BaseFamily[3:0] is less than 0Fh then ExtendedModel[3:0] is reserved and Model is equal to BaseModel[3:0].
> model = int((eax >> 4) & 0xf)
> if extFam {
> // Add ExtModel
> model += int((eax >> 12) & 0xf0)
> }
> stepping = int(eax & 0xf)
> return family, model, stepping
860c886
< if maxExtendedFunction() < 0x8000001D {
---
> if maxExtendedFunction() < 0x8000001D || !c.Has(TOPEXT) {
977c1003
< family, model := familyModel()
---
> family, model, _ := familyModel()
982c1008
< fs.setIf((d&(1<<11)) != 0, SCE)
---
> fs.setIf((d&(1<<11)) != 0, SYSEE)
984d1009
< fs.setIf((d&(1<<22)) != 0, MMXEXT)
992,994c1017,1019
< fs.setIf((c&0x00000200) != 0, SSSE3)
< fs.setIf((c&0x00080000) != 0, SSE4)
< fs.setIf((c&0x00100000) != 0, SSE42)
---
> fs.setIf((c&(1<<9)) != 0, SSSE3)
> fs.setIf((c&(1<<19)) != 0, SSE4)
> fs.setIf((c&(1<<20)) != 0, SSE42)
1158a1184
> fs.setIf((c&(1<<22)) != 0, TOPEXT)
1161,1164c1187
< fs.setIf((d&(1<<31)) != 0, AMD3DNOW)
< fs.setIf((d&(1<<30)) != 0, AMD3DNOWEXT)
< fs.setIf((d&(1<<23)) != 0, MMX)
< fs.setIf((d&(1<<22)) != 0, MMXEXT)
---
> fs.setIf(d&(1<<11) != 0, SYSCALL)
1165a1189,1192
> fs.setIf(d&(1<<22) != 0, MMXEXT)
> fs.setIf(d&(1<<23) != 0, MMX)
> fs.setIf(d&(1<<24) != 0, FXSR)
> fs.setIf(d&(1<<25) != 0, FXSROPT)
1166a1194,1195
> fs.setIf(d&(1<<30) != 0, AMD3DNOWEXT)
> fs.setIf(d&(1<<31) != 0, AMD3DNOW)
1171,1172c1200,1201
< fs.setIf((c&0x00000800) != 0, XOP)
< fs.setIf((c&0x00010000) != 0, FMA4)
---
> fs.setIf((c&(1<<11)) != 0, XOP)
> fs.setIf((c&(1<<16)) != 0, FMA4)
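cpuid.go adds the `AnyOf` query, a `Stepping` field, the `SYSCALL`/`SYSEE`/`TOPEXT` feature IDs, and a corrected extended family/model decode. A usage sketch:

```go
package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	fmt.Println("family:", cpuid.CPU.Family, "model:", cpuid.CPU.Model,
		"stepping:", cpuid.CPU.Stepping)

	// AnyOf reports whether at least one of the listed features is present;
	// Supports requires all of them.
	if cpuid.CPU.AnyOf(cpuid.AVX512F, cpuid.AVX2) {
		fmt.Println("wide vector units available")
	}
	fmt.Println("SYSCALL/SYSRET:", cpuid.CPU.Supports(cpuid.SYSCALL))
}
```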
diff -r --color a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
27c27
< c.Family, c.Model = familyModel()
---
> c.Family, c.Model, c.Stepping = familyModel()
diff -r --color a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
102,177c102,179
< _ = x[SCE-92]
< _ = x[SERIALIZE-93]
< _ = x[SEV-94]
< _ = x[SEV_64BIT-95]
< _ = x[SEV_ALTERNATIVE-96]
< _ = x[SEV_DEBUGSWAP-97]
< _ = x[SEV_ES-98]
< _ = x[SEV_RESTRICTED-99]
< _ = x[SEV_SNP-100]
< _ = x[SGX-101]
< _ = x[SGXLC-102]
< _ = x[SHA-103]
< _ = x[SME-104]
< _ = x[SME_COHERENT-105]
< _ = x[SSE-106]
< _ = x[SSE2-107]
< _ = x[SSE3-108]
< _ = x[SSE4-109]
< _ = x[SSE42-110]
< _ = x[SSE4A-111]
< _ = x[SSSE3-112]
< _ = x[STIBP-113]
< _ = x[STOSB_SHORT-114]
< _ = x[SUCCOR-115]
< _ = x[SVM-116]
< _ = x[SVMDA-117]
< _ = x[SVMFBASID-118]
< _ = x[SVML-119]
< _ = x[SVMNP-120]
< _ = x[SVMPF-121]
< _ = x[SVMPFT-122]
< _ = x[TBM-123]
< _ = x[TME-124]
< _ = x[TSCRATEMSR-125]
< _ = x[TSXLDTRK-126]
< _ = x[VAES-127]
< _ = x[VMCBCLEAN-128]
< _ = x[VMPL-129]
< _ = x[VMSA_REGPROT-130]
< _ = x[VMX-131]
< _ = x[VPCLMULQDQ-132]
< _ = x[VTE-133]
< _ = x[WAITPKG-134]
< _ = x[WBNOINVD-135]
< _ = x[X87-136]
< _ = x[XGETBV1-137]
< _ = x[XOP-138]
< _ = x[XSAVE-139]
< _ = x[XSAVEC-140]
< _ = x[XSAVEOPT-141]
< _ = x[XSAVES-142]
< _ = x[AESARM-143]
< _ = x[ARMCPUID-144]
< _ = x[ASIMD-145]
< _ = x[ASIMDDP-146]
< _ = x[ASIMDHP-147]
< _ = x[ASIMDRDM-148]
< _ = x[ATOMICS-149]
< _ = x[CRC32-150]
< _ = x[DCPOP-151]
< _ = x[EVTSTRM-152]
< _ = x[FCMA-153]
< _ = x[FP-154]
< _ = x[FPHP-155]
< _ = x[GPA-156]
< _ = x[JSCVT-157]
< _ = x[LRCPC-158]
< _ = x[PMULL-159]
< _ = x[SHA1-160]
< _ = x[SHA2-161]
< _ = x[SHA3-162]
< _ = x[SHA512-163]
< _ = x[SM3-164]
< _ = x[SM4-165]
< _ = x[SVE-166]
< _ = x[lastID-167]
---
> _ = x[SERIALIZE-92]
> _ = x[SEV-93]
> _ = x[SEV_64BIT-94]
> _ = x[SEV_ALTERNATIVE-95]
> _ = x[SEV_DEBUGSWAP-96]
> _ = x[SEV_ES-97]
> _ = x[SEV_RESTRICTED-98]
> _ = x[SEV_SNP-99]
> _ = x[SGX-100]
> _ = x[SGXLC-101]
> _ = x[SHA-102]
> _ = x[SME-103]
> _ = x[SME_COHERENT-104]
> _ = x[SSE-105]
> _ = x[SSE2-106]
> _ = x[SSE3-107]
> _ = x[SSE4-108]
> _ = x[SSE42-109]
> _ = x[SSE4A-110]
> _ = x[SSSE3-111]
> _ = x[STIBP-112]
> _ = x[STOSB_SHORT-113]
> _ = x[SUCCOR-114]
> _ = x[SVM-115]
> _ = x[SVMDA-116]
> _ = x[SVMFBASID-117]
> _ = x[SVML-118]
> _ = x[SVMNP-119]
> _ = x[SVMPF-120]
> _ = x[SVMPFT-121]
> _ = x[SYSCALL-122]
> _ = x[SYSEE-123]
> _ = x[TBM-124]
> _ = x[TOPEXT-125]
> _ = x[TME-126]
> _ = x[TSCRATEMSR-127]
> _ = x[TSXLDTRK-128]
> _ = x[VAES-129]
> _ = x[VMCBCLEAN-130]
> _ = x[VMPL-131]
> _ = x[VMSA_REGPROT-132]
> _ = x[VMX-133]
> _ = x[VPCLMULQDQ-134]
> _ = x[VTE-135]
> _ = x[WAITPKG-136]
> _ = x[WBNOINVD-137]
> _ = x[X87-138]
> _ = x[XGETBV1-139]
> _ = x[XOP-140]
> _ = x[XSAVE-141]
> _ = x[XSAVEC-142]
> _ = x[XSAVEOPT-143]
> _ = x[XSAVES-144]
> _ = x[AESARM-145]
> _ = x[ARMCPUID-146]
> _ = x[ASIMD-147]
> _ = x[ASIMDDP-148]
> _ = x[ASIMDHP-149]
> _ = x[ASIMDRDM-150]
> _ = x[ATOMICS-151]
> _ = x[CRC32-152]
> _ = x[DCPOP-153]
> _ = x[EVTSTRM-154]
> _ = x[FCMA-155]
> _ = x[FP-156]
> _ = x[FPHP-157]
> _ = x[GPA-158]
> _ = x[JSCVT-159]
> _ = x[LRCPC-160]
> _ = x[PMULL-161]
> _ = x[SHA1-162]
> _ = x[SHA2-163]
> _ = x[SHA3-164]
> _ = x[SHA512-165]
> _ = x[SM3-166]
> _ = x[SM4-167]
> _ = x[SVE-168]
> _ = x[lastID-169]
181c183
< const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXSLOWAVXVNNIBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCX16ENQCMDERMSF16CFMA3FMA4FXSRFXSROPTGFNIHLEHRESETHTTHWAHYPERVISORIBPBIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_PREVENTHOSTINT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCOMMITMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMPXMSRIRCMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSCESERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTTBMTMETSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
---
> const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXSLOWAVXVNNIBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCX16ENQCMDERMSF16CFMA3FMA4FXSRFXSROPTGFNIHLEHRESETHTTHWAHYPERVISORIBPBIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_PREVENTHOSTINT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCOMMITMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMPXMSRIRCMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTOPEXTTMETSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
183c185
< var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 58, 62, 72, 84, 92, 100, 108, 116, 123, 133, 143, 151, 161, 172, 180, 190, 208, 223, 230, 237, 241, 245, 251, 256, 264, 269, 275, 279, 297, 305, 312, 316, 322, 326, 330, 334, 338, 342, 349, 353, 356, 362, 365, 368, 378, 382, 385, 395, 406, 412, 420, 431, 439, 451, 467, 482, 492, 499, 503, 506, 513, 518, 529, 536, 539, 545, 550, 559, 566, 574, 577, 583, 596, 601, 603, 610, 617, 623, 628, 634, 640, 646, 649, 665, 668, 677, 680, 689, 704, 717, 723, 737, 744, 747, 752, 755, 758, 770, 773, 777, 781, 785, 790, 795, 800, 805, 816, 822, 825, 830, 839, 843, 848, 853, 859, 862, 865, 875, 883, 887, 896, 900, 912, 915, 925, 928, 935, 943, 946, 953, 956, 961, 967, 975, 981, 987, 995, 1000, 1007, 1014, 1022, 1029, 1034, 1039, 1046, 1050, 1052, 1056, 1059, 1064, 1069, 1074, 1078, 1082, 1086, 1092, 1095, 1098, 1101, 1107}
---
> var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 58, 62, 72, 84, 92, 100, 108, 116, 123, 133, 143, 151, 161, 172, 180, 190, 208, 223, 230, 237, 241, 245, 251, 256, 264, 269, 275, 279, 297, 305, 312, 316, 322, 326, 330, 334, 338, 342, 349, 353, 356, 362, 365, 368, 378, 382, 385, 395, 406, 412, 420, 431, 439, 451, 467, 482, 492, 499, 503, 506, 513, 518, 529, 536, 539, 545, 550, 559, 566, 574, 577, 583, 596, 601, 603, 610, 617, 623, 628, 634, 640, 646, 649, 665, 674, 677, 686, 701, 714, 720, 734, 741, 744, 749, 752, 755, 767, 770, 774, 778, 782, 787, 792, 797, 802, 813, 819, 822, 827, 836, 840, 845, 850, 856, 863, 868, 871, 877, 880, 890, 898, 902, 911, 915, 927, 930, 940, 943, 950, 958, 961, 968, 971, 976, 982, 990, 996, 1002, 1010, 1015, 1022, 1029, 1037, 1044, 1049, 1054, 1061, 1065, 1067, 1071, 1074, 1079, 1084, 1089, 1093, 1097, 1101, 1107, 1110, 1113, 1116, 1122}
diff -r --color a/vendor/github.com/libp2p/go-libp2p/config/config.go b/vendor/github.com/libp2p/go-libp2p/config/config.go
66a67,71
> // ProtocolVersion is the protocol version that identifies the family
> // of protocols used by the peer in the Identify protocol. It is set
> // using the [ProtocolVersion] option.
> ProtocolVersion string
>
152a158,160
> if cfg.MultiaddrResolver != nil {
> opts = append(opts, swarm.WithMultiaddrResolver(cfg.MultiaddrResolver))
> }
191c199
< tpts, err := makeTransports(h, upgrader, cfg.ConnectionGater, cfg.PSK, cfg.ResourceManager, cfg.Transports)
---
> tpts, err := makeTransports(h, upgrader, cfg.ConnectionGater, cfg.PSK, cfg.ResourceManager, cfg.MultiaddrResolver, cfg.Transports)
226c234
< MultiaddrResolver: cfg.MultiaddrResolver,
---
> ProtocolVersion: cfg.ProtocolVersion,
diff -r --color a/vendor/github.com/libp2p/go-libp2p/config/constructor_types.go b/vendor/github.com/libp2p/go-libp2p/config/constructor_types.go
15a16,17
>
> madns "github.com/multiformats/go-multiaddr-dns"
33,34c35,37
< peerIDType = reflect.TypeOf((peer.ID)(""))
< pskType = reflect.TypeOf((pnet.PSK)(nil))
---
> peerIDType = reflect.TypeOf((peer.ID)(""))
> pskType = reflect.TypeOf((pnet.PSK)(nil))
> resolverType = reflect.TypeOf((*madns.Resolver)(nil))
38c41
< upgraderType: func(_ host.Host, u transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager) interface{} {
---
> upgraderType: func(_ host.Host, u transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager, _ *madns.Resolver) interface{} {
41c44
< hostType: func(h host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager) interface{} {
---
> hostType: func(h host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager, _ *madns.Resolver) interface{} {
44c47
< networkType: func(h host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager) interface{} {
---
> networkType: func(h host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager, _ *madns.Resolver) interface{} {
47c50
< pskType: func(_ host.Host, _ transport.Upgrader, psk pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager) interface{} {
---
> pskType: func(_ host.Host, _ transport.Upgrader, psk pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager, _ *madns.Resolver) interface{} {
50c53
< connGaterType: func(_ host.Host, _ transport.Upgrader, _ pnet.PSK, cg connmgr.ConnectionGater, _ network.ResourceManager) interface{} {
---
> connGaterType: func(_ host.Host, _ transport.Upgrader, _ pnet.PSK, cg connmgr.ConnectionGater, _ network.ResourceManager, _ *madns.Resolver) interface{} {
53c56
< peerIDType: func(h host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager) interface{} {
---
> peerIDType: func(h host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager, _ *madns.Resolver) interface{} {
56c59
< privKeyType: func(h host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager) interface{} {
---
> privKeyType: func(h host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager, _ *madns.Resolver) interface{} {
59c62
< pubKeyType: func(h host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager) interface{} {
---
> pubKeyType: func(h host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager, _ *madns.Resolver) interface{} {
62c65
< pstoreType: func(h host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager) interface{} {
---
> pstoreType: func(h host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager, _ *madns.Resolver) interface{} {
65c68
< rcmgrType: func(_ host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, rcmgr network.ResourceManager) interface{} {
---
> rcmgrType: func(_ host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, rcmgr network.ResourceManager, _ *madns.Resolver) interface{} {
66a70,72
> },
> resolverType: func(_ host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager, r *madns.Resolver) interface{} {
> return r
diff -r --color a/vendor/github.com/libp2p/go-libp2p/config/muxer.go b/vendor/github.com/libp2p/go-libp2p/config/muxer.go
38c38
< t, err := ctor(h, nil, nil, nil, nil)
---
> t, err := ctor(h, nil, nil, nil, nil, nil)
diff -r --color a/vendor/github.com/libp2p/go-libp2p/config/reflection_magic.go b/vendor/github.com/libp2p/go-libp2p/config/reflection_magic.go
13a14,15
>
> madns "github.com/multiformats/go-multiaddr-dns"
83c85
< type constructor func(host.Host, transport.Upgrader, pnet.PSK, connmgr.ConnectionGater, network.ResourceManager) interface{}
---
> type constructor func(host.Host, transport.Upgrader, pnet.PSK, connmgr.ConnectionGater, network.ResourceManager, *madns.Resolver) interface{}
134c136
< ) (func(host.Host, transport.Upgrader, pnet.PSK, connmgr.ConnectionGater, network.ResourceManager) (interface{}, error), error) {
---
> ) (func(host.Host, transport.Upgrader, pnet.PSK, connmgr.ConnectionGater, network.ResourceManager, *madns.Resolver) (interface{}, error), error) {
158c160
< return func(h host.Host, u transport.Upgrader, psk pnet.PSK, cg connmgr.ConnectionGater, rcmgr network.ResourceManager) (interface{}, error) {
---
> return func(h host.Host, u transport.Upgrader, psk pnet.PSK, cg connmgr.ConnectionGater, rcmgr network.ResourceManager, resolver *madns.Resolver) (interface{}, error) {
161c163
< if arg := makeArg(h, u, psk, cg, rcmgr); arg != nil {
---
> if arg := makeArg(h, u, psk, cg, rcmgr, resolver); arg != nil {
diff -r --color a/vendor/github.com/libp2p/go-libp2p/config/security.go b/vendor/github.com/libp2p/go-libp2p/config/security.go
44c44
< t, err := ctor(h, nil, nil, nil, nil)
---
> t, err := ctor(h, nil, nil, nil, nil, nil)
diff -r --color a/vendor/github.com/libp2p/go-libp2p/config/transport.go b/vendor/github.com/libp2p/go-libp2p/config/transport.go
8a9,10
>
> madns "github.com/multiformats/go-multiaddr-dns"
14c16
< type TptC func(host.Host, transport.Upgrader, pnet.PSK, connmgr.ConnectionGater, network.ResourceManager) (transport.Transport, error)
---
> type TptC func(host.Host, transport.Upgrader, pnet.PSK, connmgr.ConnectionGater, network.ResourceManager, *madns.Resolver) (transport.Transport, error)
42c44
< return func(_ host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager) (transport.Transport, error) {
---
> return func(_ host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager, _ *madns.Resolver) (transport.Transport, error) {
50,51c52,53
< return func(h host.Host, u transport.Upgrader, psk pnet.PSK, cg connmgr.ConnectionGater, rcmgr network.ResourceManager) (transport.Transport, error) {
< t, err := ctor(h, u, psk, cg, rcmgr)
---
> return func(h host.Host, u transport.Upgrader, psk pnet.PSK, cg connmgr.ConnectionGater, rcmgr network.ResourceManager, resolver *madns.Resolver) (transport.Transport, error) {
> t, err := ctor(h, u, psk, cg, rcmgr, resolver)
59c61
< func makeTransports(h host.Host, u transport.Upgrader, cg connmgr.ConnectionGater, psk pnet.PSK, rcmgr network.ResourceManager, tpts []TptC) ([]transport.Transport, error) {
---
> func makeTransports(h host.Host, u transport.Upgrader, cg connmgr.ConnectionGater, psk pnet.PSK, rcmgr network.ResourceManager, resolver *madns.Resolver, tpts []TptC) ([]transport.Transport, error) {
62c64
< t, err := tC(h, u, psk, cg, rcmgr)
---
> t, err := tC(h, u, psk, cg, rcmgr, resolver)
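
The hunks above thread a shared *madns.Resolver through every libp2p config constructor. A minimal sketch of a transport constructor matching the new TptC arity (the function name and body are illustrative, not part of the diff):

package sketch

import (
    "github.com/libp2p/go-libp2p/core/connmgr"
    "github.com/libp2p/go-libp2p/core/host"
    "github.com/libp2p/go-libp2p/core/network"
    "github.com/libp2p/go-libp2p/core/pnet"
    "github.com/libp2p/go-libp2p/core/transport"

    madns "github.com/multiformats/go-multiaddr-dns"
)

// newTransport compiles against the updated constructor shape; the reflection
// machinery can now inject the resolver alongside the other dependencies.
func newTransport(h host.Host, u transport.Upgrader, psk pnet.PSK, cg connmgr.ConnectionGater, rcmgr network.ResourceManager, r *madns.Resolver) (transport.Transport, error) {
    _ = r // available to transports that resolve or transform multiaddrs
    return nil, nil
}
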
diff -r --color a/vendor/github.com/libp2p/go-libp2p/core/transport/transport.go b/vendor/github.com/libp2p/go-libp2p/core/transport/transport.go
79a80,85
> // Resolver can be optionally implemented by transports that want to resolve or transform the
> // multiaddr.
> type Resolver interface {
> Resolve(ctx context.Context, maddr ma.Multiaddr) ([]ma.Multiaddr, error)
> }
>
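
The swarm probes for this interface with a type assertion, so any transport can opt in. A minimal sketch of an implementation (type name illustrative):

package sketch

import (
    "context"

    ma "github.com/multiformats/go-multiaddr"
)

// resolvingTransport sketches the new transport-side Resolve hook: the swarm
// calls it before DNS resolution, letting the transport rewrite addresses
// (e.g. the websocket transport fills in an /sni component for /tls/ws addrs).
type resolvingTransport struct{}

func (t *resolvingTransport) Resolve(ctx context.Context, maddr ma.Multiaddr) ([]ma.Multiaddr, error) {
    // Return the input unchanged when there is nothing to rewrite.
    return []ma.Multiaddr{maddr}, nil
}
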
diff -r --color a/vendor/github.com/libp2p/go-libp2p/defaults.go b/vendor/github.com/libp2p/go-libp2p/defaults.go
19a20
> madns "github.com/multiformats/go-multiaddr-dns"
67c68,72
< defaultIP4ListenAddr, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/0")
---
> defaultIP4TCPListenAddr, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/0")
> if err != nil {
> return err
> }
> defaultIP4QUICListenAddr, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/udp/0/quic")
72c77,81
< defaultIP6ListenAddr, err := multiaddr.NewMultiaddr("/ip6/::/tcp/0")
---
> defaultIP6TCPListenAddr, err := multiaddr.NewMultiaddr("/ip6/::/tcp/0")
> if err != nil {
> return err
> }
> defaultIP6QUICListenAddr, err := multiaddr.NewMultiaddr("/ip6/::/udp/0/quic")
77,78c86,89
< defaultIP4ListenAddr,
< defaultIP6ListenAddr,
---
> defaultIP4TCPListenAddr,
> defaultIP4QUICListenAddr,
> defaultIP6TCPListenAddr,
> defaultIP6QUICListenAddr,
108a120,124
> // DefaultMultiaddrResolver configures the default multiaddr DNS resolver for the host.
> var DefaultMultiaddrResolver = func(cfg *Config) error {
> return cfg.Apply(MultiaddrResolver(madns.DefaultResolver))
> }
>
151a168,171
> },
> {
> fallback: func(cfg *Config) bool { return cfg.MultiaddrResolver == nil },
> opt: DefaultMultiaddrResolver,
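
With these fallbacks, a host built with no explicit options now listens on QUIC as well as TCP and gets madns.DefaultResolver wired in. A quick check:

package main

import (
    "fmt"

    "github.com/libp2p/go-libp2p"
)

func main() {
    h, err := libp2p.New() // defaults now include /ip4/0.0.0.0/udp/0/quic and /ip6/::/udp/0/quic
    if err != nil {
        panic(err)
    }
    defer h.Close()
    fmt.Println(h.Addrs()) // expect both TCP and QUIC listen addresses
}
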
diff -r --color a/vendor/github.com/libp2p/go-libp2p/options.go b/vendor/github.com/libp2p/go-libp2p/options.go
416a417,425
> // ProtocolVersion sets the protocolVersion string required by the
> // libp2p Identify protocol.
> func ProtocolVersion(s string) Option {
> return func(cfg *Config) error {
> cfg.ProtocolVersion = s
> return nil
> }
> }
>
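
Usage of the new option (the version string here is an arbitrary example value):

package sketch

import (
    "github.com/libp2p/go-libp2p"
    "github.com/libp2p/go-libp2p/core/host"
)

// newHost builds a host whose identify responses advertise a custom protocol
// version instead of the default "ipfs/0.1.0".
func newHost() (host.Host, error) {
    return libp2p.New(libp2p.ProtocolVersion("myapp/1.0.0"))
}
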
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/options.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/options.go
3a4
> "context"
15c16
< peerSource func(num int) <-chan peer.AddrInfo
---
> peerSource func(ctx context.Context, num int) <-chan peer.AddrInfo
105c106,108
< func WithPeerSource(f func(numPeers int) <-chan peer.AddrInfo, minInterval time.Duration) Option {
---
> // The context.Context passed MAY be canceled when AutoRelay feels satisfied; it will be canceled when the node is shutting down.
> // If the context is canceled you MUST close the output channel at some point.
> func WithPeerSource(f func(ctx context.Context, numPeers int) <-chan peer.AddrInfo, minInterval time.Duration) Option {
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/relay_finder.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/relay_finder.go
62c62
< peerSource func(int) <-chan peer.AddrInfo
---
> peerSource func(context.Context, int) <-chan peer.AddrInfo
85c85
< func newRelayFinder(host *basic.BasicHost, peerSource func(int) <-chan peer.AddrInfo, conf *config) *relayFinder {
---
> func newRelayFinder(host *basic.BasicHost, peerSource func(context.Context, int) <-chan peer.AddrInfo, conf *config) *relayFinder {
209c209
< peerChan := rf.peerSource(rf.conf.maxCandidates)
---
> peerChan := rf.peerSource(ctx, rf.conf.maxCandidates)
238c238
< peerChan = rf.peerSource(rf.conf.maxCandidates)
---
> peerChan = rf.peerSource(ctx, rf.conf.maxCandidates)
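
A peer-source callback written against the new context-aware signature might look like this sketch (candidates is a hypothetical helper; the contract is to stop and close the channel once the context is canceled):

package sketch

import (
    "context"
    "time"

    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/libp2p/go-libp2p/p2p/host/autorelay"
)

// candidates is a hypothetical helper returning up to num relay candidates.
func candidates(num int) []peer.AddrInfo { return nil }

func peerSourceOption() autorelay.Option {
    return autorelay.WithPeerSource(func(ctx context.Context, num int) <-chan peer.AddrInfo {
        out := make(chan peer.AddrInfo)
        go func() {
            defer close(out) // close the channel when done or when ctx is canceled
            for _, pi := range candidates(num) {
                select {
                case out <- pi:
                case <-ctx.Done():
                    return
                }
            }
        }()
        return out
    }, time.Minute)
}
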
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/basic_host.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/basic_host.go
41,44d40
< // The maximum number of address resolution steps we'll perform for a single
< // peer (for all addresses).
< const maxAddressResolution = 32
<
147a144,146
> // ProtocolVersion sets the protocol version for the host.
> ProtocolVersion string
>
229c228,233
< h.ids, err = identify.NewIDService(h, identify.UserAgent(opts.UserAgent), identify.DisableSignedPeerRecord())
---
> h.ids, err = identify.NewIDService(
> h,
> identify.UserAgent(opts.UserAgent),
> identify.ProtocolVersion(opts.ProtocolVersion),
> identify.DisableSignedPeerRecord(),
> )
231c235,239
< h.ids, err = identify.NewIDService(h, identify.UserAgent(opts.UserAgent))
---
> h.ids, err = identify.NewIDService(
> h,
> identify.UserAgent(opts.UserAgent),
> identify.ProtocolVersion(opts.ProtocolVersion),
> )
704,709d711
< resolved, err := h.resolveAddrs(ctx, h.Peerstore().PeerInfo(pi.ID))
< if err != nil {
< return err
< }
< h.Peerstore().AddAddrs(pi.ID, resolved, peerstore.TempAddrTTL)
<
711,772d712
< }
<
< func (h *BasicHost) resolveAddrs(ctx context.Context, pi peer.AddrInfo) ([]ma.Multiaddr, error) {
< proto := ma.ProtocolWithCode(ma.P_P2P).Name
< p2paddr, err := ma.NewMultiaddr("/" + proto + "/" + pi.ID.Pretty())
< if err != nil {
< return nil, err
< }
<
< resolveSteps := 0
<
< // Recursively resolve all addrs.
< //
< // While the toResolve list is non-empty:
< // * Pop an address off.
< // * If the address is fully resolved, add it to the resolved list.
< // * Otherwise, resolve it and add the results to the "to resolve" list.
< toResolve := append(([]ma.Multiaddr)(nil), pi.Addrs...)
< resolved := make([]ma.Multiaddr, 0, len(pi.Addrs))
< for len(toResolve) > 0 {
< // pop the last addr off.
< addr := toResolve[len(toResolve)-1]
< toResolve = toResolve[:len(toResolve)-1]
<
< // if it's resolved, add it to the resolved list.
< if !madns.Matches(addr) {
< resolved = append(resolved, addr)
< continue
< }
<
< resolveSteps++
<
< // We've resolved too many addresses. We can keep all the fully
< // resolved addresses but we'll need to skip the rest.
< if resolveSteps >= maxAddressResolution {
< log.Warnf(
< "peer %s asked us to resolve too many addresses: %s/%s",
< pi.ID,
< resolveSteps,
< maxAddressResolution,
< )
< continue
< }
<
< // otherwise, resolve it
< reqaddr := addr.Encapsulate(p2paddr)
< resaddrs, err := h.maResolver.Resolve(ctx, reqaddr)
< if err != nil {
< log.Infof("error resolving %s: %s", reqaddr, err)
< }
<
< // add the results to the toResolve list.
< for _, res := range resaddrs {
< pi, err := peer.AddrInfoFromP2pAddr(res)
< if err != nil {
< log.Infof("error parsing %s: %s", res, err)
< }
< toResolve = append(toResolve, pi.Addrs...)
< }
< }
<
< return resolved, nil
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_defaults.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_defaults.go
517c517
< Memory: 1 << 20,
---
> Memory: 32 << 20,
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/README.md b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/README.md
452c452
< manager](https://pkg.go.dev/github.com/libp2p/go-libp2p-core/connmgr#ConnManager),
---
> manager](https://pkg.go.dev/github.com/libp2p/go-libp2p/core/connmgr#ConnManager),
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/scope.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/scope.go
4a5,6
> "math"
> "math/big"
81a84,103
> func addInt64WithOverflow(a int64, b int64) (c int64, ok bool) {
> c = a + b
> return c, (c > a) == (b > 0)
> }
>
> // mulInt64WithOverflow checks for overflow in multiplying two int64s. See
> // https://groups.google.com/g/golang-nuts/c/h5oSN5t3Au4/m/KaNQREhZh0QJ
> func mulInt64WithOverflow(a, b int64) (c int64, ok bool) {
> const mostPositive = 1<<63 - 1
> const mostNegative = -(mostPositive + 1)
> c = a * b
> if a == 0 || b == 0 || a == 1 || b == 1 {
> return c, true
> }
> if a == mostNegative || b == mostNegative {
> return c, false
> }
> return c, c/b == a
> }
>
84,85c106,109
< // overflow check; this also has the side effect that we cannot reserve negative memory.
< newmem := rc.memory + rsvp
---
> if rsvp < 0 {
> return fmt.Errorf("can't reserve negative memory. rsvp=%v", rsvp)
> }
>
87c111,130
< threshold := (1 + int64(prio)) * limit / 256
---
> if limit == math.MaxInt64 {
> // Special case where we've set max limits.
> return nil
> }
>
> newmem, addOk := addInt64WithOverflow(rc.memory, rsvp)
>
> threshold, mulOk := mulInt64WithOverflow(1+int64(prio), limit)
> if !mulOk {
> thresholdBig := big.NewInt(limit)
> thresholdBig = thresholdBig.Mul(thresholdBig, big.NewInt(1+int64(prio)))
> thresholdBig.Rsh(thresholdBig, 8) // Divide 256
> if !thresholdBig.IsInt64() {
> // Shouldn't happen since the threshold can only be <= limit
> threshold = limit
> } else {
> threshold = thresholdBig.Int64()
> }
> } else {
> threshold = threshold / 256
> }
89c132
< if newmem > threshold {
---
> if !addOk || newmem > threshold {
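
The helpers guard the reservation math against int64 wraparound. A standalone demonstration of the addition check (helper mirrored from the hunk above):

package main

import "fmt"

// addInt64WithOverflow reports whether a+b wrapped around, mirroring the
// vendored helper: the sum moved in the wrong direction iff it overflowed.
func addInt64WithOverflow(a, b int64) (int64, bool) {
    c := a + b
    return c, (c > a) == (b > 0)
}

func main() {
    if _, ok := addInt64WithOverflow(1<<62, 1<<62); !ok {
        fmt.Println("overflow detected") // 2^63 does not fit in an int64
    }
}
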
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/conn.go b/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/conn.go
8c8
< "github.com/libp2p/go-yamux/v3"
---
> "github.com/libp2p/go-yamux/v4"
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/stream.go b/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/stream.go
8c8
< "github.com/libp2p/go-yamux/v3"
---
> "github.com/libp2p/go-yamux/v4"
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/transport.go b/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/transport.go
10c10
< "github.com/libp2p/go-yamux/v3"
---
> "github.com/libp2p/go-yamux/v4"
40a41,45
> var newSpan func() (yamux.MemoryManager, error)
> if scope != nil {
> newSpan = func() (yamux.MemoryManager, error) { return scope.BeginSpan() }
> }
>
44c49
< s, err = yamux.Server(nc, t.Config(), scope)
---
> s, err = yamux.Server(nc, t.Config(), newSpan)
46c51
< s, err = yamux.Client(nc, t.Config(), scope)
---
> s, err = yamux.Client(nc, t.Config(), newSpan)
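
go-yamux/v4 takes a factory returning a fresh MemoryManager per session instead of the resource scope itself. A sketch of the same nil-safe adaptation in isolation (function name illustrative):

package sketch

import (
    "net"

    "github.com/libp2p/go-libp2p/core/network"
    "github.com/libp2p/go-yamux/v4"
)

// newServerSession adapts a peer scope to yamux v4's factory argument; a nil
// scope simply becomes a nil factory, which disables memory accounting.
func newServerSession(nc net.Conn, cfg *yamux.Config, scope network.PeerScope) (*yamux.Session, error) {
    var newSpan func() (yamux.MemoryManager, error)
    if scope != nil {
        newSpan = func() (yamux.MemoryManager, error) { return scope.BeginSpan() }
    }
    return yamux.Server(nc, cfg, newSpan)
}
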
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/connmgr.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/connmgr.go
9a10
> "github.com/benbjohnson/clock"
29a31,32
> clock clock.Clock
>
77c80
< func (s *segment) tagInfoFor(p peer.ID) *peerInfo {
---
> func (s *segment) tagInfoFor(p peer.ID, now time.Time) *peerInfo {
85c88
< firstSeen: time.Now(), // this timestamp will be updated when the first Connected notification arrives.
---
> firstSeen: now, // this timestamp will be updated when the first Connected notification arrives.
104a108
> clock: clock.New(),
118a123
> clock: cfg.clock,
170c175
< cm.lastTrim = time.Now()
---
> cm.lastTrim = cm.clock.Now()
246,258c251,255
< func (p peerInfos) SortByValue() {
< sort.Slice(p, func(i, j int) bool {
< left, right := p[i], p[j]
< // temporary peers are preferred for pruning.
< if left.temp != right.temp {
< return left.temp
< }
< // otherwise, compare by value.
< return left.value < right.value
< })
< }
<
< func (p peerInfos) SortByValueAndStreams() {
---
> // SortByValueAndStreams sorts peerInfos by their value and stream count. It
> // will sort peers with no streams before those with streams (all else being
> // equal). If `sortByMoreStreams` is true it will sort peers with more streams
> // before those with fewer streams. This is useful to prioritize freeing memory.
> func (p peerInfos) SortByValueAndStreams(sortByMoreStreams bool) {
280a278,281
> // prefer closing inactive connections (no streams open)
> if rightStreams != leftStreams && (leftStreams == 0 || rightStreams == 0) {
> return leftStreams < rightStreams
> }
285,286c286,292
< // prune connections with a higher number of streams first
< return rightStreams < leftStreams
---
>
> if sortByMoreStreams {
> // prune connections with a higher number of streams first
> return rightStreams < leftStreams
> } else {
> return leftStreams < rightStreams
> }
313c319
< ticker := time.NewTicker(interval)
---
> ticker := cm.clock.Ticker(interval)
338c344
< cm.lastTrim = time.Now()
---
> cm.lastTrim = cm.clock.Now()
371c377
< candidates.SortByValueAndStreams()
---
> candidates.SortByValueAndStreams(true)
401c407
< candidates.SortByValueAndStreams()
---
> candidates.SortByValueAndStreams(true)
429c435
< gracePeriodStart := time.Now().Add(-cm.cfg.gracePeriod)
---
> gracePeriodStart := cm.clock.Now().Add(-cm.cfg.gracePeriod)
462c468
< candidates.SortByValue()
---
> candidates.SortByValueAndStreams(false)
531c537
< pi := s.tagInfoFor(p)
---
> pi := s.tagInfoFor(p, cm.clock.Now())
561c567
< pi := s.tagInfoFor(p)
---
> pi := s.tagInfoFor(p, cm.clock.Now())
631c637
< firstSeen: time.Now(),
---
> firstSeen: cm.clock.Now(),
642c648
< pinfo.firstSeen = time.Now()
---
> pinfo.firstSeen = cm.clock.Now()
651c657
< pinfo.conns[c] = time.Now()
---
> pinfo.conns[c] = cm.clock.Now()
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/decay.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/decay.go
224c224
< p := s.tagInfoFor(peer)
---
> p := s.tagInfoFor(peer, d.clock.Now())
247c247
< p := s.tagInfoFor(rm.peer)
---
> p := s.tagInfoFor(rm.peer, d.clock.Now())
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/options.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/options.go
5a6,7
>
> "github.com/benbjohnson/clock"
15a18
> clock clock.Clock
24a28,35
> return nil
> }
> }
>
> // WithClock sets the internal clock impl
> func WithClock(c clock.Clock) Option {
> return func(cfg *config) error {
> cfg.clock = c
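
The injected clock exists mainly so tests can drive trim timing deterministically. A sketch using a mock clock (assuming the usual NewConnManager(low, high, opts...) constructor):

package sketch

import (
    "github.com/benbjohnson/clock"
    "github.com/libp2p/go-libp2p/p2p/net/connmgr"
)

// newTestConnMgr wires a mock clock into the connection manager; a test can
// then call mk.Add(d) to advance time past grace periods and trim intervals.
func newTestConnMgr() (*connmgr.BasicConnMgr, *clock.Mock, error) {
    mk := clock.NewMock()
    cm, err := connmgr.NewConnManager(10, 100, connmgr.WithClock(mk))
    return cm, mk, err
}
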
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_dial.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_dial.go
12a13
> "github.com/libp2p/go-libp2p/core/peerstore"
15a17
> madns "github.com/multiformats/go-multiaddr-dns"
18a21,24
> // The maximum number of address resolution steps we'll perform for a single
> // peer (for all addresses).
> const maxAddressResolution = 32
>
295c301,326
< goodAddrs := s.filterKnownUndialables(p, peerAddrs)
---
> peerAddrsAfterTransportResolved := make([]ma.Multiaddr, 0, len(peerAddrs))
> for _, a := range peerAddrs {
> tpt := s.TransportForDialing(a)
> resolver, ok := tpt.(transport.Resolver)
> if ok {
> resolvedAddrs, err := resolver.Resolve(ctx, a)
> if err != nil {
> log.Warnf("Failed to resolve multiaddr %s by transport %v: %v", a, tpt, err)
> continue
> }
> peerAddrsAfterTransportResolved = append(peerAddrsAfterTransportResolved, resolvedAddrs...)
> } else {
> peerAddrsAfterTransportResolved = append(peerAddrsAfterTransportResolved, a)
> }
> }
>
> // Resolve dns or dnsaddrs
> resolved, err := s.resolveAddrs(ctx, peer.AddrInfo{
> ID: p,
> Addrs: peerAddrsAfterTransportResolved,
> })
> if err != nil {
> return nil, err
> }
>
> goodAddrs := s.filterKnownUndialables(p, resolved)
304c335,399
< return goodAddrs, nil
---
> s.peers.AddAddrs(p, goodAddrs, peerstore.TempAddrTTL)
>
> return resolved, nil
> }
>
> func (s *Swarm) resolveAddrs(ctx context.Context, pi peer.AddrInfo) ([]ma.Multiaddr, error) {
> proto := ma.ProtocolWithCode(ma.P_P2P).Name
> p2paddr, err := ma.NewMultiaddr("/" + proto + "/" + pi.ID.Pretty())
> if err != nil {
> return nil, err
> }
>
> resolveSteps := 0
>
> // Recursively resolve all addrs.
> //
> // While the toResolve list is non-empty:
> // * Pop an address off.
> // * If the address is fully resolved, add it to the resolved list.
> // * Otherwise, resolve it and add the results to the "to resolve" list.
> toResolve := append(([]ma.Multiaddr)(nil), pi.Addrs...)
> resolved := make([]ma.Multiaddr, 0, len(pi.Addrs))
> for len(toResolve) > 0 {
> // pop the last addr off.
> addr := toResolve[len(toResolve)-1]
> toResolve = toResolve[:len(toResolve)-1]
>
> // if it's resolved, add it to the resolved list.
> if !madns.Matches(addr) {
> resolved = append(resolved, addr)
> continue
> }
>
> resolveSteps++
>
> // We've resolved too many addresses. We can keep all the fully
> // resolved addresses but we'll need to skip the rest.
> if resolveSteps >= maxAddressResolution {
> log.Warnf(
> "peer %s asked us to resolve too many addresses: %s/%s",
> pi.ID,
> resolveSteps,
> maxAddressResolution,
> )
> continue
> }
>
> // otherwise, resolve it
> reqaddr := addr.Encapsulate(p2paddr)
> resaddrs, err := s.maResolver.Resolve(ctx, reqaddr)
> if err != nil {
> log.Infof("error resolving %s: %s", reqaddr, err)
> }
>
> // add the results to the toResolve list.
> for _, res := range resaddrs {
> pi, err := peer.AddrInfoFromP2pAddr(res)
> if err != nil {
> log.Infof("error parsing %s: %s", res, err)
> }
> toResolve = append(toResolve, pi.Addrs...)
> }
> }
>
> return resolved, nil
347c442
< return ma.FilterAddrs(addrs,
---
> return maybeRemoveWebTransportAddrs(ma.FilterAddrs(addrs,
355c450
< )
---
> ))
436a532,569
> }
>
> func isWebTransport(addr ma.Multiaddr) bool {
> _, err := addr.ValueForProtocol(ma.P_WEBTRANSPORT)
> return err == nil
> }
>
> func isQUIC(addr ma.Multiaddr) bool {
> _, err := addr.ValueForProtocol(ma.P_QUIC)
> return err == nil && !isWebTransport(addr)
> }
>
> // If we have QUIC addresses, we don't want to dial WebTransport addresses.
> // It's better to have a native QUIC connection.
> // Note that this is a hack. The correct solution would be a proper
> // Happy-Eyeballs-style dialing.
> func maybeRemoveWebTransportAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
> var hasQuic, hasWebTransport bool
> for _, addr := range addrs {
> if isQUIC(addr) {
> hasQuic = true
> }
> if isWebTransport(addr) {
> hasWebTransport = true
> }
> }
> if !hasWebTransport || !hasQuic {
> return addrs
> }
> var c int
> for _, addr := range addrs {
> if isWebTransport(addr) {
> continue
> }
> addrs[c] = addr
> c++
> }
> return addrs[:c]
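
The effect of this filter, reproduced standalone (the loop mirrors the unexported helper; it only fires when both address kinds are present):

package main

import (
    "fmt"

    ma "github.com/multiformats/go-multiaddr"
)

func main() {
    addrs := []ma.Multiaddr{
        ma.StringCast("/ip4/1.2.3.4/udp/4001/quic"),
        ma.StringCast("/ip4/1.2.3.4/udp/4002/quic/webtransport"),
    }
    // Since a native QUIC addr is present, drop the WebTransport ones.
    filtered := addrs[:0]
    for _, a := range addrs {
        if _, err := a.ValueForProtocol(ma.P_WEBTRANSPORT); err == nil {
            continue
        }
        filtered = append(filtered, a)
    }
    fmt.Println(filtered) // [/ip4/1.2.3.4/udp/4001/quic]
}
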
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm.go
21a22
> madns "github.com/multiformats/go-multiaddr-dns"
56a58,65
> // WithMultiaddrResolver sets a custom multiaddress resolver
> func WithMultiaddrResolver(maResolver *madns.Resolver) Option {
> return func(s *Swarm) error {
> s.maResolver = maResolver
> return nil
> }
> }
>
129a139,140
> maResolver *madns.Resolver
>
155a167
> maResolver: madns.DefaultResolver,
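
A custom resolver can now be injected per swarm; a sketch (the net.Resolver settings are arbitrary example values):

package sketch

import (
    "net"

    "github.com/libp2p/go-libp2p/p2p/net/swarm"
    madns "github.com/multiformats/go-multiaddr-dns"
)

// customResolverOption builds the new swarm option around a madns resolver
// backed by Go's built-in DNS client.
func customResolverOption() (swarm.Option, error) {
    r, err := madns.NewResolver(madns.WithDefaultResolver(&net.Resolver{PreferGo: true}))
    if err != nil {
        return nil, err
    }
    return swarm.WithMultiaddrResolver(r), nil
}
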
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_transport.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_transport.go
21a22
>
29,36c30,35
<
< for _, p := range protocols {
< transport, ok := s.transports.m[p.Code]
< if !ok {
< continue
< }
< if transport.Proxy() {
< return transport
---
> if isRelayAddr(a) {
> return s.transports.m[ma.P_CIRCUIT]
> }
> for _, t := range s.transports.m {
> if t.CanDial(a) {
> return t
39,40c38
<
< return s.transports.m[protocols[len(protocols)-1].Code]
---
> return nil
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id.go
36,40c36
< // LibP2PVersion holds the current protocol version for a client running this code
< // TODO(jbenet): fix the versioning mess.
< // XXX: Don't change this till 2020. You'll break all go-ipfs versions prior to
< // 0.4.17 which asserted an exact version match.
< const LibP2PVersion = "ipfs/0.1.0"
---
> const DefaultProtocolVersion = "ipfs/0.1.0"
92,93c88,90
< Host host.Host
< UserAgent string
---
> Host host.Host
> UserAgent string
> ProtocolVersion string
137a135,139
> protocolVersion := DefaultProtocolVersion
> if cfg.protocolVersion != "" {
> protocolVersion = cfg.protocolVersion
> }
>
139,140c141,143
< Host: h,
< UserAgent: userAgent,
---
> Host: h,
> UserAgent: userAgent,
> ProtocolVersion: protocolVersion,
191,192c194,197
< sub, err := ids.Host.EventBus().Subscribe([]interface{}{&event.EvtLocalProtocolsUpdated{},
< &event.EvtLocalAddressesUpdated{}}, eventbus.BufSize(256))
---
> sub, err := ids.Host.EventBus().Subscribe([]interface{}{
> &event.EvtLocalProtocolsUpdated{},
> &event.EvtLocalAddressesUpdated{},
> }, eventbus.BufSize(256))
492d496
<
544,547c548,549
< pv := LibP2PVersion
< av := ids.UserAgent
< mes.ProtocolVersion = &pv
< mes.AgentVersion = &av
---
> mes.ProtocolVersion = &ids.ProtocolVersion
> mes.AgentVersion = &ids.UserAgent
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/opts.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/opts.go
3a4
> protocolVersion string
9a11,18
>
> // ProtocolVersion sets the protocol version string that will be used to
> // identify the family of protocols used by the peer.
> func ProtocolVersion(s string) Option {
> return func(cfg *config) {
> cfg.protocolVersion = s
> }
> }
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/handshake.go b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/handshake.go
13,17d12
< "github.com/minio/sha256-simd"
< "golang.org/x/crypto/chacha20poly1305"
<
< "github.com/libp2p/go-libp2p/p2p/security/noise/pb"
<
19a15
> "github.com/libp2p/go-libp2p/p2p/security/noise/pb"
23a20
> "github.com/minio/sha256-simd"
68,72d64
< payload, err := s.generateHandshakePayload(kp)
< if err != nil {
< return err
< }
<
82,87c74,75
< // We can re-use this buffer for all handshake messages as its size
< // will be the size of the maximum handshake message for the Noise XX pattern.
< // Also, since we prefix every noise handshake message with its length, we need to account for
< // it when we fetch the buffer from the pool
< maxMsgSize := 2*noise.DH25519.DHLen() + len(payload) + 2*chacha20poly1305.Overhead
< hbuf := pool.Get(maxMsgSize + LengthPrefixLength)
---
> // We can re-use this buffer for all handshake messages.
> hbuf := pool.Get(2 << 10)
92d79
< // do not send the payload just yet, as it would be plaintext; not secret.
94,95c81
< err = s.sendHandshakeMessage(hs, nil, hbuf)
< if err != nil {
---
> if err := s.sendHandshakeMessage(hs, nil, hbuf); err != nil {
104c90
< err = s.handleRemoteHandshakePayload(plaintext, hs.PeerStatic())
---
> rcvdEd, err := s.handleRemoteHandshakePayload(plaintext, hs.PeerStatic())
107a94,98
> if s.initiatorEarlyDataHandler != nil {
> if err := s.initiatorEarlyDataHandler.Received(ctx, s.insecureConn, rcvdEd); err != nil {
> return err
> }
> }
111c102,106
< err = s.sendHandshakeMessage(hs, payload, hbuf)
---
> var ed *pb.NoiseExtensions
> if s.initiatorEarlyDataHandler != nil {
> ed = s.initiatorEarlyDataHandler.Send(ctx, s.insecureConn, s.remoteID)
> }
> payload, err := s.generateHandshakePayload(kp, ed)
112a108,110
> return err
> }
> if err := s.sendHandshakeMessage(hs, payload, hbuf); err != nil {
114a113
> return nil
117d115
< // We don't expect any payload on the first message.
125c123,127
< err = s.sendHandshakeMessage(hs, payload, hbuf)
---
> var ed *pb.NoiseExtensions
> if s.responderEarlyDataHandler != nil {
> ed = s.responderEarlyDataHandler.Send(ctx, s.insecureConn, s.remoteID)
> }
> payload, err := s.generateHandshakePayload(kp, ed)
126a129,131
> return err
> }
> if err := s.sendHandshakeMessage(hs, payload, hbuf); err != nil {
135c140
< err = s.handleRemoteHandshakePayload(plaintext, hs.PeerStatic())
---
> rcvdEd, err := s.handleRemoteHandshakePayload(plaintext, hs.PeerStatic())
138a144,149
> if s.responderEarlyDataHandler != nil {
> if err := s.responderEarlyDataHandler.Received(ctx, s.insecureConn, rcvdEd); err != nil {
> return err
> }
> }
> return nil
140,141d150
<
< return nil
218,219c227,228
< func (s *secureSession) generateHandshakePayload(localStatic noise.DHKey) ([]byte, error) {
< // obtain the public key from the handshake session so we can sign it with
---
> func (s *secureSession) generateHandshakePayload(localStatic noise.DHKey, ext *pb.NoiseExtensions) ([]byte, error) {
> // obtain the public key from the handshake session, so we can sign it with
234,237c243,247
< payload := new(pb.NoiseHandshakePayload)
< payload.IdentityKey = localKeyRaw
< payload.IdentitySig = signedPayload
< payloadEnc, err := proto.Marshal(payload)
---
> payloadEnc, err := proto.Marshal(&pb.NoiseHandshakePayload{
> IdentityKey: localKeyRaw,
> IdentitySig: signedPayload,
> Extensions: ext,
> })
246c256,257
< func (s *secureSession) handleRemoteHandshakePayload(payload []byte, remoteStatic []byte) error {
---
> // It returns the data attached to the payload.
> func (s *secureSession) handleRemoteHandshakePayload(payload []byte, remoteStatic []byte) (*pb.NoiseExtensions, error) {
251c262
< return fmt.Errorf("error unmarshaling remote handshake payload: %w", err)
---
> return nil, fmt.Errorf("error unmarshaling remote handshake payload: %w", err)
257c268
< return err
---
> return nil, err
261c272
< return err
---
> return nil, err
269c280
< return fmt.Errorf("peer id mismatch: expected %s, but remote key matches %s", s.remoteID.Pretty(), id.Pretty())
---
> return nil, fmt.Errorf("peer id mismatch: expected %s, but remote key matches %s", s.remoteID.Pretty(), id.Pretty())
277c288
< return fmt.Errorf("error verifying signature: %w", err)
---
> return nil, fmt.Errorf("error verifying signature: %w", err)
279c290
< return fmt.Errorf("handshake signature invalid")
---
> return nil, fmt.Errorf("handshake signature invalid")
285c296
< return nil
---
> return nhp.Extensions, nil
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/payload.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/payload.pb.go
24a25,68
> type NoiseExtensions struct {
> WebtransportCerthashes [][]byte `protobuf:"bytes,1,rep,name=webtransport_certhashes,json=webtransportCerthashes" json:"webtransport_certhashes,omitempty"`
> }
>
> func (m *NoiseExtensions) Reset() { *m = NoiseExtensions{} }
> func (m *NoiseExtensions) String() string { return proto.CompactTextString(m) }
> func (*NoiseExtensions) ProtoMessage() {}
> func (*NoiseExtensions) Descriptor() ([]byte, []int) {
> return fileDescriptor_678c914f1bee6d56, []int{0}
> }
> func (m *NoiseExtensions) XXX_Unmarshal(b []byte) error {
> return m.Unmarshal(b)
> }
> func (m *NoiseExtensions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
> if deterministic {
> return xxx_messageInfo_NoiseExtensions.Marshal(b, m, deterministic)
> } else {
> b = b[:cap(b)]
> n, err := m.MarshalToSizedBuffer(b)
> if err != nil {
> return nil, err
> }
> return b[:n], nil
> }
> }
> func (m *NoiseExtensions) XXX_Merge(src proto.Message) {
> xxx_messageInfo_NoiseExtensions.Merge(m, src)
> }
> func (m *NoiseExtensions) XXX_Size() int {
> return m.Size()
> }
> func (m *NoiseExtensions) XXX_DiscardUnknown() {
> xxx_messageInfo_NoiseExtensions.DiscardUnknown(m)
> }
>
> var xxx_messageInfo_NoiseExtensions proto.InternalMessageInfo
>
> func (m *NoiseExtensions) GetWebtransportCerthashes() [][]byte {
> if m != nil {
> return m.WebtransportCerthashes
> }
> return nil
> }
>
26,28c70,72
< IdentityKey []byte `protobuf:"bytes,1,opt,name=identity_key,json=identityKey,proto3" json:"identity_key,omitempty"`
< IdentitySig []byte `protobuf:"bytes,2,opt,name=identity_sig,json=identitySig,proto3" json:"identity_sig,omitempty"`
< Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
---
> IdentityKey []byte `protobuf:"bytes,1,opt,name=identity_key,json=identityKey" json:"identity_key"`
> IdentitySig []byte `protobuf:"bytes,2,opt,name=identity_sig,json=identitySig" json:"identity_sig"`
> Extensions *NoiseExtensions `protobuf:"bytes,4,opt,name=extensions" json:"extensions,omitempty"`
35c79
< return fileDescriptor_678c914f1bee6d56, []int{0}
---
> return fileDescriptor_678c914f1bee6d56, []int{1}
78c122
< func (m *NoiseHandshakePayload) GetData() []byte {
---
> func (m *NoiseHandshakePayload) GetExtensions() *NoiseExtensions {
80c124
< return m.Data
---
> return m.Extensions
85a130
> proto.RegisterType((*NoiseExtensions)(nil), "pb.NoiseExtensions")
92c137
< // 152 bytes of a gzipped FileDescriptorProto
---
> // 221 bytes of a gzipped FileDescriptorProto
94,102c139,183
< 0xc9, 0x4f, 0x4c, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2a, 0x48, 0x52, 0x2a, 0xe4,
< 0x12, 0xf5, 0xcb, 0xcf, 0x2c, 0x4e, 0xf5, 0x48, 0xcc, 0x4b, 0x29, 0xce, 0x48, 0xcc, 0x4e, 0x0d,
< 0x80, 0x28, 0x11, 0x52, 0xe4, 0xe2, 0xc9, 0x4c, 0x49, 0xcd, 0x2b, 0xc9, 0x2c, 0xa9, 0x8c, 0xcf,
< 0x4e, 0xad, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0xe2, 0x86, 0x89, 0x79, 0xa7, 0x56, 0xa2,
< 0x28, 0x29, 0xce, 0x4c, 0x97, 0x60, 0x42, 0x55, 0x12, 0x9c, 0x99, 0x2e, 0x24, 0xc4, 0xc5, 0x92,
< 0x92, 0x58, 0x92, 0x28, 0xc1, 0x0c, 0x96, 0x02, 0xb3, 0x9d, 0x24, 0x4e, 0x3c, 0x92, 0x63, 0xbc,
< 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63,
< 0xb8, 0xf1, 0x58, 0x8e, 0x21, 0x89, 0x0d, 0xec, 0x2e, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff,
< 0x51, 0x37, 0xd7, 0x40, 0xa8, 0x00, 0x00, 0x00,
---
> 0xc9, 0x4f, 0x4c, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2a, 0x48, 0x52, 0xf2, 0xe2,
> 0xe2, 0xf7, 0xcb, 0xcf, 0x2c, 0x4e, 0x75, 0xad, 0x28, 0x49, 0xcd, 0x2b, 0xce, 0xcc, 0xcf, 0x2b,
> 0x16, 0x32, 0xe7, 0x12, 0x2f, 0x4f, 0x4d, 0x2a, 0x29, 0x4a, 0xcc, 0x2b, 0x2e, 0xc8, 0x2f, 0x2a,
> 0x89, 0x4f, 0x4e, 0x2d, 0x2a, 0xc9, 0x48, 0x2c, 0xce, 0x48, 0x2d, 0x96, 0x60, 0x54, 0x60, 0xd6,
> 0xe0, 0x09, 0x12, 0x43, 0x96, 0x76, 0x86, 0xcb, 0x2a, 0xcd, 0x63, 0xe4, 0x12, 0x05, 0x1b, 0xe6,
> 0x91, 0x98, 0x97, 0x52, 0x9c, 0x91, 0x98, 0x9d, 0x1a, 0x00, 0xb1, 0x4f, 0x48, 0x9d, 0x8b, 0x27,
> 0x33, 0x25, 0x35, 0xaf, 0x24, 0xb3, 0xa4, 0x32, 0x3e, 0x3b, 0xb5, 0x52, 0x82, 0x51, 0x81, 0x51,
> 0x83, 0xc7, 0x89, 0xe5, 0xc4, 0x3d, 0x79, 0x86, 0x20, 0x6e, 0x98, 0x8c, 0x77, 0x6a, 0x25, 0x8a,
> 0xc2, 0xe2, 0xcc, 0x74, 0x09, 0x26, 0x6c, 0x0a, 0x83, 0x33, 0xd3, 0x85, 0x8c, 0xb9, 0xb8, 0x52,
> 0xe1, 0x4e, 0x96, 0x60, 0x51, 0x60, 0xd4, 0xe0, 0x36, 0x12, 0xd6, 0x2b, 0x48, 0xd2, 0x43, 0xf3,
> 0x4d, 0x10, 0x92, 0x32, 0x27, 0x89, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0,
> 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x00,
> 0x04, 0x00, 0x00, 0xff, 0xff, 0xb2, 0xb0, 0x39, 0x45, 0x1a, 0x01, 0x00, 0x00,
> }
>
> func (m *NoiseExtensions) Marshal() (dAtA []byte, err error) {
> size := m.Size()
> dAtA = make([]byte, size)
> n, err := m.MarshalToSizedBuffer(dAtA[:size])
> if err != nil {
> return nil, err
> }
> return dAtA[:n], nil
> }
>
> func (m *NoiseExtensions) MarshalTo(dAtA []byte) (int, error) {
> size := m.Size()
> return m.MarshalToSizedBuffer(dAtA[:size])
> }
>
> func (m *NoiseExtensions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
> i := len(dAtA)
> _ = i
> var l int
> _ = l
> if len(m.WebtransportCerthashes) > 0 {
> for iNdEx := len(m.WebtransportCerthashes) - 1; iNdEx >= 0; iNdEx-- {
> i -= len(m.WebtransportCerthashes[iNdEx])
> copy(dAtA[i:], m.WebtransportCerthashes[iNdEx])
> i = encodeVarintPayload(dAtA, i, uint64(len(m.WebtransportCerthashes[iNdEx])))
> i--
> dAtA[i] = 0xa
> }
> }
> return len(dAtA) - i, nil
125,128c206,214
< if len(m.Data) > 0 {
< i -= len(m.Data)
< copy(dAtA[i:], m.Data)
< i = encodeVarintPayload(dAtA, i, uint64(len(m.Data)))
---
> if m.Extensions != nil {
> {
> size, err := m.Extensions.MarshalToSizedBuffer(dAtA[:i])
> if err != nil {
> return 0, err
> }
> i -= size
> i = encodeVarintPayload(dAtA, i, uint64(size))
> }
130c216
< dAtA[i] = 0x1a
---
> dAtA[i] = 0x22
132c218
< if len(m.IdentitySig) > 0 {
---
> if m.IdentitySig != nil {
139c225
< if len(m.IdentityKey) > 0 {
---
> if m.IdentityKey != nil {
159a246,260
> func (m *NoiseExtensions) Size() (n int) {
> if m == nil {
> return 0
> }
> var l int
> _ = l
> if len(m.WebtransportCerthashes) > 0 {
> for _, b := range m.WebtransportCerthashes {
> l = len(b)
> n += 1 + l + sovPayload(uint64(l))
> }
> }
> return n
> }
>
166,167c267,268
< l = len(m.IdentityKey)
< if l > 0 {
---
> if m.IdentityKey != nil {
> l = len(m.IdentityKey)
170,171c271,272
< l = len(m.IdentitySig)
< if l > 0 {
---
> if m.IdentitySig != nil {
> l = len(m.IdentitySig)
174,175c275,276
< l = len(m.Data)
< if l > 0 {
---
> if m.Extensions != nil {
> l = m.Extensions.Size()
186a288,369
> func (m *NoiseExtensions) Unmarshal(dAtA []byte) error {
> l := len(dAtA)
> iNdEx := 0
> for iNdEx < l {
> preIndex := iNdEx
> var wire uint64
> for shift := uint(0); ; shift += 7 {
> if shift >= 64 {
> return ErrIntOverflowPayload
> }
> if iNdEx >= l {
> return io.ErrUnexpectedEOF
> }
> b := dAtA[iNdEx]
> iNdEx++
> wire |= uint64(b&0x7F) << shift
> if b < 0x80 {
> break
> }
> }
> fieldNum := int32(wire >> 3)
> wireType := int(wire & 0x7)
> if wireType == 4 {
> return fmt.Errorf("proto: NoiseExtensions: wiretype end group for non-group")
> }
> if fieldNum <= 0 {
> return fmt.Errorf("proto: NoiseExtensions: illegal tag %d (wire type %d)", fieldNum, wire)
> }
> switch fieldNum {
> case 1:
> if wireType != 2 {
> return fmt.Errorf("proto: wrong wireType = %d for field WebtransportCerthashes", wireType)
> }
> var byteLen int
> for shift := uint(0); ; shift += 7 {
> if shift >= 64 {
> return ErrIntOverflowPayload
> }
> if iNdEx >= l {
> return io.ErrUnexpectedEOF
> }
> b := dAtA[iNdEx]
> iNdEx++
> byteLen |= int(b&0x7F) << shift
> if b < 0x80 {
> break
> }
> }
> if byteLen < 0 {
> return ErrInvalidLengthPayload
> }
> postIndex := iNdEx + byteLen
> if postIndex < 0 {
> return ErrInvalidLengthPayload
> }
> if postIndex > l {
> return io.ErrUnexpectedEOF
> }
> m.WebtransportCerthashes = append(m.WebtransportCerthashes, make([]byte, postIndex-iNdEx))
> copy(m.WebtransportCerthashes[len(m.WebtransportCerthashes)-1], dAtA[iNdEx:postIndex])
> iNdEx = postIndex
> default:
> iNdEx = preIndex
> skippy, err := skipPayload(dAtA[iNdEx:])
> if err != nil {
> return err
> }
> if (skippy < 0) || (iNdEx+skippy) < 0 {
> return ErrInvalidLengthPayload
> }
> if (iNdEx + skippy) > l {
> return io.ErrUnexpectedEOF
> }
> iNdEx += skippy
> }
> }
>
> if iNdEx > l {
> return io.ErrUnexpectedEOF
> }
> return nil
> }
284c467
< case 3:
---
> case 4:
286c469
< return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
---
> return fmt.Errorf("proto: wrong wireType = %d for field Extensions", wireType)
288c471
< var byteLen int
---
> var msglen int
298c481
< byteLen |= int(b&0x7F) << shift
---
> msglen |= int(b&0x7F) << shift
303c486
< if byteLen < 0 {
---
> if msglen < 0 {
306c489
< postIndex := iNdEx + byteLen
---
> postIndex := iNdEx + msglen
313,315c496,500
< m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
< if m.Data == nil {
< m.Data = []byte{}
---
> if m.Extensions == nil {
> m.Extensions = &NoiseExtensions{}
> }
> if err := m.Extensions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
> return err
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/payload.proto b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/payload.proto
1c1
< syntax = "proto3";
---
> syntax = "proto2";
3a4,7
> message NoiseExtensions {
> repeated bytes webtransport_certhashes = 1;
> }
>
5,7c9,11
< bytes identity_key = 1;
< bytes identity_sig = 2;
< bytes data = 3;
---
> optional bytes identity_key = 1;
> optional bytes identity_sig = 2;
> optional NoiseExtensions extensions = 4;
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/session.go b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/session.go
39a40,41
>
> initiatorEarlyDataHandler, responderEarlyDataHandler EarlyDataHandler
44c46
< func newSecureSession(tpt *Transport, ctx context.Context, insecure net.Conn, remote peer.ID, prologue []byte, initiator bool) (*secureSession, error) {
---
> func newSecureSession(tpt *Transport, ctx context.Context, insecure net.Conn, remote peer.ID, prologue []byte, initiatorEDH, responderEDH EarlyDataHandler, initiator bool) (*secureSession, error) {
46,52c48,56
< insecureConn: insecure,
< insecureReader: bufio.NewReader(insecure),
< initiator: initiator,
< localID: tpt.localID,
< localKey: tpt.privateKey,
< remoteID: remote,
< prologue: prologue,
---
> insecureConn: insecure,
> insecureReader: bufio.NewReader(insecure),
> initiator: initiator,
> localID: tpt.localID,
> localKey: tpt.privateKey,
> remoteID: remote,
> prologue: prologue,
> initiatorEarlyDataHandler: initiatorEDH,
> responderEarlyDataHandler: responderEDH,
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/session_transport.go b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/session_transport.go
9a10,11
> "github.com/libp2p/go-libp2p/p2p/security/noise/pb"
>
14a17,52
> // Prologue sets a prologue for the Noise session.
> // The handshake will only complete successfully if both parties set the same prologue.
> // See https://noiseprotocol.org/noise.html#prologue for details.
> func Prologue(prologue []byte) SessionOption {
> return func(s *SessionTransport) error {
> s.prologue = prologue
> return nil
> }
> }
>
> // EarlyDataHandler defines what the application payload is for either the second
> // (if responder) or third (if initiator) handshake message, and defines the
> // logic for handling the other side's early data. Note the early data in the
> // second handshake message is encrypted, but the peer is not authenticated at that point.
> type EarlyDataHandler interface {
> // Send for the initiator is called for the client before sending the third
> // handshake message. Defines the application payload for the third message.
> // Send for the responder is called before sending the second handshake message.
> Send(context.Context, net.Conn, peer.ID) *pb.NoiseExtensions
> // Received for the initiator is called when the second handshake message
> // from the responder is received.
> // Received for the responder is called when the third handshake message
> // from the initiator is received.
> Received(context.Context, net.Conn, *pb.NoiseExtensions) error
> }
>
> // EarlyData sets the `EarlyDataHandler` for the initiator and responder roles.
> // See `EarlyDataHandler` for more details.
> func EarlyData(initiator, responder EarlyDataHandler) SessionOption {
> return func(s *SessionTransport) error {
> s.initiatorEarlyDataHandler = initiator
> s.responderEarlyDataHandler = responder
> return nil
> }
> }
>
22a61,62
>
> initiatorEarlyDataHandler, responderEarlyDataHandler EarlyDataHandler
28c68
< c, err := newSecureSession(i.t, ctx, insecure, p, i.prologue, false)
---
> c, err := newSecureSession(i.t, ctx, insecure, p, i.prologue, i.initiatorEarlyDataHandler, i.responderEarlyDataHandler, false)
40,57c80
< return newSecureSession(i.t, ctx, insecure, p, i.prologue, true)
< }
<
< func (t *Transport) WithSessionOptions(opts ...SessionOption) (sec.SecureTransport, error) {
< st := &SessionTransport{t: t}
< for _, opt := range opts {
< if err := opt(st); err != nil {
< return nil, err
< }
< }
< return st, nil
< }
<
< func Prologue(prologue []byte) SessionOption {
< return func(s *SessionTransport) error {
< s.prologue = prologue
< return nil
< }
---
> return newSecureSession(i.t, ctx, insecure, p, i.prologue, i.initiatorEarlyDataHandler, i.responderEarlyDataHandler, true)
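
A concrete EarlyDataHandler, per the interface added in session_transport.go; this sketch advertises WebTransport certificate hashes, the use case the new extension targets (type and field contents illustrative):

package sketch

import (
    "context"
    "net"

    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/libp2p/go-libp2p/p2p/security/noise"
    "github.com/libp2p/go-libp2p/p2p/security/noise/pb"
)

type certHashHandler struct{ hashes [][]byte }

// Send attaches our certificate hashes to the handshake payload.
func (h *certHashHandler) Send(_ context.Context, _ net.Conn, _ peer.ID) *pb.NoiseExtensions {
    return &pb.NoiseExtensions{WebtransportCerthashes: h.hashes}
}

// Received would validate the peer's advertised hashes; accept-all here.
func (h *certHashHandler) Received(_ context.Context, _ net.Conn, _ *pb.NoiseExtensions) error {
    return nil
}

func sessionOption(hashes [][]byte) noise.SessionOption {
    h := &certHashHandler{hashes: hashes}
    return noise.EarlyData(h, h)
}
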
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/transport.go b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/transport.go
44c44
< c, err := newSecureSession(t, ctx, insecure, p, nil, false)
---
> c, err := newSecureSession(t, ctx, insecure, p, nil, nil, nil, false)
56c56,66
< return newSecureSession(t, ctx, insecure, p, nil, true)
---
> return newSecureSession(t, ctx, insecure, p, nil, nil, nil, true)
> }
>
> func (t *Transport) WithSessionOptions(opts ...SessionOption) (sec.SecureTransport, error) {
> st := &SessionTransport{t: t}
> for _, opt := range opts {
> if err := opt(st); err != nil {
> return nil, err
> }
> }
> return st, nil
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/conn.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/conn.go
4a5
> "net"
14a16,23
> type pConn interface {
> net.PacketConn
>
> // count conn reference
> DecreaseCount()
> IncreaseCount()
> }
>
17c26
< pconn *reuseConn
---
> pconn pConn
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/listener.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/listener.go
23c23
< conn *reuseConn
---
> conn pConn
33c33
< func newListener(rconn *reuseConn, t *transport, localPeer peer.ID, key ic.PrivKey, identity *p2ptls.Identity, rcmgr network.ResourceManager) (tpt.Listener, error) {
---
> func newListener(pconn pConn, t *transport, localPeer peer.ID, key ic.PrivKey, identity *p2ptls.Identity, rcmgr network.ResourceManager) (tpt.Listener, error) {
43c43
< ln, err := quicListen(rconn, &tlsConf, t.serverConfig)
---
> ln, err := quicListen(pconn, &tlsConf, t.serverConfig)
52c52
< conn: rconn,
---
> conn: pconn,
148c148,158
< return l.quicListener.Close()
---
>
> if err := l.quicListener.Close(); err != nil {
> return err
> }
>
> if _, ok := l.conn.(*noreuseConn); ok {
> // if we use a `noreuseConn`, close the underlying connection
> return l.conn.Close()
> }
>
> return nil
Only in b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic: options.go
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/tracer.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/tracer.go
16c16
< var tracer logging.Tracer
---
> var qlogTracer logging.Tracer
19d18
< tracers := []logging.Tracer{&metricsTracer{}}
21,23c20
< if qlogger := initQlogger(qlogDir); qlogger != nil {
< tracers = append(tracers, qlogger)
< }
---
> qlogTracer = initQlogger(qlogDir)
25d21
< tracer = logging.NewMultiplexedTracer(tracers...)
65c61,63
< Writer: bufio.NewWriter(f),
---
> // The size of a qlog file for a raw file download is ~2/3 of the amount of data transferred.
> // bufio.NewWriter creates a buffer of only 4 kB, leading to a large number of syscalls.
> Writer: bufio.NewWriterSize(f, 128<<10),
83c81
< buf := bufio.NewWriter(f)
---
> buf := bufio.NewWriterSize(f, 128<<10)
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/tracer_metrics.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/tracer_metrics.go
170c170,172
< type metricsTracer struct{}
---
> type metricsTracer struct {
> logging.NullTracer
> }
182,184d183
< func (m *metricsTracer) DroppedPacket(addr net.Addr, packetType logging.PacketType, count logging.ByteCount, reason logging.PacketDropReason) {
< }
<
185a185,186
> logging.NullConnectionTracer
>
227,229d227
< func (m *metricsConnTracer) NegotiatedVersion(chosen quic.VersionNumber, clientVersions []quic.VersionNumber, serverVersions []quic.VersionNumber) {
< }
<
267,269d264
< func (m *metricsConnTracer) SentTransportParameters(parameters *logging.TransportParameters) {}
< func (m *metricsConnTracer) ReceivedTransportParameters(parameters *logging.TransportParameters) {}
< func (m *metricsConnTracer) RestoredTransportParameters(parameters *logging.TransportParameters) {}
332,333d326
< func (m *metricsConnTracer) AcknowledgedPacket(logging.EncryptionLevel, logging.PacketNumber) {}
<
347,351d339
< func (m *metricsConnTracer) UpdatedCongestionState(state logging.CongestionState) {}
< func (m *metricsConnTracer) UpdatedPTOCount(value uint32) {}
< func (m *metricsConnTracer) UpdatedKeyFromTLS(level logging.EncryptionLevel, perspective logging.Perspective) {
< }
< func (m *metricsConnTracer) UpdatedKey(generation logging.KeyPhase, remote bool) {}
357,363d344
< func (m *metricsConnTracer) DroppedKey(generation logging.KeyPhase) {}
< func (m *metricsConnTracer) SetLossTimer(timerType logging.TimerType, level logging.EncryptionLevel, time time.Time) {
< }
<
< func (m *metricsConnTracer) LossTimerExpired(timerType logging.TimerType, level logging.EncryptionLevel) {
< }
< func (m *metricsConnTracer) LossTimerCanceled() {}
373,374d353
<
< func (m *metricsConnTracer) Debug(name, msg string) {}
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/transport.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/transport.go
28a29
> quiclogging "github.com/lucas-clemente/quic-go/logging"
45,47c46,48
< AcceptToken: func(clientAddr net.Addr, _ *quic.Token) bool {
< // TODO(#6): require source address validation when under load
< return true
---
> RequireAddressValidation: func(net.Addr) bool {
> // TODO(#1535): require source address validation when under load
> return false
55a57,63
> type noreuseConn struct {
> *net.UDPConn
> }
>
> func (c *noreuseConn) IncreaseCount() {}
> func (c *noreuseConn) DecreaseCount() {}
>
57,58c65,67
< reuseUDP4 *reuse
< reuseUDP6 *reuse
---
> reuseUDP4 *reuse
> reuseUDP6 *reuse
> reuseportEnable bool
61c70
< func newConnManager() (*connManager, error) {
---
> func newConnManager(reuseport bool) (*connManager, error) {
64d72
<
66,67c74,76
< reuseUDP4: reuseUDP4,
< reuseUDP6: reuseUDP6,
---
> reuseUDP4: reuseUDP4,
> reuseUDP6: reuseUDP6,
> reuseportEnable: reuseport,
82,83c91,100
< func (c *connManager) Listen(network string, laddr *net.UDPAddr) (*reuseConn, error) {
< reuse, err := c.getReuse(network)
---
> func (c *connManager) Listen(network string, laddr *net.UDPAddr) (pConn, error) {
> if c.reuseportEnable {
> reuse, err := c.getReuse(network)
> if err != nil {
> return nil, err
> }
> return reuse.Listen(network, laddr)
> }
>
> conn, err := net.ListenUDP(network, laddr)
87c104
< return reuse.Listen(network, laddr)
---
> return &noreuseConn{conn}, nil
90,91c107,123
< func (c *connManager) Dial(network string, raddr *net.UDPAddr) (*reuseConn, error) {
< reuse, err := c.getReuse(network)
---
> func (c *connManager) Dial(network string, raddr *net.UDPAddr) (pConn, error) {
> if c.reuseportEnable {
> reuse, err := c.getReuse(network)
> if err != nil {
> return nil, err
> }
> return reuse.Dial(network, raddr)
> }
>
> var laddr *net.UDPAddr
> switch network {
> case "udp4":
> laddr = &net.UDPAddr{IP: net.IPv4zero, Port: 0}
> case "udp6":
> laddr = &net.UDPAddr{IP: net.IPv6zero, Port: 0}
> }
> conn, err := net.ListenUDP(network, laddr)
95c127
< return reuse.Dial(network, raddr)
---
> return &noreuseConn{conn}, nil
136c168,173
< func NewTransport(key ic.PrivKey, psk pnet.PSK, gater connmgr.ConnectionGater, rcmgr network.ResourceManager) (tpt.Transport, error) {
---
> func NewTransport(key ic.PrivKey, psk pnet.PSK, gater connmgr.ConnectionGater, rcmgr network.ResourceManager, opts ...Option) (tpt.Transport, error) {
> var cfg config
> if err := cfg.apply(opts...); err != nil {
> return nil, fmt.Errorf("unable to apply quic-tpt option(s): %w", err)
> }
>
149c186
< connManager, err := newConnManager()
---
> connManager, err := newConnManager(!cfg.disableReuseport)
156c193
< config := quicConfig.Clone()
---
> qconfig := quicConfig.Clone()
162,163c199,200
< config.StatelessResetKey = make([]byte, 32)
< if _, err := io.ReadFull(keyReader, config.StatelessResetKey); err != nil {
---
> qconfig.StatelessResetKey = make([]byte, 32)
> if _, err := io.ReadFull(keyReader, qconfig.StatelessResetKey); err != nil {
166c203,212
< config.Tracer = tracer
---
> var tracers []quiclogging.Tracer
> if qlogTracer != nil {
> tracers = append(tracers, qlogTracer)
> }
> if cfg.metrics {
> tracers = append(tracers, &metricsTracer{})
> }
> if len(tracers) > 0 {
> qconfig.Tracer = quiclogging.NewMultiplexedTracer(tracers...)
> }
178,180c224,226
< config.AllowConnectionWindowIncrease = tr.allowWindowIncrease
< tr.serverConfig = config
< tr.clientConfig = config.Clone()
---
> qconfig.AllowConnectionWindowIncrease = tr.allowWindowIncrease
> tr.serverConfig = qconfig
> tr.clientConfig = qconfig.Clone()
307c353
< if _, err := pconn.UDPConn.WriteToUDP(payload, addr); err != nil {
---
> if _, err := pconn.WriteTo(payload, addr); err != nil {
371a418,420
> if !t.connManager.reuseportEnable {
> conn.Close()
> }
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/transport/tcp/tcp.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/tcp/tcp.go
108a109,115
> func WithMetrics() Option {
> return func(tr *TcpTransport) error {
> tr.enableMetrics = true
> return nil
> }
> }
>
115,116c122,123
< // Explicitly disable reuseport.
< disableReuseport bool
---
> disableReuseport bool // Explicitly disable reuseport.
> enableMetrics bool
192,195c199,206
< c, err := newTracingConn(conn, true)
< if err != nil {
< connScope.Done()
< return nil, err
---
> c := conn
> if t.enableMetrics {
> var err error
> c, err = newTracingConn(conn, true)
> if err != nil {
> connScope.Done()
> return nil, err
> }
222c233,235
< list = newTracingListener(&tcpListener{list, 0})
---
> if t.enableMetrics {
> list = newTracingListener(&tcpListener{list, 0})
> }
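
TCP tracing is now opt-in. A sketch enabling it through the transport option added above (assuming the libp2p.Transport(constructor, opts...) form of the option):

package sketch

import (
    "github.com/libp2p/go-libp2p"
    "github.com/libp2p/go-libp2p/core/host"
    "github.com/libp2p/go-libp2p/p2p/transport/tcp"
)

// newHostWithTCPMetrics restores the previous always-on tracing behavior.
func newHostWithTCPMetrics() (host.Host, error) {
    return libp2p.New(libp2p.Transport(tcp.NewTCPTransport, tcp.WithMetrics()))
}
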
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/addrs.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/addrs.go
108,111c108,110
< // Only look at the _last_ component.
< maddr, wscomponent := ma.SplitLast(maddr)
< if maddr == nil || wscomponent == nil {
< return nil, fmt.Errorf("websocket addrs need at least two components")
---
> parsed, err := parseWebsocketMultiaddr(maddr)
> if err != nil {
> return nil, err
114,118c113,114
< var scheme string
< switch wscomponent.Protocol().Code {
< case ma.P_WS:
< scheme = "ws"
< case ma.P_WSS:
---
> scheme := "ws"
> if parsed.isWSS {
120,121d115
< default:
< return nil, fmt.Errorf("not a websocket multiaddr")
124c118
< network, host, err := manet.DialArgs(maddr)
---
> network, host, err := manet.DialArgs(parsed.restMultiaddr)
136a131,174
> }
>
> type parsedWebsocketMultiaddr struct {
> isWSS bool
> // sni is the SNI value for the TLS handshake
> sni *ma.Component
> // the rest of the multiaddr before the /tls/sni/example.com/ws or /ws or /wss
> restMultiaddr ma.Multiaddr
> }
>
> func parseWebsocketMultiaddr(a ma.Multiaddr) (parsedWebsocketMultiaddr, error) {
> out := parsedWebsocketMultiaddr{}
> // First check if we have a WSS component. If so we'll canonicalize it into a /tls/ws
> withoutWss := a.Decapsulate(wssComponent)
> if !withoutWss.Equal(a) {
> a = withoutWss.Encapsulate(tlsWsComponent)
> }
>
> // Remove the ws component
> withoutWs := a.Decapsulate(wsComponent)
> if withoutWs.Equal(a) {
> return out, fmt.Errorf("not a websocket multiaddr")
> }
>
> rest := withoutWs
> // If this is not a wss then withoutWs is the rest of the multiaddr
> out.restMultiaddr = withoutWs
> for {
> var head *ma.Component
> rest, head = ma.SplitLast(rest)
> if head == nil || rest == nil {
> break
> }
>
> if head.Protocol().Code == ma.P_SNI {
> out.sni = head
> } else if head.Protocol().Code == ma.P_TLS {
> out.isWSS = true
> out.restMultiaddr = rest
> break
> }
> }
>
> return out, nil
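
The canonicalization step, reproduced standalone: /wss is rewritten to /tls/ws before the address is inspected further (mirrors the unexported parser):

package main

import (
    "fmt"

    ma "github.com/multiformats/go-multiaddr"
)

func main() {
    a := ma.StringCast("/dns4/example.com/tcp/443/wss")
    if without := a.Decapsulate(ma.StringCast("/wss")); !without.Equal(a) {
        a = without.Encapsulate(ma.StringCast("/tls/ws"))
    }
    fmt.Println(a) // /dns4/example.com/tcp/443/tls/ws
}
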
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/listener.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/listener.go
13,17d12
< var (
< wsma = ma.StringCast("/ws")
< wssma = ma.StringCast("/wss")
< )
<
27a23,34
> func (pwma *parsedWebsocketMultiaddr) toMultiaddr() ma.Multiaddr {
> if !pwma.isWSS {
> return pwma.restMultiaddr.Encapsulate(wsComponent)
> }
>
> if pwma.sni == nil {
> return pwma.restMultiaddr.Encapsulate(tlsComponent).Encapsulate(wsComponent)
> }
>
> return pwma.restMultiaddr.Encapsulate(tlsComponent).Encapsulate(pwma.sni).Encapsulate(wsComponent)
> }
>
31,34c38,43
< // Only look at the _last_ component.
< maddr, wscomponent := ma.SplitLast(a)
< isWSS := wscomponent.Equal(wssma)
< if isWSS && tlsConf == nil {
---
> parsed, err := parseWebsocketMultiaddr(a)
> if err != nil {
> return nil, err
> }
>
> if parsed.isWSS && tlsConf == nil {
37c46,47
< lnet, lnaddr, err := manet.DialArgs(maddr)
---
>
> lnet, lnaddr, err := manet.DialArgs(parsed.restMultiaddr)
56a67
> parsed.restMultiaddr = laddr
60c71
< laddr: laddr.Encapsulate(wscomponent),
---
> laddr: parsed.toMultiaddr(),
65c76
< if isWSS {
---
> if parsed.isWSS {
diff -r --color a/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/websocket.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/websocket.go
24,26c24,44
< // This is _not_ WsFmt because we want the transport to stick to dialing fully
< // resolved addresses.
< var dialMatcher = mafmt.And(mafmt.Or(mafmt.IP, mafmt.DNS), mafmt.Base(ma.P_TCP), mafmt.Or(mafmt.Base(ma.P_WS), mafmt.Base(ma.P_WSS)))
---
> var dialMatcher = mafmt.And(
> mafmt.Or(mafmt.IP, mafmt.DNS),
> mafmt.Base(ma.P_TCP),
> mafmt.Or(
> mafmt.Base(ma.P_WS),
> mafmt.And(
> mafmt.Or(
> mafmt.And(
> mafmt.Base(ma.P_TLS),
> mafmt.Base(ma.P_SNI)),
> mafmt.Base(ma.P_TLS),
> ),
> mafmt.Base(ma.P_WS)),
> mafmt.Base(ma.P_WSS)))
>
> var (
> wssComponent = ma.StringCast("/wss")
> tlsWsComponent = ma.StringCast("/tls/ws")
> tlsComponent = ma.StringCast("/tls")
> wsComponent = ma.StringCast("/ws")
> )
80,81c98,100
< upgrader: u,
< rcmgr: rcmgr,
---
> upgrader: u,
> rcmgr: rcmgr,
> tlsClientConf: &tls.Config{},
102a122,157
> func (t *WebsocketTransport) Resolve(ctx context.Context, maddr ma.Multiaddr) ([]ma.Multiaddr, error) {
> parsed, err := parseWebsocketMultiaddr(maddr)
> if err != nil {
> return nil, err
> }
>
> if !parsed.isWSS {
> // No /tls/ws component, this isn't a secure websocket multiaddr. We can just return it here
> return []ma.Multiaddr{maddr}, nil
> }
>
> if parsed.sni == nil {
> var err error
> // We don't have an sni component, we'll use dns/dnsaddr
> ma.ForEach(parsed.restMultiaddr, func(c ma.Component) bool {
> switch c.Protocol().Code {
> case ma.P_DNS, ma.P_DNS4, ma.P_DNS6, ma.P_DNSADDR:
> // err shouldn't happen since this means we couldn't parse a dns hostname for an sni value.
> parsed.sni, err = ma.NewComponent("sni", c.Value())
> return false
> }
> return true
> })
> if err != nil {
> return nil, err
> }
> }
>
> if parsed.sni == nil {
> // we didn't find anything to set the sni with. So we just return the given multiaddr
> return []ma.Multiaddr{maddr}, nil
> }
>
> return []ma.Multiaddr{parsed.toMultiaddr()}, nil
> }
>
124c179,183
< dialer.TLSClientConfig = t.tlsClientConf
---
> sni := ""
> sni, err = raddr.ValueForProtocol(ma.P_SNI)
> if err != nil {
> sni = ""
> }
125a185,191
> if sni != "" {
> copytlsClientConf := t.tlsClientConf.Clone()
> copytlsClientConf.ServerName = sni
> dialer.TLSClientConfig = copytlsClientConf
> } else {
> dialer.TLSClientConfig = t.tlsClientConf
> }
126a193
>
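
Resolve is exported on the websocket transport, so the SNI derivation can be exercised directly; for a secure websocket addr without /sni it copies the DNS hostname in (sketch, wrapper name illustrative):

package sketch

import (
    "context"

    "github.com/libp2p/go-libp2p/p2p/transport/websocket"
    ma "github.com/multiformats/go-multiaddr"
)

// resolveWss expects /dns4/example.com/tcp/443/tls/ws to resolve to
// /dns4/example.com/tcp/443/tls/sni/example.com/ws, which Dial then uses
// to set the TLS ServerName.
func resolveWss(ctx context.Context, t *websocket.WebsocketTransport) ([]ma.Multiaddr, error) {
    return t.Resolve(ctx, ma.StringCast("/dns4/example.com/tcp/443/tls/ws"))
}
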
diff -r --color a/vendor/github.com/libp2p/go-libp2p/version.json b/vendor/github.com/libp2p/go-libp2p/version.json
2c2
< "version": "v0.22.0"
---
> "version": "v0.23.2"
Only in b/vendor/github.com/libp2p: go-libp2p-consensus
Only in a/vendor/github.com/libp2p/go-libp2p-core: peerstore
Only in a/vendor/github.com/libp2p/go-libp2p-core: record
Only in b/vendor/github.com/libp2p: go-libp2p-gorpc
diff -r --color a/vendor/github.com/libp2p/go-libp2p-gostream/addr.go b/vendor/github.com/libp2p/go-libp2p-gostream/addr.go
3c3
< import "github.com/libp2p/go-libp2p-core/peer"
---
> import "github.com/libp2p/go-libp2p/core/peer"
14c14
< func (a *addr) String() string { return a.id.Pretty() }
---
> func (a *addr) String() string { return a.id.String() }
diff -r --color a/vendor/github.com/libp2p/go-libp2p-gostream/conn.go b/vendor/github.com/libp2p/go-libp2p-gostream/conn.go
6d5
< "time"
8,11c7,10
< "github.com/libp2p/go-libp2p-core/host"
< "github.com/libp2p/go-libp2p-core/network"
< "github.com/libp2p/go-libp2p-core/peer"
< "github.com/libp2p/go-libp2p-core/protocol"
---
> "github.com/libp2p/go-libp2p/core/host"
> "github.com/libp2p/go-libp2p/core/network"
> "github.com/libp2p/go-libp2p/core/peer"
> "github.com/libp2p/go-libp2p/core/protocol"
17c16
< s network.Stream
---
> network.Stream
25,40d23
< // Read reads data from the connection.
< func (c *conn) Read(b []byte) (n int, err error) {
< return c.s.Read(b)
< }
<
< // Write writes data to the connection.
< func (c *conn) Write(b []byte) (n int, err error) {
< return c.s.Write(b)
< }
<
< // Close closes the connection.
< // Any blocked Read or Write operations will be unblocked and return errors.
< func (c *conn) Close() error {
< return c.s.Close()
< }
<
43c26
< return &addr{c.s.Conn().LocalPeer()}
---
> return &addr{c.Stream.Conn().LocalPeer()}
48,70c31
< return &addr{c.s.Conn().RemotePeer()}
< }
<
< // SetDeadline sets the read and write deadlines associated
< // with the connection. It is equivalent to calling both
< // SetReadDeadline and SetWriteDeadline.
< // See https://golang.org/pkg/net/#Conn for more details.
< func (c *conn) SetDeadline(t time.Time) error {
< return c.s.SetDeadline(t)
< }
<
< // SetReadDeadline sets the deadline for future Read calls.
< // A zero value for t means Read will not time out.
< func (c *conn) SetReadDeadline(t time.Time) error {
< return c.s.SetReadDeadline(t)
< }
<
< // SetWriteDeadline sets the deadline for future Write calls.
< // Even if write times out, it may return n > 0, indicating that
< // some of the data was successfully written.
< // A zero value for t means Write will not time out.
< func (c *conn) SetWriteDeadline(t time.Time) error {
< return c.s.SetWriteDeadline(t)
---
> return &addr{c.Stream.Conn().RemotePeer()}
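The conn.go rewrite above swaps the named field `s network.Stream` for an embedded one and deletes the hand-written Read/Write/Close and deadline forwarders: embedding promotes those methods automatically, and only the address methods still need custom bodies. A standalone sketch of the same embedding technique, using net.Conn instead of the libp2p types:

package main

import "net"

// conn embeds net.Conn; Read, Write, Close and the deadline setters are
// promoted from the embedded value, so no forwarding methods are needed.
type conn struct {
	net.Conn
}

// Only methods that need different behaviour are written out.
func (c *conn) RemoteAddr() net.Addr {
	return c.Conn.RemoteAddr() // a wrapper could translate the address here
}

var _ net.Conn = (*conn)(nil) // still satisfies net.Conn

func main() {}
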
diff -r --color a/vendor/github.com/libp2p/go-libp2p-gostream/listener.go b/vendor/github.com/libp2p/go-libp2p-gostream/listener.go
7,9c7,9
< "github.com/libp2p/go-libp2p-core/host"
< "github.com/libp2p/go-libp2p-core/network"
< "github.com/libp2p/go-libp2p-core/protocol"
---
> "github.com/libp2p/go-libp2p/core/host"
> "github.com/libp2p/go-libp2p/core/network"
> "github.com/libp2p/go-libp2p/core/protocol"
diff -r --color a/vendor/github.com/libp2p/go-libp2p-gostream/version.json b/vendor/github.com/libp2p/go-libp2p-gostream/version.json
2c2
< "version": "v0.4.0"
---
> "version": "v0.5.0"
diff -r --color a/vendor/github.com/libp2p/go-libp2p-pubsub/score_params.go b/vendor/github.com/libp2p/go-libp2p-pubsub/score_params.go
13c13,16
< // GossipThreshold is the score threshold below which gossip propagation is supressed;
---
> // whether it is allowed to just set some params and not all of them.
> SkipAtomicValidation bool
>
> // GossipThreshold is the score threshold below which gossip propagation is suppressed;
21,22c24,25
< // GraylistThreshold is the score threshold below which message processing is supressed altogether,
< // implementing an effective graylist according to peer score; should be negative and <= PublisThreshold.
---
> // GraylistThreshold is the score threshold below which message processing is suppressed altogether,
> // implementing an effective gray list according to peer score; should be negative and <= PublishThreshold.
35,42c38,48
< if p.GossipThreshold > 0 || isInvalidNumber(p.GossipThreshold) {
< return fmt.Errorf("invalid gossip threshold; it must be <= 0 and a valid number")
< }
< if p.PublishThreshold > 0 || p.PublishThreshold > p.GossipThreshold || isInvalidNumber(p.PublishThreshold) {
< return fmt.Errorf("invalid publish threshold; it must be <= 0 and <= gossip threshold and a valid number")
< }
< if p.GraylistThreshold > 0 || p.GraylistThreshold > p.PublishThreshold || isInvalidNumber(p.GraylistThreshold) {
< return fmt.Errorf("invalid graylist threshold; it must be <= 0 and <= publish threshold and a valid number")
---
>
> if !p.SkipAtomicValidation || p.PublishThreshold != 0 || p.GossipThreshold != 0 || p.GraylistThreshold != 0 {
> if p.GossipThreshold > 0 || isInvalidNumber(p.GossipThreshold) {
> return fmt.Errorf("invalid gossip threshold; it must be <= 0 and a valid number")
> }
> if p.PublishThreshold > 0 || p.PublishThreshold > p.GossipThreshold || isInvalidNumber(p.PublishThreshold) {
> return fmt.Errorf("invalid publish threshold; it must be <= 0 and <= gossip threshold and a valid number")
> }
> if p.GraylistThreshold > 0 || p.GraylistThreshold > p.PublishThreshold || isInvalidNumber(p.GraylistThreshold) {
> return fmt.Errorf("invalid graylist threshold; it must be <= 0 and <= publish threshold and a valid number")
> }
44,45c50,54
< if p.AcceptPXThreshold < 0 || isInvalidNumber(p.AcceptPXThreshold) {
< return fmt.Errorf("invalid accept PX threshold; it must be >= 0 and a valid number")
---
>
> if !p.SkipAtomicValidation || p.AcceptPXThreshold != 0 {
> if p.AcceptPXThreshold < 0 || isInvalidNumber(p.AcceptPXThreshold) {
> return fmt.Errorf("invalid accept PX threshold; it must be >= 0 and a valid number")
> }
47,48c56,60
< if p.OpportunisticGraftThreshold < 0 || isInvalidNumber(p.OpportunisticGraftThreshold) {
< return fmt.Errorf("invalid opportunistic grafting threshold; it must be >= 0 and a valid number")
---
>
> if !p.SkipAtomicValidation || p.OpportunisticGraftThreshold != 0 {
> if p.OpportunisticGraftThreshold < 0 || isInvalidNumber(p.OpportunisticGraftThreshold) {
> return fmt.Errorf("invalid opportunistic grafting threshold; it must be >= 0 and a valid number")
> }
49a62
>
53a67,69
> // whether it is allowed to just set some params and not all of them.
> SkipAtomicValidation bool
>
101a118,120
> // whether it is allowed to just set some params and not all of them.
> SkipAtomicValidation bool
>
106,107c125,126
< // This is the time the peer has ben grafted in the mesh.
< // The value of of the parameter is the time/TimeInMeshQuantum, capped by TimeInMeshCap
---
> // This is the time the peer has been grafted in the mesh.
> // The value of the parameter is the time/TimeInMeshQuantum, capped by TimeInMeshCap.
127c146
< // It effectively tracks first and near-first deliveries, ie a message seen from a mesh peer
---
> // It effectively tracks first and near-first deliveries, i.e., a message seen from a mesh peer
162,164c181,185
< // check that the topic score is 0 or something positive
< if p.TopicScoreCap < 0 || isInvalidNumber(p.TopicScoreCap) {
< return fmt.Errorf("invalid topic score cap; must be positive (or 0 for no cap) and a valid number")
---
> if !p.SkipAtomicValidation || p.TopicScoreCap != 0 {
> // check that the topic score is 0 or something positive
> if p.TopicScoreCap < 0 || isInvalidNumber(p.TopicScoreCap) {
> return fmt.Errorf("invalid topic score cap; must be positive (or 0 for no cap) and a valid number")
> }
169c190,196
< return fmt.Errorf("missing application specific score function")
---
> if p.SkipAtomicValidation {
> p.AppSpecificScore = func(p peer.ID) float64 {
> return 0
> }
> } else {
> return fmt.Errorf("missing application specific score function")
> }
172,177c199,206
< // check the IP colocation factor
< if p.IPColocationFactorWeight > 0 || isInvalidNumber(p.IPColocationFactorWeight) {
< return fmt.Errorf("invalid IPColocationFactorWeight; must be negative (or 0 to disable) and a valid number")
< }
< if p.IPColocationFactorWeight != 0 && p.IPColocationFactorThreshold < 1 {
< return fmt.Errorf("invalid IPColocationFactorThreshold; must be at least 1")
---
> if !p.SkipAtomicValidation || p.IPColocationFactorWeight != 0 {
> // check the IP collocation factor
> if p.IPColocationFactorWeight > 0 || isInvalidNumber(p.IPColocationFactorWeight) {
> return fmt.Errorf("invalid IPColocationFactorWeight; must be negative (or 0 to disable) and a valid number")
> }
> if p.IPColocationFactorWeight != 0 && p.IPColocationFactorThreshold < 1 {
> return fmt.Errorf("invalid IPColocationFactorThreshold; must be at least 1")
> }
181,188c210,219
< if p.BehaviourPenaltyWeight > 0 || isInvalidNumber(p.BehaviourPenaltyWeight) {
< return fmt.Errorf("invalid BehaviourPenaltyWeight; must be negative (or 0 to disable) and a valid number")
< }
< if p.BehaviourPenaltyWeight != 0 && (p.BehaviourPenaltyDecay <= 0 || p.BehaviourPenaltyDecay >= 1 || isInvalidNumber(p.BehaviourPenaltyDecay)) {
< return fmt.Errorf("invalid BehaviourPenaltyDecay; must be between 0 and 1")
< }
< if p.BehaviourPenaltyThreshold < 0 || isInvalidNumber(p.BehaviourPenaltyThreshold) {
< return fmt.Errorf("invalid BehaviourPenaltyThreshold; must be >= 0 and a valid number")
---
> if !p.SkipAtomicValidation || p.BehaviourPenaltyWeight != 0 || p.BehaviourPenaltyThreshold != 0 {
> if p.BehaviourPenaltyWeight > 0 || isInvalidNumber(p.BehaviourPenaltyWeight) {
> return fmt.Errorf("invalid BehaviourPenaltyWeight; must be negative (or 0 to disable) and a valid number")
> }
> if p.BehaviourPenaltyWeight != 0 && (p.BehaviourPenaltyDecay <= 0 || p.BehaviourPenaltyDecay >= 1 || isInvalidNumber(p.BehaviourPenaltyDecay)) {
> return fmt.Errorf("invalid BehaviourPenaltyDecay; must be between 0 and 1")
> }
> if p.BehaviourPenaltyThreshold < 0 || isInvalidNumber(p.BehaviourPenaltyThreshold) {
> return fmt.Errorf("invalid BehaviourPenaltyThreshold; must be >= 0 and a valid number")
> }
192,196c223,229
< if p.DecayInterval < time.Second {
< return fmt.Errorf("invalid DecayInterval; must be at least 1s")
< }
< if p.DecayToZero <= 0 || p.DecayToZero >= 1 || isInvalidNumber(p.DecayToZero) {
< return fmt.Errorf("invalid DecayToZero; must be between 0 and 1")
---
> if !p.SkipAtomicValidation || p.DecayInterval != 0 || p.DecayToZero != 0 {
> if p.DecayInterval < time.Second {
> return fmt.Errorf("invalid DecayInterval; must be at least 1s")
> }
> if p.DecayToZero <= 0 || p.DecayToZero >= 1 || isInvalidNumber(p.DecayToZero) {
> return fmt.Errorf("invalid DecayToZero; must be between 0 and 1")
> }
209a243,279
> if err := p.validateTimeInMeshParams(); err != nil {
> return err
> }
>
> // check P2
> if err := p.validateMessageDeliveryParams(); err != nil {
> return err
> }
> // check P3
> if err := p.validateMeshMessageDeliveryParams(); err != nil {
> return err
> }
>
> // check P3b
> if err := p.validateMessageFailurePenaltyParams(); err != nil {
> return err
> }
>
> // check P4
> if err := p.validateInvalidMessageDeliveryParams(); err != nil {
> return err
> }
>
> return nil
> }
>
> func (p *TopicScoreParams) validateTimeInMeshParams() error {
> if p.SkipAtomicValidation {
> // in non-atomic mode, parameters at their zero values are dismissed from validation.
> if p.TimeInMeshWeight == 0 && p.TimeInMeshQuantum == 0 && p.TimeInMeshCap == 0 {
> return nil
> }
> }
>
> // either atomic validation mode, or some parameters have been set a value,
> // hence, proceed with normal validation of all related parameters in this context.
>
223c293,306
< // check P2
---
> return nil
> }
>
> func (p *TopicScoreParams) validateMessageDeliveryParams() error {
> if p.SkipAtomicValidation {
> // in non-atomic mode, parameters at their zero values are dismissed from validation.
> if p.FirstMessageDeliveriesWeight == 0 && p.FirstMessageDeliveriesCap == 0 && p.FirstMessageDeliveriesDecay == 0 {
> return nil
> }
> }
>
> // either atomic validation mode, or some parameters have been set a value,
> // hence, proceed with normal validation of all related parameters in this context.
>
234c317,335
< // check P3
---
> return nil
> }
>
> func (p *TopicScoreParams) validateMeshMessageDeliveryParams() error {
> if p.SkipAtomicValidation {
> // in non-atomic mode, parameters at their zero values are dismissed from validation.
> if p.MeshMessageDeliveriesWeight == 0 &&
> p.MeshMessageDeliveriesCap == 0 &&
> p.MeshMessageDeliveriesDecay == 0 &&
> p.MeshMessageDeliveriesThreshold == 0 &&
> p.MeshMessageDeliveriesWindow == 0 &&
> p.MeshMessageDeliveriesActivation == 0 {
> return nil
> }
> }
>
> // either atomic validation mode, or some parameters have been set a value,
> // hence, proceed with normal validation of all related parameters in this context.
>
254c355,368
< // check P3b
---
> return nil
> }
>
> func (p *TopicScoreParams) validateMessageFailurePenaltyParams() error {
> if p.SkipAtomicValidation {
> // in selective mode, parameters at their zero values are dismissed from validation.
> if p.MeshFailurePenaltyDecay == 0 && p.MeshFailurePenaltyWeight == 0 {
> return nil
> }
> }
>
> // either atomic validation mode, or some parameters have been set a value,
> // hence, proceed with normal validation of all related parameters in this context.
>
262c376,389
< // check P4
---
> return nil
> }
>
> func (p *TopicScoreParams) validateInvalidMessageDeliveryParams() error {
> if p.SkipAtomicValidation {
> // in selective mode, parameters at their zero values are dismissed from validation.
> if p.InvalidMessageDeliveriesDecay == 0 && p.InvalidMessageDeliveriesWeight == 0 {
> return nil
> }
> }
>
> // either atomic validation mode, or some parameters have been set a value,
> // hence, proceed with normal validation of all related parameters in this context.
>
284c411
< // ScoreParameterDecay computes the decay factor for a parameter using base as the DecayInterval
---
> // ScoreParameterDecayWithBase computes the decay factor for a parameter using base as the DecayInterval
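The score_params.go changes follow one pattern throughout: each validation group is wrapped in `if !p.SkipAtomicValidation || <some field of the group> != 0 { ... }`, so with SkipAtomicValidation set, a group left entirely at its zero values is skipped rather than rejected, while a partially set group is still validated in full. A reduced sketch of that guard with hypothetical params (not the pubsub API):

package main

import (
	"fmt"
	"time"
)

type params struct {
	SkipAtomicValidation bool
	DecayInterval        time.Duration
	DecayToZero          float64
}

func (p *params) validate() error {
	// In non-atomic mode, a group left entirely at zero values is dismissed
	// from validation; otherwise all parameters of the group are checked.
	if !p.SkipAtomicValidation || p.DecayInterval != 0 || p.DecayToZero != 0 {
		if p.DecayInterval < time.Second {
			return fmt.Errorf("invalid DecayInterval; must be at least 1s")
		}
		if p.DecayToZero <= 0 || p.DecayToZero >= 1 {
			return fmt.Errorf("invalid DecayToZero; must be between 0 and 1")
		}
	}
	return nil
}

func main() {
	fmt.Println((&params{SkipAtomicValidation: true}).validate())                 // <nil>: group untouched
	fmt.Println((&params{SkipAtomicValidation: true, DecayToZero: 2}).validate()) // error: partially set group is fully checked
}
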
Only in b/vendor/github.com/libp2p: go-libp2p-raft
Only in a/vendor/github.com/libp2p/go-yamux: v3
Only in b/vendor/github.com/libp2p/go-yamux: v4
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/client.go b/vendor/github.com/lucas-clemente/quic-go/client.go
45,49c45,46
< var (
< // make it possible to mock connection ID generation in the tests
< generateConnectionID = protocol.GenerateConnectionID
< generateConnectionIDForInitial = protocol.GenerateConnectionIDForInitial
< )
---
> // make it possible to mock connection ID for initial generation in the tests
> var generateConnectionIDForInitial = protocol.GenerateConnectionIDForInitial
196c193
< packetHandlers, err := getMultiplexer().AddConn(pconn, config.ConnectionIDLength, config.StatelessResetKey, config.Tracer)
---
> packetHandlers, err := getMultiplexer().AddConn(pconn, config.ConnectionIDGenerator.ConnectionIDLen(), config.StatelessResetKey, config.Tracer)
259c256
< srcConnID, err := generateConnectionID(config.ConnectionIDLength)
---
> srcConnID, err := config.ConnectionIDGenerator.GenerateConnectionID()
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/closed_conn.go b/vendor/github.com/lucas-clemente/quic-go/closed_conn.go
4c4,5
< "sync"
---
> "math/bits"
> "net"
14,22c15
< conn sendConn
< connClosePacket []byte
<
< closeOnce sync.Once
< closeChan chan struct{} // is closed when the connection is closed or destroyed
<
< receivedPackets chan *receivedPacket
< counter uint64 // number of packets received
<
---
> counter uint32
23a17
> logger utils.Logger
25c19
< logger utils.Logger
---
> sendPacket func(net.Addr, *packetInfo)
31,56c25,29
< func newClosedLocalConn(
< conn sendConn,
< connClosePacket []byte,
< perspective protocol.Perspective,
< logger utils.Logger,
< ) packetHandler {
< s := &closedLocalConn{
< conn: conn,
< connClosePacket: connClosePacket,
< perspective: perspective,
< logger: logger,
< closeChan: make(chan struct{}),
< receivedPackets: make(chan *receivedPacket, 64),
< }
< go s.run()
< return s
< }
<
< func (s *closedLocalConn) run() {
< for {
< select {
< case p := <-s.receivedPackets:
< s.handlePacketImpl(p)
< case <-s.closeChan:
< return
< }
---
> func newClosedLocalConn(sendPacket func(net.Addr, *packetInfo), pers protocol.Perspective, logger utils.Logger) packetHandler {
> return &closedLocalConn{
> sendPacket: sendPacket,
> perspective: pers,
> logger: logger,
60,68c33,34
< func (s *closedLocalConn) handlePacket(p *receivedPacket) {
< select {
< case s.receivedPackets <- p:
< default:
< }
< }
<
< func (s *closedLocalConn) handlePacketImpl(_ *receivedPacket) {
< s.counter++
---
> func (c *closedLocalConn) handlePacket(p *receivedPacket) {
> c.counter++
71,78c37,38
< for n := s.counter; n > 1; n = n / 2 {
< if n%2 != 0 {
< return
< }
< }
< s.logger.Debugf("Received %d packets after sending CONNECTION_CLOSE. Retransmitting.", s.counter)
< if err := s.conn.Write(s.connClosePacket); err != nil {
< s.logger.Debugf("Error retransmitting CONNECTION_CLOSE: %s", err)
---
> if bits.OnesCount32(c.counter) != 1 {
> return
79a40,41
> c.logger.Debugf("Received %d packets after sending CONNECTION_CLOSE. Retransmitting.", c.counter)
> c.sendPacket(p.remoteAddr, p.info)
82,94c44,46
< func (s *closedLocalConn) shutdown() {
< s.destroy(nil)
< }
<
< func (s *closedLocalConn) destroy(error) {
< s.closeOnce.Do(func() {
< close(s.closeChan)
< })
< }
<
< func (s *closedLocalConn) getPerspective() protocol.Perspective {
< return s.perspective
< }
---
> func (c *closedLocalConn) shutdown() {}
> func (c *closedLocalConn) destroy(error) {}
> func (c *closedLocalConn) getPerspective() protocol.Perspective { return c.perspective }
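The new closedLocalConn above is timer- and goroutine-free: it counts packets that arrive after the CONNECTION_CLOSE was sent and retransmits only when the counter is a power of two, i.e. when bits.OnesCount32(counter) == 1, which yields exponential back-off with no extra state. The trick in isolation:

package main

import (
	"fmt"
	"math/bits"
)

// shouldRetransmit reports whether the n-th packet received after sending
// CONNECTION_CLOSE triggers a retransmission: only on powers of two
// (1, 2, 4, 8, ...), exactly one set bit in the counter.
func shouldRetransmit(n uint32) bool {
	return bits.OnesCount32(n) == 1
}

func main() {
	for n := uint32(1); n <= 10; n++ {
		fmt.Println(n, shouldRetransmit(n))
	}
}
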
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/codecov.yml b/vendor/github.com/lucas-clemente/quic-go/codecov.yml
14a15
> - logging/null_tracer.go
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/config.go b/vendor/github.com/lucas-clemente/quic-go/config.go
4a5
> "net"
7,8d7
< "github.com/lucas-clemente/quic-go/internal/utils"
<
9a9
> "github.com/lucas-clemente/quic-go/internal/utils"
19c19
< return utils.MaxDuration(protocol.DefaultHandshakeTimeout, 2*c.HandshakeIdleTimeout)
---
> return utils.Max(protocol.DefaultHandshakeTimeout, 2*c.HandshakeIdleTimeout)
38,40c38,43
< config = populateConfig(config)
< if config.ConnectionIDLength == 0 {
< config.ConnectionIDLength = protocol.DefaultConnectionIDLength
---
> config = populateConfig(config, protocol.DefaultConnectionIDLength)
> if config.MaxTokenAge == 0 {
> config.MaxTokenAge = protocol.TokenValidity
> }
> if config.MaxRetryTokenAge == 0 {
> config.MaxRetryTokenAge = protocol.RetryTokenValidity
42,43c45,46
< if config.AcceptToken == nil {
< config.AcceptToken = defaultAcceptToken
---
> if config.RequireAddressValidation == nil {
> config.RequireAddressValidation = func(net.Addr) bool { return false }
51,53c54,56
< config = populateConfig(config)
< if config.ConnectionIDLength == 0 && !createdPacketConn {
< config.ConnectionIDLength = protocol.DefaultConnectionIDLength
---
> defaultConnIDLen := protocol.DefaultConnectionIDLength
> if createdPacketConn {
> defaultConnIDLen = 0
54a58,59
>
> config = populateConfig(config, defaultConnIDLen)
58c63
< func populateConfig(config *Config) *Config {
---
> func populateConfig(config *Config, defaultConnIDLen int) *Config {
65a71,74
> conIDLen := config.ConnectionIDLength
> if config.ConnectionIDLength == 0 {
> conIDLen = defaultConnIDLen
> }
101a111,114
> connIDGenerator := config.ConnectionIDGenerator
> if connIDGenerator == nil {
> connIDGenerator = &protocol.DefaultConnectionIDGenerator{ConnLen: conIDLen}
> }
107c120,122
< AcceptToken: config.AcceptToken,
---
> MaxTokenAge: config.MaxTokenAge,
> MaxRetryTokenAge: config.MaxRetryTokenAge,
> RequireAddressValidation: config.RequireAddressValidation,
116c131,132
< ConnectionIDLength: config.ConnectionIDLength,
---
> ConnectionIDLength: conIDLen,
> ConnectionIDGenerator: connIDGenerator,
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/connection.go b/vendor/github.com/lucas-clemente/quic-go/connection.go
98c98
< ReplaceWithClosed(protocol.ConnectionID, packetHandler)
---
> ReplaceWithClosed([]protocol.ConnectionID, protocol.Perspective, []byte)
244a245
> clientAddressValidated bool,
282a284
> s.config.ConnectionIDGenerator,
290a293
> clientAddressValidated,
316a320,321
> } else {
> params.MaxDatagramFrameSize = protocol.InvalidByteCount
409a415
> s.config.ConnectionIDGenerator,
417a424
> false, /* has no effect */
440a448,449
> } else {
> params.MaxDatagramFrameSize = protocol.InvalidByteCount
535,537c544
< if s.config.EnableDatagrams {
< s.datagramQueue = newDatagramQueue(s.scheduleSending, s.logger)
< }
---
> s.datagramQueue = newDatagramQueue(s.scheduleSending, s.logger)
546c553,557
< go s.cryptoStreamHandler.RunHandshake()
---
> handshaking := make(chan struct{})
> go func() {
> defer close(handshaking)
> s.cryptoStreamHandler.RunHandshake()
> }()
696a708,709
> s.cryptoStreamHandler.Close()
> <-handshaking
702d714
< s.cryptoStreamHandler.Close()
722c734
< return s.peerParams.MaxDatagramFrameSize != protocol.InvalidByteCount
---
> return s.peerParams.MaxDatagramFrameSize > 0
819c831
< maxPacketSize = utils.MinByteCount(maxPacketSize, protocol.MaxPacketBufferSize)
---
> maxPacketSize = utils.Min(maxPacketSize, protocol.MaxPacketBufferSize)
1516c1528
< s.connIDGenerator.ReplaceWithClosed(newClosedRemoteConn(s.perspective))
---
> s.connIDGenerator.ReplaceWithClosed(s.perspective, nil)
1533,1534c1545
< cs := newClosedLocalConn(s.conn, connClosePacket, s.perspective, s.logger)
< s.connIDGenerator.ReplaceWithClosed(cs)
---
> s.connIDGenerator.ReplaceWithClosed(s.perspective, connClosePacket)
1621c1632
< s.keepAliveInterval = utils.MinDuration(s.config.KeepAlivePeriod, utils.MinDuration(s.idleTimeout/2, protocol.MaxKeepAliveInterval))
---
> s.keepAliveInterval = utils.Min(s.config.KeepAlivePeriod, utils.Min(s.idleTimeout/2, protocol.MaxKeepAliveInterval))
1972a1984,1987
> if !s.supportsDatagrams() {
> return errors.New("datagram support disabled")
> }
>
1982a1998,2000
> if !s.config.EnableDatagrams {
> return nil, errors.New("datagram support disabled")
> }
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/conn_id_generator.go b/vendor/github.com/lucas-clemente/quic-go/conn_id_generator.go
13c13
< connIDLen int
---
> generator ConnectionIDGenerator
23c23
< replaceWithClosed func(protocol.ConnectionID, packetHandler)
---
> replaceWithClosed func([]protocol.ConnectionID, protocol.Perspective, []byte)
36c36
< replaceWithClosed func(protocol.ConnectionID, packetHandler),
---
> replaceWithClosed func([]protocol.ConnectionID, protocol.Perspective, []byte),
37a38
> generator ConnectionIDGenerator,
41c42
< connIDLen: initialConnectionID.Len(),
---
> generator: generator,
57c58
< if m.connIDLen == 0 {
---
> if m.generator.ConnectionIDLen() == 0 {
66c67
< for i := uint64(len(m.activeSrcConnIDs)); i < utils.MinUint64(limit, protocol.MaxIssuedConnectionIDs); i++ {
---
> for i := uint64(len(m.activeSrcConnIDs)); i < utils.Min(limit, protocol.MaxIssuedConnectionIDs); i++ {
102c103
< connID, err := protocol.GenerateConnectionID(m.connIDLen)
---
> connID, err := m.generator.GenerateConnectionID()
133c134,135
< func (m *connIDGenerator) ReplaceWithClosed(handler packetHandler) {
---
> func (m *connIDGenerator) ReplaceWithClosed(pers protocol.Perspective, connClose []byte) {
> connIDs := make([]protocol.ConnectionID, 0, len(m.activeSrcConnIDs)+1)
135c137
< m.replaceWithClosed(m.initialClientDestConnID, handler)
---
> connIDs = append(connIDs, m.initialClientDestConnID)
138c140
< m.replaceWithClosed(connID, handler)
---
> connIDs = append(connIDs, connID)
139a142
> m.replaceWithClosed(connIDs, pers, connClose)
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/conn_id_manager.go b/vendor/github.com/lucas-clemente/quic-go/conn_id_manager.go
8a9
> list "github.com/lucas-clemente/quic-go/internal/utils/linkedlist"
11a13,18
> type newConnID struct {
> SequenceNumber uint64
> ConnectionID protocol.ConnectionID
> StatelessResetToken protocol.StatelessResetToken
> }
>
13c20
< queue utils.NewConnectionIDList
---
> queue list.List[newConnID]
74c81
< var next *utils.NewConnectionIDElement
---
> var next *list.Element[newConnID]
107c114
< h.queue.PushBack(utils.NewConnectionID{
---
> h.queue.PushBack(newConnID{
126c133
< h.queue.InsertBefore(utils.NewConnectionID{
---
> h.queue.InsertBefore(newConnID{
141c148
< h.highestRetired = utils.MaxUint64(h.highestRetired, h.activeSequenceNumber)
---
> h.highestRetired = utils.Max(h.highestRetired, h.activeSequenceNumber)
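The new `list "github.com/lucas-clemente/quic-go/internal/utils/linkedlist"` import above, together with the removal of gen.go and the generated *_linkedlist.go files elsewhere in this diff, marks the switch from code-generated per-type lists to a single generic list built on Go 1.18 type parameters. A toy sketch of such a list; the real internal package is a generic adaptation of container/list with more methods:

package main

import "fmt"

// Element and List form a tiny generic doubly linked list with a sentinel
// root, in the spirit of the new linkedlist package. Illustration only.
type Element[T any] struct {
	Value      T
	prev, next *Element[T]
	list       *List[T]
}

func (e *Element[T]) Next() *Element[T] {
	if e.next == nil || e.next.list == nil {
		return nil // reached the sentinel
	}
	return e.next
}

type List[T any] struct {
	root Element[T] // sentinel, never holds a value
	len  int
}

func New[T any]() *List[T] {
	l := &List[T]{}
	l.root.next = &l.root
	l.root.prev = &l.root
	return l
}

func (l *List[T]) Len() int { return l.len }

func (l *List[T]) Front() *Element[T] {
	if l.len == 0 {
		return nil
	}
	return l.root.next
}

func (l *List[T]) PushBack(v T) *Element[T] {
	e := &Element[T]{Value: v, list: l}
	at := l.root.prev // current last element
	e.prev, e.next = at, at.next
	at.next.prev = e
	at.next = e
	l.len++
	return e
}

type newConnID struct{ SequenceNumber uint64 }

func main() {
	q := New[newConnID]() // one generic list per element type, no codegen
	q.PushBack(newConnID{SequenceNumber: 1})
	q.PushBack(newConnID{SequenceNumber: 2})
	for e := q.Front(); e != nil; e = e.Next() {
		fmt.Println(e.Value.SequenceNumber)
	}
}
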
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/crypto_stream.go b/vendor/github.com/lucas-clemente/quic-go/crypto_stream.go
59c59
< s.highestOffset = utils.MaxByteCount(s.highestOffset, highestOffset)
---
> s.highestOffset = utils.Max(s.highestOffset, highestOffset)
110c110
< n := utils.MinByteCount(f.MaxDataLen(maxLen), protocol.ByteCount(len(s.writeBuf)))
---
> n := utils.Min(f.MaxDataLen(maxLen), protocol.ByteCount(len(s.writeBuf)))
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/frame_sorter.go b/vendor/github.com/lucas-clemente/quic-go/frame_sorter.go
7c7
< "github.com/lucas-clemente/quic-go/internal/utils"
---
> list "github.com/lucas-clemente/quic-go/internal/utils/linkedlist"
9a10,15
> // byteInterval is an interval from one ByteCount to the other
> type byteInterval struct {
> Start protocol.ByteCount
> End protocol.ByteCount
> }
>
18c24
< gaps *utils.ByteIntervalList
---
> gaps *list.List[byteInterval]
25c31
< gaps: utils.NewByteIntervalList(),
---
> gaps: list.New[byteInterval](),
28c34
< s.gaps.PushFront(utils.ByteInterval{Start: 0, End: protocol.MaxByteCount})
---
> s.gaps.PushFront(byteInterval{Start: 0, End: protocol.MaxByteCount})
121c127
< var nextGap *utils.ByteIntervalElement
---
> var nextGap *list.Element[byteInterval]
143c149
< s.gaps.InsertAfter(utils.ByteInterval{Start: end, End: startGapEnd}, startGap)
---
> s.gaps.InsertAfter(byteInterval{Start: end, End: startGapEnd}, startGap)
167c173
< func (s *frameSorter) findStartGap(offset protocol.ByteCount) (*utils.ByteIntervalElement, bool) {
---
> func (s *frameSorter) findStartGap(offset protocol.ByteCount) (*list.Element[byteInterval], bool) {
179c185
< func (s *frameSorter) findEndGap(startGap *utils.ByteIntervalElement, offset protocol.ByteCount) (*utils.ByteIntervalElement, bool) {
---
> func (s *frameSorter) findEndGap(startGap *list.Element[byteInterval], offset protocol.ByteCount) (*list.Element[byteInterval], bool) {
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/interface.go b/vendor/github.com/lucas-clemente/quic-go/interface.go
29,38d28
< // A Token can be used to verify the ownership of the client address.
< type Token struct {
< // IsRetryToken encodes how the client received the token. There are two ways:
< // * In a Retry packet sent when trying to establish a new connection.
< // * In a NEW_TOKEN frame on a previous connection.
< IsRetryToken bool
< RemoteAddr string
< SentTime time.Time
< }
<
213a204,221
> // A ConnectionIDGenerator is an interface that allows clients to implement their own format
> // for the Connection IDs that servers/clients use as SrcConnectionID in QUIC packets.
> //
> // Connection IDs generated by an implementation should always produce IDs of constant size.
> type ConnectionIDGenerator interface {
> // GenerateConnectionID generates a new ConnectionID.
> // Generated ConnectionIDs should be unique and observers should not be able to correlate two ConnectionIDs.
> GenerateConnectionID() ([]byte, error)
>
> // ConnectionIDLen tells what is the length of the ConnectionIDs generated by the implementation of
> // this interface.
> // Effectively, this means that implementations of ConnectionIDGenerator must always return constant-size
> // connection IDs. Valid lengths are between 0 and 20 and calls to GenerateConnectionID.
> // 0-length ConnectionsIDs can be used when an endpoint (server or client) does not require multiplexing connections
> // in the presence of a connection migration environment.
> ConnectionIDLen() int
> }
>
225a234,238
> // An optional ConnectionIDGenerator to be used for ConnectionIDs generated during the lifecycle of a QUIC connection.
> // The goal is to give some control on how connection IDs, which can be useful in some scenarios, in particular for servers.
> // By default, if not provided, random connection IDs with the length given by ConnectionIDLength is used.
> // Otherwise, if one is provided, then ConnectionIDLength is ignored.
> ConnectionIDGenerator ConnectionIDGenerator
236,243c249,260
< // AcceptToken determines if a Token is accepted.
< // It is called with token = nil if the client didn't send a token.
< // If not set, a default verification function is used:
< // * it verifies that the address matches, and
< // * if the token is a retry token, that it was issued within the last 5 seconds
< // * else, that it was issued within the last 24 hours.
< // This option is only valid for the server.
< AcceptToken func(clientAddr net.Addr, token *Token) bool
---
> // RequireAddressValidation determines if a QUIC Retry packet is sent.
> // This allows the server to verify the client's address, at the cost of increasing the handshake latency by 1 RTT.
> // See https://datatracker.ietf.org/doc/html/rfc9000#section-8 for details.
> // If not set, every client is forced to prove its remote address.
> RequireAddressValidation func(net.Addr) bool
> // MaxRetryTokenAge is the maximum age of a Retry token.
> // If not set, it defaults to 5 seconds. Only valid for a server.
> MaxRetryTokenAge time.Duration
> // MaxTokenAge is the maximum age of the token presented during the handshake,
> // for tokens that were issued on a previous connection.
> // If not set, it defaults to 24 hours. Only valid for a server.
> MaxTokenAge time.Duration
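With the ConnectionIDGenerator interface added above, a server can control the wire format of its connection IDs, for example to embed a routing tag for a load balancer, as long as every ID has the same length and IDs stay uncorrelatable. A hedged sketch of one such generator; prefixedGenerator and its 0x42 tag are made up for illustration:

package main

import (
	"crypto/rand"
	"fmt"
)

// prefixedGenerator issues 8-byte connection IDs whose first byte is a fixed
// routing tag and whose remaining 7 bytes are random: constant size, and
// observers cannot correlate two IDs, as the interface contract requires.
type prefixedGenerator struct {
	tag byte
}

func (g *prefixedGenerator) GenerateConnectionID() ([]byte, error) {
	id := make([]byte, 8)
	id[0] = g.tag
	if _, err := rand.Read(id[1:]); err != nil {
		return nil, err
	}
	return id, nil
}

func (g *prefixedGenerator) ConnectionIDLen() int { return 8 }

func main() {
	g := &prefixedGenerator{tag: 0x42}
	id, _ := g.GenerateConnectionID()
	fmt.Printf("% x\n", id)
}

Per the Config documentation quoted above, once such a generator is set, ConnectionIDLength is ignored.
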
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/ackhandler.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/ackhandler.go
9c9,11
< // NewAckHandler creates a new SentPacketHandler and a new ReceivedPacketHandler
---
> // NewAckHandler creates a new SentPacketHandler and a new ReceivedPacketHandler.
> // clientAddressValidated indicates whether the address was validated beforehand by an address validation token.
> // clientAddressValidated has no effect for a client.
13a16
> clientAddressValidated bool,
19c22
< sph := newSentPacketHandler(initialPacketNumber, initialMaxDatagramSize, rttStats, pers, tracer, logger)
---
> sph := newSentPacketHandler(initialPacketNumber, initialMaxDatagramSize, rttStats, clientAddressValidated, pers, tracer, logger)
Only in a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler: gen.go
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/interfaces.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/interfaces.go
25a26,29
> func (p *Packet) outstanding() bool {
> return !p.declaredLost && !p.skippedPacket && !p.IsPathMTUProbePacket
> }
>
Only in a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler: packet_linkedlist.go
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet_number_generator.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet_number_generator.go
75c75
< p.period = utils.MinPacketNumber(2*p.period, p.maxPeriod)
---
> p.period = utils.Min(2*p.period, p.maxPeriod)
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_history.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_history.go
5c5
< "github.com/lucas-clemente/quic-go/internal/utils"
---
> list "github.com/lucas-clemente/quic-go/internal/utils/linkedlist"
8a9,14
> // interval is an interval from one PacketNumber to the other
> type interval struct {
> Start protocol.PacketNumber
> End protocol.PacketNumber
> }
>
13c19
< ranges *utils.PacketIntervalList
---
> ranges *list.List[interval]
20c26
< ranges: utils.NewPacketIntervalList(),
---
> ranges: list.New[interval](),
37c43
< h.ranges.PushBack(utils.PacketInterval{Start: p, End: p})
---
> h.ranges.PushBack(interval{Start: p, End: p})
64c70
< h.ranges.InsertAfter(utils.PacketInterval{Start: p, End: p}, el)
---
> h.ranges.InsertAfter(interval{Start: p, End: p}, el)
70c76
< h.ranges.InsertBefore(utils.PacketInterval{Start: p, End: p}, h.ranges.Front())
---
> h.ranges.InsertBefore(interval{Start: p, End: p}, h.ranges.Front())
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_tracker.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_tracker.go
178c178
< DelayTime: utils.MaxDuration(0, now.Sub(h.largestObservedReceivedTime)),
---
> DelayTime: utils.Max(0, now.Sub(h.largestObservedReceivedTime)),
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_handler.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_handler.go
103a104,105
> // clientAddressValidated indicates whether the address was validated beforehand by an address validation token.
> // If the address was validated, the amplification limit doesn't apply. It has no effect for a client.
107a110
> clientAddressValidated bool,
122c125
< peerAddressValidated: pers == protocol.PerspectiveClient,
---
> peerAddressValidated: pers == protocol.PerspectiveClient || clientAddressValidated,
259c262
< for p := utils.MaxPacketNumber(0, pnSpace.largestSent+1); p < packet.PacketNumber; p++ {
---
> for p := utils.Max(0, pnSpace.largestSent+1); p < packet.PacketNumber; p++ {
291c294
< pnSpace.largestAcked = utils.MaxPacketNumber(pnSpace.largestAcked, largestAcked)
---
> pnSpace.largestAcked = utils.Max(pnSpace.largestAcked, largestAcked)
313c316
< ackDelay = utils.MinDuration(ack.DelayTime, h.rttStats.MaxAckDelay())
---
> ackDelay = utils.Min(ack.DelayTime, h.rttStats.MaxAckDelay())
409c412
< h.lowestNotConfirmedAcked = utils.MaxPacketNumber(h.lowestNotConfirmedAcked, p.LargestAcked+1)
---
> h.lowestNotConfirmedAcked = utils.Max(h.lowestNotConfirmedAcked, p.LargestAcked+1)
557c560
< maxRTT := float64(utils.MaxDuration(h.rttStats.LatestRTT(), h.rttStats.SmoothedRTT()))
---
> maxRTT := float64(utils.Max(h.rttStats.LatestRTT(), h.rttStats.SmoothedRTT()))
561c564
< lossDelay = utils.MaxDuration(lossDelay, protocol.TimerGranularity)
---
> lossDelay = utils.Max(lossDelay, protocol.TimerGranularity)
601c604
< p.declaredLost = true
---
> p = pnSpace.history.DeclareLost(p)
770c773
< p.declaredLost = true
---
> pnSpace.history.DeclareLost(p)
811c814
< h.rttStats.UpdateRTT(utils.MaxDuration(minRTTAfterRetry, now.Sub(firstPacketSendTime)), 0, now)
---
> h.rttStats.UpdateRTT(utils.Max(minRTTAfterRetry, now.Sub(firstPacketSendTime)), 0, now)
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_history.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_history.go
8a9
> list "github.com/lucas-clemente/quic-go/internal/utils/linkedlist"
12,15c13,17
< rttStats *utils.RTTStats
< packetList *PacketList
< packetMap map[protocol.PacketNumber]*PacketElement
< highestSent protocol.PacketNumber
---
> rttStats *utils.RTTStats
> outstandingPacketList *list.List[Packet]
> etcPacketList *list.List[Packet]
> packetMap map[protocol.PacketNumber]*list.Element[Packet]
> highestSent protocol.PacketNumber
20,23c22,26
< rttStats: rttStats,
< packetList: NewPacketList(),
< packetMap: make(map[protocol.PacketNumber]*PacketElement),
< highestSent: protocol.InvalidPacketNumber,
---
> rttStats: rttStats,
> outstandingPacketList: list.New[Packet](),
> etcPacketList: list.New[Packet](),
> packetMap: make(map[protocol.PacketNumber]*list.Element[Packet]),
> highestSent: protocol.InvalidPacketNumber,
33c36
< el := h.packetList.PushBack(Packet{
---
> el := h.etcPacketList.PushBack(Packet{
44c47,52
< el := h.packetList.PushBack(*p)
---
> var el *list.Element[Packet]
> if p.outstanding() {
> el = h.outstandingPacketList.PushBack(*p)
> } else {
> el = h.etcPacketList.PushBack(*p)
> }
52,53c60,77
< var next *PacketElement
< for el := h.packetList.Front(); cont && el != nil; el = next {
---
> outstandingEl := h.outstandingPacketList.Front()
> etcEl := h.etcPacketList.Front()
> var el *list.Element[Packet]
> // whichever has the next packet number is returned first
> for cont {
> if outstandingEl == nil || (etcEl != nil && etcEl.Value.PacketNumber < outstandingEl.Value.PacketNumber) {
> el = etcEl
> } else {
> el = outstandingEl
> }
> if el == nil {
> return nil
> }
> if el == outstandingEl {
> outstandingEl = outstandingEl.Next()
> } else {
> etcEl = etcEl.Next()
> }
55d78
< next = el.Next()
64c87
< // FirstOutStanding returns the first outstanding packet.
---
> // FirstOutstanding returns the first outstanding packet.
66,70c89,91
< for el := h.packetList.Front(); el != nil; el = el.Next() {
< p := &el.Value
< if !p.declaredLost && !p.skippedPacket && !p.IsPathMTUProbePacket {
< return p
< }
---
> el := h.outstandingPacketList.Front()
> if el == nil {
> return nil
72c93
< return nil
---
> return &el.Value
84c105,106
< h.packetList.Remove(el)
---
> h.outstandingPacketList.Remove(el)
> h.etcPacketList.Remove(el)
90c112
< return h.FirstOutstanding() != nil
---
> return h.outstandingPacketList.Len() > 0
95,96c117,120
< var nextEl *PacketElement
< for el := h.packetList.Front(); el != nil; el = nextEl {
---
> var nextEl *list.Element[Packet]
> // we don't iterate outstandingPacketList, as we should not delete outstanding packets.
> // being outstanding for more than 3*PTO should only happen in the case of drastic RTT changes.
> for el := h.etcPacketList.Front(); el != nil; el = nextEl {
102,104d125
< if !p.skippedPacket && !p.declaredLost { // should only happen in the case of drastic RTT changes
< continue
< }
106c127,150
< h.packetList.Remove(el)
---
> h.etcPacketList.Remove(el)
> }
> }
>
> func (h *sentPacketHistory) DeclareLost(p *Packet) *Packet {
> el, ok := h.packetMap[p.PacketNumber]
> if !ok {
> return nil
> }
> // try to remove it from both lists, as we don't know which one it currently belongs to.
> // Remove is a no-op for elements that are not in the list.
> h.outstandingPacketList.Remove(el)
> h.etcPacketList.Remove(el)
> p.declaredLost = true
> // move it to the correct position in the etc list (based on the packet number)
> for el = h.etcPacketList.Back(); el != nil; el = el.Prev() {
> if el.Value.PacketNumber < p.PacketNumber {
> break
> }
> }
> if el == nil {
> el = h.etcPacketList.PushFront(*p)
> } else {
> el = h.etcPacketList.InsertAfter(*p, el)
107a152,153
> h.packetMap[p.PacketNumber] = el
> return &el.Value
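sent_packet_history.go now keeps two lists, outstanding packets and everything else (lost, skipped, MTU probes), which makes FirstOutstanding O(1); Iterate above merges the two lists back into packet-number order, and DeclareLost re-inserts a packet at its sorted spot in the etc list. The merge step, sketched over two sorted int slices rather than the linked lists:

package main

import "fmt"

// mergeByNumber visits two individually sorted packet-number sequences in
// global order, the way Iterate walks outstandingPacketList and etcPacketList.
func mergeByNumber(outstanding, etc []int, visit func(pn int) bool) {
	i, j := 0, 0
	for i < len(outstanding) || j < len(etc) {
		var pn int
		if i >= len(outstanding) || (j < len(etc) && etc[j] < outstanding[i]) {
			pn, j = etc[j], j+1 // etc list holds the next packet number
		} else {
			pn, i = outstanding[i], i+1
		}
		if !visit(pn) {
			return // caller asked to stop early
		}
	}
}

func main() {
	mergeByNumber([]int{2, 5, 6}, []int{1, 3, 4}, func(pn int) bool {
		fmt.Println(pn) // 1 2 3 4 5 6
		return true
	})
}
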
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic.go b/vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic.go
190c190
< targetCongestionWindow = utils.MinByteCount(targetCongestionWindow, currentCongestionWindow+c.ackedBytesCount/2)
---
> targetCongestionWindow = utils.Min(targetCongestionWindow, currentCongestionWindow+c.ackedBytesCount/2)
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic_sender.go b/vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic_sender.go
181c181
< c.largestAckedPacketNumber = utils.MaxPacketNumber(ackedPacketNumber, c.largestAckedPacketNumber)
---
> c.largestAckedPacketNumber = utils.Max(ackedPacketNumber, c.largestAckedPacketNumber)
249c249
< c.congestionWindow = utils.MinByteCount(c.maxCongestionWindow(), c.cubic.CongestionWindowAfterAck(ackedBytes, c.congestionWindow, c.rttStats.MinRTT(), eventTime))
---
> c.congestionWindow = utils.Min(c.maxCongestionWindow(), c.cubic.CongestionWindowAfterAck(ackedBytes, c.congestionWindow, c.rttStats.MinRTT(), eventTime))
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/hybrid_slow_start.go b/vendor/github.com/lucas-clemente/quic-go/internal/congestion/hybrid_slow_start.go
78,79c78,79
< minRTTincreaseThresholdUs = utils.MinInt64(minRTTincreaseThresholdUs, hybridStartDelayMaxThresholdUs)
< minRTTincreaseThreshold := time.Duration(utils.MaxInt64(minRTTincreaseThresholdUs, hybridStartDelayMinThresholdUs)) * time.Microsecond
---
> minRTTincreaseThresholdUs = utils.Min(minRTTincreaseThresholdUs, hybridStartDelayMaxThresholdUs)
> minRTTincreaseThreshold := time.Duration(utils.Max(minRTTincreaseThresholdUs, hybridStartDelayMinThresholdUs)) * time.Microsecond
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/pacer.go b/vendor/github.com/lucas-clemente/quic-go/internal/congestion/pacer.go
53c53
< return utils.MinByteCount(p.maxBurstSize(), budget)
---
> return utils.Min(p.maxBurstSize(), budget)
57c57
< return utils.MaxByteCount(
---
> return utils.Max(
69c69
< return p.lastSentTime.Add(utils.MaxDuration(
---
> return p.lastSentTime.Add(utils.Max(
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/base_flow_controller.go b/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/base_flow_controller.go
110c110
< newSize := utils.MinByteCount(2*c.receiveWindowSize, c.maxReceiveWindowSize)
---
> newSize := utils.Min(2*c.receiveWindowSize, c.maxReceiveWindowSize)
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/connection_flow_controller.go b/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/connection_flow_controller.go
90c90
< newSize := utils.MinByteCount(inc, c.maxReceiveWindowSize)
---
> newSize := utils.Min(inc, c.maxReceiveWindowSize)
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/stream_flow_controller.go b/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/stream_flow_controller.go
126c126
< return utils.MinByteCount(c.baseFlowController.sendWindowSize(), c.connection.SendWindowSize())
---
> return utils.Min(c.baseFlowController.sendWindowSize(), c.connection.SendWindowSize())
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/aead.go b/vendor/github.com/lucas-clemente/quic-go/internal/handshake/aead.go
86c86
< o.highestRcvdPN = utils.MaxPacketNumber(o.highestRcvdPN, pn)
---
> o.highestRcvdPN = utils.Max(o.highestRcvdPN, pn)
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/token_generator.go b/vendor/github.com/lucas-clemente/quic-go/internal/handshake/token_generator.go
3a4
> "bytes"
20,22c21,23
< IsRetryToken bool
< RemoteAddr string
< SentTime time.Time
---
> IsRetryToken bool
> SentTime time.Time
> encodedRemoteAddr []byte
27a29,33
> // ValidateRemoteAddr validates the address, but does not check expiration
> func (t *Token) ValidateRemoteAddr(addr net.Addr) bool {
> return bytes.Equal(encodeRemoteAddr(addr), t.encodedRemoteAddr)
> }
>
104,106c110,112
< IsRetryToken: t.IsRetryToken,
< RemoteAddr: decodeRemoteAddr(t.RemoteAddr),
< SentTime: time.Unix(0, t.Timestamp),
---
> IsRetryToken: t.IsRetryToken,
> SentTime: time.Unix(0, t.Timestamp),
> encodedRemoteAddr: t.RemoteAddr,
121,133d126
< }
<
< // decodeRemoteAddr decodes the remote address saved in the token
< func decodeRemoteAddr(data []byte) string {
< // data will never be empty for a token that we generated.
< // Check it to be on the safe side
< if len(data) == 0 {
< return ""
< }
< if data[0] == tokenPrefixIP {
< return net.IP(data[1:]).String()
< }
< return string(data[1:])
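token_generator.go now keeps the client address in encoded form and compares raw bytes in ValidateRemoteAddr rather than decoding back to a string. A sketch of that encode-and-compare shape; encodeRemoteAddr below is a simplified stand-in (the vendored code also writes a prefix byte distinguishing IPs from other address types, as the removed decodeRemoteAddr shows):

package main

import (
	"bytes"
	"fmt"
	"net"
)

// encodeRemoteAddr, simplified: raw IP bytes for UDP addresses, the string
// form otherwise. Encoding only the IP means a changed port still validates.
func encodeRemoteAddr(addr net.Addr) []byte {
	if udp, ok := addr.(*net.UDPAddr); ok {
		return []byte(udp.IP)
	}
	return []byte(addr.String())
}

type token struct{ encodedRemoteAddr []byte }

// validateRemoteAddr checks the address only; expiry is checked elsewhere.
func (t *token) validateRemoteAddr(addr net.Addr) bool {
	return bytes.Equal(encodeRemoteAddr(addr), t.encodedRemoteAddr)
}

func main() {
	issued := &net.UDPAddr{IP: net.IPv4(192, 0, 2, 1), Port: 4242}
	t := &token{encodedRemoteAddr: encodeRemoteAddr(issued)}
	redeemed := &net.UDPAddr{IP: net.IPv4(192, 0, 2, 1), Port: 9999} // same IP, new port
	fmt.Println(t.validateRemoteAddr(redeemed))                      // true
}
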
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/updatable_aead.go b/vendor/github.com/lucas-clemente/quic-go/internal/handshake/updatable_aead.go
172c172
< a.highestRcvdPN = utils.MaxPacketNumber(a.highestRcvdPN, pn)
---
> a.highestRcvdPN = utils.Max(a.highestRcvdPN, pn)
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/connection_id.go b/vendor/github.com/lucas-clemente/quic-go/internal/protocol/connection_id.go
69a70,81
>
> type DefaultConnectionIDGenerator struct {
> ConnLen int
> }
>
> func (d *DefaultConnectionIDGenerator) GenerateConnectionID() ([]byte, error) {
> return GenerateConnectionID(d.ConnLen)
> }
>
> func (d *DefaultConnectionIDGenerator) ConnectionIDLen() int {
> return d.ConnLen
> }
Only in a/vendor/github.com/lucas-clemente/quic-go/internal/qtls: go116.go
Only in a/vendor/github.com/lucas-clemente/quic-go/internal/qtls: go117.go
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go118.go b/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go118.go
2d1
< // +build go1.18,!go1.19
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go119.go b/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go119.go
2d1
< // +build go1.19
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go120.go b/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go120.go
2d1
< // +build go1.20
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go_oldversion.go b/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go_oldversion.go
1,3c1
< //go:build (go1.9 || go1.10 || go1.11 || go1.12 || go1.13 || go1.14 || go1.15) && !go1.16
< // +build go1.9 go1.10 go1.11 go1.12 go1.13 go1.14 go1.15
< // +build !go1.16
---
> //go:build !go1.18
Only in a/vendor/github.com/lucas-clemente/quic-go/internal/utils: byteinterval_linkedlist.go
Only in a/vendor/github.com/lucas-clemente/quic-go/internal/utils: gen.go
Only in b/vendor/github.com/lucas-clemente/quic-go/internal/utils: linkedlist
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/utils/minmax.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/minmax.go
7c7
< "github.com/lucas-clemente/quic-go/internal/protocol"
---
> "golang.org/x/exp/constraints"
13,14c13
< // Max returns the maximum of two Ints
< func Max(a, b int) int {
---
> func Max[T constraints.Ordered](a, b T) T {
21,22c20
< // MaxUint32 returns the maximum of two uint32
< func MaxUint32(a, b uint32) uint32 {
---
> func Min[T constraints.Ordered](a, b T) T {
24,95d21
< return b
< }
< return a
< }
<
< // MaxUint64 returns the maximum of two uint64
< func MaxUint64(a, b uint64) uint64 {
< if a < b {
< return b
< }
< return a
< }
<
< // MinUint64 returns the maximum of two uint64
< func MinUint64(a, b uint64) uint64 {
< if a < b {
< return a
< }
< return b
< }
<
< // Min returns the minimum of two Ints
< func Min(a, b int) int {
< if a < b {
< return a
< }
< return b
< }
<
< // MinUint32 returns the maximum of two uint32
< func MinUint32(a, b uint32) uint32 {
< if a < b {
< return a
< }
< return b
< }
<
< // MinInt64 returns the minimum of two int64
< func MinInt64(a, b int64) int64 {
< if a < b {
< return a
< }
< return b
< }
<
< // MaxInt64 returns the minimum of two int64
< func MaxInt64(a, b int64) int64 {
< if a > b {
< return a
< }
< return b
< }
<
< // MinByteCount returns the minimum of two ByteCounts
< func MinByteCount(a, b protocol.ByteCount) protocol.ByteCount {
< if a < b {
< return a
< }
< return b
< }
<
< // MaxByteCount returns the maximum of two ByteCounts
< func MaxByteCount(a, b protocol.ByteCount) protocol.ByteCount {
< if a < b {
< return b
< }
< return a
< }
<
< // MaxDuration returns the max duration
< func MaxDuration(a, b time.Duration) time.Duration {
< if a > b {
101,108d26
< // MinDuration returns the minimum duration
< func MinDuration(a, b time.Duration) time.Duration {
< if a > b {
< return b
< }
< return a
< }
<
117c35
< return MinDuration(a, b)
---
> return Min(a, b)
151,166d68
< return a
< }
< return b
< }
<
< // MaxPacketNumber returns the max packet number
< func MaxPacketNumber(a, b protocol.PacketNumber) protocol.PacketNumber {
< if a > b {
< return a
< }
< return b
< }
<
< // MinPacketNumber returns the min packet number
< func MinPacketNumber(a, b protocol.PacketNumber) protocol.PacketNumber {
< if a < b {
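The whole minmax.go shrinkage above is the Go 1.18 generics payoff: Max and Min over constraints.Ordered subsume MaxUint32, MinUint64, MinInt64, MaxInt64, MinByteCount, MaxByteCount, MinDuration, MaxDuration, MinPacketNumber, MaxPacketNumber and the rest, which is why call sites throughout this diff collapse to utils.Min and utils.Max. The generic pair as it appears in the new file, wrapped in a runnable example:

package main

import (
	"fmt"
	"time"

	"golang.org/x/exp/constraints"
)

// Max returns the larger of two values of any ordered type.
func Max[T constraints.Ordered](a, b T) T {
	if a < b {
		return b
	}
	return a
}

// Min returns the smaller of two values of any ordered type.
func Min[T constraints.Ordered](a, b T) T {
	if a < b {
		return a
	}
	return b
}

func main() {
	fmt.Println(Max(3, 7))                       // works for ints...
	fmt.Println(Min(2*time.Second, time.Minute)) // ...and for time.Duration alike
}
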
Only in a/vendor/github.com/lucas-clemente/quic-go/internal/utils: new_connection_id.go
Only in a/vendor/github.com/lucas-clemente/quic-go/internal/utils: newconnectionid_linkedlist.go
Only in a/vendor/github.com/lucas-clemente/quic-go/internal/utils: packet_interval.go
Only in a/vendor/github.com/lucas-clemente/quic-go/internal/utils: packetinterval_linkedlist.go
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/utils/rtt_stats.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/rtt_stats.go
58c58
< pto := r.SmoothedRTT() + MaxDuration(4*r.MeanDeviation(), protocol.TimerGranularity)
---
> pto := r.SmoothedRTT() + Max(4*r.MeanDeviation(), protocol.TimerGranularity)
125,126c125,126
< r.meanDeviation = MaxDuration(r.meanDeviation, AbsDuration(r.smoothedRTT-r.latestRTT))
< r.smoothedRTT = MaxDuration(r.smoothedRTT, r.latestRTT)
---
> r.meanDeviation = Max(r.meanDeviation, AbsDuration(r.smoothedRTT-r.latestRTT))
> r.smoothedRTT = Max(r.smoothedRTT, r.latestRTT)
Only in a/vendor/github.com/lucas-clemente/quic-go/internal/utils: streamframe_interval.go
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/wire/header.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/header.go
22,23c22
< isLongHeader := data[0]&0x80 > 0
< if !isLongHeader {
---
> if !IsLongHeaderPacket(data[0]) {
38a38,42
> // IsLongHeaderPacket says if this is a Long Header packet
> func IsLongHeaderPacket(firstByte byte) bool {
> return firstByte&0x80 > 0
> }
>
44c48
< return b[0]&0x80 > 0 && b[1] == 0 && b[2] == 0 && b[3] == 0 && b[4] == 0
---
> return IsLongHeaderPacket(b[0]) && b[1] == 0 && b[2] == 0 && b[3] == 0 && b[4] == 0
53c57
< if b[0]&0x80 == 0 {
---
> if !IsLongHeaderPacket(b[0]) {
132c136
< IsLongHeader: typeByte&0x80 > 0,
---
> IsLongHeader: IsLongHeaderPacket(typeByte),
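wire/header.go replaces the scattered `data[0]&0x80 > 0` and `b[0]&0x80 == 0` tests with one named helper; in QUIC (RFC 9000, section 17) the most significant bit of the first byte is the Header Form bit, set for long header packets. The check in isolation:

package main

import "fmt"

// isLongHeaderPacket reports whether the Header Form bit (0x80) of a QUIC
// packet's first byte is set, i.e. whether it is a long header packet.
func isLongHeaderPacket(firstByte byte) bool {
	return firstByte&0x80 > 0
}

func main() {
	fmt.Println(isLongHeaderPacket(0xc0)) // true: long header (e.g. Initial)
	fmt.Println(isLongHeaderPacket(0x40)) // false: short header (1-RTT)
}
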
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/internal/wire/transport_parameters.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/transport_parameters.go
288c288
< p.MaxIdleTimeout = utils.MaxDuration(protocol.MinRemoteIdleTimeout, time.Duration(val)*time.Millisecond)
---
> p.MaxIdleTimeout = utils.Max(protocol.MinRemoteIdleTimeout, time.Duration(val)*time.Millisecond)
Only in b/vendor/github.com/lucas-clemente/quic-go/logging: null_tracer.go
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/packet_handler_map.go b/vendor/github.com/lucas-clemente/quic-go/packet_handler_map.go
25,51d24
< type zeroRTTQueue struct {
< queue []*receivedPacket
< retireTimer *time.Timer
< }
<
< var _ packetHandler = &zeroRTTQueue{}
<
< func (h *zeroRTTQueue) handlePacket(p *receivedPacket) {
< if len(h.queue) < protocol.Max0RTTQueueLen {
< h.queue = append(h.queue, p)
< }
< }
< func (h *zeroRTTQueue) shutdown() {}
< func (h *zeroRTTQueue) destroy(error) {}
< func (h *zeroRTTQueue) getPerspective() protocol.Perspective { return protocol.PerspectiveClient }
< func (h *zeroRTTQueue) EnqueueAll(sess packetHandler) {
< for _, p := range h.queue {
< sess.handlePacket(p)
< }
< }
<
< func (h *zeroRTTQueue) Clear() {
< for _, p := range h.queue {
< p.buffer.Release()
< }
< }
<
60,62c33,36
< type packetHandlerMapEntry struct {
< packetHandler packetHandler
< is0RTTQueue bool
---
> type closePacket struct {
> payload []byte
> addr net.Addr
> info *packetInfo
75c49,51
< handlers map[string] /* string(ConnectionID)*/ packetHandlerMapEntry
---
> closeQueue chan closePacket
>
> handlers map[string] /* string(ConnectionID)*/ packetHandler
154c130
< handlers: make(map[string]packetHandlerMapEntry),
---
> handlers: make(map[string]packetHandler),
157a134
> closeQueue: make(chan closePacket, 4),
163a141
> go m.runCloseQueue()
205c183
< h.handlers[string(id)] = packetHandlerMapEntry{packetHandler: handler}
---
> h.handlers[string(id)] = handler
215,216c193,195
< if entry, ok := h.handlers[string(clientDestConnID)]; ok {
< if !entry.is0RTTQueue {
---
> if handler, ok := h.handlers[string(clientDestConnID)]; ok {
> q, ok = handler.(*zeroRTTQueue)
> if !ok {
220d198
< q = entry.packetHandler.(*zeroRTTQueue)
227c205
< sess := fn()
---
> conn := fn()
229c207
< q.EnqueueAll(sess)
---
> q.EnqueueAll(conn)
231,232c209,210
< h.handlers[string(clientDestConnID)] = packetHandlerMapEntry{packetHandler: sess}
< h.handlers[string(newConnID)] = packetHandlerMapEntry{packetHandler: sess}
---
> h.handlers[string(clientDestConnID)] = conn
> h.handlers[string(newConnID)] = conn
254c232,254
< func (h *packetHandlerMap) ReplaceWithClosed(id protocol.ConnectionID, handler packetHandler) {
---
> // ReplaceWithClosed is called when a connection is closed.
> // Depending on which side closed the connection, we need to:
> // * remote close: absorb delayed packets
> // * local close: retransmit the CONNECTION_CLOSE packet, in case it was lost
> func (h *packetHandlerMap) ReplaceWithClosed(ids []protocol.ConnectionID, pers protocol.Perspective, connClosePacket []byte) {
> var handler packetHandler
> if connClosePacket != nil {
> handler = newClosedLocalConn(
> func(addr net.Addr, info *packetInfo) {
> select {
> case h.closeQueue <- closePacket{payload: connClosePacket, addr: addr, info: info}:
> default:
> // Oops, we're backlogged.
> // Just drop the packet, sending CONNECTION_CLOSE copies is best effort anyway.
> }
> },
> pers,
> h.logger,
> )
> } else {
> handler = newClosedRemoteConn(pers)
> }
>
256c256,258
< h.handlers[string(id)] = packetHandlerMapEntry{packetHandler: handler}
---
> for _, id := range ids {
> h.handlers[string(id)] = handler
> }
258c260
< h.logger.Debugf("Replacing connection for connection ID %s with a closed connection.", id)
---
> h.logger.Debugf("Replacing connection for connection IDs %s with a closed connection.", ids)
263c265,267
< delete(h.handlers, string(id))
---
> for _, id := range ids {
> delete(h.handlers, string(id))
> }
265c269
< h.logger.Debugf("Removing connection ID %s for a closed connection after it has been retired.", id)
---
> h.logger.Debugf("Removing connection IDs %s for a closed connection after it has been retired.", ids)
268a273,283
> func (h *packetHandlerMap) runCloseQueue() {
> for {
> select {
> case <-h.listening:
> return
> case p := <-h.closeQueue:
> h.conn.WritePacket(p.payload, p.addr, p.info.OOB())
> }
> }
> }
>
295,296c310,311
< for _, entry := range h.handlers {
< if entry.packetHandler.getPerspective() == protocol.PerspectiveServer {
---
> for _, handler := range h.handlers {
> if handler.getPerspective() == protocol.PerspectiveServer {
302c317
< }(entry.packetHandler)
---
> }(handler)
327c342
< for _, entry := range h.handlers {
---
> for _, handler := range h.handlers {
332c347
< }(entry.packetHandler)
---
> }(handler)
382,383c397,398
< if entry, ok := h.handlers[string(connID)]; ok {
< if entry.is0RTTQueue { // only enqueue 0-RTT packets in the 0-RTT queue
---
> if handler, ok := h.handlers[string(connID)]; ok {
> if ha, ok := handler.(*zeroRTTQueue); ok { // only enqueue 0-RTT packets in the 0-RTT queue
385c400
< entry.packetHandler.handlePacket(p)
---
> ha.handlePacket(p)
389c404
< entry.packetHandler.handlePacket(p)
---
> handler.handlePacket(p)
393c408
< if p.data[0]&0x80 == 0 {
---
> if !wire.IsLongHeaderPacket(p.data[0]) {
407,410c422
< h.handlers[string(connID)] = packetHandlerMapEntry{
< packetHandler: queue,
< is0RTTQueue: true,
< }
---
> h.handlers[string(connID)] = queue
416,424c428,438
< if entry, ok := h.handlers[string(connID)]; ok && entry.is0RTTQueue {
< delete(h.handlers, string(connID))
< h.numZeroRTTEntries--
< if h.numZeroRTTEntries < 0 {
< panic("number of 0-RTT queues < 0")
< }
< entry.packetHandler.(*zeroRTTQueue).Clear()
< if h.logger.Debug() {
< h.logger.Debugf("Removing 0-RTT queue for %s.", connID)
---
> if handler, ok := h.handlers[string(connID)]; ok {
> if q, ok := handler.(*zeroRTTQueue); ok {
> delete(h.handlers, string(connID))
> h.numZeroRTTEntries--
> if h.numZeroRTTEntries < 0 {
> panic("number of 0-RTT queues < 0")
> }
> q.Clear()
> if h.logger.Debug() {
> h.logger.Debugf("Removing 0-RTT queue for %s.", connID)
> }
436c450
< if data[0]&0x80 != 0 {
---
> if wire.IsLongHeaderPacket(data[0]) {
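Instead of each closedLocalConn owning its own goroutine and 64-slot channel, the packet handler map above now runs a single runCloseQueue goroutine over a 4-slot closeQueue, and ReplaceWithClosed enqueues retransmissions without blocking, dropping them when the queue is full since CONNECTION_CLOSE copies are best effort anyway. The non-blocking enqueue in isolation:

package main

import "fmt"

type closePacket struct{ payload []byte }

// enqueue never blocks: if the queue is backlogged the packet is dropped,
// mirroring the select/default in ReplaceWithClosed above.
func enqueue(q chan closePacket, p closePacket) bool {
	select {
	case q <- p:
		return true
	default:
		return false // backlogged; best effort only
	}
}

func main() {
	q := make(chan closePacket, 2)
	for i := 0; i < 4; i++ {
		fmt.Println(enqueue(q, closePacket{payload: []byte("close")})) // true, true, false, false
	}
	// a consumer like runCloseQueue would drain q and write each payload
}
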
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/packet_packer.go b/vendor/github.com/lucas-clemente/quic-go/packet_packer.go
892c892
< p.maxPacketSize = utils.MinByteCount(p.maxPacketSize, params.MaxUDPPayloadSize)
---
> p.maxPacketSize = utils.Min(p.maxPacketSize, params.MaxUDPPayloadSize)
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/README.md b/vendor/github.com/lucas-clemente/quic-go/README.md
8c8
< quic-go is an implementation of the QUIC protocol ([RFC 9000](https://datatracker.ietf.org/doc/html/rfc9000), [RFC 9001](https://datatracker.ietf.org/doc/html/rfc9001), [RFC 9002](https://datatracker.ietf.org/doc/html/rfc9002)) in Go, including the [Unreliable Datagram Extension, RFC 9221](https://datatracker.ietf.org/doc/html/rfc9221). It has support for HTTP/3 [RFC 9114](https://datatracker.ietf.org/doc/html/rfc9114).
---
> quic-go is an implementation of the QUIC protocol ([RFC 9000](https://datatracker.ietf.org/doc/html/rfc9000), [RFC 9001](https://datatracker.ietf.org/doc/html/rfc9001), [RFC 9002](https://datatracker.ietf.org/doc/html/rfc9002)) in Go, including the Unreliable Datagram Extension ([RFC 9221](https://datatracker.ietf.org/doc/html/rfc9221)). It has support for HTTP/3 ([RFC 9114](https://datatracker.ietf.org/doc/html/rfc9114)), including QPACK ([RFC 9204](https://datatracker.ietf.org/doc/html/rfc9204)).
10c10
< In addition the RFCs listed above, it currently implements the [IETF QUIC draft-29](https://tools.ietf.org/html/draft-ietf-quic-transport-29). Support for draft-29 will eventually be dropped, as it is phased out of the ecosystem.
---
> In addition to the RFCs listed above, it currently implements the [IETF QUIC draft-29](https://tools.ietf.org/html/draft-ietf-quic-transport-29). Support for draft-29 will eventually be dropped, as it is phased out of the ecosystem.
14c14
< *We currently support Go 1.16.x, Go 1.17.x, and Go 1.18.x.*
---
> *We currently support Go 1.18.x and Go 1.19.x.*
57c57
<
---
> | [YoMo](https://github.com/yomorun/yomo) | Streaming Serverless Framework for Geo-distributed System | ![GitHub Repo stars](https://img.shields.io/github/stars/yomorun/yomo?style=flat-square) |
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/send_stream.go b/vendor/github.com/lucas-clemente/quic-go/send_stream.go
280c280
< maxDataLen := utils.MinByteCount(sendWindow, nextFrame.MaxDataLen(maxBytes, s.version))
---
> maxDataLen := utils.Min(sendWindow, nextFrame.MaxDataLen(maxBytes, s.version))
315c315
< s.getDataForWriting(f, utils.MinByteCount(maxDataLen, sendWindow))
---
> s.getDataForWriting(f, utils.Min(maxDataLen, sendWindow))
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/server.go b/vendor/github.com/lucas-clemente/quic-go/server.go
91a92
> bool, /* client address validated by an address validation token */
193c194
< connHandler, err := getMultiplexer().AddConn(conn, config.ConnectionIDLength, config.StatelessResetKey, config.Tracer)
---
> connHandler, err := getMultiplexer().AddConn(conn, config.ConnectionIDGenerator.ConnectionIDLen(), config.StatelessResetKey, config.Tracer)
244,263d244
< var defaultAcceptToken = func(clientAddr net.Addr, token *Token) bool {
< if token == nil {
< return false
< }
< validity := protocol.TokenValidity
< if token.IsRetryToken {
< validity = protocol.RetryTokenValidity
< }
< if time.Now().After(token.SentTime.Add(validity)) {
< return false
< }
< var sourceAddr string
< if udpAddr, ok := clientAddr.(*net.UDPAddr); ok {
< sourceAddr = udpAddr.IP.String()
< } else {
< sourceAddr = clientAddr.String()
< }
< return sourceAddr == token.RemoteAddr
< }
<
344c325
< hdr, _, _, err := wire.ParsePacket(p.data, s.config.ConnectionIDLength)
---
> hdr, _, _, err := wire.ParsePacket(p.data, s.config.ConnectionIDGenerator.ConnectionIDLen())
397a379,398
> // validateToken returns false if:
> // - address is invalid
> // - token is expired
> // - token is null
> func (s *baseServer) validateToken(token *handshake.Token, addr net.Addr) bool {
> if token == nil {
> return false
> }
> if !token.ValidateRemoteAddr(addr) {
> return false
> }
> if !token.IsRetryToken && time.Since(token.SentTime) > s.config.MaxTokenAge {
> return false
> }
> if token.IsRetryToken && time.Since(token.SentTime) > s.config.MaxRetryTokenAge {
> return false
> }
> return true
> }
>
408c409
< token *Token
---
> token *handshake.Token
413c414
< c, err := s.tokenGenerator.DecodeToken(hdr.Token)
---
> tok, err := s.tokenGenerator.DecodeToken(hdr.Token)
415,422c416,418
< token = &Token{
< IsRetryToken: c.IsRetryToken,
< RemoteAddr: c.RemoteAddr,
< SentTime: c.SentTime,
< }
< if token.IsRetryToken {
< origDestConnID = c.OriginalDestConnectionID
< retrySrcConnID = &c.RetrySrcConnectionID
---
> if tok.IsRetryToken {
> origDestConnID = tok.OriginalDestConnectionID
> retrySrcConnID = &tok.RetrySrcConnectionID
423a420
> token = tok
426,429c423,437
< if !s.config.AcceptToken(p.remoteAddr, token) {
< go func() {
< defer p.buffer.Release()
< if token != nil && token.IsRetryToken {
---
>
> clientAddrIsValid := s.validateToken(token, p.remoteAddr)
>
> if token != nil && !clientAddrIsValid {
> // For invalid and expired non-retry tokens, we don't send an INVALID_TOKEN error.
> // We just ignore them, and act as if there was no token on this packet at all.
> // This also means we might send a Retry later.
> if !token.IsRetryToken {
> token = nil
> } else {
> // For Retry tokens, we send an INVALID_ERROR if
> // * the token is too old, or
> // * the token is invalid, in case of a retry token.
> go func() {
> defer p.buffer.Release()
433,434c441,447
< return
< }
---
> }()
> return nil
> }
> }
> if token == nil && s.config.RequireAddressValidation(p.remoteAddr) {
> go func() {
> defer p.buffer.Release()
453c466
< connID, err := protocol.GenerateConnectionID(s.config.ConnectionIDLength)
---
> connID, err := s.config.ConnectionIDGenerator.GenerateConnectionID()
457c470
< s.logger.Debugf("Changing connection ID to %s.", connID)
---
> s.logger.Debugf("Changing connection ID to %s.", protocol.ConnectionID(connID))
486a500
> clientAddrIsValid,
538c552
< srcConnID, err := protocol.GenerateConnectionID(s.config.ConnectionIDLength)
---
> srcConnID, err := s.config.ConnectionIDGenerator.GenerateConnectionID()
554c568
< s.logger.Debugf("Changing connection ID to %s.", srcConnID)
---
> s.logger.Debugf("Changing connection ID to %s.", protocol.ConnectionID(srcConnID))
Only in a/vendor/github.com/lucas-clemente/quic-go: streams_map_generic_helper.go
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/streams_map.go b/vendor/github.com/lucas-clemente/quic-go/streams_map.go
58,61c58,61
< outgoingBidiStreams *outgoingBidiStreamsMap
< outgoingUniStreams *outgoingUniStreamsMap
< incomingBidiStreams *incomingBidiStreamsMap
< incomingUniStreams *incomingUniStreamsMap
---
> outgoingBidiStreams *outgoingStreamsMap[streamI]
> outgoingUniStreams *outgoingStreamsMap[sendStreamI]
> incomingBidiStreams *incomingStreamsMap[streamI]
> incomingUniStreams *incomingStreamsMap[receiveStreamI]
88c88,89
< m.outgoingBidiStreams = newOutgoingBidiStreamsMap(
---
> m.outgoingBidiStreams = newOutgoingStreamsMap(
> protocol.StreamTypeBidi,
95c96,97
< m.incomingBidiStreams = newIncomingBidiStreamsMap(
---
> m.incomingBidiStreams = newIncomingStreamsMap(
> protocol.StreamTypeBidi,
103c105,106
< m.outgoingUniStreams = newOutgoingUniStreamsMap(
---
> m.outgoingUniStreams = newOutgoingStreamsMap(
> protocol.StreamTypeUni,
110c113,114
< m.incomingUniStreams = newIncomingUniStreamsMap(
---
> m.incomingUniStreams = newIncomingStreamsMap(
> protocol.StreamTypeUni,
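
Note: this is the visible end of quic-go's migration from genny code generation to Go 1.18 generics: the four generated stream-map types collapse into incomingStreamsMap[T] and outgoingStreamsMap[T], parameterized by the stream interface (the deleted and added files listed below are the generated sources going away). A deliberately reduced sketch of the shape, with illustrative names:

package main

import (
	"fmt"
	"io"
)

// streamsMap is an illustrative reduction: one generic container
// replaces four genny-generated ones.
type streamsMap[T any] struct {
	streams map[int64]T
}

func newStreamsMap[T any]() *streamsMap[T] {
	return &streamsMap[T]{streams: make(map[int64]T)}
}

func main() {
	bidi := newStreamsMap[io.ReadWriter]() // bidirectional streams
	uni := newStreamsMap[io.Writer]()      // outgoing unidirectional
	fmt.Println(len(bidi.streams), len(uni.streams)) // 0 0
}
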
Only in a/vendor/github.com/lucas-clemente/quic-go: streams_map_incoming_bidi.go
Only in a/vendor/github.com/lucas-clemente/quic-go: streams_map_incoming_generic.go
Only in b/vendor/github.com/lucas-clemente/quic-go: streams_map_incoming.go
Only in a/vendor/github.com/lucas-clemente/quic-go: streams_map_incoming_uni.go
Only in a/vendor/github.com/lucas-clemente/quic-go: streams_map_outgoing_bidi.go
Only in a/vendor/github.com/lucas-clemente/quic-go: streams_map_outgoing_generic.go
Only in b/vendor/github.com/lucas-clemente/quic-go: streams_map_outgoing.go
Only in a/vendor/github.com/lucas-clemente/quic-go: streams_map_outgoing_uni.go
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/sys_conn_df.go b/vendor/github.com/lucas-clemente/quic-go/sys_conn_df.go
2d1
< // +build !linux,!windows
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/sys_conn_df_linux.go b/vendor/github.com/lucas-clemente/quic-go/sys_conn_df_linux.go
2d1
< // +build linux
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/sys_conn_df_windows.go b/vendor/github.com/lucas-clemente/quic-go/sys_conn_df_windows.go
2d1
< // +build windows
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/sys_conn_helper_darwin.go b/vendor/github.com/lucas-clemente/quic-go/sys_conn_helper_darwin.go
2d1
< // +build darwin
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/sys_conn_helper_freebsd.go b/vendor/github.com/lucas-clemente/quic-go/sys_conn_helper_freebsd.go
2d1
< // +build freebsd
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/sys_conn_helper_linux.go b/vendor/github.com/lucas-clemente/quic-go/sys_conn_helper_linux.go
2d1
< // +build linux
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/sys_conn_no_oob.go b/vendor/github.com/lucas-clemente/quic-go/sys_conn_no_oob.go
2d1
< // +build !darwin,!linux,!freebsd,!windows
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/sys_conn_oob.go b/vendor/github.com/lucas-clemente/quic-go/sys_conn_oob.go
2d1
< // +build darwin linux freebsd
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/sys_conn_windows.go b/vendor/github.com/lucas-clemente/quic-go/sys_conn_windows.go
2d1
< // +build windows
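
Note: the run of two-line deletions in the sys_conn_* files (and tools.go below) all drop the legacy "// +build" comment, keeping only the "//go:build" form that is canonical since Go 1.17; the old line is redundant once pre-1.17 toolchains are out of support. A constrained file then starts like this (package name illustrative):

//go:build darwin || linux || freebsd

package sysconn
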
diff -r --color a/vendor/github.com/lucas-clemente/quic-go/tools.go b/vendor/github.com/lucas-clemente/quic-go/tools.go
2d1
< // +build tools
7c6
< _ "github.com/cheekybits/genny"
---
> _ "github.com/golang/mock/mockgen"
Only in b/vendor/github.com/lucas-clemente/quic-go: zero_rtt_queue.go
Only in a/vendor/github.com/marten-seemann: qtls-go1-16
Only in a/vendor/github.com/marten-seemann: qtls-go1-17
diff -r --color a/vendor/github.com/multiformats/go-base32/base32.go b/vendor/github.com/multiformats/go-base32/base32.go
96c96
< // HexEncoding is the ``Extended Hex Alphabet'' defined in RFC 4648.
---
> // HexEncoding is the “Extended Hex Alphabet” defined in RFC 4648.
228a229
> //lint:ignore S1001 fixed-length 5-byte slice
Only in b/vendor/github.com/multiformats/go-base32: version.json
diff -r --color a/vendor/github.com/multiformats/go-multiaddr/doc.go b/vendor/github.com/multiformats/go-multiaddr/doc.go
10,14c10,14
< import (
< "bytes"
< "strings"
< ma "github.com/multiformats/go-multiaddr"
< )
---
> import (
> "bytes"
> "strings"
> ma "github.com/multiformats/go-multiaddr"
> )
16,17c16,17
< // construct from a string (err signals parse failure)
< m1, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/1234")
---
> // construct from a string (err signals parse failure)
> m1, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/1234")
19,20c19,20
< // construct from bytes (err signals parse failure)
< m2, err := ma.NewMultiaddrBytes(m1.Bytes())
---
> // construct from bytes (err signals parse failure)
> m2, err := ma.NewMultiaddrBytes(m1.Bytes())
22,33c22,27
< // true
< strings.Equal(m1.String(), "/ip4/127.0.0.1/udp/1234")
< strings.Equal(m1.String(), m2.String())
< bytes.Equal(m1.Bytes(), m2.Bytes())
< m1.Equal(m2)
< m2.Equal(m1)
<
< // tunneling (en/decap)
< printer, _ := ma.NewMultiaddr("/ip4/192.168.0.13/tcp/80")
< proxy, _ := ma.NewMultiaddr("/ip4/10.20.30.40/tcp/443")
< printerOverProxy := proxy.Encapsulate(printer)
< proxyAgain := printerOverProxy.Decapsulate(printer)
---
> // true
> strings.Equal(m1.String(), "/ip4/127.0.0.1/udp/1234")
> strings.Equal(m1.String(), m2.String())
> bytes.Equal(m1.Bytes(), m2.Bytes())
> m1.Equal(m2)
> m2.Equal(m1)
34a29,33
> // tunneling (en/decap)
> printer, _ := ma.NewMultiaddr("/ip4/192.168.0.13/tcp/80")
> proxy, _ := ma.NewMultiaddr("/ip4/10.20.30.40/tcp/443")
> printerOverProxy := proxy.Encapsulate(printer)
> proxyAgain := printerOverProxy.Decapsulate(printer)
diff -r --color a/vendor/github.com/multiformats/go-multiaddr/filter.go b/vendor/github.com/multiformats/go-multiaddr/filter.go
88,89c88,90
< // Instead, the highest-specific last filter should win; that way more specific filters
< // override more general ones.
---
> //
> // Instead, the highest-specific last filter should win; that way more specific filters
> // override more general ones.
diff -r --color a/vendor/github.com/multiformats/go-multiaddr/interface.go b/vendor/github.com/multiformats/go-multiaddr/interface.go
15,18c15
< import ma "github.com/multiformats/go-multiaddr"
<
< addr, err := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/80")
< // err non-nil when parsing failed.
---
> import ma "github.com/multiformats/go-multiaddr"
19a17,18
> addr, err := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/80")
> // err non-nil when parsing failed.
51c50
< // Decapsultate removes a Multiaddr wrapping. For example:
---
> // Decapsulate removes a Multiaddr wrapping. For example:
53c52,54
< // /ip4/1.2.3.4/tcp/80 decapsulate /ip4/1.2.3.4 = /tcp/80
---
> // /ip4/1.2.3.4/tcp/80 decapsulate /tcp/80 = /ip4/1.2.3.4
> // /ip4/1.2.3.4/tcp/80 decapsulate /udp/80 = /ip4/1.2.3.4/tcp/80
> // /ip4/1.2.3.4/tcp/80 decapsulate /ip4/1.2.3.4 = nil
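
Note: the old Decapsulate doc line was simply wrong; the replacement enumerates the actual behavior, which is easy to confirm against the library:

package main

import (
	"fmt"

	ma "github.com/multiformats/go-multiaddr"
)

// Mirrors the three cases in the corrected doc comment.
func main() {
	addr, _ := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/80")
	tcp, _ := ma.NewMultiaddr("/tcp/80")
	udp, _ := ma.NewMultiaddr("/udp/80")
	ip, _ := ma.NewMultiaddr("/ip4/1.2.3.4")

	fmt.Println(addr.Decapsulate(tcp)) // /ip4/1.2.3.4
	fmt.Println(addr.Decapsulate(udp)) // /ip4/1.2.3.4/tcp/80 (no match, unchanged)
	fmt.Println(addr.Decapsulate(ip))  // <nil>
}
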
diff -r --color a/vendor/github.com/multiformats/go-multiaddr/net/net.go b/vendor/github.com/multiformats/go-multiaddr/net/net.go
84,88c84,88
< // * If the wrapped connection exposes the "half-open" closer methods
< // (CloseWrite, CloseRead), these will be available on the wrapped connection
< // via type assertions.
< // * If the wrapped connection is a UnixConn, IPConn, TCPConn, or UDPConn, all
< // methods on these wrapped connections will be available via type assertions.
---
> // - If the wrapped connection exposes the "half-open" closer methods
> // (CloseWrite, CloseRead), these will be available on the wrapped connection
> // via type assertions.
> // - If the wrapped connection is a UnixConn, IPConn, TCPConn, or UDPConn, all
> // methods on these wrapped connections will be available via type assertions.
227,229c227,229
< // * Connections returned from Accept implement multiaddr/net Conn.
< // * Calling WrapNetListener on the net.Listener returned by this function will
< // return the original (underlying) multiaddr/net Listener.
---
> // - Connections returned from Accept implement multiaddr/net Conn.
> // - Calling WrapNetListener on the net.Listener returned by this function will
> // return the original (underlying) multiaddr/net Listener.
diff -r --color a/vendor/github.com/multiformats/go-multiaddr/protocols.go b/vendor/github.com/multiformats/go-multiaddr/protocols.go
6,38c6,40
< P_IP4 = 0x0004
< P_TCP = 0x0006
< P_DNS = 0x0035 // 4 or 6
< P_DNS4 = 0x0036
< P_DNS6 = 0x0037
< P_DNSADDR = 0x0038
< P_UDP = 0x0111
< P_DCCP = 0x0021
< P_IP6 = 0x0029
< P_IP6ZONE = 0x002A
< P_IPCIDR = 0x002B
< P_QUIC = 0x01CC
< P_WEBTRANSPORT = 0x01D1
< P_CERTHASH = 0x01D2
< P_SCTP = 0x0084
< P_CIRCUIT = 0x0122
< P_UDT = 0x012D
< P_UTP = 0x012E
< P_UNIX = 0x0190
< P_P2P = 0x01A5
< P_IPFS = 0x01A5 // alias for backwards compatibility
< P_HTTP = 0x01E0
< P_HTTPS = 0x01BB // deprecated alias for /tls/http
< P_ONION = 0x01BC // also for backwards compatibility
< P_ONION3 = 0x01BD
< P_GARLIC64 = 0x01BE
< P_GARLIC32 = 0x01BF
< P_P2P_WEBRTC_DIRECT = 0x0114
< P_TLS = 0x01c0
< P_NOISE = 0x01c6
< P_WS = 0x01DD
< P_WSS = 0x01DE // deprecated alias for /tls/ws
< P_PLAINTEXTV2 = 0x706c61
---
> P_IP4 = 4
> P_TCP = 6
> P_DNS = 53 // 4 or 6
> P_DNS4 = 54
> P_DNS6 = 55
> P_DNSADDR = 56
> P_UDP = 273
> P_DCCP = 33
> P_IP6 = 41
> P_IP6ZONE = 42
> P_IPCIDR = 43
> P_QUIC = 460
> P_WEBTRANSPORT = 465
> P_CERTHASH = 466
> P_SCTP = 132
> P_CIRCUIT = 290
> P_UDT = 301
> P_UTP = 302
> P_UNIX = 400
> P_P2P = 421
> P_IPFS = P_P2P // alias for backwards compatibility
> P_HTTP = 480
> P_HTTPS = 443 // deprecated alias for /tls/http
> P_ONION = 444 // also for backwards compatibility
> P_ONION3 = 445
> P_GARLIC64 = 446
> P_GARLIC32 = 447
> P_P2P_WEBRTC_DIRECT = 276
> P_TLS = 448
> P_SNI = 449
> P_NOISE = 454
> P_WS = 477
> P_WSS = 478 // deprecated alias for /tls/ws
> P_PLAINTEXTV2 = 7367777
> P_WEBRTC = 280
229a232,238
> protoSNI = Protocol{
> Name: "sni",
> Size: LengthPrefixedVarSize,
> Code: P_SNI,
> VCode: CodeToVarint(P_SNI),
> Transcoder: TranscoderDns,
> }
249a259,263
> protoWebRTC = Protocol{
> Name: "webrtc",
> Code: P_WEBRTC,
> VCode: CodeToVarint(P_WEBRTC),
> }
281a296
> protoSNI,
285a301
> protoWebRTC,
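
Note: the protocol-code constants are rewritten from hex to decimal (matching the canonical multicodec table) and P_SNI and P_WEBRTC are added; the existing values are unchanged, only the notation differs, which is easy to spot-check:

package main

import "fmt"

func main() {
	fmt.Println(0x01CC == 460)       // P_QUIC: true
	fmt.Println(0x01D2 == 466)       // P_CERTHASH: true
	fmt.Println(0x706c61 == 7367777) // P_PLAINTEXTV2: true
}
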
diff -r --color a/vendor/github.com/multiformats/go-multiaddr/version.json b/vendor/github.com/multiformats/go-multiaddr/version.json
2c2
< "version": "v0.6.0"
---
> "version": "v0.7.0"
diff -r --color a/vendor/github.com/multiformats/go-multicodec/version.json b/vendor/github.com/multiformats/go-multicodec/version.json
2c2
< "version": "v0.5.0"
---
> "version": "v0.6.0"
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
72,74c72,74
< // func (c customCollector) Describe(ch chan<- *Desc) {
< // DescribeByCollect(c, ch)
< // }
---
> // func (c customCollector) Describe(ch chan<- *Desc) {
> // DescribeByCollect(c, ch)
> // }
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
54c54
< // than 64 runes in total.
---
> // than 128 runes in total.
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
22a23,25
>
> "github.com/prometheus/client_golang/prometheus/internal"
>
157c160
< sort.Sort(labelPairSorter(d.constLabelPairs))
---
> sort.Sort(internal.LabelPairSorter(d.constLabelPairs))
Only in b/vendor/github.com/prometheus/client_golang/prometheus: get_pid.go
Only in b/vendor/github.com/prometheus/client_golang/prometheus: get_pid_gopherjs.go
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
21a22,25
> // goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats.
> // From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so
> // while eval closure works on runtime.MemStats, the struct from Go 1.17+ is
> // populated using runtime/metrics.
200,207d203
< }, {
< desc: NewDesc(
< memstatNamespace("gc_cpu_fraction"),
< "The fraction of this program's available CPU time used by the GC since the program started.",
< nil, nil,
< ),
< eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
< valType: GaugeValue,
235c231
< memstatNamespace("last_gc_time_seconds"),
---
> "go_memstats_last_gc_time_seconds",
257,258c253,255
< n, _ := runtime.ThreadCreateProfile(nil)
< ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
---
>
> n := getRuntimeNumThreads()
> ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n)
271d267
<
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
42a43,57
> msMetrics := goRuntimeMemStats()
> msMetrics = append(msMetrics, struct {
> desc *Desc
> eval func(*runtime.MemStats) float64
> valType ValueType
> }{
> // This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
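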
> desc: NewDesc(
> memstatNamespace("gc_cpu_fraction"),
> "The fraction of this program's available CPU time used by the GC since the program started.",
> nil, nil,
> ),
> eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
> valType: GaugeValue,
> })
49c64
< msMetrics: goRuntimeMemStats(),
---
> msMetrics: msMetrics,
Only in a/vendor/github.com/prometheus/client_golang/prometheus: go_collector_go117.go
Only in b/vendor/github.com/prometheus/client_golang/prometheus: go_collector_latest.go
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
583a584
>
588d588
<
Only in b/vendor/github.com/prometheus/client_golang/prometheus/internal: difflib.go
Only in b/vendor/github.com/prometheus/client_golang/prometheus/internal: go_collector_options.go
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
64,66c64,66
< name = name + "_" + unit
< if d.Cumulative {
< name = name + "_total"
---
> name += "_" + unit
> if d.Cumulative && d.Kind != metrics.KindFloat64Histogram {
> name += "_total"
87,88c87,88
< // Rebucket as powers of 2.
< return rebucketExp(buckets, 2)
---
> // Re-bucket as powers of 2.
> return reBucketExp(buckets, 2)
90c90
< // Rebucket as powers of 10 and then merge all buckets greater
---
> // Re-bucket as powers of 10 and then merge all buckets greater
92c92
< b := rebucketExp(buckets, 10)
---
> b := reBucketExp(buckets, 10)
106c106
< // rebucketExp takes a list of bucket boundaries (lower bound inclusive) and
---
> // reBucketExp takes a list of bucket boundaries (lower bound inclusive) and
110c110
< func rebucketExp(buckets []float64, base float64) []float64 {
---
> func reBucketExp(buckets []float64, base float64) []float64 {
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
22,23c22,24
< // metricSorter is a sortable slice of *dto.Metric.
< type metricSorter []*dto.Metric
---
> // LabelPairSorter implements sort.Interface. It is used to sort a slice of
> // dto.LabelPair pointers.
> type LabelPairSorter []*dto.LabelPair
25c26
< func (s metricSorter) Len() int {
---
> func (s LabelPairSorter) Len() int {
29c30
< func (s metricSorter) Swap(i, j int) {
---
> func (s LabelPairSorter) Swap(i, j int) {
33c34,49
< func (s metricSorter) Less(i, j int) bool {
---
> func (s LabelPairSorter) Less(i, j int) bool {
> return s[i].GetName() < s[j].GetName()
> }
>
> // MetricSorter is a sortable slice of *dto.Metric.
> type MetricSorter []*dto.Metric
>
> func (s MetricSorter) Len() int {
> return len(s)
> }
>
> func (s MetricSorter) Swap(i, j int) {
> s[i], s[j] = s[j], s[i]
> }
>
> func (s MetricSorter) Less(i, j int) bool {
71c87
< sort.Sort(metricSorter(mf.Metric))
---
> sort.Sort(MetricSorter(mf.Metric))
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
42c42
< "%s: %q has %d variable labels named %q but %d values %q were provided",
---
> "%w: %q has %d variable labels named %q but %d values %q were provided",
52c52
< "%s: expected %d label values but got %d in %#v",
---
> "%w: expected %d label values but got %d in %#v",
70c70
< "%s: expected %d label values but got %d in %#v",
---
> "%w: expected %d label values but got %d in %#v",
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
16a17,19
> "errors"
> "math"
> "sort"
118,133d120
< // labelPairSorter implements sort.Interface. It is used to sort a slice of
< // dto.LabelPair pointers.
< type labelPairSorter []*dto.LabelPair
<
< func (s labelPairSorter) Len() int {
< return len(s)
< }
<
< func (s labelPairSorter) Swap(i, j int) {
< s[i], s[j] = s[j], s[i]
< }
<
< func (s labelPairSorter) Less(i, j int) bool {
< return s[i].GetName() < s[j].GetName()
< }
<
175a163,255
> }
>
> type withExemplarsMetric struct {
> Metric
>
> exemplars []*dto.Exemplar
> }
>
> func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
> if err := m.Metric.Write(pb); err != nil {
> return err
> }
>
> switch {
> case pb.Counter != nil:
> pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1]
> case pb.Histogram != nil:
> for _, e := range m.exemplars {
> // pb.Histogram.Bucket are sorted by UpperBound.
> i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool {
> return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue()
> })
> if i < len(pb.Histogram.Bucket) {
> pb.Histogram.Bucket[i].Exemplar = e
> } else {
> // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
> b := &dto.Bucket{
> CumulativeCount: proto.Uint64(pb.Histogram.Bucket[len(pb.Histogram.GetBucket())-1].GetCumulativeCount()),
> UpperBound: proto.Float64(math.Inf(1)),
> Exemplar: e,
> }
> pb.Histogram.Bucket = append(pb.Histogram.Bucket, b)
> }
> }
> default:
> // TODO(bwplotka): Implement Gauge?
> return errors.New("cannot inject exemplar into Gauge, Summary or Untyped")
> }
>
> return nil
> }
>
> // Exemplar is easier to use, user-facing representation of *dto.Exemplar.
> type Exemplar struct {
> Value float64
> Labels Labels
> // Optional.
> // Default value (time.Time{}) indicates its empty, which should be
> // understood as time.Now() time at the moment of creation of metric.
> Timestamp time.Time
> }
>
> // NewMetricWithExemplars returns a new Metric wrapping the provided Metric with given
> // exemplars. Exemplars are validated.
> //
> // Only last applicable exemplar is injected from the list.
> // For example for Counter it means last exemplar is injected.
> // For Histogram, it means last applicable exemplar for each bucket is injected.
> //
> // NewMetricWithExemplars works best with MustNewConstMetric and
> // MustNewConstHistogram, see example.
> func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) {
> if len(exemplars) == 0 {
> return nil, errors.New("no exemplar was passed for NewMetricWithExemplars")
> }
>
> var (
> now = time.Now()
> exs = make([]*dto.Exemplar, len(exemplars))
> err error
> )
> for i, e := range exemplars {
> ts := e.Timestamp
> if ts == (time.Time{}) {
> ts = now
> }
> exs[i], err = newExemplar(e.Value, ts, e.Labels)
> if err != nil {
> return nil, err
> }
> }
>
> return &withExemplarsMetric{Metric: m, exemplars: exs}, nil
> }
>
> // MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where
> // NewMetricWithExemplars would have returned an error.
> func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric {
> ret, err := NewMetricWithExemplars(m, exemplars...)
> if err != nil {
> panic(err)
> }
> return ret
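
Note: the doc comment above recommends pairing NewMetricWithExemplars with MustNewConstMetric; a minimal usage sketch of the API added in this hunk, with illustrative metric and label values:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc("requests_total", "Total requests.", nil, nil)
	m := prometheus.MustNewMetricWithExemplars(
		prometheus.MustNewConstMetric(desc, prometheus.CounterValue, 42),
		prometheus.Exemplar{
			Value:  42,
			Labels: prometheus.Labels{"trace_id": "abc123"},
			// Timestamp left zero: treated as time.Now() per the doc above.
		},
	)
	fmt.Println(m.Desc())
}
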
Only in b/vendor/github.com/prometheus/client_golang/prometheus: num_threads.go
Only in b/vendor/github.com/prometheus/client_golang/prometheus: num_threads_gopherjs.go
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
61c61
< // invalid or if the provided labels contain more than 64 runes in total.
---
> // invalid or if the provided labels contain more than 128 runes in total.
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
19d18
< "io/ioutil"
107,108c106
< pid := os.Getpid()
< c.pidFn = func() (int, error) { return pid, nil }
---
> c.pidFn = getPIDFn()
155c153
< content, err := ioutil.ReadFile(pidFilePath)
---
> content, err := os.ReadFile(pidFilePath)
157c155
< return 0, fmt.Errorf("can't read pid file %q: %+v", pidFilePath, err)
---
> return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err)
161c159
< return 0, fmt.Errorf("can't parse pid file %q: %+v", pidFilePath, err)
---
> return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err)
Only in b/vendor/github.com/prometheus/client_golang/prometheus: process_collector_js.go
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
14,15c14,15
< //go:build !windows
< // +build !windows
---
> //go:build !windows && !js
> // +build !windows,!js
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
79,83c79,85
< type closeNotifierDelegator struct{ *responseWriterDelegator }
< type flusherDelegator struct{ *responseWriterDelegator }
< type hijackerDelegator struct{ *responseWriterDelegator }
< type readerFromDelegator struct{ *responseWriterDelegator }
< type pusherDelegator struct{ *responseWriterDelegator }
---
> type (
> closeNotifierDelegator struct{ *responseWriterDelegator }
> flusherDelegator struct{ *responseWriterDelegator }
> hijackerDelegator struct{ *responseWriterDelegator }
> readerFromDelegator struct{ *responseWriterDelegator }
> pusherDelegator struct{ *responseWriterDelegator }
> )
88a91
>
96a100
>
99a104
>
109a115
>
264c270
< pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
---
> pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
35a36
> "errors"
86a88,94
> return HandlerForTransactional(prometheus.ToTransactionalGatherer(reg), opts)
> }
>
> // HandlerForTransactional is like HandlerFor, but it uses transactional gather, which
> // can safely change in-place returned *dto.MetricFamily before call to `Gather` and after
> // call to `done` of that `Gather`.
> func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerOpts) http.Handler {
106c114,115
< if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
---
> are := &prometheus.AlreadyRegisteredError{}
> if errors.As(err, are) {
126c135,136
< mfs, err := reg.Gather()
---
> mfs, done, err := reg.Gather()
> defer done()
245c255,256
< if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
---
> are := &prometheus.AlreadyRegisteredError{}
> if errors.As(err, are) {
257c268,269
< if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
---
> are := &prometheus.AlreadyRegisteredError{}
> if errors.As(err, are) {
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
41c41
< return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
---
> return func(r *http.Request) (*http.Response, error) {
45c45
< })
---
> }
61a62,63
> // Use with WithExemplarFromContext to instrument the exemplars on the counter of requests.
> //
64c66
< rtOpts := &option{}
---
> rtOpts := defaultOptions()
66c68
< o(rtOpts)
---
> o.apply(rtOpts)
71c73
< return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
---
> return func(r *http.Request) (*http.Response, error) {
73a76,80
> exemplarAdd(
> counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
> 1,
> rtOpts.getExemplarFn(r.Context()),
> )
77c84
< })
---
> }
96a104,105
> // Use with WithExemplarFromContext to instrument the exemplars on the duration histograms.
> //
100c109
< rtOpts := &option{}
---
> rtOpts := defaultOptions()
102c111
< o(rtOpts)
---
> o.apply(rtOpts)
107c116
< return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
---
> return func(r *http.Request) (*http.Response, error) {
111c120,124
< obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Observe(time.Since(start).Seconds())
---
> exemplarObserve(
> obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
> time.Since(start).Seconds(),
> rtOpts.getExemplarFn(r.Context()),
> )
114c127
< })
---
> }
152c165
< return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
---
> return func(r *http.Request) (*http.Response, error) {
234c247
< })
---
> }
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
30a31,46
> func exemplarObserve(obs prometheus.Observer, val float64, labels map[string]string) {
> if labels == nil {
> obs.Observe(val)
> return
> }
> obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels)
> }
>
> func exemplarAdd(obs prometheus.Counter, val float64, labels map[string]string) {
> if labels == nil {
> obs.Add(val)
> return
> }
> obs.(prometheus.ExemplarAdder).AddWithExemplar(val, labels)
> }
>
51c67
< //`WithExtraMethods` can be used to add more methods to the set. The Observe
---
> // `WithExtraMethods` can be used to add more methods to the set. The Observe
65c81
< mwOpts := &option{}
---
> hOpts := defaultOptions()
67c83
< o(mwOpts)
---
> o.apply(hOpts)
73c89
< return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
---
> return func(w http.ResponseWriter, r *http.Request) {
78,79c94,99
< obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
< })
---
> exemplarObserve(
> obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
> time.Since(now).Seconds(),
> hOpts.getExemplarFn(r.Context()),
> )
> }
82c102
< return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
---
> return func(w http.ResponseWriter, r *http.Request) {
85,86c105,111
< obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
< })
---
>
> exemplarObserve(
> obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
> time.Since(now).Seconds(),
> hOpts.getExemplarFn(r.Context()),
> )
> }
107c132
< mwOpts := &option{}
---
> hOpts := defaultOptions()
109c134
< o(mwOpts)
---
> o.apply(hOpts)
115c140
< return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
---
> return func(w http.ResponseWriter, r *http.Request) {
118,119c143,149
< counter.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Inc()
< })
---
>
> exemplarAdd(
> counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
> 1,
> hOpts.getExemplarFn(r.Context()),
> )
> }
122c152
< return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
---
> return func(w http.ResponseWriter, r *http.Request) {
124,125c154,159
< counter.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Inc()
< })
---
> exemplarAdd(
> counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
> 1,
> hOpts.getExemplarFn(r.Context()),
> )
> }
151c185
< mwOpts := &option{}
---
> hOpts := defaultOptions()
153c187
< o(mwOpts)
---
> o.apply(hOpts)
158c192
< return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
---
> return func(w http.ResponseWriter, r *http.Request) {
161c195,199
< obs.With(labels(code, method, r.Method, status, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
---
> exemplarObserve(
> obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)),
> time.Since(now).Seconds(),
> hOpts.getExemplarFn(r.Context()),
> )
164c202
< })
---
> }
187c225
< mwOpts := &option{}
---
> hOpts := defaultOptions()
189c227
< o(mwOpts)
---
> o.apply(hOpts)
193d230
<
195c232
< return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
---
> return func(w http.ResponseWriter, r *http.Request) {
199,200c236,241
< obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(size))
< })
---
> exemplarObserve(
> obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
> float64(size),
> hOpts.getExemplarFn(r.Context()),
> )
> }
203c244
< return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
---
> return func(w http.ResponseWriter, r *http.Request) {
206,207c247,252
< obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(float64(size))
< })
---
> exemplarObserve(
> obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
> float64(size),
> hOpts.getExemplarFn(r.Context()),
> )
> }
230c275
< mwOpts := &option{}
---
> hOpts := defaultOptions()
232c277
< o(mwOpts)
---
> o.apply(hOpts)
240c285,289
< obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(d.Written()))
---
> exemplarObserve(
> obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
> float64(d.Written()),
> hOpts.getExemplarFn(r.Context()),
> )
249c298
< func checkLabels(c prometheus.Collector) (code bool, method bool) {
---
> func checkLabels(c prometheus.Collector) (code, method bool) {
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
16,17c16,17
< // Option are used to configure a middleware or round tripper..
< type Option func(*option)
---
> import (
> "context"
19,20c19,34
< type option struct {
< extraMethods []string
---
> "github.com/prometheus/client_golang/prometheus"
> )
>
> // Option are used to configure both handler (middleware) or round tripper.
> type Option interface {
> apply(*options)
> }
>
> // options store options for both a handler or round tripper.
> type options struct {
> extraMethods []string
> getExemplarFn func(requestCtx context.Context) prometheus.Labels
> }
>
> func defaultOptions() *options {
> return &options{getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }}
22a37,40
> type optionApplyFunc func(*options)
>
> func (o optionApplyFunc) apply(opt *options) { o(opt) }
>
28c46
< return func(o *option) {
---
> return optionApplyFunc(func(o *options) {
30c48,57
< }
---
> })
> }
>
> // WithExemplarFromContext adds allows to put a hook to all counter and histogram metrics.
> // If the hook function returns non-nil labels, exemplars will be added for that request, otherwise metric
> // will get instrumented without exemplar.
> func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option {
> return optionApplyFunc(func(o *options) {
> o.getExemplarFn = getExemplarFn
> })
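
Note: WithExemplarFromContext is the hook feeding the exemplarObserve/exemplarAdd calls added in the instrument_client.go and instrument_server.go hunks above. A usage sketch attaching a trace ID from the request context as an exemplar; the context key and label name are illustrative:

package main

import (
	"context"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type traceIDKey struct{}

func main() {
	hist := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{Name: "request_duration_seconds", Help: "Request latency."},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(hist)

	handler := promhttp.InstrumentHandlerDuration(
		hist,
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}),
		promhttp.WithExemplarFromContext(func(ctx context.Context) prometheus.Labels {
			if id, ok := ctx.Value(traceIDKey{}).(string); ok {
				return prometheus.Labels{"trace_id": id} // becomes the exemplar
			}
			return nil // nil labels: observe without an exemplar
		}),
	)
	http.Handle("/", handler)
	_ = http.ListenAndServe(":8080", nil)
}
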
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
17a18
> "errors"
19d19
< "io/ioutil"
292c292
< return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
---
> return fmt.Errorf("descriptor %s is invalid: %w", desc, desc.err)
409a410,417
> r.mtx.RLock()
>
> if len(r.collectorsByID) == 0 && len(r.uncheckedCollectors) == 0 {
> // Fast path.
> r.mtx.RUnlock()
> return nil, nil
> }
>
419d426
< r.mtx.RLock()
559c566
< tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename))
---
> tmp, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename))
578c585
< if err := os.Chmod(tmp.Name(), 0644); err != nil {
---
> if err := os.Chmod(tmp.Name(), 0o644); err != nil {
599c606
< return fmt.Errorf("error collecting metric %v: %s", desc, err)
---
> return fmt.Errorf("error collecting metric %v: %w", desc, err)
721c728,729
< if multiErr, ok := err.(MultiError); ok {
---
> multiErr := MultiError{}
> if errors.As(err, &multiErr) {
723c731
< errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
---
> errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
726c734
< errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
---
> errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
887c895
< if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) {
---
> if !sort.IsSorted(internal.LabelPairSorter(dtoMetric.Label)) {
891c899
< sort.Sort(labelPairSorter(copiedLabels))
---
> sort.Sort(internal.LabelPairSorter(copiedLabels))
938c946
< sort.Sort(labelPairSorter(lpsFromDesc))
---
> sort.Sort(internal.LabelPairSorter(lpsFromDesc))
949a958,1043
> }
>
> var _ TransactionalGatherer = &MultiTRegistry{}
>
> // MultiTRegistry is a TransactionalGatherer that joins gathered metrics from multiple
> // transactional gatherers.
> //
> // It is caller responsibility to ensure two registries have mutually exclusive metric families,
> // no deduplication will happen.
> type MultiTRegistry struct {
> tGatherers []TransactionalGatherer
> }
>
> // NewMultiTRegistry creates MultiTRegistry.
> func NewMultiTRegistry(tGatherers ...TransactionalGatherer) *MultiTRegistry {
> return &MultiTRegistry{
> tGatherers: tGatherers,
> }
> }
>
> // Gather implements TransactionalGatherer interface.
> func (r *MultiTRegistry) Gather() (mfs []*dto.MetricFamily, done func(), err error) {
> errs := MultiError{}
>
> dFns := make([]func(), 0, len(r.tGatherers))
> // TODO(bwplotka): Implement concurrency for those?
> for _, g := range r.tGatherers {
> // TODO(bwplotka): Check for duplicates?
> m, d, err := g.Gather()
> errs.Append(err)
>
> mfs = append(mfs, m...)
> dFns = append(dFns, d)
> }
>
> // TODO(bwplotka): Consider sort in place, given metric family in gather is sorted already.
> sort.Slice(mfs, func(i, j int) bool {
> return *mfs[i].Name < *mfs[j].Name
> })
> return mfs, func() {
> for _, d := range dFns {
> d()
> }
> }, errs.MaybeUnwrap()
> }
>
> // TransactionalGatherer represents transactional gatherer that can be triggered to notify gatherer that memory
> // used by metric family is no longer used by a caller. This allows implementations with cache.
> type TransactionalGatherer interface {
> // Gather returns metrics in a lexicographically sorted slice
> // of uniquely named MetricFamily protobufs. Gather ensures that the
> // returned slice is valid and self-consistent so that it can be used
> // for valid exposition. As an exception to the strict consistency
> // requirements described for metric.Desc, Gather will tolerate
> // different sets of label names for metrics of the same metric family.
> //
> // Even if an error occurs, Gather attempts to gather as many metrics as
> // possible. Hence, if a non-nil error is returned, the returned
> // MetricFamily slice could be nil (in case of a fatal error that
> // prevented any meaningful metric collection) or contain a number of
> // MetricFamily protobufs, some of which might be incomplete, and some
> // might be missing altogether. The returned error (which might be a
> // MultiError) explains the details. Note that this is mostly useful for
> // debugging purposes. If the gathered protobufs are to be used for
> // exposition in actual monitoring, it is almost always better to not
> // expose an incomplete result and instead disregard the returned
> // MetricFamily protobufs in case the returned error is non-nil.
> //
> // Important: done is expected to be triggered (even if the error occurs!)
> // once caller does not need returned slice of dto.MetricFamily.
> Gather() (_ []*dto.MetricFamily, done func(), err error)
> }
>
> // ToTransactionalGatherer transforms Gatherer to transactional one with noop as done function.
> func ToTransactionalGatherer(g Gatherer) TransactionalGatherer {
> return &noTransactionGatherer{g: g}
> }
>
> type noTransactionGatherer struct {
> g Gatherer
> }
>
> // Gather implements TransactionalGatherer interface.
> func (g *noTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) {
> mfs, err := g.g.Gather()
> return mfs, func() {}, err
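
Note: the TransactionalGatherer additions can be exercised end to end with the helpers introduced in this hunk; a short sketch joining two (empty) registries:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	regA := prometheus.NewRegistry()
	regB := prometheus.NewRegistry()

	multi := prometheus.NewMultiTRegistry(
		prometheus.ToTransactionalGatherer(regA),
		prometheus.ToTransactionalGatherer(regB),
	)

	mfs, done, err := multi.Gather()
	defer done() // per the interface doc, call done even when err != nil
	fmt.Println(len(mfs), err)
}
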
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
25a26,27
> "github.com/prometheus/client_golang/prometheus/internal"
>
40a43,59
> var (
> CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }()
> GaugeMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }()
> UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }()
> )
>
> func (v ValueType) ToDTO() *dto.MetricType {
> switch v {
> case CounterValue:
> return CounterMetricTypePtr
> case GaugeValue:
> return GaugeMetricTypePtr
> default:
> return UntypedMetricTypePtr
> }
> }
>
93a113,118
>
> metric := &dto.Metric{}
> if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric); err != nil {
> return nil, err
> }
>
95,98c120,121
< desc: desc,
< valType: valueType,
< val: value,
< labelPairs: MakeLabelPairs(desc, labelValues),
---
> desc: desc,
> metric: metric,
113,116c136,137
< desc *Desc
< valType ValueType
< val float64
< labelPairs []*dto.LabelPair
---
> desc *Desc
> metric *dto.Metric
124c145,149
< return populateMetric(m.valType, m.val, m.labelPairs, nil, out)
---
> out.Label = m.metric.Label
> out.Counter = m.metric.Counter
> out.Gauge = m.metric.Gauge
> out.Untyped = m.metric.Untyped
> return nil
173c198
< sort.Sort(labelPairSorter(labelPairs))
---
> sort.Sort(internal.LabelPairSorter(labelPairs))
178c203
< const ExemplarMaxRunes = 64
---
> const ExemplarMaxRunes = 128
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
101a102,111
> // DeletePartialMatch deletes all metrics where the variable labels contain all of those
> // passed in as labels. The order of the labels does not matter.
> // It returns the number of metrics deleted.
> //
> // Note that curried labels will never be matched if deleting from the curried vector.
> // To match curried labels with DeletePartialMatch, it must be called on the base vector.
> func (m *MetricVec) DeletePartialMatch(labels Labels) int {
> return m.metricMap.deleteByLabels(labels, m.curry)
> }
>
383a394,469
> // deleteByLabels deletes a metric if the given labels are present in the metric.
> func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int {
> m.mtx.Lock()
> defer m.mtx.Unlock()
>
> var numDeleted int
>
> for h, metrics := range m.metrics {
> i := findMetricWithPartialLabels(m.desc, metrics, labels, curry)
> if i >= len(metrics) {
> // Didn't find matching labels in this metric slice.
> continue
> }
> delete(m.metrics, h)
> numDeleted++
> }
>
> return numDeleted
> }
>
> // findMetricWithPartialLabel returns the index of the matching metric or
> // len(metrics) if not found.
> func findMetricWithPartialLabels(
> desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
> ) int {
> for i, metric := range metrics {
> if matchPartialLabels(desc, metric.values, labels, curry) {
> return i
> }
> }
> return len(metrics)
> }
>
> // indexOf searches the given slice of strings for the target string and returns
> // the index or len(items) as well as a boolean whether the search succeeded.
> func indexOf(target string, items []string) (int, bool) {
> for i, l := range items {
> if l == target {
> return i, true
> }
> }
> return len(items), false
> }
>
> // valueMatchesVariableOrCurriedValue determines if a value was previously curried,
> // and returns whether it matches either the "base" value or the curried value accordingly.
> // It also indicates whether the match is against a curried or uncurried value.
> func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) {
> for _, curriedValue := range curry {
> if curriedValue.index == index {
> // This label was curried. See if the curried value matches our target.
> return curriedValue.value == targetValue, true
> }
> }
> // This label was not curried. See if the current value matches our target label.
> return values[index] == targetValue, false
> }
>
> // matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present.
> func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
> for l, v := range labels {
> // Check if the target label exists in our metrics and get the index.
> varLabelIndex, validLabel := indexOf(l, desc.variableLabels)
> if validLabel {
> // Check the value of that label against the target value.
> // We don't consider curried values in partial matches.
> matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry)
> if matches && !curried {
> continue
> }
> }
> return false
> }
> return true
> }
>
488c574
< func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool {
---
> func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool {
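
Note: DeletePartialMatch is promoted onto the concrete vector types (CounterVec, GaugeVec, and so on) through MetricVec. A usage sketch of the matching rule, with illustrative metric and labels:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	vec := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "http_requests_total", Help: "Requests."},
		[]string{"code", "method"},
	)
	vec.WithLabelValues("404", "GET").Inc()
	vec.WithLabelValues("404", "POST").Inc()
	vec.WithLabelValues("200", "GET").Inc()

	// Deletes both 404 children, whatever their method label.
	n := vec.DeletePartialMatch(prometheus.Labels{"code": "404"})
	fmt.Println(n) // 2
}
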
diff -r --color a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
23a24,25
>
> "github.com/prometheus/client_golang/prometheus/internal"
185c187
< sort.Sort(labelPairSorter(out.Label))
---
> sort.Sort(internal.LabelPairSorter(out.Label))
Only in b/vendor/github.com: twmb
Only in b/vendor/github.com: ugorji
diff -r --color a/vendor/github.com/urfave/cli/v2/app.go b/vendor/github.com/urfave/cli/v2/app.go
9a10
> "reflect"
45a47,49
> // DefaultCommand is the (optional) name of a command
> // to run if no command names are passed as CLI arguments.
> DefaultCommand string
76a81,82
> // Execute this function when an invalid flag is accessed from the context
> InvalidFlagAccessHandler InvalidFlagAccessFunc
130d135
< HelpName: filepath.Base(os.Args[0]),
274c279,281
< _ = ShowAppHelp(cCtx)
---
> if !a.HideHelp {
> _ = ShowAppHelp(cCtx)
> }
295c302,304
< _ = ShowAppHelp(cCtx)
---
> if !a.HideHelp {
> _ = ShowAppHelp(cCtx)
> }
298a308,319
> if a.After != nil && !cCtx.shellComplete {
> defer func() {
> if afterErr := a.After(cCtx); afterErr != nil {
> if err != nil {
> err = newMultiError(err, afterErr)
> } else {
> err = afterErr
> }
> }
> }()
> }
>
315,327c336
< if a.After != nil {
< defer func() {
< if afterErr := a.After(cCtx); afterErr != nil {
< if err != nil {
< err = newMultiError(err, afterErr)
< } else {
< err = afterErr
< }
< }
< }()
< }
<
< if a.Before != nil {
---
> if a.Before != nil && !cCtx.shellComplete {
335a345
> var c *Command
339,341c349,376
< c := a.Command(name)
< if c != nil {
< return c.Run(cCtx)
---
> if a.validCommandName(name) {
> c = a.Command(name)
> } else {
> hasDefault := a.DefaultCommand != ""
> isFlagName := checkStringSliceIncludes(name, cCtx.FlagNames())
>
> var (
> isDefaultSubcommand = false
> defaultHasSubcommands = false
> )
>
> if hasDefault {
> dc := a.Command(a.DefaultCommand)
> defaultHasSubcommands = len(dc.Subcommands) > 0
> for _, dcSub := range dc.Subcommands {
> if checkStringSliceIncludes(name, dcSub.Names()) {
> isDefaultSubcommand = true
> break
> }
> }
> }
>
> if isFlagName || (hasDefault && (defaultHasSubcommands && isDefaultSubcommand)) {
> argsWithDefault := a.argsWithDefaultCommand(args)
> if !reflect.DeepEqual(args, argsWithDefault) {
> c = a.Command(argsWithDefault.First())
> }
> }
342a378,383
> } else if a.DefaultCommand != "" {
> c = a.Command(a.DefaultCommand)
> }
>
> if c != nil {
> return c.Run(cCtx)
462c503
< if a.After != nil {
---
> if a.After != nil && !cCtx.shellComplete {
476c517
< if a.Before != nil {
---
> if a.Before != nil && !cCtx.shellComplete {
572a614,648
> func (a *App) commandNames() []string {
> var cmdNames []string
>
> for _, cmd := range a.Commands {
> cmdNames = append(cmdNames, cmd.Names()...)
> }
>
> return cmdNames
> }
>
> func (a *App) validCommandName(checkCmdName string) bool {
> valid := false
> allCommandNames := a.commandNames()
>
> for _, cmdName := range allCommandNames {
> if checkCmdName == cmdName {
> valid = true
> break
> }
> }
>
> return valid
> }
>
> func (a *App) argsWithDefaultCommand(oldArgs Args) Args {
> if a.DefaultCommand != "" {
> rawArgs := append([]string{a.DefaultCommand}, oldArgs.Slice()...)
> newArgs := args(rawArgs)
>
> return &newArgs
> }
>
> return oldArgs
> }
>
603a680,691
> }
>
> func checkStringSliceIncludes(want string, sSlice []string) bool {
> found := false
> for _, s := range sSlice {
> if want == s {
> found = true
> break
> }
> }
>
> return found
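
Note: the app.go hunks add two opt-in App fields, DefaultCommand and InvalidFlagAccessHandler, and skip Before/After hooks during shell completion. A usage sketch of the two new fields, with illustrative command and message:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name:           "tool",
		DefaultCommand: "serve", // run "serve" when no command is named
		InvalidFlagAccessHandler: func(cCtx *cli.Context, name string) {
			fmt.Fprintf(os.Stderr, "flag %q accessed but never defined\n", name)
		},
		Commands: []*cli.Command{
			{Name: "serve", Action: func(*cli.Context) error { return nil }},
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
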
diff -r --color a/vendor/github.com/urfave/cli/v2/cli.go b/vendor/github.com/urfave/cli/v2/cli.go
4,6c4,7
< // func main() {
< // (&cli.App{}).Run(os.Args)
< // }
---
> //
> // func main() {
> // (&cli.App{}).Run(os.Args)
> // }
9,17d9
< // func main() {
< // app := &cli.App{
< // Name: "greet",
< // Usage: "say a greeting",
< // Action: func(c *cli.Context) error {
< // fmt.Println("Greetings")
< // return nil
< // },
< // }
19,20c11,22
< // app.Run(os.Args)
< // }
---
> // func main() {
> // app := &cli.App{
> // Name: "greet",
> // Usage: "say a greeting",
> // Action: func(c *cli.Context) error {
> // fmt.Println("Greetings")
> // return nil
> // },
> // }
> //
> // app.Run(os.Args)
> // }
23c25
< //go:generate go run internal/genflags/cmd/genflags/main.go
---
> //go:generate go run cmd/urfave-cli-genflags/main.go
diff -r --color a/vendor/github.com/urfave/cli/v2/command.go b/vendor/github.com/urfave/cli/v2/command.go
128c128,130
< _ = ShowCommandHelp(cCtx, c.Name)
---
> if !c.HideHelp {
> _ = ShowCommandHelp(cCtx, c.Name)
> }
138c140,142
< _ = ShowCommandHelp(cCtx, c.Name)
---
> if !c.HideHelp {
> _ = ShowCommandHelp(cCtx, c.Name)
> }
diff -r --color a/vendor/github.com/urfave/cli/v2/context.go b/vendor/github.com/urfave/cli/v2/context.go
48a49,51
> if cCtx.flagSet.Lookup(name) == nil {
> cCtx.onInvalidFlag(name)
> }
104a108,117
> // Count returns the num of occurences of this flag
> func (cCtx *Context) Count(name string) int {
> if fs := cCtx.lookupFlagSet(name); fs != nil {
> if cf, ok := fs.Lookup(name).Value.(Countable); ok {
> return cf.Count()
> }
> }
> return 0
> }
>
161c174
<
---
> cCtx.onInvalidFlag(name)
173,175c186
< if len(key) > 1 {
< flagName = key
< }
---
> flagName = key
192a204,213
> }
>
> func (cCtx *Context) onInvalidFlag(name string) {
> for cCtx != nil {
> if cCtx.App != nil && cCtx.App.InvalidFlagAccessHandler != nil {
> cCtx.App.InvalidFlagAccessHandler(cCtx, name)
> break
> }
> cCtx = cCtx.parentContext
> }
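
Note: Context.Count works for any flag value implementing the new Countable interface; with the boolValue changes in the flag_bool.go hunk below, a plain BoolFlag repeated on the command line (-v -v -v) becomes countable. A usage sketch with an illustrative flag:

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			&cli.BoolFlag{Name: "verbose", Aliases: []string{"v"}},
		},
		Action: func(cCtx *cli.Context) error {
			fmt.Println("verbosity:", cCtx.Count("verbose")) // e.g. 3 for -v -v -v
			return nil
		},
	}
	_ = app.Run(os.Args)
}
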
diff -r --color a/vendor/github.com/urfave/cli/v2/flag_bool.go b/vendor/github.com/urfave/cli/v2/flag_bool.go
3a4
> "errors"
8a10,60
> // boolValue needs to implement the boolFlag internal interface in flag
> // to be able to capture bool fields and values
> //
> // type boolFlag interface {
> // Value
> // IsBoolFlag() bool
> // }
> type boolValue struct {
> destination *bool
> count *int
> }
>
> func newBoolValue(val bool, p *bool, count *int) *boolValue {
> *p = val
> return &boolValue{
> destination: p,
> count: count,
> }
> }
>
> func (b *boolValue) Set(s string) error {
> v, err := strconv.ParseBool(s)
> if err != nil {
> err = errors.New("parse error")
> return err
> }
> *b.destination = v
> if b.count != nil {
> *b.count = *b.count + 1
> }
> return err
> }
>
> func (b *boolValue) Get() interface{} { return *b.destination }
>
> func (b *boolValue) String() string {
> if b.destination != nil {
> return strconv.FormatBool(*b.destination)
> }
> return strconv.FormatBool(false)
> }
>
> func (b *boolValue) IsBoolFlag() bool { return true }
>
> func (b *boolValue) Count() int {
> if b.count != nil {
> return *b.count
> }
> return 0
> }
>
54c106,110
< f.HasBeenSet = true
---
> } else {
> // empty value implies that the env is defined but set to empty string, we have to assume that this is
> // what the user wants. If user doesnt want this then the env needs to be deleted or the flag removed from
> // file
> f.Value = false
55a112,122
> f.HasBeenSet = true
> }
>
> count := f.Count
> dest := f.Destination
>
> if count == nil {
> count = new(int)
> }
> if dest == nil {
> dest = new(bool)
59,63c126,127
< if f.Destination != nil {
< set.BoolVar(f.Destination, name, f.Value, f.Usage)
< continue
< }
< set.Bool(name, f.Value, f.Usage)
---
> value := newBoolValue(f.Value, dest, count)
> set.Var(value, name, f.Usage)
diff -r --color a/vendor/github.com/urfave/cli/v2/flag_float64_slice.go b/vendor/github.com/urfave/cli/v2/flag_float64_slice.go
59c59,64
< return fmt.Sprintf("%#v", f.slice)
---
> v := f.slice
> if v == nil {
> // treat nil the same as zero length non-nil
> v = make([]float64, 0)
> }
> return fmt.Sprintf("%#v", v)
81c86
< return withEnvHint(f.GetEnvVars(), stringifyFloat64SliceFlag(f))
---
> return withEnvHint(f.GetEnvVars(), f.stringify())
122a128,144
> // apply any default
> if f.Destination != nil && f.Value != nil {
> f.Destination.slice = make([]float64, len(f.Value.slice))
> copy(f.Destination.slice, f.Value.slice)
> }
>
> // resolve setValue (what we will assign to the set)
> var setValue *Float64Slice
> switch {
> case f.Destination != nil:
> setValue = f.Destination
> case f.Value != nil:
> setValue = f.Value.clone()
> default:
> setValue = new(Float64Slice)
> }
>
125,126d146
< f.Value = &Float64Slice{}
<
128,129c148,149
< if err := f.Value.Set(strings.TrimSpace(s)); err != nil {
< return fmt.Errorf("could not parse %q as float64 slice value from %s for flag %s: %s", f.Value, source, f.Name, err)
---
> if err := setValue.Set(strings.TrimSpace(s)); err != nil {
> return fmt.Errorf("could not parse %q as float64 slice value from %s for flag %s: %s", val, source, f.Name, err)
135c155
< f.Value.hasBeenSet = false
---
> setValue.hasBeenSet = false
140,143d159
< if f.Value == nil {
< f.Value = &Float64Slice{}
< }
< copyValue := f.Value.clone()
145c161
< set.Var(copyValue, name, f.Usage)
---
> set.Var(setValue, name, f.Usage)
155a172,183
> func (f *Float64SliceFlag) stringify() string {
> var defaultVals []string
>
> if f.Value != nil && len(f.Value.Value()) > 0 {
> for _, i := range f.Value.Value() {
> defaultVals = append(defaultVals, strings.TrimRight(strings.TrimRight(fmt.Sprintf("%f", i), "0"), "."))
> }
> }
>
> return stringifySliceFlag(f.Usage, f.Names(), defaultVals)
> }
>
168c196
< if slice, ok := f.Value.(*Float64Slice); ok {
---
> if slice, ok := unwrapFlagValue(f.Value).(*Float64Slice); ok {
diff -r --color a/vendor/github.com/urfave/cli/v2/flag_generic.go b/vendor/github.com/urfave/cli/v2/flag_generic.go
53c53
< func (f GenericFlag) Apply(set *flag.FlagSet) error {
---
> func (f *GenericFlag) Apply(set *flag.FlagSet) error {
diff -r --color a/vendor/github.com/urfave/cli/v2/flag.go b/vendor/github.com/urfave/cli/v2/flag.go
10d9
< "strconv"
142a142,147
> // Countable is an interface to enable detection of flag values which support
> // repetitive flags
> type Countable interface {
> Count() int
> }
>
321,367d325
< func stringifyIntSliceFlag(f *IntSliceFlag) string {
< var defaultVals []string
< if f.Value != nil && len(f.Value.Value()) > 0 {
< for _, i := range f.Value.Value() {
< defaultVals = append(defaultVals, strconv.Itoa(i))
< }
< }
<
< return stringifySliceFlag(f.Usage, f.Names(), defaultVals)
< }
<
< func stringifyInt64SliceFlag(f *Int64SliceFlag) string {
< var defaultVals []string
< if f.Value != nil && len(f.Value.Value()) > 0 {
< for _, i := range f.Value.Value() {
< defaultVals = append(defaultVals, strconv.FormatInt(i, 10))
< }
< }
<
< return stringifySliceFlag(f.Usage, f.Names(), defaultVals)
< }
<
< func stringifyFloat64SliceFlag(f *Float64SliceFlag) string {
< var defaultVals []string
<
< if f.Value != nil && len(f.Value.Value()) > 0 {
< for _, i := range f.Value.Value() {
< defaultVals = append(defaultVals, strings.TrimRight(strings.TrimRight(fmt.Sprintf("%f", i), "0"), "."))
< }
< }
<
< return stringifySliceFlag(f.Usage, f.Names(), defaultVals)
< }
<
< func stringifyStringSliceFlag(f *StringSliceFlag) string {
< var defaultVals []string
< if f.Value != nil && len(f.Value.Value()) > 0 {
< for _, s := range f.Value.Value() {
< if len(s) > 0 {
< defaultVals = append(defaultVals, strconv.Quote(s))
< }
< }
< }
<
< return stringifySliceFlag(f.Usage, f.Names(), defaultVals)
< }
<
380,384c338,339
< multiInputString := "(accepts multiple inputs)"
< if usageWithDefault != "" {
< multiInputString = "\t" + multiInputString
< }
< return fmt.Sprintf("%s\t%s%s", prefixedNames(names, placeholder), usageWithDefault, multiInputString)
---
> pn := prefixedNames(names, placeholder)
> return fmt.Sprintf("%s [ %s ]\t%s", pn, pn, usageWithDefault)
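Two things land in flag.go above: the Countable interface for repeatable flags, and a reworked stringifySliceFlag that prints the name twice in brackets instead of the old "(accepts multiple inputs)" suffix. For the former, a hedged sketch of a counting value built on the standard flag package; boolCount is invented here, not cli's implementation:

package main

import (
    "flag"
    "fmt"
)

// Countable matches the interface added in the hunk above.
type Countable interface {
    Count() int
}

// boolCount is a hypothetical flag.Value that counts repetitions; it is
// the kind of value Countable is meant to detect.
type boolCount int

func (b *boolCount) String() string   { return fmt.Sprint(int(*b)) }
func (b *boolCount) Set(string) error { *b++; return nil }
func (b *boolCount) IsBoolFlag() bool { return true } // lets -v appear without an argument

func (b *boolCount) Count() int { return int(*b) }

func main() {
    fs := flag.NewFlagSet("demo", flag.ContinueOnError)
    var v boolCount
    fs.Var(&v, "v", "verbosity (repeatable)")
    _ = fs.Parse([]string{"-v", "-v", "-v"})
    var c Countable = &v
    fmt.Println("count:", c.Count()) // count: 3
}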
diff -r --color a/vendor/github.com/urfave/cli/v2/flag_int64.go b/vendor/github.com/urfave/cli/v2/flag_int64.go
47c47
< valInt, err := strconv.ParseInt(val, 0, 64)
---
> valInt, err := strconv.ParseInt(val, f.Base, 64)
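Here and in the identical IntFlag hunk below, a hard-coded base of 0 becomes the configurable f.Base. Base 0 tells strconv to infer the radix from a 0x/0o/0b prefix, while an explicit base pins it, so the two accept different inputs:

package main

import (
    "fmt"
    "strconv"
)

func main() {
    // Base 0 infers the radix from the prefix, as the old code did.
    n, _ := strconv.ParseInt("0x10", 0, 64)
    fmt.Println(n) // 16

    // An explicit base rejects prefixed input...
    if _, err := strconv.ParseInt("0x10", 10, 64); err != nil {
        fmt.Println("base 10:", err)
    }

    // ...and reinterprets bare digits.
    m, _ := strconv.ParseInt("10", 16, 64)
    fmt.Println(m) // 16
}

Since the zero value of the new Base field is 0, flags that never set it keep the old auto-detecting behavior.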
diff -r --color a/vendor/github.com/urfave/cli/v2/flag_int64_slice.go b/vendor/github.com/urfave/cli/v2/flag_int64_slice.go
60c60,65
< return fmt.Sprintf("%#v", i.slice)
---
> v := i.slice
> if v == nil {
> // treat nil the same as zero length non-nil
> v = make([]int64, 0)
> }
> return fmt.Sprintf("%#v", v)
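The nil guard above (mirrored for IntSlice further down) exists because %#v renders a nil slice and an empty one differently, and that difference would leak into String() output:

package main

import "fmt"

func main() {
    var nilSlice []int64
    empty := make([]int64, 0)
    fmt.Printf("%#v\n", nilSlice) // []int64(nil)
    fmt.Printf("%#v\n", empty)    // []int64{}
}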
82c87
< return withEnvHint(f.GetEnvVars(), stringifyInt64SliceFlag(f))
---
> return withEnvHint(f.GetEnvVars(), f.stringify())
124,125c129,144
< if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found {
< f.Value = &Int64Slice{}
---
> // apply any default
> if f.Destination != nil && f.Value != nil {
> f.Destination.slice = make([]int64, len(f.Value.slice))
> copy(f.Destination.slice, f.Value.slice)
> }
>
> // resolve setValue (what we will assign to the set)
> var setValue *Int64Slice
> switch {
> case f.Destination != nil:
> setValue = f.Destination
> case f.Value != nil:
> setValue = f.Value.clone()
> default:
> setValue = new(Int64Slice)
> }
126a146
> if val, source, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok && val != "" {
128c148
< if err := f.Value.Set(strings.TrimSpace(s)); err != nil {
---
> if err := setValue.Set(strings.TrimSpace(s)); err != nil {
135c155
< f.Value.hasBeenSet = false
---
> setValue.hasBeenSet = false
139,142d158
< if f.Value == nil {
< f.Value = &Int64Slice{}
< }
< copyValue := f.Value.clone()
144c160
< set.Var(copyValue, name, f.Usage)
---
> set.Var(setValue, name, f.Usage)
154a171,181
> func (f *Int64SliceFlag) stringify() string {
> var defaultVals []string
> if f.Value != nil && len(f.Value.Value()) > 0 {
> for _, i := range f.Value.Value() {
> defaultVals = append(defaultVals, strconv.FormatInt(i, 10))
> }
> }
>
> return stringifySliceFlag(f.Usage, f.Names(), defaultVals)
> }
>
167c194
< if slice, ok := f.Value.(*Int64Slice); ok {
---
> if slice, ok := unwrapFlagValue(f.Value).(*Int64Slice); ok {
diff -r --color a/vendor/github.com/urfave/cli/v2/flag_int.go b/vendor/github.com/urfave/cli/v2/flag_int.go
47c47
< valInt, err := strconv.ParseInt(val, 0, 64)
---
> valInt, err := strconv.ParseInt(val, f.Base, 64)
diff -r --color a/vendor/github.com/urfave/cli/v2/flag_int_slice.go b/vendor/github.com/urfave/cli/v2/flag_int_slice.go
71c71,76
< return fmt.Sprintf("%#v", i.slice)
---
> v := i.slice
> if v == nil {
> // treat nil the same as zero length non-nil
> v = make([]int, 0)
> }
> return fmt.Sprintf("%#v", v)
93c98
< return withEnvHint(f.GetEnvVars(), stringifyIntSliceFlag(f))
---
> return withEnvHint(f.GetEnvVars(), f.stringify())
135,136c140,155
< if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found {
< f.Value = &IntSlice{}
---
> // apply any default
> if f.Destination != nil && f.Value != nil {
> f.Destination.slice = make([]int, len(f.Value.slice))
> copy(f.Destination.slice, f.Value.slice)
> }
>
> // resolve setValue (what we will assign to the set)
> var setValue *IntSlice
> switch {
> case f.Destination != nil:
> setValue = f.Destination
> case f.Value != nil:
> setValue = f.Value.clone()
> default:
> setValue = new(IntSlice)
> }
137a157
> if val, source, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok && val != "" {
139c159
< if err := f.Value.Set(strings.TrimSpace(s)); err != nil {
---
> if err := setValue.Set(strings.TrimSpace(s)); err != nil {
146c166
< f.Value.hasBeenSet = false
---
> setValue.hasBeenSet = false
150,153d169
< if f.Value == nil {
< f.Value = &IntSlice{}
< }
< copyValue := f.Value.clone()
155c171
< set.Var(copyValue, name, f.Usage)
---
> set.Var(setValue, name, f.Usage)
165a182,192
> func (f *IntSliceFlag) stringify() string {
> var defaultVals []string
> if f.Value != nil && len(f.Value.Value()) > 0 {
> for _, i := range f.Value.Value() {
> defaultVals = append(defaultVals, strconv.Itoa(i))
> }
> }
>
> return stringifySliceFlag(f.Usage, f.Names(), defaultVals)
> }
>
178c205
< if slice, ok := f.Value.(*IntSlice); ok {
---
> if slice, ok := unwrapFlagValue(f.Value).(*IntSlice); ok {
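unwrapFlagValue is not defined anywhere in this diff, so its exact shape is unknown here; the sketch below only illustrates why an unwrap step is needed before the type assertion, using an invented wrapper type:

package main

import (
    "flag"
    "fmt"
)

// IntSlice is a stand-in for the cli type.
type IntSlice struct{ slice []int }

func (s *IntSlice) String() string   { return fmt.Sprint(s.slice) }
func (s *IntSlice) Set(string) error { return nil }

// wrapped is a hypothetical wrapper; the real one in urfave/cli is not
// shown in this diff.
type wrapped struct{ inner flag.Value }

func (w *wrapped) String() string     { return w.inner.String() }
func (w *wrapped) Set(v string) error { return w.inner.Set(v) }

func unwrapFlagValue(v flag.Value) flag.Value {
    if w, ok := v.(*wrapped); ok {
        return w.inner
    }
    return v
}

func main() {
    var v flag.Value = &wrapped{inner: &IntSlice{slice: []int{1, 2}}}
    if _, ok := v.(*IntSlice); !ok {
        fmt.Println("direct assertion fails on the wrapper")
    }
    if s, ok := unwrapFlagValue(v).(*IntSlice); ok {
        fmt.Println("unwrapped:", s.slice) // unwrapped: [1 2]
    }
}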
diff -r --color a/vendor/github.com/urfave/cli/v2/flag-spec.yaml b/vendor/github.com/urfave/cli/v2/flag-spec.yaml
2,4c2,3
< # ./internal/genflags/cmd/genflags/main.go which uses the
< # `genflags.Spec` type that maps to this file structure.
<
---
> # ./cmd/urfave-cli-genflags/main.go which uses the
> # `Spec` type that maps to this file structure.
6,17c5
< bool: {}
< float64: {}
< int64: {}
< int: {}
< time.Duration: {}
< uint64: {}
< uint: {}
<
< string:
< struct_fields:
< - { name: TakesFile, type: bool }
< Generic:
---
> bool:
19,23c7,10
< - { name: TakesFile, type: bool }
< Path:
< struct_fields:
< - { name: TakesFile, type: bool }
<
---
> - name: Count
> type: int
> pointer: true
> float64:
27a15,26
> int:
> struct_fields:
> - name: Base
> type: int
> IntSlice:
> value_pointer: true
> skip_interfaces:
> - fmt.Stringer
> int64:
> struct_fields:
> - name: Base
> type: int
32c31,43
< IntSlice:
---
> uint:
> struct_fields:
> - name: Base
> type: int
> UintSlice:
> value_pointer: true
> skip_interfaces:
> - fmt.Stringer
> uint64:
> struct_fields:
> - name: Base
> type: int
> Uint64Slice:
35a47,50
> string:
> struct_fields:
> - name: TakesFile
> type: bool
41c56,58
< - { name: TakesFile, type: bool }
---
> - name: TakesFile
> type: bool
> time.Duration:
45,50c62,74
< - { name: Layout, type: string }
<
< # TODO: enable UintSlice
< # UintSlice: {}
< # TODO: enable Uint64Slice once #1334 lands
< # Uint64Slice: {}
---
> - name: Layout
> type: string
> - name: Timezone
> type: "*time.Location"
> Generic:
> no_destination_pointer: true
> struct_fields:
> - name: TakesFile
> type: bool
> Path:
> struct_fields:
> - name: TakesFile
> type: bool
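Per the rewritten header comment, this file is consumed by ./cmd/urfave-cli-genflags/main.go through a Spec type that maps to this structure. The types below are a guess at that mapping, modeling only keys visible in this diff (struct_fields, value_pointer, skip_interfaces, no_destination_pointer); they are illustrative, not the generator's actual code, and parsing here uses gopkg.in/yaml.v3:

package main

import (
    "fmt"

    "gopkg.in/yaml.v3"
)

// StructField and FlagSpec model only keys visible in the diff above;
// the Go field names are invented for illustration.
type StructField struct {
    Name    string `yaml:"name"`
    Type    string `yaml:"type"`
    Pointer bool   `yaml:"pointer"`
}

type FlagSpec struct {
    StructFields         []StructField `yaml:"struct_fields"`
    ValuePointer         bool          `yaml:"value_pointer"`
    SkipInterfaces       []string      `yaml:"skip_interfaces"`
    NoDestinationPointer bool          `yaml:"no_destination_pointer"`
}

func main() {
    src := []byte(`
IntSlice:
  value_pointer: true
  skip_interfaces:
    - fmt.Stringer
int:
  struct_fields:
    - name: Base
      type: int
`)
    var spec map[string]FlagSpec
    if err := yaml.Unmarshal(src, &spec); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", spec["IntSlice"])
    fmt.Printf("%+v\n", spec["int"])
}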
diff -r --color a/vendor/github.com/urfave/cli/v2/flag_string_slice.go b/vendor/github.com/urfave/cli/v2/flag_string_slice.go
6a7
> "strconv"
76c77
< return withEnvHint(f.GetEnvVars(), stringifyStringSliceFlag(f))
---
> return withEnvHint(f.GetEnvVars(), f.stringify())