This commit is contained in:
22
qwen/nodejs/node_modules/@noble/hashes/src/_assert.ts
generated
vendored
Normal file
@@ -0,0 +1,22 @@
/**
|
||||
* Internal assertion helpers.
|
||||
* @module
|
||||
* @deprecated
|
||||
*/
|
||||
import {
|
||||
abytes as ab,
|
||||
aexists as ae,
|
||||
anumber as an,
|
||||
aoutput as ao,
|
||||
type IHash as H,
|
||||
} from './utils.ts';
|
||||
/** @deprecated Use import from `noble/hashes/utils` module */
|
||||
export const abytes: typeof ab = ab;
|
||||
/** @deprecated Use import from `noble/hashes/utils` module */
|
||||
export const aexists: typeof ae = ae;
|
||||
/** @deprecated Use import from `noble/hashes/utils` module */
|
||||
export const anumber: typeof an = an;
|
||||
/** @deprecated Use import from `noble/hashes/utils` module */
|
||||
export const aoutput: typeof ao = ao;
|
||||
/** @deprecated Use import from `noble/hashes/utils` module */
|
||||
export type Hash = H;
50
qwen/nodejs/node_modules/@noble/hashes/src/_blake.ts
generated
vendored
Normal file
@@ -0,0 +1,50 @@
/**
|
||||
* Internal helpers for blake hash.
|
||||
* @module
|
||||
*/
|
||||
import { rotr } from './utils.ts';
|
||||
|
||||
/**
|
||||
* Internal blake variable.
|
||||
* For BLAKE2b, the two extra permutations for rounds 10 and 11 are SIGMA[10..11] = SIGMA[0..1].
|
||||
*/
|
||||
// prettier-ignore
|
||||
export const BSIGMA: Uint8Array = /* @__PURE__ */ Uint8Array.from([
|
||||
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
|
||||
14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3,
|
||||
11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4,
|
||||
7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8,
|
||||
9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13,
|
||||
2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9,
|
||||
12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11,
|
||||
13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10,
|
||||
6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5,
|
||||
10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0,
|
||||
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
|
||||
14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3,
|
||||
// Blake1, unused in others
|
||||
11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4,
|
||||
7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8,
|
||||
9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13,
|
||||
2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9,
|
||||
]);
|
||||
|
||||
// prettier-ignore
|
||||
export type Num4 = { a: number; b: number; c: number; d: number; };
|
||||
|
||||
// Mixing function G, split into two halves
|
||||
export function G1s(a: number, b: number, c: number, d: number, x: number): Num4 {
|
||||
a = (a + b + x) | 0;
|
||||
d = rotr(d ^ a, 16);
|
||||
c = (c + d) | 0;
|
||||
b = rotr(b ^ c, 12);
|
||||
return { a, b, c, d };
|
||||
}
|
||||
|
||||
export function G2s(a: number, b: number, c: number, d: number, x: number): Num4 {
|
||||
a = (a + b + x) | 0;
|
||||
d = rotr(d ^ a, 8);
|
||||
c = (c + d) | 0;
|
||||
b = rotr(b ^ c, 7);
|
||||
return { a, b, c, d };
|
||||
}
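A minimal sketch of how these two halves are meant to be composed (the helper name Gs below is illustrative, not part of the library): one full BLAKE2s-style quarter-round applies G1s and then G2s, each fed one message word selected via BSIGMA.
import { G1s, G2s, type Num4 } from './_blake.ts';

// One full G step on a quarter of the state: G1s rotates by 16/12, G2s by 8/7.
function Gs(a: number, b: number, c: number, d: number, x: number, y: number): Num4 {
  ({ a, b, c, d } = G1s(a, b, c, d, x));
  ({ a, b, c, d } = G2s(a, b, c, d, y));
  return { a, b, c, d };
}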
176
qwen/nodejs/node_modules/@noble/hashes/src/_md.ts
generated
vendored
Normal file
@@ -0,0 +1,176 @@
/**
|
||||
* Internal Merkle-Damgard hash utils.
|
||||
* @module
|
||||
*/
|
||||
import { type Input, Hash, abytes, aexists, aoutput, clean, createView, toBytes } from './utils.ts';
|
||||
|
||||
/** Polyfill for Safari 14. https://caniuse.com/mdn-javascript_builtins_dataview_setbiguint64 */
|
||||
export function setBigUint64(
|
||||
view: DataView,
|
||||
byteOffset: number,
|
||||
value: bigint,
|
||||
isLE: boolean
|
||||
): void {
|
||||
if (typeof view.setBigUint64 === 'function') return view.setBigUint64(byteOffset, value, isLE);
|
||||
const _32n = BigInt(32);
|
||||
const _u32_max = BigInt(0xffffffff);
|
||||
const wh = Number((value >> _32n) & _u32_max);
|
||||
const wl = Number(value & _u32_max);
|
||||
const h = isLE ? 4 : 0;
|
||||
const l = isLE ? 0 : 4;
|
||||
view.setUint32(byteOffset + h, wh, isLE);
|
||||
view.setUint32(byteOffset + l, wl, isLE);
|
||||
}
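A small usage sketch of the polyfill path (illustrative, not part of the file): the 64-bit value is written as two 32-bit words whose offsets flip with endianness, matching the native DataView method.
import { setBigUint64 } from './_md.ts';

const view = new DataView(new ArrayBuffer(8));
setBigUint64(view, 0, 0x0102030405060708n, false); // big-endian
// bytes are now 01 02 03 04 05 06 07 08; with isLE=true they would be 08 07 06 05 04 03 02 01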
|
||||
|
||||
/** Choice: a ? b : c */
|
||||
export function Chi(a: number, b: number, c: number): number {
|
||||
return (a & b) ^ (~a & c);
|
||||
}
|
||||
|
||||
/** Majority function: true if at least two of the three inputs are true. */
|
||||
export function Maj(a: number, b: number, c: number): number {
|
||||
return (a & b) ^ (a & c) ^ (b & c);
|
||||
}
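For illustration (not part of the file): Chi picks bits of b where a is 1 and bits of c where a is 0, while Maj returns the per-bit majority of its three inputs.
import { Chi, Maj } from './_md.ts';

console.log(Chi(0b1100, 0b1010, 0b0110).toString(2)); // '1010'
console.log(Maj(0b1100, 0b1010, 0b0110).toString(2)); // '1110'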
|
||||
|
||||
/**
|
||||
* Merkle-Damgard hash construction base class.
|
||||
* Could be used to create MD5, RIPEMD, SHA1, SHA2.
|
||||
*/
|
||||
export abstract class HashMD<T extends HashMD<T>> extends Hash<T> {
|
||||
protected abstract process(buf: DataView, offset: number): void;
|
||||
protected abstract get(): number[];
|
||||
protected abstract set(...args: number[]): void;
|
||||
abstract destroy(): void;
|
||||
protected abstract roundClean(): void;
|
||||
|
||||
readonly blockLen: number;
|
||||
readonly outputLen: number;
|
||||
readonly padOffset: number;
|
||||
readonly isLE: boolean;
|
||||
|
||||
// For partial updates less than block size
|
||||
protected buffer: Uint8Array;
|
||||
protected view: DataView;
|
||||
protected finished = false;
|
||||
protected length = 0;
|
||||
protected pos = 0;
|
||||
protected destroyed = false;
|
||||
|
||||
constructor(blockLen: number, outputLen: number, padOffset: number, isLE: boolean) {
|
||||
super();
|
||||
this.blockLen = blockLen;
|
||||
this.outputLen = outputLen;
|
||||
this.padOffset = padOffset;
|
||||
this.isLE = isLE;
|
||||
this.buffer = new Uint8Array(blockLen);
|
||||
this.view = createView(this.buffer);
|
||||
}
|
||||
update(data: Input): this {
|
||||
aexists(this);
|
||||
data = toBytes(data);
|
||||
abytes(data);
|
||||
const { view, buffer, blockLen } = this;
|
||||
const len = data.length;
|
||||
for (let pos = 0; pos < len; ) {
|
||||
const take = Math.min(blockLen - this.pos, len - pos);
|
||||
// Fast path: we have at least one block in input, cast it to view and process
|
||||
if (take === blockLen) {
|
||||
const dataView = createView(data);
|
||||
for (; blockLen <= len - pos; pos += blockLen) this.process(dataView, pos);
|
||||
continue;
|
||||
}
|
||||
buffer.set(data.subarray(pos, pos + take), this.pos);
|
||||
this.pos += take;
|
||||
pos += take;
|
||||
if (this.pos === blockLen) {
|
||||
this.process(view, 0);
|
||||
this.pos = 0;
|
||||
}
|
||||
}
|
||||
this.length += data.length;
|
||||
this.roundClean();
|
||||
return this;
|
||||
}
|
||||
digestInto(out: Uint8Array): void {
|
||||
aexists(this);
|
||||
aoutput(out, this);
|
||||
this.finished = true;
|
||||
// Padding
|
||||
// We could avoid allocating a buffer for padding entirely if it had not
// already been allocated here, but it would not change performance.
|
||||
const { buffer, view, blockLen, isLE } = this;
|
||||
let { pos } = this;
|
||||
// append the bit '1' to the message
|
||||
buffer[pos++] = 0b10000000;
|
||||
clean(this.buffer.subarray(pos));
|
||||
// we have less than padOffset bytes left in the buffer, so we cannot put the length into
// the current block; we need to process it and pad again
|
||||
if (this.padOffset > blockLen - pos) {
|
||||
this.process(view, 0);
|
||||
pos = 0;
|
||||
}
|
||||
// Pad until full block byte with zeros
|
||||
for (let i = pos; i < blockLen; i++) buffer[i] = 0;
|
||||
// Note: sha512 requires the length to be a 128-bit integer, but a JS number overflows long before that.
// You would need to hash around 2 exabytes (u64_max / 8 / (1024**6)) for this to matter,
// so we just write the lowest 64 bits of that value.
|
||||
setBigUint64(view, blockLen - 8, BigInt(this.length * 8), isLE);
|
||||
this.process(view, 0);
|
||||
const oview = createView(out);
|
||||
const len = this.outputLen;
|
||||
// NOTE: we do division by 4 below; the JIT should fuse it with the modulo into a single op
|
||||
if (len % 4) throw new Error('_sha2: outputLen should be aligned to 32bit');
|
||||
const outLen = len / 4;
|
||||
const state = this.get();
|
||||
if (outLen > state.length) throw new Error('_sha2: outputLen bigger than state');
|
||||
for (let i = 0; i < outLen; i++) oview.setUint32(4 * i, state[i], isLE);
|
||||
}
|
||||
digest(): Uint8Array {
|
||||
const { buffer, outputLen } = this;
|
||||
this.digestInto(buffer);
|
||||
const res = buffer.slice(0, outputLen);
|
||||
this.destroy();
|
||||
return res;
|
||||
}
|
||||
_cloneInto(to?: T): T {
|
||||
to ||= new (this.constructor as any)() as T;
|
||||
to.set(...this.get());
|
||||
const { blockLen, buffer, length, finished, destroyed, pos } = this;
|
||||
to.destroyed = destroyed;
|
||||
to.finished = finished;
|
||||
to.length = length;
|
||||
to.pos = pos;
|
||||
if (length % blockLen) to.buffer.set(buffer);
|
||||
return to;
|
||||
}
|
||||
clone(): T {
|
||||
return this._cloneInto();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Initial SHA-2 state: fractional parts of square roots of first 16 primes 2..53.
|
||||
* Check out `test/misc/sha2-gen-iv.js` for recomputation guide.
|
||||
*/
|
||||
|
||||
/** Initial SHA256 state. Bits 0..32 of frac part of sqrt of primes 2..19 */
|
||||
export const SHA256_IV: Uint32Array = /* @__PURE__ */ Uint32Array.from([
|
||||
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
|
||||
]);
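A sketch of how these IV words can be recomputed (the frac32 helper below is illustrative, not from the library): each SHA256_IV word is the first 32 bits of the fractional part of the square root of one of the first 8 primes.
const frac32 = (p: number): number => Math.floor((Math.sqrt(p) % 1) * 2 ** 32) >>> 0;
console.log(frac32(2).toString(16)); // '6a09e667', matches SHA256_IV[0]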
|
||||
|
||||
/** Initial SHA224 state. Bits 32..64 of frac part of sqrt of primes 23..53 */
|
||||
export const SHA224_IV: Uint32Array = /* @__PURE__ */ Uint32Array.from([
|
||||
0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939, 0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4,
|
||||
]);
|
||||
|
||||
/** Initial SHA384 state. Bits 0..64 of frac part of sqrt of primes 23..53 */
|
||||
export const SHA384_IV: Uint32Array = /* @__PURE__ */ Uint32Array.from([
|
||||
0xcbbb9d5d, 0xc1059ed8, 0x629a292a, 0x367cd507, 0x9159015a, 0x3070dd17, 0x152fecd8, 0xf70e5939,
|
||||
0x67332667, 0xffc00b31, 0x8eb44a87, 0x68581511, 0xdb0c2e0d, 0x64f98fa7, 0x47b5481d, 0xbefa4fa4,
|
||||
]);
|
||||
|
||||
/** Initial SHA512 state. Bits 0..64 of frac part of sqrt of primes 2..19 */
|
||||
export const SHA512_IV: Uint32Array = /* @__PURE__ */ Uint32Array.from([
|
||||
0x6a09e667, 0xf3bcc908, 0xbb67ae85, 0x84caa73b, 0x3c6ef372, 0xfe94f82b, 0xa54ff53a, 0x5f1d36f1,
|
||||
0x510e527f, 0xade682d1, 0x9b05688c, 0x2b3e6c1f, 0x1f83d9ab, 0xfb41bd6b, 0x5be0cd19, 0x137e2179,
|
||||
]);
91
qwen/nodejs/node_modules/@noble/hashes/src/_u64.ts
generated
vendored
Normal file
@@ -0,0 +1,91 @@
/**
|
||||
 * Internal helpers for u64. BigUint64Array is too slow as of 2025, so we implement it using Uint32Array.
|
||||
* @todo re-check https://issues.chromium.org/issues/42212588
|
||||
* @module
|
||||
*/
|
||||
const U32_MASK64 = /* @__PURE__ */ BigInt(2 ** 32 - 1);
|
||||
const _32n = /* @__PURE__ */ BigInt(32);
|
||||
|
||||
function fromBig(
|
||||
n: bigint,
|
||||
le = false
|
||||
): {
|
||||
h: number;
|
||||
l: number;
|
||||
} {
|
||||
if (le) return { h: Number(n & U32_MASK64), l: Number((n >> _32n) & U32_MASK64) };
|
||||
return { h: Number((n >> _32n) & U32_MASK64) | 0, l: Number(n & U32_MASK64) | 0 };
|
||||
}
|
||||
|
||||
function split(lst: bigint[], le = false): Uint32Array[] {
|
||||
const len = lst.length;
|
||||
let Ah = new Uint32Array(len);
|
||||
let Al = new Uint32Array(len);
|
||||
for (let i = 0; i < len; i++) {
|
||||
const { h, l } = fromBig(lst[i], le);
|
||||
[Ah[i], Al[i]] = [h, l];
|
||||
}
|
||||
return [Ah, Al];
|
||||
}
|
||||
|
||||
const toBig = (h: number, l: number): bigint => (BigInt(h >>> 0) << _32n) | BigInt(l >>> 0);
|
||||
// for Shift in [0, 32)
|
||||
const shrSH = (h: number, _l: number, s: number): number => h >>> s;
|
||||
const shrSL = (h: number, l: number, s: number): number => (h << (32 - s)) | (l >>> s);
|
||||
// Right rotate for Shift in [1, 32)
|
||||
const rotrSH = (h: number, l: number, s: number): number => (h >>> s) | (l << (32 - s));
|
||||
const rotrSL = (h: number, l: number, s: number): number => (h << (32 - s)) | (l >>> s);
|
||||
// Right rotate for Shift in (32, 64), NOTE: 32 is special case.
|
||||
const rotrBH = (h: number, l: number, s: number): number => (h << (64 - s)) | (l >>> (s - 32));
|
||||
const rotrBL = (h: number, l: number, s: number): number => (h >>> (s - 32)) | (l << (64 - s));
|
||||
// Right rotate for shift===32 (just swaps l&h)
|
||||
const rotr32H = (_h: number, l: number): number => l;
|
||||
const rotr32L = (h: number, _l: number): number => h;
|
||||
// Left rotate for Shift in [1, 32)
|
||||
const rotlSH = (h: number, l: number, s: number): number => (h << s) | (l >>> (32 - s));
|
||||
const rotlSL = (h: number, l: number, s: number): number => (l << s) | (h >>> (32 - s));
|
||||
// Left rotate for Shift in (32, 64), NOTE: 32 is special case.
|
||||
const rotlBH = (h: number, l: number, s: number): number => (l << (s - 32)) | (h >>> (64 - s));
|
||||
const rotlBL = (h: number, l: number, s: number): number => (h << (s - 32)) | (l >>> (64 - s));
|
||||
|
||||
// JS uses 32-bit signed integers for bitwise operations, which means we cannot
// simply take the carry out of the low-word sum with a shift; we need division.
|
||||
function add(
|
||||
Ah: number,
|
||||
Al: number,
|
||||
Bh: number,
|
||||
Bl: number
|
||||
): {
|
||||
h: number;
|
||||
l: number;
|
||||
} {
|
||||
const l = (Al >>> 0) + (Bl >>> 0);
|
||||
return { h: (Ah + Bh + ((l / 2 ** 32) | 0)) | 0, l: l | 0 };
|
||||
}
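A small check of the (high, low) pair representation (illustrative, not part of the file): add() recovers the carry from the unsigned low-word sum via division rather than a shift, and the result round-trips through fromBig/toBig.
import { add, fromBig, toBig } from './_u64.ts';

const a = fromBig(0xffffffffffffffffn);
const b = fromBig(1n);
const { h, l } = add(a.h, a.l, b.h, b.l);
console.log(toBig(h, l) === 0n); // true: (2^64 - 1) + 1 wraps to 0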
|
||||
// Addition with more than 2 elements
|
||||
const add3L = (Al: number, Bl: number, Cl: number): number => (Al >>> 0) + (Bl >>> 0) + (Cl >>> 0);
|
||||
const add3H = (low: number, Ah: number, Bh: number, Ch: number): number =>
|
||||
(Ah + Bh + Ch + ((low / 2 ** 32) | 0)) | 0;
|
||||
const add4L = (Al: number, Bl: number, Cl: number, Dl: number): number =>
|
||||
(Al >>> 0) + (Bl >>> 0) + (Cl >>> 0) + (Dl >>> 0);
|
||||
const add4H = (low: number, Ah: number, Bh: number, Ch: number, Dh: number): number =>
|
||||
(Ah + Bh + Ch + Dh + ((low / 2 ** 32) | 0)) | 0;
|
||||
const add5L = (Al: number, Bl: number, Cl: number, Dl: number, El: number): number =>
|
||||
(Al >>> 0) + (Bl >>> 0) + (Cl >>> 0) + (Dl >>> 0) + (El >>> 0);
|
||||
const add5H = (low: number, Ah: number, Bh: number, Ch: number, Dh: number, Eh: number): number =>
|
||||
(Ah + Bh + Ch + Dh + Eh + ((low / 2 ** 32) | 0)) | 0;
|
||||
|
||||
// prettier-ignore
|
||||
export {
|
||||
add, add3H, add3L, add4H, add4L, add5H, add5L, fromBig, rotlBH, rotlBL, rotlSH, rotlSL, rotr32H, rotr32L, rotrBH, rotrBL, rotrSH, rotrSL, shrSH, shrSL, split, toBig
|
||||
};
|
||||
// prettier-ignore
|
||||
const u64: { fromBig: typeof fromBig; split: typeof split; toBig: (h: number, l: number) => bigint; shrSH: (h: number, _l: number, s: number) => number; shrSL: (h: number, l: number, s: number) => number; rotrSH: (h: number, l: number, s: number) => number; rotrSL: (h: number, l: number, s: number) => number; rotrBH: (h: number, l: number, s: number) => number; rotrBL: (h: number, l: number, s: number) => number; rotr32H: (_h: number, l: number) => number; rotr32L: (h: number, _l: number) => number; rotlSH: (h: number, l: number, s: number) => number; rotlSL: (h: number, l: number, s: number) => number; rotlBH: (h: number, l: number, s: number) => number; rotlBL: (h: number, l: number, s: number) => number; add: typeof add; add3L: (Al: number, Bl: number, Cl: number) => number; add3H: (low: number, Ah: number, Bh: number, Ch: number) => number; add4L: (Al: number, Bl: number, Cl: number, Dl: number) => number; add4H: (low: number, Ah: number, Bh: number, Ch: number, Dh: number) => number; add5H: (low: number, Ah: number, Bh: number, Ch: number, Dh: number, Eh: number) => number; add5L: (Al: number, Bl: number, Cl: number, Dl: number, El: number) => number; } = {
|
||||
fromBig, split, toBig,
|
||||
shrSH, shrSL,
|
||||
rotrSH, rotrSL, rotrBH, rotrBL,
|
||||
rotr32H, rotr32L,
|
||||
rotlSH, rotlSL, rotlBH, rotlBL,
|
||||
add, add3L, add3H, add4L, add4H, add5H, add5L,
|
||||
};
|
||||
export default u64;
497
qwen/nodejs/node_modules/@noble/hashes/src/argon2.ts
generated
vendored
Normal file
@@ -0,0 +1,497 @@
/**
|
||||
* Argon2 KDF from RFC 9106. Can be used to create a key from password and salt.
|
||||
 * We suggest using Scrypt instead. JS Argon2 is 2-10x slower than native code because of 64-bit arithmetic:
 * * argon2 uses uint64, but JS doesn't have a fast uint64 array
 * * uint64 multiplication takes about 1/3 of the time
 * * the `P` function would be much nicer with u64, because most values would stay in registers;
 *   however, with u32 it requires 32 registers, which is too many
 * * JS arrays do slow bounds checks, so reading from `A2_BUF` slows it down
|
||||
* @module
|
||||
*/
|
||||
import { add3H, add3L, rotr32H, rotr32L, rotrBH, rotrBL, rotrSH, rotrSL } from './_u64.ts';
|
||||
import { blake2b } from './blake2.ts';
|
||||
import { abytes, clean, kdfInputToBytes, nextTick, u32, u8, type KDFInput } from './utils.ts';
|
||||
|
||||
const AT = { Argond2d: 0, Argon2i: 1, Argon2id: 2 } as const;
|
||||
type Types = (typeof AT)[keyof typeof AT];
|
||||
|
||||
const ARGON2_SYNC_POINTS = 4;
|
||||
const abytesOrZero = (buf?: KDFInput) => {
|
||||
if (buf === undefined) return Uint8Array.of();
|
||||
return kdfInputToBytes(buf);
|
||||
};
|
||||
|
||||
// u32 * u32 = u64
|
||||
function mul(a: number, b: number) {
|
||||
const aL = a & 0xffff;
|
||||
const aH = a >>> 16;
|
||||
const bL = b & 0xffff;
|
||||
const bH = b >>> 16;
|
||||
const ll = Math.imul(aL, bL);
|
||||
const hl = Math.imul(aH, bL);
|
||||
const lh = Math.imul(aL, bH);
|
||||
const hh = Math.imul(aH, bH);
|
||||
const carry = (ll >>> 16) + (hl & 0xffff) + lh;
|
||||
const high = (hh + (hl >>> 16) + (carry >>> 16)) | 0;
|
||||
const low = (carry << 16) | (ll & 0xffff);
|
||||
return { h: high, l: low };
|
||||
}
|
||||
|
||||
function mul2(a: number, b: number) {
|
||||
// 2 * a * b (via shifts)
|
||||
const { h, l } = mul(a, b);
|
||||
return { h: ((h << 1) | (l >>> 31)) & 0xffff_ffff, l: (l << 1) & 0xffff_ffff };
|
||||
}
|
||||
|
||||
// BlaMka permutation for Argon2
|
||||
// A + B + (2 * u32(A) * u32(B))
|
||||
function blamka(Ah: number, Al: number, Bh: number, Bl: number) {
|
||||
const { h: Ch, l: Cl } = mul2(Al, Bl);
|
||||
// A + B + (2 * A * B)
|
||||
const Rll = add3L(Al, Bl, Cl);
|
||||
return { h: add3H(Rll, Ah, Bh, Ch), l: Rll | 0 };
|
||||
}
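For clarity (illustrative only, blamkaBig below is not part of the library): the BlaMka permutation replaces BLAKE2's plain addition with A + B + 2 * lo32(A) * lo32(B) (mod 2^64), which is what blamka() computes on (high, low) pairs above.
const MASK64 = (1n << 64n) - 1n;
const lo32 = (x: bigint): bigint => x & 0xffffffffn;
const blamkaBig = (A: bigint, B: bigint): bigint => (A + B + 2n * lo32(A) * lo32(B)) & MASK64;
console.log(blamkaBig(3n, 5n)); // 38n = 3 + 5 + 2*3*5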
|
||||
|
||||
// Temporary block buffer
|
||||
const A2_BUF = new Uint32Array(256); // 1024 bytes (matrix 16x16)
|
||||
|
||||
function G(a: number, b: number, c: number, d: number) {
|
||||
let Al = A2_BUF[2*a], Ah = A2_BUF[2*a + 1]; // prettier-ignore
|
||||
let Bl = A2_BUF[2*b], Bh = A2_BUF[2*b + 1]; // prettier-ignore
|
||||
let Cl = A2_BUF[2*c], Ch = A2_BUF[2*c + 1]; // prettier-ignore
|
||||
let Dl = A2_BUF[2*d], Dh = A2_BUF[2*d + 1]; // prettier-ignore
|
||||
|
||||
({ h: Ah, l: Al } = blamka(Ah, Al, Bh, Bl));
|
||||
({ Dh, Dl } = { Dh: Dh ^ Ah, Dl: Dl ^ Al });
|
||||
({ Dh, Dl } = { Dh: rotr32H(Dh, Dl), Dl: rotr32L(Dh, Dl) });
|
||||
|
||||
({ h: Ch, l: Cl } = blamka(Ch, Cl, Dh, Dl));
|
||||
({ Bh, Bl } = { Bh: Bh ^ Ch, Bl: Bl ^ Cl });
|
||||
({ Bh, Bl } = { Bh: rotrSH(Bh, Bl, 24), Bl: rotrSL(Bh, Bl, 24) });
|
||||
|
||||
({ h: Ah, l: Al } = blamka(Ah, Al, Bh, Bl));
|
||||
({ Dh, Dl } = { Dh: Dh ^ Ah, Dl: Dl ^ Al });
|
||||
({ Dh, Dl } = { Dh: rotrSH(Dh, Dl, 16), Dl: rotrSL(Dh, Dl, 16) });
|
||||
|
||||
({ h: Ch, l: Cl } = blamka(Ch, Cl, Dh, Dl));
|
||||
({ Bh, Bl } = { Bh: Bh ^ Ch, Bl: Bl ^ Cl });
|
||||
({ Bh, Bl } = { Bh: rotrBH(Bh, Bl, 63), Bl: rotrBL(Bh, Bl, 63) });
|
||||
|
||||
(A2_BUF[2 * a] = Al), (A2_BUF[2 * a + 1] = Ah);
|
||||
(A2_BUF[2 * b] = Bl), (A2_BUF[2 * b + 1] = Bh);
|
||||
(A2_BUF[2 * c] = Cl), (A2_BUF[2 * c + 1] = Ch);
|
||||
(A2_BUF[2 * d] = Dl), (A2_BUF[2 * d + 1] = Dh);
|
||||
}
|
||||
|
||||
// prettier-ignore
|
||||
function P(
|
||||
v00: number, v01: number, v02: number, v03: number, v04: number, v05: number, v06: number, v07: number,
|
||||
v08: number, v09: number, v10: number, v11: number, v12: number, v13: number, v14: number, v15: number,
|
||||
) {
|
||||
G(v00, v04, v08, v12);
|
||||
G(v01, v05, v09, v13);
|
||||
G(v02, v06, v10, v14);
|
||||
G(v03, v07, v11, v15);
|
||||
G(v00, v05, v10, v15);
|
||||
G(v01, v06, v11, v12);
|
||||
G(v02, v07, v08, v13);
|
||||
G(v03, v04, v09, v14);
|
||||
}
|
||||
|
||||
function block(x: Uint32Array, xPos: number, yPos: number, outPos: number, needXor: boolean) {
|
||||
for (let i = 0; i < 256; i++) A2_BUF[i] = x[xPos + i] ^ x[yPos + i];
|
||||
// columns (8)
|
||||
for (let i = 0; i < 128; i += 16) {
|
||||
// prettier-ignore
|
||||
P(
|
||||
i, i + 1, i + 2, i + 3, i + 4, i + 5, i + 6, i + 7,
|
||||
i + 8, i + 9, i + 10, i + 11, i + 12, i + 13, i + 14, i + 15
|
||||
);
|
||||
}
|
||||
// rows (8)
|
||||
for (let i = 0; i < 16; i += 2) {
|
||||
// prettier-ignore
|
||||
P(
|
||||
i, i + 1, i + 16, i + 17, i + 32, i + 33, i + 48, i + 49,
|
||||
i + 64, i + 65, i + 80, i + 81, i + 96, i + 97, i + 112, i + 113
|
||||
);
|
||||
}
|
||||
|
||||
if (needXor) for (let i = 0; i < 256; i++) x[outPos + i] ^= A2_BUF[i] ^ x[xPos + i] ^ x[yPos + i];
|
||||
else for (let i = 0; i < 256; i++) x[outPos + i] = A2_BUF[i] ^ x[xPos + i] ^ x[yPos + i];
|
||||
clean(A2_BUF);
|
||||
}
|
||||
|
||||
// Variable-Length Hash Function H'
|
||||
function Hp(A: Uint32Array, dkLen: number) {
|
||||
const A8 = u8(A);
|
||||
const T = new Uint32Array(1);
|
||||
const T8 = u8(T);
|
||||
T[0] = dkLen;
|
||||
// Fast path
|
||||
if (dkLen <= 64) return blake2b.create({ dkLen }).update(T8).update(A8).digest();
|
||||
const out = new Uint8Array(dkLen);
|
||||
let V = blake2b.create({}).update(T8).update(A8).digest();
|
||||
let pos = 0;
|
||||
// First block
|
||||
out.set(V.subarray(0, 32));
|
||||
pos += 32;
|
||||
// Rest blocks
|
||||
for (; dkLen - pos > 64; pos += 32) {
|
||||
const Vh = blake2b.create({}).update(V);
|
||||
Vh.digestInto(V);
|
||||
Vh.destroy();
|
||||
out.set(V.subarray(0, 32), pos);
|
||||
}
|
||||
// Last block
|
||||
out.set(blake2b(V, { dkLen: dkLen - pos }), pos);
|
||||
clean(V, T);
|
||||
return u32(out);
|
||||
}
|
||||
|
||||
// Used only inside process block!
|
||||
function indexAlpha(
|
||||
r: number,
|
||||
s: number,
|
||||
laneLen: number,
|
||||
segmentLen: number,
|
||||
index: number,
|
||||
randL: number,
|
||||
sameLane: boolean = false
|
||||
) {
|
||||
// This is ugly, but close enough to the reference implementation.
|
||||
let area: number;
|
||||
if (r === 0) {
|
||||
if (s === 0) area = index - 1;
|
||||
else if (sameLane) area = s * segmentLen + index - 1;
|
||||
else area = s * segmentLen + (index == 0 ? -1 : 0);
|
||||
} else if (sameLane) area = laneLen - segmentLen + index - 1;
|
||||
else area = laneLen - segmentLen + (index == 0 ? -1 : 0);
|
||||
const startPos = r !== 0 && s !== ARGON2_SYNC_POINTS - 1 ? (s + 1) * segmentLen : 0;
|
||||
const rel = area - 1 - mul(area, mul(randL, randL).h).h;
|
||||
return (startPos + rel) % laneLen;
|
||||
}
|
||||
|
||||
/**
|
||||
* Argon2 options.
|
||||
* * t: time cost, m: mem cost in kb, p: parallelization.
|
||||
* * key: optional key. personalization: arbitrary extra data.
|
||||
* * dkLen: desired number of output bytes.
|
||||
*/
|
||||
export type ArgonOpts = {
|
||||
t: number; // Time cost, iterations count
|
||||
m: number; // Memory cost (in KB)
|
||||
p: number; // Parallelization parameter
|
||||
version?: number; // Default: 0x13 (19)
|
||||
key?: KDFInput; // Optional key
|
||||
personalization?: KDFInput; // Optional arbitrary extra data
|
||||
dkLen?: number; // Desired number of returned bytes
|
||||
asyncTick?: number; // Maximum time in ms for which async function can block execution
|
||||
maxmem?: number;
|
||||
onProgress?: (progress: number) => void;
|
||||
};
|
||||
|
||||
const maxUint32 = Math.pow(2, 32);
|
||||
function isU32(num: number) {
|
||||
return Number.isSafeInteger(num) && num >= 0 && num < maxUint32;
|
||||
}
|
||||
|
||||
function argon2Opts(opts: ArgonOpts) {
|
||||
const merged: any = {
|
||||
version: 0x13,
|
||||
dkLen: 32,
|
||||
maxmem: maxUint32 - 1,
|
||||
asyncTick: 10,
|
||||
};
|
||||
for (let [k, v] of Object.entries(opts)) if (v != null) merged[k] = v;
|
||||
|
||||
const { dkLen, p, m, t, version, onProgress } = merged;
|
||||
if (!isU32(dkLen) || dkLen < 4) throw new Error('dkLen should be at least 4 bytes');
|
||||
if (!isU32(p) || p < 1 || p >= Math.pow(2, 24)) throw new Error('p should be 1 <= p < 2^24');
|
||||
if (!isU32(m)) throw new Error('m should be 0 <= m < 2^32');
|
||||
if (!isU32(t) || t < 1) throw new Error('t (iterations) should be 1 <= t < 2^32');
|
||||
if (onProgress !== undefined && typeof onProgress !== 'function')
|
||||
throw new Error('progressCb should be function');
|
||||
/*
|
||||
Memory size m MUST be an integer number of kibibytes from 8*p to 2^(32)-1. The actual number of blocks is m', which is m rounded down to the nearest multiple of 4*p.
|
||||
*/
|
||||
if (!isU32(m) || m < 8 * p) throw new Error('memory should be at least 8*p bytes');
|
||||
if (version !== 0x10 && version !== 0x13) throw new Error('unknown version=' + version);
|
||||
return merged;
|
||||
}
|
||||
|
||||
function argon2Init(password: KDFInput, salt: KDFInput, type: Types, opts: ArgonOpts) {
|
||||
password = kdfInputToBytes(password);
|
||||
salt = kdfInputToBytes(salt);
|
||||
abytes(password);
|
||||
abytes(salt);
|
||||
if (!isU32(password.length)) throw new Error('password should be less than 4 GB');
|
||||
if (!isU32(salt.length) || salt.length < 8)
|
||||
throw new Error('salt should be at least 8 bytes and less than 4 GB');
|
||||
if (!Object.values(AT).includes(type)) throw new Error('invalid type');
|
||||
let { p, dkLen, m, t, version, key, personalization, maxmem, onProgress, asyncTick } =
|
||||
argon2Opts(opts);
|
||||
|
||||
// Validation
|
||||
key = abytesOrZero(key);
|
||||
personalization = abytesOrZero(personalization);
|
||||
// H_0 = H^(64)(LE32(p) || LE32(T) || LE32(m) || LE32(t) ||
|
||||
// LE32(v) || LE32(y) || LE32(length(P)) || P ||
|
||||
// LE32(length(S)) || S || LE32(length(K)) || K ||
|
||||
// LE32(length(X)) || X)
|
||||
const h = blake2b.create({});
|
||||
const BUF = new Uint32Array(1);
|
||||
const BUF8 = u8(BUF);
|
||||
for (let item of [p, dkLen, m, t, version, type]) {
|
||||
BUF[0] = item;
|
||||
h.update(BUF8);
|
||||
}
|
||||
for (let i of [password, salt, key, personalization]) {
|
||||
BUF[0] = i.length; // BUF is u32 array, this is valid
|
||||
h.update(BUF8).update(i);
|
||||
}
|
||||
const H0 = new Uint32Array(18);
|
||||
const H0_8 = u8(H0);
|
||||
h.digestInto(H0_8);
|
||||
// 256 u32 = 1024 (BLOCK_SIZE), fills A2_BUF on processing
|
||||
|
||||
// Params
|
||||
const lanes = p;
|
||||
// m' = 4 * p * floor (m / 4p)
|
||||
const mP = 4 * p * Math.floor(m / (ARGON2_SYNC_POINTS * p));
|
||||
//q = m' / p columns
|
||||
const laneLen = Math.floor(mP / p);
|
||||
const segmentLen = Math.floor(laneLen / ARGON2_SYNC_POINTS);
|
||||
const memUsed = mP * 256;
|
||||
if (!isU32(maxmem) || memUsed > maxmem)
|
||||
throw new Error(
|
||||
'mem should be less than 2**32, got: maxmem=' + maxmem + ', memused=' + memUsed
|
||||
);
|
||||
const B = new Uint32Array(memUsed);
|
||||
// Fill first blocks
|
||||
for (let l = 0; l < p; l++) {
|
||||
const i = 256 * laneLen * l;
|
||||
// B[i][0] = H'^(1024)(H_0 || LE32(0) || LE32(i))
|
||||
H0[17] = l;
|
||||
H0[16] = 0;
|
||||
B.set(Hp(H0, 1024), i);
|
||||
// B[i][1] = H'^(1024)(H_0 || LE32(1) || LE32(i))
|
||||
H0[16] = 1;
|
||||
B.set(Hp(H0, 1024), i + 256);
|
||||
}
|
||||
let perBlock = () => {};
|
||||
if (onProgress) {
|
||||
const totalBlock = t * ARGON2_SYNC_POINTS * p * segmentLen;
|
||||
// Invoke the callback when progress changes by ~0.01% (e.g. from 10.01% to 10.02%).
// This allows drawing a smooth progress bar even on an 8K screen.
|
||||
const callbackPer = Math.max(Math.floor(totalBlock / 10000), 1);
|
||||
let blockCnt = 0;
|
||||
perBlock = () => {
|
||||
blockCnt++;
|
||||
if (onProgress && (!(blockCnt % callbackPer) || blockCnt === totalBlock))
|
||||
onProgress(blockCnt / totalBlock);
|
||||
};
|
||||
}
|
||||
clean(BUF, H0);
|
||||
return { type, mP, p, t, version, B, laneLen, lanes, segmentLen, dkLen, perBlock, asyncTick };
|
||||
}
|
||||
|
||||
function argon2Output(B: Uint32Array, p: number, laneLen: number, dkLen: number) {
|
||||
const B_final = new Uint32Array(256);
|
||||
for (let l = 0; l < p; l++)
|
||||
for (let j = 0; j < 256; j++) B_final[j] ^= B[256 * (laneLen * l + laneLen - 1) + j];
|
||||
const res = u8(Hp(B_final, dkLen));
|
||||
clean(B_final);
|
||||
return res;
|
||||
}
|
||||
|
||||
function processBlock(
|
||||
B: Uint32Array,
|
||||
address: Uint32Array,
|
||||
l: number,
|
||||
r: number,
|
||||
s: number,
|
||||
index: number,
|
||||
laneLen: number,
|
||||
segmentLen: number,
|
||||
lanes: number,
|
||||
offset: number,
|
||||
prev: number,
|
||||
dataIndependent: boolean,
|
||||
needXor: boolean
|
||||
) {
|
||||
if (offset % laneLen) prev = offset - 1;
|
||||
let randL, randH;
|
||||
if (dataIndependent) {
|
||||
let i128 = index % 128;
|
||||
if (i128 === 0) {
|
||||
address[256 + 12]++;
|
||||
block(address, 256, 2 * 256, 0, false);
|
||||
block(address, 0, 2 * 256, 0, false);
|
||||
}
|
||||
randL = address[2 * i128];
|
||||
randH = address[2 * i128 + 1];
|
||||
} else {
|
||||
const T = 256 * prev;
|
||||
randL = B[T];
|
||||
randH = B[T + 1];
|
||||
}
|
||||
// address block
|
||||
const refLane = r === 0 && s === 0 ? l : randH % lanes;
|
||||
const refPos = indexAlpha(r, s, laneLen, segmentLen, index, randL, refLane == l);
|
||||
const refBlock = laneLen * refLane + refPos;
|
||||
// B[i][j] = G(B[i][j-1], B[l][z])
|
||||
block(B, 256 * prev, 256 * refBlock, offset * 256, needXor);
|
||||
}
|
||||
|
||||
function argon2(type: Types, password: KDFInput, salt: KDFInput, opts: ArgonOpts) {
|
||||
const { mP, p, t, version, B, laneLen, lanes, segmentLen, dkLen, perBlock } = argon2Init(
|
||||
password,
|
||||
salt,
|
||||
type,
|
||||
opts
|
||||
);
|
||||
// Pre-loop setup
|
||||
// [address, input, zero_block] format so we can pass single U32 to block function
|
||||
const address = new Uint32Array(3 * 256);
|
||||
address[256 + 6] = mP;
|
||||
address[256 + 8] = t;
|
||||
address[256 + 10] = type;
|
||||
for (let r = 0; r < t; r++) {
|
||||
const needXor = r !== 0 && version === 0x13;
|
||||
address[256 + 0] = r;
|
||||
for (let s = 0; s < ARGON2_SYNC_POINTS; s++) {
|
||||
address[256 + 4] = s;
|
||||
const dataIndependent = type == AT.Argon2i || (type == AT.Argon2id && r === 0 && s < 2);
|
||||
for (let l = 0; l < p; l++) {
|
||||
address[256 + 2] = l;
|
||||
address[256 + 12] = 0;
|
||||
let startPos = 0;
|
||||
if (r === 0 && s === 0) {
|
||||
startPos = 2;
|
||||
if (dataIndependent) {
|
||||
address[256 + 12]++;
|
||||
block(address, 256, 2 * 256, 0, false);
|
||||
block(address, 0, 2 * 256, 0, false);
|
||||
}
|
||||
}
|
||||
// current block position
|
||||
let offset = l * laneLen + s * segmentLen + startPos;
|
||||
// previous block position
|
||||
let prev = offset % laneLen ? offset - 1 : offset + laneLen - 1;
|
||||
for (let index = startPos; index < segmentLen; index++, offset++, prev++) {
|
||||
perBlock();
|
||||
processBlock(
|
||||
B,
|
||||
address,
|
||||
l,
|
||||
r,
|
||||
s,
|
||||
index,
|
||||
laneLen,
|
||||
segmentLen,
|
||||
lanes,
|
||||
offset,
|
||||
prev,
|
||||
dataIndependent,
|
||||
needXor
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
clean(address);
|
||||
return argon2Output(B, p, laneLen, dkLen);
|
||||
}
|
||||
|
||||
/** argon2d GPU-resistant version. */
|
||||
export const argon2d = (password: KDFInput, salt: KDFInput, opts: ArgonOpts): Uint8Array =>
|
||||
argon2(AT.Argond2d, password, salt, opts);
|
||||
/** argon2i side-channel-resistant version. */
|
||||
export const argon2i = (password: KDFInput, salt: KDFInput, opts: ArgonOpts): Uint8Array =>
|
||||
argon2(AT.Argon2i, password, salt, opts);
|
||||
/** argon2id, combining i+d, the most popular version from RFC 9106 */
|
||||
export const argon2id = (password: KDFInput, salt: KDFInput, opts: ArgonOpts): Uint8Array =>
|
||||
argon2(AT.Argon2id, password, salt, opts);
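A usage sketch (illustrative only, not a recommendation baked into this module): the parameters below are close to RFC 9106's second recommended profile of t=3 passes, m=64 MiB, p=4 lanes.
import { argon2id } from './argon2.ts';

const key = argon2id('my-password', 'salt-of-8+-bytes', { t: 3, m: 65536, p: 4, dkLen: 32 });
// key is a 32-byte Uint8Array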
|
||||
|
||||
async function argon2Async(type: Types, password: KDFInput, salt: KDFInput, opts: ArgonOpts) {
|
||||
const { mP, p, t, version, B, laneLen, lanes, segmentLen, dkLen, perBlock, asyncTick } =
|
||||
argon2Init(password, salt, type, opts);
|
||||
// Pre-loop setup
|
||||
// [address, input, zero_block] format so we can pass single U32 to block function
|
||||
const address = new Uint32Array(3 * 256);
|
||||
address[256 + 6] = mP;
|
||||
address[256 + 8] = t;
|
||||
address[256 + 10] = type;
|
||||
let ts = Date.now();
|
||||
for (let r = 0; r < t; r++) {
|
||||
const needXor = r !== 0 && version === 0x13;
|
||||
address[256 + 0] = r;
|
||||
for (let s = 0; s < ARGON2_SYNC_POINTS; s++) {
|
||||
address[256 + 4] = s;
|
||||
const dataIndependent = type == AT.Argon2i || (type == AT.Argon2id && r === 0 && s < 2);
|
||||
for (let l = 0; l < p; l++) {
|
||||
address[256 + 2] = l;
|
||||
address[256 + 12] = 0;
|
||||
let startPos = 0;
|
||||
if (r === 0 && s === 0) {
|
||||
startPos = 2;
|
||||
if (dataIndependent) {
|
||||
address[256 + 12]++;
|
||||
block(address, 256, 2 * 256, 0, false);
|
||||
block(address, 0, 2 * 256, 0, false);
|
||||
}
|
||||
}
|
||||
// current block position
|
||||
let offset = l * laneLen + s * segmentLen + startPos;
|
||||
// previous block position
|
||||
let prev = offset % laneLen ? offset - 1 : offset + laneLen - 1;
|
||||
for (let index = startPos; index < segmentLen; index++, offset++, prev++) {
|
||||
perBlock();
|
||||
processBlock(
|
||||
B,
|
||||
address,
|
||||
l,
|
||||
r,
|
||||
s,
|
||||
index,
|
||||
laneLen,
|
||||
segmentLen,
|
||||
lanes,
|
||||
offset,
|
||||
prev,
|
||||
dataIndependent,
|
||||
needXor
|
||||
);
|
||||
// Date.now() is not monotonic, so if the clock goes backwards we yield control as well
|
||||
const diff = Date.now() - ts;
|
||||
if (!(diff >= 0 && diff < asyncTick)) {
|
||||
await nextTick();
|
||||
ts += diff;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
clean(address);
|
||||
return argon2Output(B, p, laneLen, dkLen);
|
||||
}
|
||||
|
||||
/** argon2d async GPU-resistant version. */
|
||||
export const argon2dAsync = (
|
||||
password: KDFInput,
|
||||
salt: KDFInput,
|
||||
opts: ArgonOpts
|
||||
): Promise<Uint8Array> => argon2Async(AT.Argond2d, password, salt, opts);
|
||||
/** argon2i async side-channel-resistant version. */
|
||||
export const argon2iAsync = (
|
||||
password: KDFInput,
|
||||
salt: KDFInput,
|
||||
opts: ArgonOpts
|
||||
): Promise<Uint8Array> => argon2Async(AT.Argon2i, password, salt, opts);
|
||||
/** argon2id async, combining i+d, the most popular version from RFC 9106 */
|
||||
export const argon2idAsync = (
|
||||
password: KDFInput,
|
||||
salt: KDFInput,
|
||||
opts: ArgonOpts
|
||||
): Promise<Uint8Array> => argon2Async(AT.Argon2id, password, salt, opts);
534
qwen/nodejs/node_modules/@noble/hashes/src/blake1.ts
generated
vendored
Normal file
@@ -0,0 +1,534 @@
/**
|
||||
* Blake1 legacy hash function, one of SHA3 proposals.
|
||||
* Rarely used. Check out blake2 or blake3 instead.
|
||||
* https://www.aumasson.jp/blake/blake.pdf
|
||||
*
|
||||
* In the best case, there are 0 allocations.
|
||||
*
|
||||
* Differences from blake2:
|
||||
*
|
||||
* - BE instead of LE
|
||||
* - Paddings, similar to MD5, RIPEMD, SHA1, SHA2, but:
|
||||
* - length flag is located before actual length
|
||||
* - padding block is compressed differently (no lengths)
|
||||
* Instead of msg[sigma[k]], we have `msg[sigma[k]] ^ constants[sigma[k-1]]`
|
||||
* (-1 for g1, g2 without -1)
|
||||
* - Salt is XOR-ed into constants instead of state
|
||||
* - Salt is XOR-ed with output in `compress`
|
||||
* - Additional rows (+64 bytes) in SIGMA for new rounds
|
||||
* - Different round count:
|
||||
* - 14 / 10 rounds in blake256 / blake2s
|
||||
* - 16 / 12 rounds in blake512 / blake2b
|
||||
* - blake512: G1b: rotr 24 -> 25, G2b: rotr 63 -> 11
|
||||
* @module
|
||||
*/
|
||||
import { BSIGMA, G1s, G2s } from './_blake.ts';
|
||||
import { setBigUint64, SHA224_IV, SHA256_IV, SHA384_IV, SHA512_IV } from './_md.ts';
|
||||
import * as u64 from './_u64.ts';
|
||||
// prettier-ignore
|
||||
import {
|
||||
abytes, aexists, aoutput,
|
||||
clean, createOptHasher,
|
||||
createView, Hash, toBytes,
|
||||
type CHashO, type Input,
|
||||
} from './utils.ts';
|
||||
|
||||
/** Blake1 options. Basically just "salt" */
|
||||
export type BlakeOpts = {
|
||||
salt?: Uint8Array;
|
||||
};
|
||||
|
||||
// Empty zero-filled salt
|
||||
const EMPTY_SALT = /* @__PURE__ */ new Uint32Array(8);
|
||||
|
||||
abstract class BLAKE1<T extends BLAKE1<T>> extends Hash<T> {
|
||||
protected finished = false;
|
||||
protected length = 0;
|
||||
protected pos = 0;
|
||||
protected destroyed = false;
|
||||
// For partial updates less than block size
|
||||
protected buffer: Uint8Array;
|
||||
protected view: DataView;
|
||||
protected salt: Uint32Array;
|
||||
abstract compress(view: DataView, offset: number, withLength?: boolean): void;
|
||||
protected abstract get(): number[];
|
||||
protected abstract set(...args: number[]): void;
|
||||
|
||||
readonly blockLen: number;
|
||||
readonly outputLen: number;
|
||||
private lengthFlag: number;
|
||||
private counterLen: number;
|
||||
protected constants: Uint32Array;
|
||||
|
||||
constructor(
|
||||
blockLen: number,
|
||||
outputLen: number,
|
||||
lengthFlag: number,
|
||||
counterLen: number,
|
||||
saltLen: number,
|
||||
constants: Uint32Array,
|
||||
opts: BlakeOpts = {}
|
||||
) {
|
||||
super();
|
||||
const { salt } = opts;
|
||||
this.blockLen = blockLen;
|
||||
this.outputLen = outputLen;
|
||||
this.lengthFlag = lengthFlag;
|
||||
this.counterLen = counterLen;
|
||||
this.buffer = new Uint8Array(blockLen);
|
||||
this.view = createView(this.buffer);
|
||||
if (salt) {
|
||||
let slt = salt;
|
||||
slt = toBytes(slt);
|
||||
abytes(slt);
|
||||
if (slt.length !== 4 * saltLen) throw new Error('wrong salt length');
|
||||
const salt32 = (this.salt = new Uint32Array(saltLen));
|
||||
const sv = createView(slt);
|
||||
this.constants = constants.slice();
|
||||
for (let i = 0, offset = 0; i < salt32.length; i++, offset += 4) {
|
||||
salt32[i] = sv.getUint32(offset, false);
|
||||
this.constants[i] ^= salt32[i];
|
||||
}
|
||||
} else {
|
||||
this.salt = EMPTY_SALT;
|
||||
this.constants = constants;
|
||||
}
|
||||
}
|
||||
update(data: Input): this {
|
||||
aexists(this);
|
||||
data = toBytes(data);
|
||||
abytes(data);
|
||||
// From _md, but update length before each compress
|
||||
const { view, buffer, blockLen } = this;
|
||||
const len = data.length;
|
||||
let dataView;
|
||||
for (let pos = 0; pos < len; ) {
|
||||
const take = Math.min(blockLen - this.pos, len - pos);
|
||||
// Fast path: we have at least one block in input, cast it to view and process
|
||||
if (take === blockLen) {
|
||||
if (!dataView) dataView = createView(data);
|
||||
for (; blockLen <= len - pos; pos += blockLen) {
|
||||
this.length += blockLen;
|
||||
this.compress(dataView, pos);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
buffer.set(data.subarray(pos, pos + take), this.pos);
|
||||
this.pos += take;
|
||||
pos += take;
|
||||
if (this.pos === blockLen) {
|
||||
this.length += blockLen;
|
||||
this.compress(view, 0, true);
|
||||
this.pos = 0;
|
||||
}
|
||||
}
|
||||
return this;
|
||||
}
|
||||
destroy(): void {
|
||||
this.destroyed = true;
|
||||
if (this.salt !== EMPTY_SALT) {
|
||||
clean(this.salt, this.constants);
|
||||
}
|
||||
}
|
||||
_cloneInto(to?: T): T {
|
||||
to ||= new (this.constructor as any)() as T;
|
||||
to.set(...this.get());
|
||||
const { buffer, length, finished, destroyed, constants, salt, pos } = this;
|
||||
to.buffer.set(buffer);
|
||||
to.constants = constants.slice();
|
||||
to.destroyed = destroyed;
|
||||
to.finished = finished;
|
||||
to.length = length;
|
||||
to.pos = pos;
|
||||
to.salt = salt.slice();
|
||||
return to;
|
||||
}
|
||||
clone(): T {
|
||||
return this._cloneInto();
|
||||
}
|
||||
digestInto(out: Uint8Array): void {
|
||||
aexists(this);
|
||||
aoutput(out, this);
|
||||
this.finished = true;
|
||||
// Padding
|
||||
const { buffer, blockLen, counterLen, lengthFlag, view } = this;
|
||||
clean(buffer.subarray(this.pos)); // clean buf
|
||||
const counter = BigInt((this.length + this.pos) * 8);
|
||||
const counterPos = blockLen - counterLen - 1;
|
||||
buffer[this.pos] |= 0b1000_0000; // End block flag
|
||||
this.length += this.pos; // add unwritten length
|
||||
// Not enough in buffer for length: write what we have.
|
||||
if (this.pos > counterPos) {
|
||||
this.compress(view, 0);
|
||||
clean(buffer);
|
||||
this.pos = 0;
|
||||
}
|
||||
// Difference with md: here we have lengthFlag!
|
||||
buffer[counterPos] |= lengthFlag; // Length flag
|
||||
// We always write the 8-byte length, because the JS length counter overflows long before 2^64.
|
||||
setBigUint64(view, blockLen - 8, counter, false);
|
||||
this.compress(view, 0, this.pos !== 0); // don't add length if length is not empty block?
|
||||
// Write output
|
||||
clean(buffer);
|
||||
const v = createView(out);
|
||||
const state = this.get();
|
||||
for (let i = 0; i < this.outputLen / 4; ++i) v.setUint32(i * 4, state[i]);
|
||||
}
|
||||
digest(): Uint8Array {
|
||||
const { buffer, outputLen } = this;
|
||||
this.digestInto(buffer);
|
||||
const res = buffer.slice(0, outputLen);
|
||||
this.destroy();
|
||||
return res;
|
||||
}
|
||||
}
|
||||
|
||||
// Constants
|
||||
const B64C = /* @__PURE__ */ Uint32Array.from([
|
||||
0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89,
|
||||
0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917,
|
||||
0x9216d5d9, 0x8979fb1b, 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96,
|
||||
0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, 0x636920d8, 0x71574e69,
|
||||
]);
|
||||
// first half of C512
|
||||
const B32C = B64C.slice(0, 16);
|
||||
|
||||
const B256_IV = SHA256_IV.slice();
|
||||
const B224_IV = SHA224_IV.slice();
|
||||
const B384_IV = SHA384_IV.slice();
|
||||
const B512_IV = SHA512_IV.slice();
|
||||
|
||||
function generateTBL256() {
|
||||
const TBL = [];
|
||||
for (let i = 0, j = 0; i < 14; i++, j += 16) {
|
||||
for (let offset = 1; offset < 16; offset += 2) {
|
||||
TBL.push(B32C[BSIGMA[j + offset]]);
|
||||
TBL.push(B32C[BSIGMA[j + offset - 1]]);
|
||||
}
|
||||
}
|
||||
return new Uint32Array(TBL);
|
||||
}
|
||||
const TBL256 = /* @__PURE__ */ generateTBL256(); // C256[SIGMA[X]] precompute
|
||||
|
||||
// Reusable temporary buffer
|
||||
const BLAKE256_W = /* @__PURE__ */ new Uint32Array(16);
|
||||
|
||||
class Blake1_32 extends BLAKE1<Blake1_32> {
|
||||
private v0: number;
|
||||
private v1: number;
|
||||
private v2: number;
|
||||
private v3: number;
|
||||
private v4: number;
|
||||
private v5: number;
|
||||
private v6: number;
|
||||
private v7: number;
|
||||
constructor(outputLen: number, IV: Uint32Array, lengthFlag: number, opts: BlakeOpts = {}) {
|
||||
super(64, outputLen, lengthFlag, 8, 4, B32C, opts);
|
||||
this.v0 = IV[0] | 0;
|
||||
this.v1 = IV[1] | 0;
|
||||
this.v2 = IV[2] | 0;
|
||||
this.v3 = IV[3] | 0;
|
||||
this.v4 = IV[4] | 0;
|
||||
this.v5 = IV[5] | 0;
|
||||
this.v6 = IV[6] | 0;
|
||||
this.v7 = IV[7] | 0;
|
||||
}
|
||||
protected get(): [number, number, number, number, number, number, number, number] {
|
||||
const { v0, v1, v2, v3, v4, v5, v6, v7 } = this;
|
||||
return [v0, v1, v2, v3, v4, v5, v6, v7];
|
||||
}
|
||||
// prettier-ignore
|
||||
protected set(
|
||||
v0: number, v1: number, v2: number, v3: number, v4: number, v5: number, v6: number, v7: number
|
||||
): void {
|
||||
this.v0 = v0 | 0;
|
||||
this.v1 = v1 | 0;
|
||||
this.v2 = v2 | 0;
|
||||
this.v3 = v3 | 0;
|
||||
this.v4 = v4 | 0;
|
||||
this.v5 = v5 | 0;
|
||||
this.v6 = v6 | 0;
|
||||
this.v7 = v7 | 0;
|
||||
}
|
||||
destroy(): void {
|
||||
super.destroy();
|
||||
this.set(0, 0, 0, 0, 0, 0, 0, 0);
|
||||
}
|
||||
compress(view: DataView, offset: number, withLength = true): void {
|
||||
for (let i = 0; i < 16; i++, offset += 4) BLAKE256_W[i] = view.getUint32(offset, false);
|
||||
// NOTE: we cannot re-use compress from blake2s, since there is additional xor over u256[SIGMA[e]]
|
||||
let v00 = this.v0 | 0;
|
||||
let v01 = this.v1 | 0;
|
||||
let v02 = this.v2 | 0;
|
||||
let v03 = this.v3 | 0;
|
||||
let v04 = this.v4 | 0;
|
||||
let v05 = this.v5 | 0;
|
||||
let v06 = this.v6 | 0;
|
||||
let v07 = this.v7 | 0;
|
||||
let v08 = this.constants[0] | 0;
|
||||
let v09 = this.constants[1] | 0;
|
||||
let v10 = this.constants[2] | 0;
|
||||
let v11 = this.constants[3] | 0;
|
||||
const { h, l } = u64.fromBig(BigInt(withLength ? this.length * 8 : 0));
|
||||
let v12 = (this.constants[4] ^ l) >>> 0;
|
||||
let v13 = (this.constants[5] ^ l) >>> 0;
|
||||
let v14 = (this.constants[6] ^ h) >>> 0;
|
||||
let v15 = (this.constants[7] ^ h) >>> 0;
|
||||
// prettier-ignore
|
||||
for (let i = 0, k = 0, j = 0; i < 14; i++) {
|
||||
({ a: v00, b: v04, c: v08, d: v12 } = G1s(v00, v04, v08, v12, BLAKE256_W[BSIGMA[k++]] ^ TBL256[j++]));
|
||||
({ a: v00, b: v04, c: v08, d: v12 } = G2s(v00, v04, v08, v12, BLAKE256_W[BSIGMA[k++]] ^ TBL256[j++]));
|
||||
({ a: v01, b: v05, c: v09, d: v13 } = G1s(v01, v05, v09, v13, BLAKE256_W[BSIGMA[k++]] ^ TBL256[j++]));
|
||||
({ a: v01, b: v05, c: v09, d: v13 } = G2s(v01, v05, v09, v13, BLAKE256_W[BSIGMA[k++]] ^ TBL256[j++]));
|
||||
({ a: v02, b: v06, c: v10, d: v14 } = G1s(v02, v06, v10, v14, BLAKE256_W[BSIGMA[k++]] ^ TBL256[j++]));
|
||||
({ a: v02, b: v06, c: v10, d: v14 } = G2s(v02, v06, v10, v14, BLAKE256_W[BSIGMA[k++]] ^ TBL256[j++]));
|
||||
({ a: v03, b: v07, c: v11, d: v15 } = G1s(v03, v07, v11, v15, BLAKE256_W[BSIGMA[k++]] ^ TBL256[j++]));
|
||||
({ a: v03, b: v07, c: v11, d: v15 } = G2s(v03, v07, v11, v15, BLAKE256_W[BSIGMA[k++]] ^ TBL256[j++]));
|
||||
({ a: v00, b: v05, c: v10, d: v15 } = G1s(v00, v05, v10, v15, BLAKE256_W[BSIGMA[k++]] ^ TBL256[j++]));
|
||||
({ a: v00, b: v05, c: v10, d: v15 } = G2s(v00, v05, v10, v15, BLAKE256_W[BSIGMA[k++]] ^ TBL256[j++]));
|
||||
({ a: v01, b: v06, c: v11, d: v12 } = G1s(v01, v06, v11, v12, BLAKE256_W[BSIGMA[k++]] ^ TBL256[j++]));
|
||||
({ a: v01, b: v06, c: v11, d: v12 } = G2s(v01, v06, v11, v12, BLAKE256_W[BSIGMA[k++]] ^ TBL256[j++]));
|
||||
({ a: v02, b: v07, c: v08, d: v13 } = G1s(v02, v07, v08, v13, BLAKE256_W[BSIGMA[k++]] ^ TBL256[j++]));
|
||||
({ a: v02, b: v07, c: v08, d: v13 } = G2s(v02, v07, v08, v13, BLAKE256_W[BSIGMA[k++]] ^ TBL256[j++]));
|
||||
({ a: v03, b: v04, c: v09, d: v14 } = G1s(v03, v04, v09, v14, BLAKE256_W[BSIGMA[k++]] ^ TBL256[j++]));
|
||||
({ a: v03, b: v04, c: v09, d: v14 } = G2s(v03, v04, v09, v14, BLAKE256_W[BSIGMA[k++]] ^ TBL256[j++]));
|
||||
}
|
||||
this.v0 = (this.v0 ^ v00 ^ v08 ^ this.salt[0]) >>> 0;
|
||||
this.v1 = (this.v1 ^ v01 ^ v09 ^ this.salt[1]) >>> 0;
|
||||
this.v2 = (this.v2 ^ v02 ^ v10 ^ this.salt[2]) >>> 0;
|
||||
this.v3 = (this.v3 ^ v03 ^ v11 ^ this.salt[3]) >>> 0;
|
||||
this.v4 = (this.v4 ^ v04 ^ v12 ^ this.salt[0]) >>> 0;
|
||||
this.v5 = (this.v5 ^ v05 ^ v13 ^ this.salt[1]) >>> 0;
|
||||
this.v6 = (this.v6 ^ v06 ^ v14 ^ this.salt[2]) >>> 0;
|
||||
this.v7 = (this.v7 ^ v07 ^ v15 ^ this.salt[3]) >>> 0;
|
||||
clean(BLAKE256_W);
|
||||
}
|
||||
}
|
||||
|
||||
const BBUF = /* @__PURE__ */ new Uint32Array(32);
|
||||
const BLAKE512_W = /* @__PURE__ */ new Uint32Array(32);
|
||||
|
||||
function generateTBL512() {
|
||||
const TBL = [];
|
||||
for (let r = 0, k = 0; r < 16; r++, k += 16) {
|
||||
for (let offset = 1; offset < 16; offset += 2) {
|
||||
TBL.push(B64C[BSIGMA[k + offset] * 2 + 0]);
|
||||
TBL.push(B64C[BSIGMA[k + offset] * 2 + 1]);
|
||||
TBL.push(B64C[BSIGMA[k + offset - 1] * 2 + 0]);
|
||||
TBL.push(B64C[BSIGMA[k + offset - 1] * 2 + 1]);
|
||||
}
|
||||
}
|
||||
return new Uint32Array(TBL);
|
||||
}
|
||||
const TBL512 = /* @__PURE__ */ generateTBL512(); // C512[SIGMA[X]] precompute
|
||||
|
||||
// Mixing function G, split into two halves
|
||||
function G1b(a: number, b: number, c: number, d: number, msg: Uint32Array, k: number) {
|
||||
const Xpos = 2 * BSIGMA[k];
|
||||
const Xl = msg[Xpos + 1] ^ TBL512[k * 2 + 1], Xh = msg[Xpos] ^ TBL512[k * 2]; // prettier-ignore
|
||||
let Al = BBUF[2 * a + 1], Ah = BBUF[2 * a]; // prettier-ignore
|
||||
let Bl = BBUF[2 * b + 1], Bh = BBUF[2 * b]; // prettier-ignore
|
||||
let Cl = BBUF[2 * c + 1], Ch = BBUF[2 * c]; // prettier-ignore
|
||||
let Dl = BBUF[2 * d + 1], Dh = BBUF[2 * d]; // prettier-ignore
|
||||
// v[a] = (v[a] + v[b] + x) | 0;
|
||||
let ll = u64.add3L(Al, Bl, Xl);
|
||||
Ah = u64.add3H(ll, Ah, Bh, Xh) >>> 0;
|
||||
Al = (ll | 0) >>> 0;
|
||||
// v[d] = rotr(v[d] ^ v[a], 32)
|
||||
({ Dh, Dl } = { Dh: Dh ^ Ah, Dl: Dl ^ Al });
|
||||
({ Dh, Dl } = { Dh: u64.rotr32H(Dh, Dl), Dl: u64.rotr32L(Dh, Dl) });
|
||||
// v[c] = (v[c] + v[d]) | 0;
|
||||
({ h: Ch, l: Cl } = u64.add(Ch, Cl, Dh, Dl));
|
||||
// v[b] = rotr(v[b] ^ v[c], 25)
|
||||
({ Bh, Bl } = { Bh: Bh ^ Ch, Bl: Bl ^ Cl });
|
||||
({ Bh, Bl } = { Bh: u64.rotrSH(Bh, Bl, 25), Bl: u64.rotrSL(Bh, Bl, 25) });
|
||||
(BBUF[2 * a + 1] = Al), (BBUF[2 * a] = Ah);
|
||||
(BBUF[2 * b + 1] = Bl), (BBUF[2 * b] = Bh);
|
||||
(BBUF[2 * c + 1] = Cl), (BBUF[2 * c] = Ch);
|
||||
(BBUF[2 * d + 1] = Dl), (BBUF[2 * d] = Dh);
|
||||
}
|
||||
|
||||
function G2b(a: number, b: number, c: number, d: number, msg: Uint32Array, k: number) {
|
||||
const Xpos = 2 * BSIGMA[k];
|
||||
const Xl = msg[Xpos + 1] ^ TBL512[k * 2 + 1], Xh = msg[Xpos] ^ TBL512[k * 2]; // prettier-ignore
|
||||
let Al = BBUF[2 * a + 1], Ah = BBUF[2 * a]; // prettier-ignore
|
||||
let Bl = BBUF[2 * b + 1], Bh = BBUF[2 * b]; // prettier-ignore
|
||||
let Cl = BBUF[2 * c + 1], Ch = BBUF[2 * c]; // prettier-ignore
|
||||
let Dl = BBUF[2 * d + 1], Dh = BBUF[2 * d]; // prettier-ignore
|
||||
// v[a] = (v[a] + v[b] + x) | 0;
|
||||
let ll = u64.add3L(Al, Bl, Xl);
|
||||
Ah = u64.add3H(ll, Ah, Bh, Xh);
|
||||
Al = ll | 0;
|
||||
// v[d] = rotr(v[d] ^ v[a], 16)
|
||||
({ Dh, Dl } = { Dh: Dh ^ Ah, Dl: Dl ^ Al });
|
||||
({ Dh, Dl } = { Dh: u64.rotrSH(Dh, Dl, 16), Dl: u64.rotrSL(Dh, Dl, 16) });
|
||||
// v[c] = (v[c] + v[d]) | 0;
|
||||
({ h: Ch, l: Cl } = u64.add(Ch, Cl, Dh, Dl));
|
||||
// v[b] = rotr(v[b] ^ v[c], 11)
|
||||
({ Bh, Bl } = { Bh: Bh ^ Ch, Bl: Bl ^ Cl });
|
||||
({ Bh, Bl } = { Bh: u64.rotrSH(Bh, Bl, 11), Bl: u64.rotrSL(Bh, Bl, 11) });
|
||||
(BBUF[2 * a + 1] = Al), (BBUF[2 * a] = Ah);
|
||||
(BBUF[2 * b + 1] = Bl), (BBUF[2 * b] = Bh);
|
||||
(BBUF[2 * c + 1] = Cl), (BBUF[2 * c] = Ch);
|
||||
(BBUF[2 * d + 1] = Dl), (BBUF[2 * d] = Dh);
|
||||
}
|
||||
|
||||
class Blake1_64 extends BLAKE1<Blake1_64> {
|
||||
private v0l: number;
|
||||
private v0h: number;
|
||||
private v1l: number;
|
||||
private v1h: number;
|
||||
private v2l: number;
|
||||
private v2h: number;
|
||||
private v3l: number;
|
||||
private v3h: number;
|
||||
private v4l: number;
|
||||
private v4h: number;
|
||||
private v5l: number;
|
||||
private v5h: number;
|
||||
private v6l: number;
|
||||
private v6h: number;
|
||||
private v7l: number;
|
||||
private v7h: number;
|
||||
constructor(outputLen: number, IV: Uint32Array, lengthFlag: number, opts: BlakeOpts = {}) {
|
||||
super(128, outputLen, lengthFlag, 16, 8, B64C, opts);
|
||||
this.v0l = IV[0] | 0;
|
||||
this.v0h = IV[1] | 0;
|
||||
this.v1l = IV[2] | 0;
|
||||
this.v1h = IV[3] | 0;
|
||||
this.v2l = IV[4] | 0;
|
||||
this.v2h = IV[5] | 0;
|
||||
this.v3l = IV[6] | 0;
|
||||
this.v3h = IV[7] | 0;
|
||||
this.v4l = IV[8] | 0;
|
||||
this.v4h = IV[9] | 0;
|
||||
this.v5l = IV[10] | 0;
|
||||
this.v5h = IV[11] | 0;
|
||||
this.v6l = IV[12] | 0;
|
||||
this.v6h = IV[13] | 0;
|
||||
this.v7l = IV[14] | 0;
|
||||
this.v7h = IV[15] | 0;
|
||||
}
|
||||
// prettier-ignore
|
||||
protected get(): [
|
||||
number, number, number, number, number, number, number, number,
|
||||
number, number, number, number, number, number, number, number
|
||||
] {
|
||||
let { v0l, v0h, v1l, v1h, v2l, v2h, v3l, v3h, v4l, v4h, v5l, v5h, v6l, v6h, v7l, v7h } = this;
|
||||
return [v0l, v0h, v1l, v1h, v2l, v2h, v3l, v3h, v4l, v4h, v5l, v5h, v6l, v6h, v7l, v7h];
|
||||
}
|
||||
// prettier-ignore
|
||||
protected set(
|
||||
v0l: number, v0h: number, v1l: number, v1h: number,
|
||||
v2l: number, v2h: number, v3l: number, v3h: number,
|
||||
v4l: number, v4h: number, v5l: number, v5h: number,
|
||||
v6l: number, v6h: number, v7l: number, v7h: number
|
||||
): void {
|
||||
this.v0l = v0l | 0;
|
||||
this.v0h = v0h | 0;
|
||||
this.v1l = v1l | 0;
|
||||
this.v1h = v1h | 0;
|
||||
this.v2l = v2l | 0;
|
||||
this.v2h = v2h | 0;
|
||||
this.v3l = v3l | 0;
|
||||
this.v3h = v3h | 0;
|
||||
this.v4l = v4l | 0;
|
||||
this.v4h = v4h | 0;
|
||||
this.v5l = v5l | 0;
|
||||
this.v5h = v5h | 0;
|
||||
this.v6l = v6l | 0;
|
||||
this.v6h = v6h | 0;
|
||||
this.v7l = v7l | 0;
|
||||
this.v7h = v7h | 0;
|
||||
}
|
||||
destroy(): void {
|
||||
super.destroy();
|
||||
this.set(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
|
||||
}
|
||||
compress(view: DataView, offset: number, withLength = true): void {
|
||||
for (let i = 0; i < 32; i++, offset += 4) BLAKE512_W[i] = view.getUint32(offset, false);
|
||||
|
||||
this.get().forEach((v, i) => (BBUF[i] = v)); // First half from state.
|
||||
BBUF.set(this.constants.subarray(0, 16), 16);
|
||||
if (withLength) {
|
||||
const { h, l } = u64.fromBig(BigInt(this.length * 8));
|
||||
BBUF[24] = (BBUF[24] ^ h) >>> 0;
|
||||
BBUF[25] = (BBUF[25] ^ l) >>> 0;
|
||||
BBUF[26] = (BBUF[26] ^ h) >>> 0;
|
||||
BBUF[27] = (BBUF[27] ^ l) >>> 0;
|
||||
}
|
||||
for (let i = 0, k = 0; i < 16; i++) {
|
||||
G1b(0, 4, 8, 12, BLAKE512_W, k++);
|
||||
G2b(0, 4, 8, 12, BLAKE512_W, k++);
|
||||
G1b(1, 5, 9, 13, BLAKE512_W, k++);
|
||||
G2b(1, 5, 9, 13, BLAKE512_W, k++);
|
||||
G1b(2, 6, 10, 14, BLAKE512_W, k++);
|
||||
G2b(2, 6, 10, 14, BLAKE512_W, k++);
|
||||
G1b(3, 7, 11, 15, BLAKE512_W, k++);
|
||||
G2b(3, 7, 11, 15, BLAKE512_W, k++);
|
||||
|
||||
G1b(0, 5, 10, 15, BLAKE512_W, k++);
|
||||
G2b(0, 5, 10, 15, BLAKE512_W, k++);
|
||||
G1b(1, 6, 11, 12, BLAKE512_W, k++);
|
||||
G2b(1, 6, 11, 12, BLAKE512_W, k++);
|
||||
G1b(2, 7, 8, 13, BLAKE512_W, k++);
|
||||
G2b(2, 7, 8, 13, BLAKE512_W, k++);
|
||||
G1b(3, 4, 9, 14, BLAKE512_W, k++);
|
||||
G2b(3, 4, 9, 14, BLAKE512_W, k++);
|
||||
}
|
||||
this.v0l ^= BBUF[0] ^ BBUF[16] ^ this.salt[0];
|
||||
this.v0h ^= BBUF[1] ^ BBUF[17] ^ this.salt[1];
|
||||
this.v1l ^= BBUF[2] ^ BBUF[18] ^ this.salt[2];
|
||||
this.v1h ^= BBUF[3] ^ BBUF[19] ^ this.salt[3];
|
||||
this.v2l ^= BBUF[4] ^ BBUF[20] ^ this.salt[4];
|
||||
this.v2h ^= BBUF[5] ^ BBUF[21] ^ this.salt[5];
|
||||
this.v3l ^= BBUF[6] ^ BBUF[22] ^ this.salt[6];
|
||||
this.v3h ^= BBUF[7] ^ BBUF[23] ^ this.salt[7];
|
||||
this.v4l ^= BBUF[8] ^ BBUF[24] ^ this.salt[0];
|
||||
this.v4h ^= BBUF[9] ^ BBUF[25] ^ this.salt[1];
|
||||
this.v5l ^= BBUF[10] ^ BBUF[26] ^ this.salt[2];
|
||||
this.v5h ^= BBUF[11] ^ BBUF[27] ^ this.salt[3];
|
||||
this.v6l ^= BBUF[12] ^ BBUF[28] ^ this.salt[4];
|
||||
this.v6h ^= BBUF[13] ^ BBUF[29] ^ this.salt[5];
|
||||
this.v7l ^= BBUF[14] ^ BBUF[30] ^ this.salt[6];
|
||||
this.v7h ^= BBUF[15] ^ BBUF[31] ^ this.salt[7];
|
||||
clean(BBUF, BLAKE512_W);
|
||||
}
|
||||
}
|
||||
|
||||
export class BLAKE224 extends Blake1_32 {
|
||||
constructor(opts: BlakeOpts = {}) {
|
||||
super(28, B224_IV, 0b0000_0000, opts);
|
||||
}
|
||||
}
|
||||
export class BLAKE256 extends Blake1_32 {
|
||||
constructor(opts: BlakeOpts = {}) {
|
||||
super(32, B256_IV, 0b0000_0001, opts);
|
||||
}
|
||||
}
|
||||
export class BLAKE384 extends Blake1_64 {
|
||||
constructor(opts: BlakeOpts = {}) {
|
||||
super(48, B384_IV, 0b0000_0000, opts);
|
||||
}
|
||||
}
|
||||
export class BLAKE512 extends Blake1_64 {
|
||||
constructor(opts: BlakeOpts = {}) {
|
||||
super(64, B512_IV, 0b0000_0001, opts);
|
||||
}
|
||||
}
|
||||
/** blake1-224 hash function */
|
||||
export const blake224: CHashO = /* @__PURE__ */ createOptHasher<BLAKE224, BlakeOpts>(
|
||||
(opts) => new BLAKE224(opts)
|
||||
);
|
||||
/** blake1-256 hash function */
|
||||
export const blake256: CHashO = /* @__PURE__ */ createOptHasher<BLAKE256, BlakeOpts>(
|
||||
(opts) => new BLAKE256(opts)
|
||||
);
|
||||
/** blake1-384 hash function */
export const blake384: CHashO = /* @__PURE__ */ createOptHasher<BLAKE384, BlakeOpts>(
(opts) => new BLAKE384(opts)
);
|
||||
/** blake1-512 hash function */
|
||||
export const blake512: CHashO = /* @__PURE__ */ createOptHasher<BLAKE512, BlakeOpts>(
|
||||
(opts) => new BLAKE512(opts)
|
||||
);
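// Usage sketch (illustrative, not part of the library source): the blake1 hashers above
// are plain one-shot functions over bytes. TextEncoder is used to avoid assuming any
// helper imports; the underscore-prefixed names are hypothetical.
const _blake1Msg = new TextEncoder().encode('abc');
const _d256 = blake256(_blake1Msg); // 32-byte blake1-256 digest
const _d512 = blake512(_blake1Msg); // 64-byte blake1-512 digest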
|
||||
486
qwen/nodejs/node_modules/@noble/hashes/src/blake2.ts
generated
vendored
Normal file
@@ -0,0 +1,486 @@
|
||||
/**
 * blake2b (64-bit) & blake2s (8-to-32-bit) hash functions.
 * blake2b could have been faster, but there is no fast u64 in JS, so blake2s is ~1.5x faster.
 * @module
 */
import { BSIGMA, G1s, G2s } from './_blake.ts';
|
||||
import { SHA256_IV } from './_md.ts';
|
||||
import * as u64 from './_u64.ts';
|
||||
// prettier-ignore
|
||||
import {
|
||||
abytes, aexists, anumber, aoutput,
|
||||
clean, createOptHasher, Hash, swap32IfBE, swap8IfBE, toBytes, u32,
|
||||
type CHashO, type Input
|
||||
} from './utils.ts';
|
||||
|
||||
/** Blake hash options. dkLen is output length. key is used in MAC mode. salt is used in KDF mode. */
|
||||
export type Blake2Opts = {
|
||||
dkLen?: number;
|
||||
key?: Input;
|
||||
salt?: Input;
|
||||
personalization?: Input;
|
||||
};
|
||||
|
||||
// Same as SHA512_IV, but swapped endianness: LE instead of BE. iv[1] is iv[0], etc.
|
||||
const B2B_IV = /* @__PURE__ */ Uint32Array.from([
|
||||
0xf3bcc908, 0x6a09e667, 0x84caa73b, 0xbb67ae85, 0xfe94f82b, 0x3c6ef372, 0x5f1d36f1, 0xa54ff53a,
|
||||
0xade682d1, 0x510e527f, 0x2b3e6c1f, 0x9b05688c, 0xfb41bd6b, 0x1f83d9ab, 0x137e2179, 0x5be0cd19,
|
||||
]);
|
||||
// Temporary buffer
|
||||
const BBUF = /* @__PURE__ */ new Uint32Array(32);
|
||||
|
||||
// Mixing function G split into two halves
|
||||
function G1b(a: number, b: number, c: number, d: number, msg: Uint32Array, x: number) {
|
||||
// NOTE: V is LE here
|
||||
const Xl = msg[x], Xh = msg[x + 1]; // prettier-ignore
|
||||
let Al = BBUF[2 * a], Ah = BBUF[2 * a + 1]; // prettier-ignore
|
||||
let Bl = BBUF[2 * b], Bh = BBUF[2 * b + 1]; // prettier-ignore
|
||||
let Cl = BBUF[2 * c], Ch = BBUF[2 * c + 1]; // prettier-ignore
|
||||
let Dl = BBUF[2 * d], Dh = BBUF[2 * d + 1]; // prettier-ignore
|
||||
// v[a] = (v[a] + v[b] + x) | 0;
|
||||
let ll = u64.add3L(Al, Bl, Xl);
|
||||
Ah = u64.add3H(ll, Ah, Bh, Xh);
|
||||
Al = ll | 0;
|
||||
// v[d] = rotr(v[d] ^ v[a], 32)
|
||||
({ Dh, Dl } = { Dh: Dh ^ Ah, Dl: Dl ^ Al });
|
||||
({ Dh, Dl } = { Dh: u64.rotr32H(Dh, Dl), Dl: u64.rotr32L(Dh, Dl) });
|
||||
// v[c] = (v[c] + v[d]) | 0;
|
||||
({ h: Ch, l: Cl } = u64.add(Ch, Cl, Dh, Dl));
|
||||
// v[b] = rotr(v[b] ^ v[c], 24)
|
||||
({ Bh, Bl } = { Bh: Bh ^ Ch, Bl: Bl ^ Cl });
|
||||
({ Bh, Bl } = { Bh: u64.rotrSH(Bh, Bl, 24), Bl: u64.rotrSL(Bh, Bl, 24) });
|
||||
(BBUF[2 * a] = Al), (BBUF[2 * a + 1] = Ah);
|
||||
(BBUF[2 * b] = Bl), (BBUF[2 * b + 1] = Bh);
|
||||
(BBUF[2 * c] = Cl), (BBUF[2 * c + 1] = Ch);
|
||||
(BBUF[2 * d] = Dl), (BBUF[2 * d + 1] = Dh);
|
||||
}
|
||||
|
||||
function G2b(a: number, b: number, c: number, d: number, msg: Uint32Array, x: number) {
|
||||
// NOTE: V is LE here
|
||||
const Xl = msg[x], Xh = msg[x + 1]; // prettier-ignore
|
||||
let Al = BBUF[2 * a], Ah = BBUF[2 * a + 1]; // prettier-ignore
|
||||
let Bl = BBUF[2 * b], Bh = BBUF[2 * b + 1]; // prettier-ignore
|
||||
let Cl = BBUF[2 * c], Ch = BBUF[2 * c + 1]; // prettier-ignore
|
||||
let Dl = BBUF[2 * d], Dh = BBUF[2 * d + 1]; // prettier-ignore
|
||||
// v[a] = (v[a] + v[b] + x) | 0;
|
||||
let ll = u64.add3L(Al, Bl, Xl);
|
||||
Ah = u64.add3H(ll, Ah, Bh, Xh);
|
||||
Al = ll | 0;
|
||||
// v[d] = rotr(v[d] ^ v[a], 16)
|
||||
({ Dh, Dl } = { Dh: Dh ^ Ah, Dl: Dl ^ Al });
|
||||
({ Dh, Dl } = { Dh: u64.rotrSH(Dh, Dl, 16), Dl: u64.rotrSL(Dh, Dl, 16) });
|
||||
// v[c] = (v[c] + v[d]) | 0;
|
||||
({ h: Ch, l: Cl } = u64.add(Ch, Cl, Dh, Dl));
|
||||
// v[b] = rotr(v[b] ^ v[c], 63)
|
||||
({ Bh, Bl } = { Bh: Bh ^ Ch, Bl: Bl ^ Cl });
|
||||
({ Bh, Bl } = { Bh: u64.rotrBH(Bh, Bl, 63), Bl: u64.rotrBL(Bh, Bl, 63) });
|
||||
(BBUF[2 * a] = Al), (BBUF[2 * a + 1] = Ah);
|
||||
(BBUF[2 * b] = Bl), (BBUF[2 * b + 1] = Bh);
|
||||
(BBUF[2 * c] = Cl), (BBUF[2 * c + 1] = Ch);
|
||||
(BBUF[2 * d] = Dl), (BBUF[2 * d + 1] = Dh);
|
||||
}
|
||||
|
||||
function checkBlake2Opts(
|
||||
outputLen: number,
|
||||
opts: Blake2Opts | undefined = {},
|
||||
keyLen: number,
|
||||
saltLen: number,
|
||||
persLen: number
|
||||
) {
|
||||
anumber(keyLen);
|
||||
if (outputLen < 0 || outputLen > keyLen) throw new Error('outputLen bigger than keyLen');
|
||||
const { key, salt, personalization } = opts;
|
||||
if (key !== undefined && (key.length < 1 || key.length > keyLen))
|
||||
throw new Error('key length must be undefined or 1..' + keyLen);
|
||||
if (salt !== undefined && salt.length !== saltLen)
|
||||
throw new Error('salt must be undefined or ' + saltLen);
|
||||
if (personalization !== undefined && personalization.length !== persLen)
|
||||
throw new Error('personalization must be undefined or ' + persLen);
|
||||
}
|
||||
|
||||
/** Class, from which others are subclassed. */
|
||||
export abstract class BLAKE2<T extends BLAKE2<T>> extends Hash<T> {
|
||||
protected abstract compress(msg: Uint32Array, offset: number, isLast: boolean): void;
|
||||
protected abstract get(): number[];
|
||||
protected abstract set(...args: number[]): void;
|
||||
abstract destroy(): void;
|
||||
protected buffer: Uint8Array;
|
||||
protected buffer32: Uint32Array;
|
||||
protected finished = false;
|
||||
protected destroyed = false;
|
||||
protected length: number = 0;
|
||||
protected pos: number = 0;
|
||||
readonly blockLen: number;
|
||||
readonly outputLen: number;
|
||||
|
||||
constructor(blockLen: number, outputLen: number) {
|
||||
super();
|
||||
anumber(blockLen);
|
||||
anumber(outputLen);
|
||||
this.blockLen = blockLen;
|
||||
this.outputLen = outputLen;
|
||||
this.buffer = new Uint8Array(blockLen);
|
||||
this.buffer32 = u32(this.buffer);
|
||||
}
|
||||
update(data: Input): this {
|
||||
aexists(this);
|
||||
data = toBytes(data);
|
||||
abytes(data);
|
||||
// Main difference with other hashes: there is flag for last block,
|
||||
// so we cannot process current block before we know that there
|
||||
// is the next one. This significantly complicates logic and reduces ability
|
||||
// to do zero-copy processing
|
||||
const { blockLen, buffer, buffer32 } = this;
|
||||
const len = data.length;
|
||||
const offset = data.byteOffset;
|
||||
const buf = data.buffer;
|
||||
for (let pos = 0; pos < len; ) {
|
||||
// If buffer is full and we still have input (don't process last block, same as blake2s)
|
||||
if (this.pos === blockLen) {
|
||||
swap32IfBE(buffer32);
|
||||
this.compress(buffer32, 0, false);
|
||||
swap32IfBE(buffer32);
|
||||
this.pos = 0;
|
||||
}
|
||||
const take = Math.min(blockLen - this.pos, len - pos);
|
||||
const dataOffset = offset + pos;
|
||||
// full block && aligned to 4 bytes && not last in input
|
||||
if (take === blockLen && !(dataOffset % 4) && pos + take < len) {
|
||||
const data32 = new Uint32Array(buf, dataOffset, Math.floor((len - pos) / 4));
|
||||
swap32IfBE(data32);
|
||||
for (let pos32 = 0; pos + blockLen < len; pos32 += buffer32.length, pos += blockLen) {
|
||||
this.length += blockLen;
|
||||
this.compress(data32, pos32, false);
|
||||
}
|
||||
swap32IfBE(data32);
|
||||
continue;
|
||||
}
|
||||
buffer.set(data.subarray(pos, pos + take), this.pos);
|
||||
this.pos += take;
|
||||
this.length += take;
|
||||
pos += take;
|
||||
}
|
||||
return this;
|
||||
}
|
||||
digestInto(out: Uint8Array): void {
|
||||
aexists(this);
|
||||
aoutput(out, this);
|
||||
const { pos, buffer32 } = this;
|
||||
this.finished = true;
|
||||
// Padding
|
||||
clean(this.buffer.subarray(pos));
|
||||
swap32IfBE(buffer32);
|
||||
this.compress(buffer32, 0, true);
|
||||
swap32IfBE(buffer32);
|
||||
const out32 = u32(out);
|
||||
this.get().forEach((v, i) => (out32[i] = swap8IfBE(v)));
|
||||
}
|
||||
digest(): Uint8Array {
|
||||
const { buffer, outputLen } = this;
|
||||
this.digestInto(buffer);
|
||||
const res = buffer.slice(0, outputLen);
|
||||
this.destroy();
|
||||
return res;
|
||||
}
|
||||
_cloneInto(to?: T): T {
|
||||
const { buffer, length, finished, destroyed, outputLen, pos } = this;
|
||||
to ||= new (this.constructor as any)({ dkLen: outputLen }) as T;
|
||||
to.set(...this.get());
|
||||
to.buffer.set(buffer);
|
||||
to.destroyed = destroyed;
|
||||
to.finished = finished;
|
||||
to.length = length;
|
||||
to.pos = pos;
|
||||
// @ts-ignore
|
||||
to.outputLen = outputLen;
|
||||
return to;
|
||||
}
|
||||
clone(): T {
|
||||
return this._cloneInto();
|
||||
}
|
||||
}
|
||||
|
||||
export class BLAKE2b extends BLAKE2<BLAKE2b> {
|
||||
// Same as SHA-512, but LE
|
||||
private v0l = B2B_IV[0] | 0;
|
||||
private v0h = B2B_IV[1] | 0;
|
||||
private v1l = B2B_IV[2] | 0;
|
||||
private v1h = B2B_IV[3] | 0;
|
||||
private v2l = B2B_IV[4] | 0;
|
||||
private v2h = B2B_IV[5] | 0;
|
||||
private v3l = B2B_IV[6] | 0;
|
||||
private v3h = B2B_IV[7] | 0;
|
||||
private v4l = B2B_IV[8] | 0;
|
||||
private v4h = B2B_IV[9] | 0;
|
||||
private v5l = B2B_IV[10] | 0;
|
||||
private v5h = B2B_IV[11] | 0;
|
||||
private v6l = B2B_IV[12] | 0;
|
||||
private v6h = B2B_IV[13] | 0;
|
||||
private v7l = B2B_IV[14] | 0;
|
||||
private v7h = B2B_IV[15] | 0;
|
||||
|
||||
constructor(opts: Blake2Opts = {}) {
|
||||
const olen = opts.dkLen === undefined ? 64 : opts.dkLen;
|
||||
super(128, olen);
|
||||
checkBlake2Opts(olen, opts, 64, 16, 16);
|
||||
let { key, personalization, salt } = opts;
|
||||
let keyLength = 0;
|
||||
if (key !== undefined) {
|
||||
key = toBytes(key);
|
||||
keyLength = key.length;
|
||||
}
|
||||
this.v0l ^= this.outputLen | (keyLength << 8) | (0x01 << 16) | (0x01 << 24);
|
||||
if (salt !== undefined) {
|
||||
salt = toBytes(salt);
|
||||
const slt = u32(salt);
|
||||
this.v4l ^= swap8IfBE(slt[0]);
|
||||
this.v4h ^= swap8IfBE(slt[1]);
|
||||
this.v5l ^= swap8IfBE(slt[2]);
|
||||
this.v5h ^= swap8IfBE(slt[3]);
|
||||
}
|
||||
if (personalization !== undefined) {
|
||||
personalization = toBytes(personalization);
|
||||
const pers = u32(personalization);
|
||||
this.v6l ^= swap8IfBE(pers[0]);
|
||||
this.v6h ^= swap8IfBE(pers[1]);
|
||||
this.v7l ^= swap8IfBE(pers[2]);
|
||||
this.v7h ^= swap8IfBE(pers[3]);
|
||||
}
|
||||
if (key !== undefined) {
|
||||
// Pad to blockLen and update
|
||||
const tmp = new Uint8Array(this.blockLen);
|
||||
tmp.set(key);
|
||||
this.update(tmp);
|
||||
}
|
||||
}
|
||||
// prettier-ignore
|
||||
protected get(): [
|
||||
number, number, number, number, number, number, number, number,
|
||||
number, number, number, number, number, number, number, number
|
||||
] {
|
||||
let { v0l, v0h, v1l, v1h, v2l, v2h, v3l, v3h, v4l, v4h, v5l, v5h, v6l, v6h, v7l, v7h } = this;
|
||||
return [v0l, v0h, v1l, v1h, v2l, v2h, v3l, v3h, v4l, v4h, v5l, v5h, v6l, v6h, v7l, v7h];
|
||||
}
|
||||
// prettier-ignore
|
||||
protected set(
|
||||
v0l: number, v0h: number, v1l: number, v1h: number,
|
||||
v2l: number, v2h: number, v3l: number, v3h: number,
|
||||
v4l: number, v4h: number, v5l: number, v5h: number,
|
||||
v6l: number, v6h: number, v7l: number, v7h: number
|
||||
): void {
|
||||
this.v0l = v0l | 0;
|
||||
this.v0h = v0h | 0;
|
||||
this.v1l = v1l | 0;
|
||||
this.v1h = v1h | 0;
|
||||
this.v2l = v2l | 0;
|
||||
this.v2h = v2h | 0;
|
||||
this.v3l = v3l | 0;
|
||||
this.v3h = v3h | 0;
|
||||
this.v4l = v4l | 0;
|
||||
this.v4h = v4h | 0;
|
||||
this.v5l = v5l | 0;
|
||||
this.v5h = v5h | 0;
|
||||
this.v6l = v6l | 0;
|
||||
this.v6h = v6h | 0;
|
||||
this.v7l = v7l | 0;
|
||||
this.v7h = v7h | 0;
|
||||
}
|
||||
protected compress(msg: Uint32Array, offset: number, isLast: boolean): void {
|
||||
this.get().forEach((v, i) => (BBUF[i] = v)); // First half from state.
|
||||
BBUF.set(B2B_IV, 16); // Second half from IV.
|
||||
let { h, l } = u64.fromBig(BigInt(this.length));
|
||||
BBUF[24] = B2B_IV[8] ^ l; // Low word of the offset.
|
||||
BBUF[25] = B2B_IV[9] ^ h; // High word.
|
||||
// Invert all bits for last block
|
||||
if (isLast) {
|
||||
BBUF[28] = ~BBUF[28];
|
||||
BBUF[29] = ~BBUF[29];
|
||||
}
|
||||
let j = 0;
|
||||
const s = BSIGMA;
|
||||
for (let i = 0; i < 12; i++) {
|
||||
G1b(0, 4, 8, 12, msg, offset + 2 * s[j++]);
|
||||
G2b(0, 4, 8, 12, msg, offset + 2 * s[j++]);
|
||||
G1b(1, 5, 9, 13, msg, offset + 2 * s[j++]);
|
||||
G2b(1, 5, 9, 13, msg, offset + 2 * s[j++]);
|
||||
G1b(2, 6, 10, 14, msg, offset + 2 * s[j++]);
|
||||
G2b(2, 6, 10, 14, msg, offset + 2 * s[j++]);
|
||||
G1b(3, 7, 11, 15, msg, offset + 2 * s[j++]);
|
||||
G2b(3, 7, 11, 15, msg, offset + 2 * s[j++]);
|
||||
|
||||
G1b(0, 5, 10, 15, msg, offset + 2 * s[j++]);
|
||||
G2b(0, 5, 10, 15, msg, offset + 2 * s[j++]);
|
||||
G1b(1, 6, 11, 12, msg, offset + 2 * s[j++]);
|
||||
G2b(1, 6, 11, 12, msg, offset + 2 * s[j++]);
|
||||
G1b(2, 7, 8, 13, msg, offset + 2 * s[j++]);
|
||||
G2b(2, 7, 8, 13, msg, offset + 2 * s[j++]);
|
||||
G1b(3, 4, 9, 14, msg, offset + 2 * s[j++]);
|
||||
G2b(3, 4, 9, 14, msg, offset + 2 * s[j++]);
|
||||
}
|
||||
this.v0l ^= BBUF[0] ^ BBUF[16];
|
||||
this.v0h ^= BBUF[1] ^ BBUF[17];
|
||||
this.v1l ^= BBUF[2] ^ BBUF[18];
|
||||
this.v1h ^= BBUF[3] ^ BBUF[19];
|
||||
this.v2l ^= BBUF[4] ^ BBUF[20];
|
||||
this.v2h ^= BBUF[5] ^ BBUF[21];
|
||||
this.v3l ^= BBUF[6] ^ BBUF[22];
|
||||
this.v3h ^= BBUF[7] ^ BBUF[23];
|
||||
this.v4l ^= BBUF[8] ^ BBUF[24];
|
||||
this.v4h ^= BBUF[9] ^ BBUF[25];
|
||||
this.v5l ^= BBUF[10] ^ BBUF[26];
|
||||
this.v5h ^= BBUF[11] ^ BBUF[27];
|
||||
this.v6l ^= BBUF[12] ^ BBUF[28];
|
||||
this.v6h ^= BBUF[13] ^ BBUF[29];
|
||||
this.v7l ^= BBUF[14] ^ BBUF[30];
|
||||
this.v7h ^= BBUF[15] ^ BBUF[31];
|
||||
clean(BBUF);
|
||||
}
|
||||
destroy(): void {
|
||||
this.destroyed = true;
|
||||
clean(this.buffer32);
|
||||
this.set(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Blake2b hash function. 64-bit. ~1.5x slower than blake2s in JS.
 * @param msg - message to be hashed
 * @param opts - dkLen: output length, key: MAC mode, salt, personalization
 */
export const blake2b: CHashO = /* @__PURE__ */ createOptHasher<BLAKE2b, Blake2Opts>(
(opts) => new BLAKE2b(opts)
);
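// Usage sketch (illustrative, not part of the library): how the Blake2Opts fields combine
// in practice for blake2b. For blake2b the key may be up to 64 bytes and salt /
// personalization are exactly 16 bytes each. Variable names are hypothetical.
const _b2bMsg = new TextEncoder().encode('hello');
const _b2bShort = blake2b(_b2bMsg, { dkLen: 32 }); // plain hash, shorter output
const _b2bMac = blake2b(_b2bMsg, {
  key: new Uint8Array(32),
  salt: new Uint8Array(16),
  personalization: new Uint8Array(16),
});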
|
||||
|
||||
// =================
|
||||
// Blake2S
|
||||
// =================
|
||||
|
||||
// prettier-ignore
|
||||
export type Num16 = {
|
||||
v0: number; v1: number; v2: number; v3: number;
|
||||
v4: number; v5: number; v6: number; v7: number;
|
||||
v8: number; v9: number; v10: number; v11: number;
|
||||
v12: number; v13: number; v14: number; v15: number;
|
||||
};
|
||||
|
||||
// prettier-ignore
|
||||
export function compress(s: Uint8Array, offset: number, msg: Uint32Array, rounds: number,
|
||||
v0: number, v1: number, v2: number, v3: number, v4: number, v5: number, v6: number, v7: number,
|
||||
v8: number, v9: number, v10: number, v11: number, v12: number, v13: number, v14: number, v15: number,
|
||||
): Num16 {
|
||||
let j = 0;
|
||||
for (let i = 0; i < rounds; i++) {
|
||||
({ a: v0, b: v4, c: v8, d: v12 } = G1s(v0, v4, v8, v12, msg[offset + s[j++]]));
|
||||
({ a: v0, b: v4, c: v8, d: v12 } = G2s(v0, v4, v8, v12, msg[offset + s[j++]]));
|
||||
({ a: v1, b: v5, c: v9, d: v13 } = G1s(v1, v5, v9, v13, msg[offset + s[j++]]));
|
||||
({ a: v1, b: v5, c: v9, d: v13 } = G2s(v1, v5, v9, v13, msg[offset + s[j++]]));
|
||||
({ a: v2, b: v6, c: v10, d: v14 } = G1s(v2, v6, v10, v14, msg[offset + s[j++]]));
|
||||
({ a: v2, b: v6, c: v10, d: v14 } = G2s(v2, v6, v10, v14, msg[offset + s[j++]]));
|
||||
({ a: v3, b: v7, c: v11, d: v15 } = G1s(v3, v7, v11, v15, msg[offset + s[j++]]));
|
||||
({ a: v3, b: v7, c: v11, d: v15 } = G2s(v3, v7, v11, v15, msg[offset + s[j++]]));
|
||||
|
||||
({ a: v0, b: v5, c: v10, d: v15 } = G1s(v0, v5, v10, v15, msg[offset + s[j++]]));
|
||||
({ a: v0, b: v5, c: v10, d: v15 } = G2s(v0, v5, v10, v15, msg[offset + s[j++]]));
|
||||
({ a: v1, b: v6, c: v11, d: v12 } = G1s(v1, v6, v11, v12, msg[offset + s[j++]]));
|
||||
({ a: v1, b: v6, c: v11, d: v12 } = G2s(v1, v6, v11, v12, msg[offset + s[j++]]));
|
||||
({ a: v2, b: v7, c: v8, d: v13 } = G1s(v2, v7, v8, v13, msg[offset + s[j++]]));
|
||||
({ a: v2, b: v7, c: v8, d: v13 } = G2s(v2, v7, v8, v13, msg[offset + s[j++]]));
|
||||
({ a: v3, b: v4, c: v9, d: v14 } = G1s(v3, v4, v9, v14, msg[offset + s[j++]]));
|
||||
({ a: v3, b: v4, c: v9, d: v14 } = G2s(v3, v4, v9, v14, msg[offset + s[j++]]));
|
||||
}
|
||||
return { v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15 };
|
||||
}
|
||||
|
||||
const B2S_IV = SHA256_IV;
|
||||
export class BLAKE2s extends BLAKE2<BLAKE2s> {
|
||||
// Internal state, same as SHA-256
|
||||
private v0 = B2S_IV[0] | 0;
|
||||
private v1 = B2S_IV[1] | 0;
|
||||
private v2 = B2S_IV[2] | 0;
|
||||
private v3 = B2S_IV[3] | 0;
|
||||
private v4 = B2S_IV[4] | 0;
|
||||
private v5 = B2S_IV[5] | 0;
|
||||
private v6 = B2S_IV[6] | 0;
|
||||
private v7 = B2S_IV[7] | 0;
|
||||
|
||||
constructor(opts: Blake2Opts = {}) {
|
||||
const olen = opts.dkLen === undefined ? 32 : opts.dkLen;
|
||||
super(64, olen);
|
||||
checkBlake2Opts(olen, opts, 32, 8, 8);
|
||||
let { key, personalization, salt } = opts;
|
||||
let keyLength = 0;
|
||||
if (key !== undefined) {
|
||||
key = toBytes(key);
|
||||
keyLength = key.length;
|
||||
}
|
||||
this.v0 ^= this.outputLen | (keyLength << 8) | (0x01 << 16) | (0x01 << 24);
|
||||
if (salt !== undefined) {
|
||||
salt = toBytes(salt);
|
||||
const slt = u32(salt as Uint8Array);
|
||||
this.v4 ^= swap8IfBE(slt[0]);
|
||||
this.v5 ^= swap8IfBE(slt[1]);
|
||||
}
|
||||
if (personalization !== undefined) {
|
||||
personalization = toBytes(personalization);
|
||||
const pers = u32(personalization as Uint8Array);
|
||||
this.v6 ^= swap8IfBE(pers[0]);
|
||||
this.v7 ^= swap8IfBE(pers[1]);
|
||||
}
|
||||
if (key !== undefined) {
|
||||
// Pad to blockLen and update
|
||||
abytes(key);
|
||||
const tmp = new Uint8Array(this.blockLen);
|
||||
tmp.set(key);
|
||||
this.update(tmp);
|
||||
}
|
||||
}
|
||||
protected get(): [number, number, number, number, number, number, number, number] {
|
||||
const { v0, v1, v2, v3, v4, v5, v6, v7 } = this;
|
||||
return [v0, v1, v2, v3, v4, v5, v6, v7];
|
||||
}
|
||||
// prettier-ignore
|
||||
protected set(
|
||||
v0: number, v1: number, v2: number, v3: number, v4: number, v5: number, v6: number, v7: number
|
||||
): void {
|
||||
this.v0 = v0 | 0;
|
||||
this.v1 = v1 | 0;
|
||||
this.v2 = v2 | 0;
|
||||
this.v3 = v3 | 0;
|
||||
this.v4 = v4 | 0;
|
||||
this.v5 = v5 | 0;
|
||||
this.v6 = v6 | 0;
|
||||
this.v7 = v7 | 0;
|
||||
}
|
||||
protected compress(msg: Uint32Array, offset: number, isLast: boolean): void {
|
||||
const { h, l } = u64.fromBig(BigInt(this.length));
|
||||
// prettier-ignore
|
||||
const { v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15 } =
|
||||
compress(
|
||||
BSIGMA, offset, msg, 10,
|
||||
this.v0, this.v1, this.v2, this.v3, this.v4, this.v5, this.v6, this.v7,
|
||||
B2S_IV[0], B2S_IV[1], B2S_IV[2], B2S_IV[3], l ^ B2S_IV[4], h ^ B2S_IV[5], isLast ? ~B2S_IV[6] : B2S_IV[6], B2S_IV[7]
|
||||
);
|
||||
this.v0 ^= v0 ^ v8;
|
||||
this.v1 ^= v1 ^ v9;
|
||||
this.v2 ^= v2 ^ v10;
|
||||
this.v3 ^= v3 ^ v11;
|
||||
this.v4 ^= v4 ^ v12;
|
||||
this.v5 ^= v5 ^ v13;
|
||||
this.v6 ^= v6 ^ v14;
|
||||
this.v7 ^= v7 ^ v15;
|
||||
}
|
||||
destroy(): void {
|
||||
this.destroyed = true;
|
||||
clean(this.buffer32);
|
||||
this.set(0, 0, 0, 0, 0, 0, 0, 0);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Blake2s hash function. Focuses on 8-bit to 32-bit platforms. ~1.5x faster than blake2b in JS.
 * @param msg - message to be hashed
 * @param opts - dkLen: output length, key: MAC mode, salt, personalization
 */
export const blake2s: CHashO = /* @__PURE__ */ createOptHasher<BLAKE2s, Blake2Opts>(
(opts) => new BLAKE2s(opts)
);
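// Usage sketch (illustrative, not part of the library): blake2s takes the same Blake2Opts
// shape, but the key is at most 32 bytes and salt/personalization are 8 bytes each.
// Variable names are hypothetical.
const _b2sTag = blake2s(new TextEncoder().encode('hello'), {
  key: new Uint8Array(32),
  dkLen: 16,
});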
|
||||
10
qwen/nodejs/node_modules/@noble/hashes/src/blake2b.ts
generated
vendored
Normal file
@@ -0,0 +1,10 @@
/**
 * Blake2b hash function. Focuses on 64-bit platforms; in JS the speed difference from Blake2s is negligible.
 * @module
 * @deprecated
 */
import { BLAKE2b as B2B, blake2b as b2b } from './blake2.ts';
/** @deprecated Use import from `noble/hashes/blake2` module */
export const BLAKE2b: typeof B2B = B2B;
/** @deprecated Use import from `noble/hashes/blake2` module */
export const blake2b: typeof b2b = b2b;
20
qwen/nodejs/node_modules/@noble/hashes/src/blake2s.ts
generated
vendored
Normal file
@@ -0,0 +1,20 @@
/**
 * Blake2s hash function. Focuses on 8-bit to 32-bit platforms. Use blake2b for 64-bit, though in JS it is slower.
 * @module
 * @deprecated
 */
import { G1s as G1s_n, G2s as G2s_n } from './_blake.ts';
import { SHA256_IV } from './_md.ts';
import { BLAKE2s as B2S, blake2s as b2s, compress as compress_n } from './blake2.ts';
/** @deprecated Use import from `noble/hashes/blake2` module */
export const B2S_IV: Uint32Array = SHA256_IV;
/** @deprecated Use import from `noble/hashes/blake2` module */
export const G1s: typeof G1s_n = G1s_n;
/** @deprecated Use import from `noble/hashes/blake2` module */
export const G2s: typeof G2s_n = G2s_n;
/** @deprecated Use import from `noble/hashes/blake2` module */
export const compress: typeof compress_n = compress_n;
/** @deprecated Use import from `noble/hashes/blake2` module */
export const BLAKE2s: typeof B2S = B2S;
/** @deprecated Use import from `noble/hashes/blake2` module */
export const blake2s: typeof b2s = b2s;
272
qwen/nodejs/node_modules/@noble/hashes/src/blake3.ts
generated
vendored
Normal file
@@ -0,0 +1,272 @@
|
||||
/**
 * Blake3 fast hash is Blake2 with reduced security (round count). Can also be used as MAC & KDF.
 *
 * It is advertised as "the fastest cryptographic hash", but that isn't true in JS.
 * Why is it slow here? While it should be 6x faster than blake2b, the perf difference is only ~20%:
 *
 * * There is only a 30% reduction in round count compared to blake2s
 * * The speed-up comes from the tree structure, which is parallelized using SIMD & threading.
 *   These features are not present in JS, so we only get overhead from the trees.
 * * Parallelization only happens on 1024-byte chunks: there is no benefit for small inputs.
 * * It is still possible to make it faster using: a) loop unrolling b) web workers c) wasm
 * @module
 */
import { SHA256_IV } from './_md.ts';
|
||||
import { fromBig } from './_u64.ts';
|
||||
import { BLAKE2, compress } from './blake2.ts';
|
||||
// prettier-ignore
|
||||
import {
|
||||
abytes, aexists, anumber, aoutput,
|
||||
clean, createXOFer, swap32IfBE, toBytes, u32, u8,
|
||||
type CHashXO, type HashXOF, type Input
|
||||
} from './utils.ts';
|
||||
|
||||
// Flag bitset
|
||||
const B3_Flags = {
|
||||
CHUNK_START: 0b1,
|
||||
CHUNK_END: 0b10,
|
||||
PARENT: 0b100,
|
||||
ROOT: 0b1000,
|
||||
KEYED_HASH: 0b10000,
|
||||
DERIVE_KEY_CONTEXT: 0b100000,
|
||||
DERIVE_KEY_MATERIAL: 0b1000000,
|
||||
} as const;
|
||||
|
||||
const B3_IV = SHA256_IV.slice();
|
||||
|
||||
const B3_SIGMA: Uint8Array = /* @__PURE__ */ (() => {
|
||||
const Id = Array.from({ length: 16 }, (_, i) => i);
|
||||
const permute = (arr: number[]) =>
|
||||
[2, 6, 3, 10, 7, 0, 4, 13, 1, 11, 12, 5, 9, 14, 15, 8].map((i) => arr[i]);
|
||||
const res: number[] = [];
|
||||
for (let i = 0, v = Id; i < 7; i++, v = permute(v)) res.push(...v);
|
||||
return Uint8Array.from(res);
|
||||
})();
|
||||
|
||||
/**
 * Ensure to use EITHER `key` OR `context`, not both.
 *
 * * `key`: 32-byte MAC key.
 * * `context`: string for KDF. Should be hardcoded, globally unique, and application-specific.
 *   A good default format for the context string is "[application] [commit timestamp] [purpose]".
 */
export type Blake3Opts = { dkLen?: number; key?: Input; context?: Input };
|
||||
/** Blake3 hash. Can be used as MAC and KDF. */
|
||||
export class BLAKE3 extends BLAKE2<BLAKE3> implements HashXOF<BLAKE3> {
|
||||
private chunkPos = 0; // Position of current block in chunk
|
||||
private chunksDone = 0; // How many chunks we already have
|
||||
private flags = 0 | 0;
|
||||
private IV: Uint32Array;
|
||||
private state: Uint32Array;
|
||||
private stack: Uint32Array[] = [];
|
||||
// Output
|
||||
private posOut = 0;
|
||||
private bufferOut32 = new Uint32Array(16);
|
||||
private bufferOut: Uint8Array;
|
||||
private chunkOut = 0; // index of output chunk
|
||||
private enableXOF = true;
|
||||
|
||||
constructor(opts: Blake3Opts = {}, flags = 0) {
|
||||
super(64, opts.dkLen === undefined ? 32 : opts.dkLen);
|
||||
const { key, context } = opts;
|
||||
const hasContext = context !== undefined;
|
||||
if (key !== undefined) {
|
||||
if (hasContext) throw new Error('Only "key" or "context" can be specified at same time');
|
||||
const k = toBytes(key).slice();
|
||||
abytes(k, 32);
|
||||
this.IV = u32(k);
|
||||
swap32IfBE(this.IV);
|
||||
this.flags = flags | B3_Flags.KEYED_HASH;
|
||||
} else if (hasContext) {
|
||||
const ctx = toBytes(context);
|
||||
const contextKey = new BLAKE3({ dkLen: 32 }, B3_Flags.DERIVE_KEY_CONTEXT)
|
||||
.update(ctx)
|
||||
.digest();
|
||||
this.IV = u32(contextKey);
|
||||
swap32IfBE(this.IV);
|
||||
this.flags = flags | B3_Flags.DERIVE_KEY_MATERIAL;
|
||||
} else {
|
||||
this.IV = B3_IV.slice();
|
||||
this.flags = flags;
|
||||
}
|
||||
this.state = this.IV.slice();
|
||||
this.bufferOut = u8(this.bufferOut32);
|
||||
}
|
||||
// Unused
|
||||
protected get(): [] {
|
||||
return [];
|
||||
}
|
||||
protected set(): void {}
|
||||
private b2Compress(counter: number, flags: number, buf: Uint32Array, bufPos: number = 0) {
|
||||
const { state: s, pos } = this;
|
||||
const { h, l } = fromBig(BigInt(counter), true);
|
||||
// prettier-ignore
|
||||
const { v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15 } =
|
||||
compress(
|
||||
B3_SIGMA, bufPos, buf, 7,
|
||||
s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7],
|
||||
B3_IV[0], B3_IV[1], B3_IV[2], B3_IV[3], h, l, pos, flags
|
||||
);
|
||||
s[0] = v0 ^ v8;
|
||||
s[1] = v1 ^ v9;
|
||||
s[2] = v2 ^ v10;
|
||||
s[3] = v3 ^ v11;
|
||||
s[4] = v4 ^ v12;
|
||||
s[5] = v5 ^ v13;
|
||||
s[6] = v6 ^ v14;
|
||||
s[7] = v7 ^ v15;
|
||||
}
|
||||
protected compress(buf: Uint32Array, bufPos: number = 0, isLast: boolean = false): void {
|
||||
// Compress last block
|
||||
let flags = this.flags;
|
||||
if (!this.chunkPos) flags |= B3_Flags.CHUNK_START;
|
||||
if (this.chunkPos === 15 || isLast) flags |= B3_Flags.CHUNK_END;
|
||||
if (!isLast) this.pos = this.blockLen;
|
||||
this.b2Compress(this.chunksDone, flags, buf, bufPos);
|
||||
this.chunkPos += 1;
|
||||
// If current block is last in chunk (16 blocks), then compress chunks
|
||||
if (this.chunkPos === 16 || isLast) {
|
||||
let chunk = this.state;
|
||||
this.state = this.IV.slice();
|
||||
// If not the last one, compress only when there are trailing zeros in chunk counter
|
||||
// chunks used as binary tree where current stack is path. Zero means current leaf is finished and can be compressed.
|
||||
// 1 (001) - leaf not finished (just push current chunk to stack)
|
||||
// 2 (010) - leaf finished at depth=1 (merge with last elm on stack and push back)
|
||||
// 3 (011) - last leaf not finished
|
||||
// 4 (100) - leafs finished at depth=1 and depth=2
|
||||
for (let last, chunks = this.chunksDone + 1; isLast || !(chunks & 1); chunks >>= 1) {
|
||||
if (!(last = this.stack.pop())) break;
|
||||
this.buffer32.set(last, 0);
|
||||
this.buffer32.set(chunk, 8);
|
||||
this.pos = this.blockLen;
|
||||
this.b2Compress(0, this.flags | B3_Flags.PARENT, this.buffer32, 0);
|
||||
chunk = this.state;
|
||||
this.state = this.IV.slice();
|
||||
}
|
||||
this.chunksDone++;
|
||||
this.chunkPos = 0;
|
||||
this.stack.push(chunk);
|
||||
}
|
||||
this.pos = 0;
|
||||
}
|
||||
_cloneInto(to?: BLAKE3): BLAKE3 {
|
||||
to = super._cloneInto(to) as BLAKE3;
|
||||
const { IV, flags, state, chunkPos, posOut, chunkOut, stack, chunksDone } = this;
|
||||
to.state.set(state.slice());
|
||||
to.stack = stack.map((i) => Uint32Array.from(i));
|
||||
to.IV.set(IV);
|
||||
to.flags = flags;
|
||||
to.chunkPos = chunkPos;
|
||||
to.chunksDone = chunksDone;
|
||||
to.posOut = posOut;
|
||||
to.chunkOut = chunkOut;
|
||||
to.enableXOF = this.enableXOF;
|
||||
to.bufferOut32.set(this.bufferOut32);
|
||||
return to;
|
||||
}
|
||||
destroy(): void {
|
||||
this.destroyed = true;
|
||||
clean(this.state, this.buffer32, this.IV, this.bufferOut32);
|
||||
clean(...this.stack);
|
||||
}
|
||||
// Same as b2Compress, but doesn't modify state and returns 16 u32 array (instead of 8)
|
||||
private b2CompressOut() {
|
||||
const { state: s, pos, flags, buffer32, bufferOut32: out32 } = this;
|
||||
const { h, l } = fromBig(BigInt(this.chunkOut++));
|
||||
swap32IfBE(buffer32);
|
||||
// prettier-ignore
|
||||
const { v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15 } =
|
||||
compress(
|
||||
B3_SIGMA, 0, buffer32, 7,
|
||||
s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7],
|
||||
B3_IV[0], B3_IV[1], B3_IV[2], B3_IV[3], l, h, pos, flags
|
||||
);
|
||||
out32[0] = v0 ^ v8;
|
||||
out32[1] = v1 ^ v9;
|
||||
out32[2] = v2 ^ v10;
|
||||
out32[3] = v3 ^ v11;
|
||||
out32[4] = v4 ^ v12;
|
||||
out32[5] = v5 ^ v13;
|
||||
out32[6] = v6 ^ v14;
|
||||
out32[7] = v7 ^ v15;
|
||||
out32[8] = s[0] ^ v8;
|
||||
out32[9] = s[1] ^ v9;
|
||||
out32[10] = s[2] ^ v10;
|
||||
out32[11] = s[3] ^ v11;
|
||||
out32[12] = s[4] ^ v12;
|
||||
out32[13] = s[5] ^ v13;
|
||||
out32[14] = s[6] ^ v14;
|
||||
out32[15] = s[7] ^ v15;
|
||||
swap32IfBE(buffer32);
|
||||
swap32IfBE(out32);
|
||||
this.posOut = 0;
|
||||
}
|
||||
protected finish(): void {
|
||||
if (this.finished) return;
|
||||
this.finished = true;
|
||||
// Padding
|
||||
clean(this.buffer.subarray(this.pos));
|
||||
// Process last chunk
|
||||
let flags = this.flags | B3_Flags.ROOT;
|
||||
if (this.stack.length) {
|
||||
flags |= B3_Flags.PARENT;
|
||||
swap32IfBE(this.buffer32);
|
||||
this.compress(this.buffer32, 0, true);
|
||||
swap32IfBE(this.buffer32);
|
||||
this.chunksDone = 0;
|
||||
this.pos = this.blockLen;
|
||||
} else {
|
||||
flags |= (!this.chunkPos ? B3_Flags.CHUNK_START : 0) | B3_Flags.CHUNK_END;
|
||||
}
|
||||
this.flags = flags;
|
||||
this.b2CompressOut();
|
||||
}
|
||||
private writeInto(out: Uint8Array) {
|
||||
aexists(this, false);
|
||||
abytes(out);
|
||||
this.finish();
|
||||
const { blockLen, bufferOut } = this;
|
||||
for (let pos = 0, len = out.length; pos < len; ) {
|
||||
if (this.posOut >= blockLen) this.b2CompressOut();
|
||||
const take = Math.min(blockLen - this.posOut, len - pos);
|
||||
out.set(bufferOut.subarray(this.posOut, this.posOut + take), pos);
|
||||
this.posOut += take;
|
||||
pos += take;
|
||||
}
|
||||
return out;
|
||||
}
|
||||
xofInto(out: Uint8Array): Uint8Array {
|
||||
if (!this.enableXOF) throw new Error('XOF is not possible after digest call');
|
||||
return this.writeInto(out);
|
||||
}
|
||||
xof(bytes: number): Uint8Array {
|
||||
anumber(bytes);
|
||||
return this.xofInto(new Uint8Array(bytes));
|
||||
}
|
||||
digestInto(out: Uint8Array): Uint8Array {
|
||||
aoutput(out, this);
|
||||
if (this.finished) throw new Error('digest() was already called');
|
||||
this.enableXOF = false;
|
||||
this.writeInto(out);
|
||||
this.destroy();
|
||||
return out;
|
||||
}
|
||||
digest(): Uint8Array {
|
||||
return this.digestInto(new Uint8Array(this.outputLen));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* BLAKE3 hash function. Can be used as MAC and KDF.
|
||||
* @param msg - message that would be hashed
|
||||
* @param opts - `dkLen` for output length, `key` for MAC mode, `context` for KDF mode
|
||||
* @example
|
||||
* const data = new Uint8Array(32);
|
||||
* const hash = blake3(data);
|
||||
* const mac = blake3(data, { key: new Uint8Array(32) });
|
||||
* const kdf = blake3(data, { context: 'application name' });
|
||||
*/
|
||||
export const blake3: CHashXO = /* @__PURE__ */ createXOFer<BLAKE3, Blake3Opts>(
|
||||
(opts) => new BLAKE3(opts)
|
||||
);
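// Usage sketch (illustrative, not part of the library): besides the one-shot examples in
// the doc comment above, blake3 is an XOF, so arbitrary-length output can be squeezed via
// the streaming API. Variable names are hypothetical.
const _b3 = blake3.create({}).update(new TextEncoder().encode('hello'));
const _b3out = _b3.xof(64); // 64 bytes of output; further xof() calls continue the stream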
|
||||
9
qwen/nodejs/node_modules/@noble/hashes/src/crypto.ts
generated
vendored
Normal file
@@ -0,0 +1,9 @@
/**
 * Internal webcrypto alias.
 * We use WebCrypto aka globalThis.crypto, which exists in browsers and node.js 16+.
 * See utils.ts for details.
 * @module
 */
declare const globalThis: Record<string, any> | undefined;
export const crypto: any =
typeof globalThis === 'object' && 'crypto' in globalThis ? globalThis.crypto : undefined;
15
qwen/nodejs/node_modules/@noble/hashes/src/cryptoNode.ts
generated
vendored
Normal file
@@ -0,0 +1,15 @@
/**
 * Internal webcrypto alias.
 * We prefer WebCrypto aka globalThis.crypto, which exists in node.js 16+.
 * Falls back to Node.js built-in crypto for Node.js <=v14.
 * See utils.ts for details.
 * @module
 */
// @ts-ignore
import * as nc from 'node:crypto';
export const crypto: any =
nc && typeof nc === 'object' && 'webcrypto' in nc
? (nc.webcrypto as any)
: nc && typeof nc === 'object' && 'randomBytes' in nc
? nc
: undefined;
187
qwen/nodejs/node_modules/@noble/hashes/src/eskdf.ts
generated
vendored
Normal file
@@ -0,0 +1,187 @@
|
||||
/**
|
||||
* Experimental KDF for AES.
|
||||
*/
|
||||
import { hkdf } from './hkdf.ts';
|
||||
import { pbkdf2 as _pbkdf2 } from './pbkdf2.ts';
|
||||
import { scrypt as _scrypt } from './scrypt.ts';
|
||||
import { sha256 } from './sha256.ts';
|
||||
import { abytes, bytesToHex, clean, createView, hexToBytes, kdfInputToBytes } from './utils.ts';
|
||||
|
||||
// A tiny KDF for various applications like AES key-gen.
|
||||
// Uses HKDF in a non-standard way, so it's not "KDF-secure", only "PRF-secure".
|
||||
// Which is good enough: assume sha2-256 retained preimage resistance.
|
||||
|
||||
const SCRYPT_FACTOR = 2 ** 19;
|
||||
const PBKDF2_FACTOR = 2 ** 17;
|
||||
|
||||
// Scrypt KDF
|
||||
export function scrypt(password: string, salt: string): Uint8Array {
|
||||
return _scrypt(password, salt, { N: SCRYPT_FACTOR, r: 8, p: 1, dkLen: 32 });
|
||||
}
|
||||
|
||||
// PBKDF2-HMAC-SHA256
|
||||
export function pbkdf2(password: string, salt: string): Uint8Array {
|
||||
return _pbkdf2(sha256, password, salt, { c: PBKDF2_FACTOR, dkLen: 32 });
|
||||
}
|
||||
|
||||
// Combines two 32-byte byte arrays
|
||||
function xor32(a: Uint8Array, b: Uint8Array): Uint8Array {
|
||||
abytes(a, 32);
|
||||
abytes(b, 32);
|
||||
const arr = new Uint8Array(32);
|
||||
for (let i = 0; i < 32; i++) {
|
||||
arr[i] = a[i] ^ b[i];
|
||||
}
|
||||
return arr;
|
||||
}
|
||||
|
||||
function strHasLength(str: string, min: number, max: number): boolean {
|
||||
return typeof str === 'string' && str.length >= min && str.length <= max;
|
||||
}
|
||||
|
||||
/**
|
||||
* Derives main seed. Takes a lot of time. Prefer `eskdf` method instead.
|
||||
*/
|
||||
export function deriveMainSeed(username: string, password: string): Uint8Array {
|
||||
if (!strHasLength(username, 8, 255)) throw new Error('invalid username');
|
||||
if (!strHasLength(password, 8, 255)) throw new Error('invalid password');
|
||||
// Declared like this to throw off minifiers which auto-convert .fromCharCode(1) to actual string.
|
||||
// String with non-ascii may be problematic in some envs
|
||||
const codes = { _1: 1, _2: 2 };
|
||||
const sep = { s: String.fromCharCode(codes._1), p: String.fromCharCode(codes._2) };
|
||||
const scr = scrypt(password + sep.s, username + sep.s);
|
||||
const pbk = pbkdf2(password + sep.p, username + sep.p);
|
||||
const res = xor32(scr, pbk);
|
||||
clean(scr, pbk);
|
||||
return res;
|
||||
}
|
||||
|
||||
type AccountID = number | string;
|
||||
|
||||
/**
|
||||
* Converts protocol & accountId pair to HKDF salt & info params.
|
||||
*/
|
||||
function getSaltInfo(protocol: string, accountId: AccountID = 0) {
|
||||
// Note that length here also repeats two lines below
|
||||
// We do an additional length check here to reduce the scope of DoS attacks
|
||||
if (!(strHasLength(protocol, 3, 15) && /^[a-z0-9]{3,15}$/.test(protocol))) {
|
||||
throw new Error('invalid protocol');
|
||||
}
|
||||
|
||||
// Allow string account ids for some protocols
|
||||
const allowsStr = /^password\d{0,3}|ssh|tor|file$/.test(protocol);
|
||||
let salt: Uint8Array; // Extract salt. Default is undefined.
|
||||
if (typeof accountId === 'string') {
|
||||
if (!allowsStr) throw new Error('accountId must be a number');
|
||||
if (!strHasLength(accountId, 1, 255))
|
||||
throw new Error('accountId must be string of length 1..255');
|
||||
salt = kdfInputToBytes(accountId);
|
||||
} else if (Number.isSafeInteger(accountId)) {
|
||||
if (accountId < 0 || accountId > Math.pow(2, 32) - 1) throw new Error('invalid accountId');
|
||||
// Convert to Big Endian Uint32
|
||||
salt = new Uint8Array(4);
|
||||
createView(salt).setUint32(0, accountId, false);
|
||||
} else {
|
||||
throw new Error('accountId must be a number' + (allowsStr ? ' or string' : ''));
|
||||
}
|
||||
const info = kdfInputToBytes(protocol);
|
||||
return { salt, info };
|
||||
}
|
||||
|
||||
type OptsLength = { keyLength: number };
|
||||
type OptsMod = { modulus: bigint };
|
||||
type KeyOpts = undefined | OptsLength | OptsMod;
|
||||
|
||||
function countBytes(num: bigint): number {
|
||||
if (typeof num !== 'bigint' || num <= BigInt(128)) throw new Error('invalid number');
|
||||
return Math.ceil(num.toString(2).length / 8);
|
||||
}
|
||||
|
||||
/**
|
||||
* Parses keyLength and modulus options to extract length of result key.
|
||||
* If modulus is used, adds 64 bits to it as per FIPS 186 B.4.1 to combat modulo bias.
|
||||
*/
|
||||
function getKeyLength(options: KeyOpts): number {
|
||||
if (!options || typeof options !== 'object') return 32;
|
||||
const hasLen = 'keyLength' in options;
|
||||
const hasMod = 'modulus' in options;
|
||||
if (hasLen && hasMod) throw new Error('cannot combine keyLength and modulus options');
|
||||
if (!hasLen && !hasMod) throw new Error('must have either keyLength or modulus option');
|
||||
// FIPS 186 B.4.1 requires at least 64 more bits
|
||||
const l = hasMod ? countBytes(options.modulus) + 8 : options.keyLength;
|
||||
if (!(typeof l === 'number' && l >= 16 && l <= 8192)) throw new Error('invalid keyLength');
|
||||
return l;
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts key to bigint and divides it by modulus. Big Endian.
|
||||
* Implements FIPS 186 B.4.1, which removes 0 and modulo bias from output.
|
||||
*/
|
||||
function modReduceKey(key: Uint8Array, modulus: bigint): Uint8Array {
|
||||
const _1 = BigInt(1);
|
||||
const num = BigInt('0x' + bytesToHex(key)); // check for ui8a, then bytesToNumber()
|
||||
const res = (num % (modulus - _1)) + _1; // Remove 0 from output
|
||||
if (res < _1) throw new Error('expected positive number'); // Guard against bad values
|
||||
const len = key.length - 8; // FIPS requires 64 more bits = 8 bytes
|
||||
const hex = res.toString(16).padStart(len * 2, '0'); // numberToHex()
|
||||
const bytes = hexToBytes(hex);
|
||||
if (bytes.length !== len) throw new Error('invalid length of result key');
|
||||
return bytes;
|
||||
}
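// Worked sketch (illustrative, not part of the library): with a `modulus` option,
// getKeyLength() requests countBytes(modulus) + 8 bytes from HKDF (64 extra bits per
// FIPS 186 B.4.1), and modReduceKey() then folds those bytes into [1, modulus - 1],
// removing the modulo bias an exact-size key would have. Names are hypothetical.
const _demoModulus = BigInt('0xffffffffffffffffffffffff'); // 12-byte example modulus
const _demoKeyLen = Math.ceil(_demoModulus.toString(2).length / 8) + 8; // 12 + 8 = 20 bytes requested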
|
||||
|
||||
// We are not using classes because constructor cannot be async
|
||||
export interface ESKDF {
|
||||
/**
|
||||
* Derives a child key. Child key will not be associated with any
|
||||
* other child key because of properties of underlying KDF.
|
||||
*
|
||||
* @param protocol - 3-15 character protocol name
|
||||
* @param accountId - numeric identifier of account
|
||||
* @param options - `keyLength: 64` or `modulus: 41920438n`
|
||||
* @example deriveChildKey('aes', 0)
|
||||
*/
|
||||
deriveChildKey: (protocol: string, accountId: AccountID, options?: KeyOpts) => Uint8Array;
|
||||
/**
|
||||
* Deletes the main seed from eskdf instance
|
||||
*/
|
||||
expire: () => void;
|
||||
/**
|
||||
* Account fingerprint
|
||||
*/
|
||||
fingerprint: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* ESKDF
|
||||
* @param username - username, email, or identifier, min: 8 characters, should have enough entropy
|
||||
* @param password - password, min: 8 characters, should have enough entropy
|
||||
* @example
|
||||
* const kdf = await eskdf('example-university', 'beginning-new-example');
|
||||
* const key = kdf.deriveChildKey('aes', 0);
|
||||
* console.log(kdf.fingerprint);
|
||||
* kdf.expire();
|
||||
*/
|
||||
export async function eskdf(username: string, password: string): Promise<ESKDF> {
|
||||
// We are using closure + object instead of class because
|
||||
// we want to make `seed` non-accessible for any external function.
|
||||
let seed: Uint8Array | undefined = deriveMainSeed(username, password);
|
||||
|
||||
function deriveCK(protocol: string, accountId: AccountID = 0, options?: KeyOpts): Uint8Array {
|
||||
abytes(seed, 32);
|
||||
const { salt, info } = getSaltInfo(protocol, accountId); // validate protocol & accountId
|
||||
const keyLength = getKeyLength(options); // validate options
|
||||
const key = hkdf(sha256, seed!, salt, info, keyLength);
|
||||
// Modulus has already been validated
|
||||
return options && 'modulus' in options ? modReduceKey(key, options.modulus) : key;
|
||||
}
|
||||
function expire() {
|
||||
if (seed) seed.fill(1);
|
||||
seed = undefined;
|
||||
}
|
||||
// prettier-ignore
|
||||
const fingerprint = Array.from(deriveCK('fingerprint', 0))
|
||||
.slice(0, 6)
|
||||
.map((char) => char.toString(16).padStart(2, '0').toUpperCase())
|
||||
.join(':');
|
||||
return Object.freeze({ deriveChildKey: deriveCK, expire, fingerprint });
|
||||
}
|
||||
88
qwen/nodejs/node_modules/@noble/hashes/src/hkdf.ts
generated
vendored
Normal file
@@ -0,0 +1,88 @@
|
||||
/**
|
||||
* HKDF (RFC 5869): extract + expand in one step.
|
||||
* See https://soatok.blog/2021/11/17/understanding-hkdf/.
|
||||
* @module
|
||||
*/
|
||||
import { hmac } from './hmac.ts';
|
||||
import { ahash, anumber, type CHash, clean, type Input, toBytes } from './utils.ts';
|
||||
|
||||
/**
 * HKDF-extract from the spec. The less important part. `HKDF-Extract(IKM, salt) -> PRK`
 * Argument order differs from the spec (IKM comes first, since it is not optional).
 * @param hash - hash function that would be used (e.g. sha256)
 * @param ikm - input keying material, the initial key
 * @param salt - optional salt value (a non-secret random value)
 */
export function extract(hash: CHash, ikm: Input, salt?: Input): Uint8Array {
|
||||
ahash(hash);
|
||||
// NOTE: some libraries treat zero-length array as 'not provided';
|
||||
// we don't, since we have undefined as 'not provided'
|
||||
// https://github.com/RustCrypto/KDFs/issues/15
|
||||
if (salt === undefined) salt = new Uint8Array(hash.outputLen);
|
||||
return hmac(hash, toBytes(salt), toBytes(ikm));
|
||||
}
|
||||
|
||||
const HKDF_COUNTER = /* @__PURE__ */ Uint8Array.from([0]);
|
||||
const EMPTY_BUFFER = /* @__PURE__ */ Uint8Array.of();
|
||||
|
||||
/**
|
||||
* HKDF-expand from the spec. The most important part. `HKDF-Expand(PRK, info, L) -> OKM`
|
||||
* @param hash - hash function that would be used (e.g. sha256)
|
||||
* @param prk - a pseudorandom key of at least HashLen octets (usually, the output from the extract step)
|
||||
* @param info - optional context and application specific information (can be a zero-length string)
|
||||
* @param length - length of output keying material in bytes
|
||||
*/
|
||||
export function expand(hash: CHash, prk: Input, info?: Input, length: number = 32): Uint8Array {
|
||||
ahash(hash);
|
||||
anumber(length);
|
||||
const olen = hash.outputLen;
|
||||
if (length > 255 * olen) throw new Error('Length should be <= 255*HashLen');
|
||||
const blocks = Math.ceil(length / olen);
|
||||
if (info === undefined) info = EMPTY_BUFFER;
|
||||
// first L(ength) octets of T
|
||||
const okm = new Uint8Array(blocks * olen);
|
||||
// Re-use HMAC instance between blocks
|
||||
const HMAC = hmac.create(hash, prk);
|
||||
const HMACTmp = HMAC._cloneInto();
|
||||
const T = new Uint8Array(HMAC.outputLen);
|
||||
for (let counter = 0; counter < blocks; counter++) {
|
||||
HKDF_COUNTER[0] = counter + 1;
|
||||
// T(0) = empty string (zero length)
|
||||
// T(N) = HMAC-Hash(PRK, T(N-1) | info | N)
|
||||
HMACTmp.update(counter === 0 ? EMPTY_BUFFER : T)
|
||||
.update(info)
|
||||
.update(HKDF_COUNTER)
|
||||
.digestInto(T);
|
||||
okm.set(T, olen * counter);
|
||||
HMAC._cloneInto(HMACTmp);
|
||||
}
|
||||
HMAC.destroy();
|
||||
HMACTmp.destroy();
|
||||
clean(T, HKDF_COUNTER);
|
||||
return okm.slice(0, length);
|
||||
}
|
||||
|
||||
/**
|
||||
* HKDF (RFC 5869): derive keys from an initial input.
|
||||
* Combines hkdf_extract + hkdf_expand in one step
|
||||
* @param hash - hash function that would be used (e.g. sha256)
|
||||
* @param ikm - input keying material, the initial key
|
||||
* @param salt - optional salt value (a non-secret random value)
|
||||
* @param info - optional context and application specific information (can be a zero-length string)
|
||||
* @param length - length of output keying material in bytes
|
||||
* @example
|
||||
* import { hkdf } from '@noble/hashes/hkdf';
|
||||
* import { sha256 } from '@noble/hashes/sha2';
|
||||
* import { randomBytes } from '@noble/hashes/utils';
|
||||
* const inputKey = randomBytes(32);
|
||||
* const salt = randomBytes(32);
|
||||
* const info = 'application-key';
|
||||
* const hk1 = hkdf(sha256, inputKey, salt, info, 32);
|
||||
*/
|
||||
export const hkdf = (
|
||||
hash: CHash,
|
||||
ikm: Input,
|
||||
salt: Input | undefined,
|
||||
info: Input | undefined,
|
||||
length: number
|
||||
): Uint8Array => expand(hash, extract(hash, ikm, salt), info, length);
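// Usage sketch (illustrative, not part of this file): hkdf() is literally extract + expand,
// so the two-step and one-step forms produce identical keys. Assumes `sha256` is imported
// from './sha2.ts'; other names are hypothetical.
const _ikm = new Uint8Array(32).fill(1);
const _salt = new Uint8Array(32).fill(2);
const _info = new TextEncoder().encode('application-key');
const _prk = extract(sha256, _ikm, _salt); // step 1: PRK
const _okm = expand(sha256, _prk, _info, 42); // step 2: 42 bytes of OKM
const _okmOneStep = hkdf(sha256, _ikm, _salt, _info, 42); // same bytes in one call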
|
||||
94
qwen/nodejs/node_modules/@noble/hashes/src/hmac.ts
generated
vendored
Normal file
@@ -0,0 +1,94 @@
|
||||
/**
|
||||
* HMAC: RFC2104 message authentication code.
|
||||
* @module
|
||||
*/
|
||||
import { abytes, aexists, ahash, clean, Hash, toBytes, type CHash, type Input } from './utils.ts';
|
||||
|
||||
export class HMAC<T extends Hash<T>> extends Hash<HMAC<T>> {
|
||||
oHash: T;
|
||||
iHash: T;
|
||||
blockLen: number;
|
||||
outputLen: number;
|
||||
private finished = false;
|
||||
private destroyed = false;
|
||||
|
||||
constructor(hash: CHash, _key: Input) {
|
||||
super();
|
||||
ahash(hash);
|
||||
const key = toBytes(_key);
|
||||
this.iHash = hash.create() as T;
|
||||
if (typeof this.iHash.update !== 'function')
|
||||
throw new Error('Expected instance of class which extends utils.Hash');
|
||||
this.blockLen = this.iHash.blockLen;
|
||||
this.outputLen = this.iHash.outputLen;
|
||||
const blockLen = this.blockLen;
|
||||
const pad = new Uint8Array(blockLen);
|
||||
// blockLen can be bigger than outputLen
|
||||
pad.set(key.length > blockLen ? hash.create().update(key).digest() : key);
|
||||
for (let i = 0; i < pad.length; i++) pad[i] ^= 0x36;
|
||||
this.iHash.update(pad);
|
||||
// By doing update (processing of first block) of outer hash here we can re-use it between multiple calls via clone
|
||||
this.oHash = hash.create() as T;
|
||||
// Undo internal XOR && apply outer XOR
|
||||
for (let i = 0; i < pad.length; i++) pad[i] ^= 0x36 ^ 0x5c;
|
||||
this.oHash.update(pad);
|
||||
clean(pad);
|
||||
}
|
||||
update(buf: Input): this {
|
||||
aexists(this);
|
||||
this.iHash.update(buf);
|
||||
return this;
|
||||
}
|
||||
digestInto(out: Uint8Array): void {
|
||||
aexists(this);
|
||||
abytes(out, this.outputLen);
|
||||
this.finished = true;
|
||||
this.iHash.digestInto(out);
|
||||
this.oHash.update(out);
|
||||
this.oHash.digestInto(out);
|
||||
this.destroy();
|
||||
}
|
||||
digest(): Uint8Array {
|
||||
const out = new Uint8Array(this.oHash.outputLen);
|
||||
this.digestInto(out);
|
||||
return out;
|
||||
}
|
||||
_cloneInto(to?: HMAC<T>): HMAC<T> {
|
||||
// Create new instance without calling constructor since key already in state and we don't know it.
|
||||
to ||= Object.create(Object.getPrototypeOf(this), {});
|
||||
const { oHash, iHash, finished, destroyed, blockLen, outputLen } = this;
|
||||
to = to as this;
|
||||
to.finished = finished;
|
||||
to.destroyed = destroyed;
|
||||
to.blockLen = blockLen;
|
||||
to.outputLen = outputLen;
|
||||
to.oHash = oHash._cloneInto(to.oHash);
|
||||
to.iHash = iHash._cloneInto(to.iHash);
|
||||
return to;
|
||||
}
|
||||
clone(): HMAC<T> {
|
||||
return this._cloneInto();
|
||||
}
|
||||
destroy(): void {
|
||||
this.destroyed = true;
|
||||
this.oHash.destroy();
|
||||
this.iHash.destroy();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* HMAC: RFC2104 message authentication code.
|
||||
* @param hash - function that would be used e.g. sha256
|
||||
* @param key - message key
|
||||
* @param message - message data
|
||||
* @example
|
||||
* import { hmac } from '@noble/hashes/hmac';
|
||||
* import { sha256 } from '@noble/hashes/sha2';
|
||||
* const mac1 = hmac(sha256, 'key', 'message');
|
||||
*/
|
||||
export const hmac: {
|
||||
(hash: CHash, key: Input, message: Input): Uint8Array;
|
||||
create(hash: CHash, key: Input): HMAC<any>;
|
||||
} = (hash: CHash, key: Input, message: Input): Uint8Array =>
|
||||
new HMAC<any>(hash, key).update(message).digest();
|
||||
hmac.create = (hash: CHash, key: Input) => new HMAC<any>(hash, key);
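// Usage sketch (illustrative, not part of this file): the create() form allows incremental
// (streaming) MACs. Assumes `sha256` is imported from './sha2.ts'; names are hypothetical.
const _mac = hmac.create(sha256, new Uint8Array(32));
_mac.update(new TextEncoder().encode('part one'));
_mac.update(new TextEncoder().encode('part two'));
const _tag = _mac.digest(); // equals hmac(sha256, key, 'part one' + 'part two')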
|
||||
31
qwen/nodejs/node_modules/@noble/hashes/src/index.ts
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
/**
|
||||
* Audited & minimal JS implementation of hash functions, MACs and KDFs. Check out individual modules.
|
||||
* @module
|
||||
* @example
|
||||
```js
|
||||
import {
|
||||
sha256, sha384, sha512, sha224, sha512_224, sha512_256
|
||||
} from '@noble/hashes/sha2';
|
||||
import {
|
||||
sha3_224, sha3_256, sha3_384, sha3_512,
|
||||
keccak_224, keccak_256, keccak_384, keccak_512,
|
||||
shake128, shake256
|
||||
} from '@noble/hashes/sha3';
|
||||
import {
|
||||
cshake128, cshake256,
|
||||
turboshake128, turboshake256,
|
||||
kmac128, kmac256,
|
||||
tuplehash256, parallelhash256,
|
||||
k12, m14, keccakprg
|
||||
} from '@noble/hashes/sha3-addons';
|
||||
import { blake3 } from '@noble/hashes/blake3';
|
||||
import { blake2b, blake2s } from '@noble/hashes/blake2';
|
||||
import { hmac } from '@noble/hashes/hmac';
|
||||
import { hkdf } from '@noble/hashes/hkdf';
|
||||
import { pbkdf2, pbkdf2Async } from '@noble/hashes/pbkdf2';
|
||||
import { scrypt, scryptAsync } from '@noble/hashes/scrypt';
|
||||
import { md5, ripemd160, sha1 } from '@noble/hashes/legacy';
|
||||
import * as utils from '@noble/hashes/utils';
|
||||
```
|
||||
*/
|
||||
throw new Error('root module cannot be imported: import submodules instead. Check out README');
|
||||
293
qwen/nodejs/node_modules/@noble/hashes/src/legacy.ts
generated
vendored
Normal file
@@ -0,0 +1,293 @@
|
||||
/**
|
||||
|
||||
SHA1 (RFC 3174), MD5 (RFC 1321) and RIPEMD160 (RFC 2286) legacy, weak hash functions.
|
||||
Don't use them in a new protocol. What "weak" means:
|
||||
|
||||
- Collisions can be made with 2^18 effort in MD5, 2^60 in SHA1, 2^80 in RIPEMD160.
|
||||
- No practical pre-image attacks (only theoretical, 2^123.4)
|
||||
- HMAC seems kinda ok: https://datatracker.ietf.org/doc/html/rfc6151
|
||||
* @module
|
||||
*/
|
||||
import { Chi, HashMD, Maj } from './_md.ts';
|
||||
import { type CHash, clean, createHasher, rotl } from './utils.ts';
|
||||
|
||||
/** Initial SHA1 state */
|
||||
const SHA1_IV = /* @__PURE__ */ Uint32Array.from([
|
||||
0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0,
|
||||
]);
|
||||
|
||||
// Reusable temporary buffer
|
||||
const SHA1_W = /* @__PURE__ */ new Uint32Array(80);
|
||||
|
||||
/** SHA1 legacy hash class. */
|
||||
export class SHA1 extends HashMD<SHA1> {
|
||||
private A = SHA1_IV[0] | 0;
|
||||
private B = SHA1_IV[1] | 0;
|
||||
private C = SHA1_IV[2] | 0;
|
||||
private D = SHA1_IV[3] | 0;
|
||||
private E = SHA1_IV[4] | 0;
|
||||
|
||||
constructor() {
|
||||
super(64, 20, 8, false);
|
||||
}
|
||||
protected get(): [number, number, number, number, number] {
|
||||
const { A, B, C, D, E } = this;
|
||||
return [A, B, C, D, E];
|
||||
}
|
||||
protected set(A: number, B: number, C: number, D: number, E: number): void {
|
||||
this.A = A | 0;
|
||||
this.B = B | 0;
|
||||
this.C = C | 0;
|
||||
this.D = D | 0;
|
||||
this.E = E | 0;
|
||||
}
|
||||
protected process(view: DataView, offset: number): void {
|
||||
for (let i = 0; i < 16; i++, offset += 4) SHA1_W[i] = view.getUint32(offset, false);
|
||||
for (let i = 16; i < 80; i++)
|
||||
SHA1_W[i] = rotl(SHA1_W[i - 3] ^ SHA1_W[i - 8] ^ SHA1_W[i - 14] ^ SHA1_W[i - 16], 1);
|
||||
// Compression function main loop, 80 rounds
|
||||
let { A, B, C, D, E } = this;
|
||||
for (let i = 0; i < 80; i++) {
|
||||
let F, K;
|
||||
if (i < 20) {
|
||||
F = Chi(B, C, D);
|
||||
K = 0x5a827999;
|
||||
} else if (i < 40) {
|
||||
F = B ^ C ^ D;
|
||||
K = 0x6ed9eba1;
|
||||
} else if (i < 60) {
|
||||
F = Maj(B, C, D);
|
||||
K = 0x8f1bbcdc;
|
||||
} else {
|
||||
F = B ^ C ^ D;
|
||||
K = 0xca62c1d6;
|
||||
}
|
||||
const T = (rotl(A, 5) + F + E + K + SHA1_W[i]) | 0;
|
||||
E = D;
|
||||
D = C;
|
||||
C = rotl(B, 30);
|
||||
B = A;
|
||||
A = T;
|
||||
}
|
||||
// Add the compressed chunk to the current hash value
|
||||
A = (A + this.A) | 0;
|
||||
B = (B + this.B) | 0;
|
||||
C = (C + this.C) | 0;
|
||||
D = (D + this.D) | 0;
|
||||
E = (E + this.E) | 0;
|
||||
this.set(A, B, C, D, E);
|
||||
}
|
||||
protected roundClean(): void {
|
||||
clean(SHA1_W);
|
||||
}
|
||||
destroy(): void {
|
||||
this.set(0, 0, 0, 0, 0);
|
||||
clean(this.buffer);
|
||||
}
|
||||
}
|
||||
|
||||
/** SHA1 (RFC 3174) legacy hash function. It was cryptographically broken. */
|
||||
export const sha1: CHash = /* @__PURE__ */ createHasher(() => new SHA1());
|
||||
|
||||
/** Per-round constants */
const p32 = /* @__PURE__ */ Math.pow(2, 32);
const K = /* @__PURE__ */ Array.from({ length: 64 }, (_, i) =>
  Math.floor(p32 * Math.abs(Math.sin(i + 1)))
);

/** md5 initial state: same as sha1, but 4 u32 instead of 5. */
const MD5_IV = /* @__PURE__ */ SHA1_IV.slice(0, 4);

// Reusable temporary buffer
const MD5_W = /* @__PURE__ */ new Uint32Array(16);
/** MD5 legacy hash class. */
export class MD5 extends HashMD<MD5> {
  private A = MD5_IV[0] | 0;
  private B = MD5_IV[1] | 0;
  private C = MD5_IV[2] | 0;
  private D = MD5_IV[3] | 0;

  constructor() {
    super(64, 16, 8, true);
  }
  protected get(): [number, number, number, number] {
    const { A, B, C, D } = this;
    return [A, B, C, D];
  }
  protected set(A: number, B: number, C: number, D: number): void {
    this.A = A | 0;
    this.B = B | 0;
    this.C = C | 0;
    this.D = D | 0;
  }
  protected process(view: DataView, offset: number): void {
    for (let i = 0; i < 16; i++, offset += 4) MD5_W[i] = view.getUint32(offset, true);
    // Compression function main loop, 64 rounds
    let { A, B, C, D } = this;
    for (let i = 0; i < 64; i++) {
      let F, g, s;
      if (i < 16) {
        F = Chi(B, C, D);
        g = i;
        s = [7, 12, 17, 22];
      } else if (i < 32) {
        F = Chi(D, B, C);
        g = (5 * i + 1) % 16;
        s = [5, 9, 14, 20];
      } else if (i < 48) {
        F = B ^ C ^ D;
        g = (3 * i + 5) % 16;
        s = [4, 11, 16, 23];
      } else {
        F = C ^ (B | ~D);
        g = (7 * i) % 16;
        s = [6, 10, 15, 21];
      }
      F = F + A + K[i] + MD5_W[g];
      A = D;
      D = C;
      C = B;
      B = B + rotl(F, s[i % 4]);
    }
    // Add the compressed chunk to the current hash value
    A = (A + this.A) | 0;
    B = (B + this.B) | 0;
    C = (C + this.C) | 0;
    D = (D + this.D) | 0;
    this.set(A, B, C, D);
  }
  protected roundClean(): void {
    clean(MD5_W);
  }
  destroy(): void {
    this.set(0, 0, 0, 0);
    clean(this.buffer);
  }
}

/**
 * MD5 (RFC 1321) legacy hash function. It was cryptographically broken.
 * MD5 architecture is similar to SHA1, with some differences:
 * - Reduced output length: 16 bytes (128 bit) instead of 20
 * - 64 rounds, instead of 80
 * - Little-endian: could be faster, but will require more code
 * - Non-linear index selection: huge speed-up for unroll
 * - Per round constants: more memory accesses, additional speed-up for unroll
 */
export const md5: CHash = /* @__PURE__ */ createHasher(() => new MD5());

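For comparison, a hedged usage sketch of the md5 export above (illustrative, not part of the vendored file):

// Illustrative usage sketch — '@noble/hashes' import paths assumed.
import { md5, sha1 } from '@noble/hashes/legacy';
import { utf8ToBytes } from '@noble/hashes/utils';

const msg = utf8ToBytes('abc');
console.log(md5(msg).length);  // 16 bytes (128-bit digest)
console.log(sha1(msg).length); // 20 bytes (160-bit digest)
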
// RIPEMD-160
|
||||
|
||||
const Rho160 = /* @__PURE__ */ Uint8Array.from([
|
||||
7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8,
|
||||
]);
|
||||
const Id160 = /* @__PURE__ */ (() => Uint8Array.from(new Array(16).fill(0).map((_, i) => i)))();
|
||||
const Pi160 = /* @__PURE__ */ (() => Id160.map((i) => (9 * i + 5) % 16))();
|
||||
const idxLR = /* @__PURE__ */ (() => {
|
||||
const L = [Id160];
|
||||
const R = [Pi160];
|
||||
const res = [L, R];
|
||||
for (let i = 0; i < 4; i++) for (let j of res) j.push(j[i].map((k) => Rho160[k]));
|
||||
return res;
|
||||
})();
|
||||
const idxL = /* @__PURE__ */ (() => idxLR[0])();
|
||||
const idxR = /* @__PURE__ */ (() => idxLR[1])();
|
||||
// const [idxL, idxR] = idxLR;
|
||||
|
||||
const shifts160 = /* @__PURE__ */ [
|
||||
[11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8],
|
||||
[12, 13, 11, 15, 6, 9, 9, 7, 12, 15, 11, 13, 7, 8, 7, 7],
|
||||
[13, 15, 14, 11, 7, 7, 6, 8, 13, 14, 13, 12, 5, 5, 6, 9],
|
||||
[14, 11, 12, 14, 8, 6, 5, 5, 15, 12, 15, 14, 9, 9, 8, 6],
|
||||
[15, 12, 13, 13, 9, 5, 8, 6, 14, 11, 12, 11, 8, 6, 5, 5],
|
||||
].map((i) => Uint8Array.from(i));
|
||||
const shiftsL160 = /* @__PURE__ */ idxL.map((idx, i) => idx.map((j) => shifts160[i][j]));
|
||||
const shiftsR160 = /* @__PURE__ */ idxR.map((idx, i) => idx.map((j) => shifts160[i][j]));
|
||||
const Kl160 = /* @__PURE__ */ Uint32Array.from([
|
||||
0x00000000, 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xa953fd4e,
|
||||
]);
|
||||
const Kr160 = /* @__PURE__ */ Uint32Array.from([
|
||||
0x50a28be6, 0x5c4dd124, 0x6d703ef3, 0x7a6d76e9, 0x00000000,
|
||||
]);
|
||||
// It's called f() in spec.
|
||||
function ripemd_f(group: number, x: number, y: number, z: number): number {
|
||||
if (group === 0) return x ^ y ^ z;
|
||||
if (group === 1) return (x & y) | (~x & z);
|
||||
if (group === 2) return (x | ~y) ^ z;
|
||||
if (group === 3) return (x & z) | (y & ~z);
|
||||
return x ^ (y | ~z);
|
||||
}
|
||||
// Reusable temporary buffer
|
||||
const BUF_160 = /* @__PURE__ */ new Uint32Array(16);
|
||||
export class RIPEMD160 extends HashMD<RIPEMD160> {
|
||||
private h0 = 0x67452301 | 0;
|
||||
private h1 = 0xefcdab89 | 0;
|
||||
private h2 = 0x98badcfe | 0;
|
||||
private h3 = 0x10325476 | 0;
|
||||
private h4 = 0xc3d2e1f0 | 0;
|
||||
|
||||
constructor() {
|
||||
super(64, 20, 8, true);
|
||||
}
|
||||
protected get(): [number, number, number, number, number] {
|
||||
const { h0, h1, h2, h3, h4 } = this;
|
||||
return [h0, h1, h2, h3, h4];
|
||||
}
|
||||
protected set(h0: number, h1: number, h2: number, h3: number, h4: number): void {
|
||||
this.h0 = h0 | 0;
|
||||
this.h1 = h1 | 0;
|
||||
this.h2 = h2 | 0;
|
||||
this.h3 = h3 | 0;
|
||||
this.h4 = h4 | 0;
|
||||
}
|
||||
protected process(view: DataView, offset: number): void {
|
||||
for (let i = 0; i < 16; i++, offset += 4) BUF_160[i] = view.getUint32(offset, true);
|
||||
// prettier-ignore
|
||||
let al = this.h0 | 0, ar = al,
|
||||
bl = this.h1 | 0, br = bl,
|
||||
cl = this.h2 | 0, cr = cl,
|
||||
dl = this.h3 | 0, dr = dl,
|
||||
el = this.h4 | 0, er = el;
|
||||
|
||||
// Instead of iterating 0 to 80, we split it into 5 groups
|
||||
// And use the groups in constants, functions, etc. Much simpler
|
||||
for (let group = 0; group < 5; group++) {
|
||||
const rGroup = 4 - group;
|
||||
const hbl = Kl160[group], hbr = Kr160[group]; // prettier-ignore
|
||||
const rl = idxL[group], rr = idxR[group]; // prettier-ignore
|
||||
const sl = shiftsL160[group], sr = shiftsR160[group]; // prettier-ignore
|
||||
for (let i = 0; i < 16; i++) {
|
||||
const tl = (rotl(al + ripemd_f(group, bl, cl, dl) + BUF_160[rl[i]] + hbl, sl[i]) + el) | 0;
|
||||
al = el, el = dl, dl = rotl(cl, 10) | 0, cl = bl, bl = tl; // prettier-ignore
|
||||
}
|
||||
// 2 loops are 10% faster
|
||||
for (let i = 0; i < 16; i++) {
|
||||
const tr = (rotl(ar + ripemd_f(rGroup, br, cr, dr) + BUF_160[rr[i]] + hbr, sr[i]) + er) | 0;
|
||||
ar = er, er = dr, dr = rotl(cr, 10) | 0, cr = br, br = tr; // prettier-ignore
|
||||
}
|
||||
}
|
||||
// Add the compressed chunk to the current hash value
|
||||
this.set(
|
||||
(this.h1 + cl + dr) | 0,
|
||||
(this.h2 + dl + er) | 0,
|
||||
(this.h3 + el + ar) | 0,
|
||||
(this.h4 + al + br) | 0,
|
||||
(this.h0 + bl + cr) | 0
|
||||
);
|
||||
}
|
||||
protected roundClean(): void {
|
||||
clean(BUF_160);
|
||||
}
|
||||
destroy(): void {
|
||||
this.destroyed = true;
|
||||
clean(this.buffer);
|
||||
this.set(0, 0, 0, 0, 0);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* RIPEMD-160 - a legacy hash function from 1990s.
|
||||
* * https://homes.esat.kuleuven.be/~bosselae/ripemd160.html
|
||||
* * https://homes.esat.kuleuven.be/~bosselae/ripemd160/pdf/AB-9601/AB-9601.pdf
|
||||
*/
|
||||
export const ripemd160: CHash = /* @__PURE__ */ createHasher(() => new RIPEMD160());
|
||||
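A brief usage sketch of the ripemd160 export above (illustrative; the 20-byte output follows from the outputLen passed to the constructor):

// Illustrative usage sketch — '@noble/hashes' import paths assumed.
import { ripemd160 } from '@noble/hashes/legacy';
import { utf8ToBytes } from '@noble/hashes/utils';

console.log(ripemd160(utf8ToBytes('abc')).length); // 20 bytes (160-bit digest)
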
122
qwen/nodejs/node_modules/@noble/hashes/src/pbkdf2.ts
generated
vendored
Normal file
@@ -0,0 +1,122 @@
|
||||
/**
|
||||
* PBKDF (RFC 2898). Can be used to create a key from password and salt.
|
||||
* @module
|
||||
*/
|
||||
import { hmac } from './hmac.ts';
|
||||
// prettier-ignore
|
||||
import {
|
||||
ahash, anumber,
|
||||
asyncLoop, checkOpts, clean, createView, Hash, kdfInputToBytes,
|
||||
type CHash,
|
||||
type KDFInput
|
||||
} from './utils.ts';
|
||||
|
||||
export type Pbkdf2Opt = {
|
||||
c: number; // Iterations
|
||||
dkLen?: number; // Desired key length in bytes (intended output length in octets of the derived key)
|
||||
asyncTick?: number; // Maximum time in ms for which async function can block execution
|
||||
};
|
||||
// Common prologue and epilogue for sync/async functions
|
||||
function pbkdf2Init(hash: CHash, _password: KDFInput, _salt: KDFInput, _opts: Pbkdf2Opt) {
|
||||
ahash(hash);
|
||||
const opts = checkOpts({ dkLen: 32, asyncTick: 10 }, _opts);
|
||||
const { c, dkLen, asyncTick } = opts;
|
||||
anumber(c);
|
||||
anumber(dkLen);
|
||||
anumber(asyncTick);
|
||||
if (c < 1) throw new Error('iterations (c) should be >= 1');
|
||||
const password = kdfInputToBytes(_password);
|
||||
const salt = kdfInputToBytes(_salt);
|
||||
// DK = PBKDF2(PRF, Password, Salt, c, dkLen);
|
||||
const DK = new Uint8Array(dkLen);
|
||||
// U1 = PRF(Password, Salt + INT_32_BE(i))
|
||||
const PRF = hmac.create(hash, password);
|
||||
const PRFSalt = PRF._cloneInto().update(salt);
|
||||
return { c, dkLen, asyncTick, DK, PRF, PRFSalt };
|
||||
}
|
||||
|
||||
function pbkdf2Output<T extends Hash<T>>(
|
||||
PRF: Hash<T>,
|
||||
PRFSalt: Hash<T>,
|
||||
DK: Uint8Array,
|
||||
prfW: Hash<T>,
|
||||
u: Uint8Array
|
||||
) {
|
||||
PRF.destroy();
|
||||
PRFSalt.destroy();
|
||||
if (prfW) prfW.destroy();
|
||||
clean(u);
|
||||
return DK;
|
||||
}
|
||||
|
||||
/**
|
||||
* PBKDF2-HMAC: RFC 2898 key derivation function
|
||||
* @param hash - hash function that would be used e.g. sha256
|
||||
* @param password - password from which a derived key is generated
|
||||
* @param salt - cryptographic salt
|
||||
* @param opts - {c, dkLen} where c is work factor and dkLen is output message size
|
||||
* @example
|
||||
* const key = pbkdf2(sha256, 'password', 'salt', { dkLen: 32, c: Math.pow(2, 18) });
|
||||
*/
|
||||
export function pbkdf2(
|
||||
hash: CHash,
|
||||
password: KDFInput,
|
||||
salt: KDFInput,
|
||||
opts: Pbkdf2Opt
|
||||
): Uint8Array {
|
||||
const { c, dkLen, DK, PRF, PRFSalt } = pbkdf2Init(hash, password, salt, opts);
|
||||
let prfW: any; // Working copy
|
||||
const arr = new Uint8Array(4);
|
||||
const view = createView(arr);
|
||||
const u = new Uint8Array(PRF.outputLen);
|
||||
// DK = T1 + T2 + ⋯ + Tdklen/hlen
|
||||
for (let ti = 1, pos = 0; pos < dkLen; ti++, pos += PRF.outputLen) {
|
||||
// Ti = F(Password, Salt, c, i)
|
||||
const Ti = DK.subarray(pos, pos + PRF.outputLen);
|
||||
view.setInt32(0, ti, false);
|
||||
// F(Password, Salt, c, i) = U1 ^ U2 ^ ⋯ ^ Uc
|
||||
// U1 = PRF(Password, Salt + INT_32_BE(i))
|
||||
(prfW = PRFSalt._cloneInto(prfW)).update(arr).digestInto(u);
|
||||
Ti.set(u.subarray(0, Ti.length));
|
||||
for (let ui = 1; ui < c; ui++) {
|
||||
// Uc = PRF(Password, Uc−1)
|
||||
PRF._cloneInto(prfW).update(u).digestInto(u);
|
||||
for (let i = 0; i < Ti.length; i++) Ti[i] ^= u[i];
|
||||
}
|
||||
}
|
||||
return pbkdf2Output(PRF, PRFSalt, DK, prfW, u);
|
||||
}
|
||||
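A hedged usage sketch of the synchronous pbkdf2 above (illustrative; import paths assume the published '@noble/hashes' package):

// Illustrative usage sketch.
import { pbkdf2 } from '@noble/hashes/pbkdf2';
import { sha256 } from '@noble/hashes/sha2';

// c is the iteration count (work factor); dkLen is the derived-key length in bytes.
// When dkLen exceeds the hash output length, the loop above concatenates T1 || T2 || ... blocks.
const key = pbkdf2(sha256, 'password', 'salt', { c: 2 ** 18, dkLen: 64 });
console.log(key.length); // 64
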
|
||||
/**
|
||||
* PBKDF2-HMAC: RFC 2898 key derivation function. Async version.
|
||||
* @example
|
||||
* await pbkdf2Async(sha256, 'password', 'salt', { dkLen: 32, c: 500_000 });
|
||||
*/
|
||||
export async function pbkdf2Async(
|
||||
hash: CHash,
|
||||
password: KDFInput,
|
||||
salt: KDFInput,
|
||||
opts: Pbkdf2Opt
|
||||
): Promise<Uint8Array> {
|
||||
const { c, dkLen, asyncTick, DK, PRF, PRFSalt } = pbkdf2Init(hash, password, salt, opts);
|
||||
let prfW: any; // Working copy
|
||||
const arr = new Uint8Array(4);
|
||||
const view = createView(arr);
|
||||
const u = new Uint8Array(PRF.outputLen);
|
||||
// DK = T1 + T2 + ⋯ + Tdklen/hlen
|
||||
for (let ti = 1, pos = 0; pos < dkLen; ti++, pos += PRF.outputLen) {
|
||||
// Ti = F(Password, Salt, c, i)
|
||||
const Ti = DK.subarray(pos, pos + PRF.outputLen);
|
||||
view.setInt32(0, ti, false);
|
||||
// F(Password, Salt, c, i) = U1 ^ U2 ^ ⋯ ^ Uc
|
||||
// U1 = PRF(Password, Salt + INT_32_BE(i))
|
||||
(prfW = PRFSalt._cloneInto(prfW)).update(arr).digestInto(u);
|
||||
Ti.set(u.subarray(0, Ti.length));
|
||||
await asyncLoop(c - 1, asyncTick, () => {
|
||||
// Uc = PRF(Password, Uc−1)
|
||||
PRF._cloneInto(prfW).update(u).digestInto(u);
|
||||
for (let i = 0; i < Ti.length; i++) Ti[i] ^= u[i];
|
||||
});
|
||||
}
|
||||
return pbkdf2Output(PRF, PRFSalt, DK, prfW, u);
|
||||
}
|
||||
12
qwen/nodejs/node_modules/@noble/hashes/src/ripemd160.ts
generated
vendored
Normal file
@@ -0,0 +1,12 @@
/**
 * RIPEMD-160 legacy hash function.
 * https://homes.esat.kuleuven.be/~bosselae/ripemd160.html
 * https://homes.esat.kuleuven.be/~bosselae/ripemd160/pdf/AB-9601/AB-9601.pdf
 * @module
 * @deprecated
 */
import { RIPEMD160 as RIPEMD160n, ripemd160 as ripemd160n } from './legacy.ts';
/** @deprecated Use import from `noble/hashes/legacy` module */
export const RIPEMD160: typeof RIPEMD160n = RIPEMD160n;
/** @deprecated Use import from `noble/hashes/legacy` module */
export const ripemd160: typeof ripemd160n = ripemd160n;
257
qwen/nodejs/node_modules/@noble/hashes/src/scrypt.ts
generated
vendored
Normal file
@@ -0,0 +1,257 @@
|
||||
/**
|
||||
* RFC 7914 Scrypt KDF. Can be used to create a key from password and salt.
|
||||
* @module
|
||||
*/
|
||||
import { pbkdf2 } from './pbkdf2.ts';
|
||||
import { sha256 } from './sha2.ts';
|
||||
// prettier-ignore
|
||||
import {
|
||||
anumber, asyncLoop,
|
||||
checkOpts, clean,
|
||||
type KDFInput, rotl,
|
||||
swap32IfBE,
|
||||
u32
|
||||
} from './utils.ts';
|
||||
|
||||
// The main Scrypt loop: uses Salsa extensively.
|
||||
// Six versions of the function were tried, this is the fastest one.
|
||||
// prettier-ignore
|
||||
function XorAndSalsa(
|
||||
prev: Uint32Array,
|
||||
pi: number,
|
||||
input: Uint32Array,
|
||||
ii: number,
|
||||
out: Uint32Array,
|
||||
oi: number
|
||||
) {
|
||||
// Based on https://cr.yp.to/salsa20.html
|
||||
// Xor blocks
|
||||
let y00 = prev[pi++] ^ input[ii++], y01 = prev[pi++] ^ input[ii++];
|
||||
let y02 = prev[pi++] ^ input[ii++], y03 = prev[pi++] ^ input[ii++];
|
||||
let y04 = prev[pi++] ^ input[ii++], y05 = prev[pi++] ^ input[ii++];
|
||||
let y06 = prev[pi++] ^ input[ii++], y07 = prev[pi++] ^ input[ii++];
|
||||
let y08 = prev[pi++] ^ input[ii++], y09 = prev[pi++] ^ input[ii++];
|
||||
let y10 = prev[pi++] ^ input[ii++], y11 = prev[pi++] ^ input[ii++];
|
||||
let y12 = prev[pi++] ^ input[ii++], y13 = prev[pi++] ^ input[ii++];
|
||||
let y14 = prev[pi++] ^ input[ii++], y15 = prev[pi++] ^ input[ii++];
|
||||
// Save state to temporary variables (salsa)
|
||||
let x00 = y00, x01 = y01, x02 = y02, x03 = y03,
|
||||
x04 = y04, x05 = y05, x06 = y06, x07 = y07,
|
||||
x08 = y08, x09 = y09, x10 = y10, x11 = y11,
|
||||
x12 = y12, x13 = y13, x14 = y14, x15 = y15;
|
||||
// Main loop (salsa)
|
||||
for (let i = 0; i < 8; i += 2) {
|
||||
x04 ^= rotl(x00 + x12 | 0, 7); x08 ^= rotl(x04 + x00 | 0, 9);
|
||||
x12 ^= rotl(x08 + x04 | 0, 13); x00 ^= rotl(x12 + x08 | 0, 18);
|
||||
x09 ^= rotl(x05 + x01 | 0, 7); x13 ^= rotl(x09 + x05 | 0, 9);
|
||||
x01 ^= rotl(x13 + x09 | 0, 13); x05 ^= rotl(x01 + x13 | 0, 18);
|
||||
x14 ^= rotl(x10 + x06 | 0, 7); x02 ^= rotl(x14 + x10 | 0, 9);
|
||||
x06 ^= rotl(x02 + x14 | 0, 13); x10 ^= rotl(x06 + x02 | 0, 18);
|
||||
x03 ^= rotl(x15 + x11 | 0, 7); x07 ^= rotl(x03 + x15 | 0, 9);
|
||||
x11 ^= rotl(x07 + x03 | 0, 13); x15 ^= rotl(x11 + x07 | 0, 18);
|
||||
x01 ^= rotl(x00 + x03 | 0, 7); x02 ^= rotl(x01 + x00 | 0, 9);
|
||||
x03 ^= rotl(x02 + x01 | 0, 13); x00 ^= rotl(x03 + x02 | 0, 18);
|
||||
x06 ^= rotl(x05 + x04 | 0, 7); x07 ^= rotl(x06 + x05 | 0, 9);
|
||||
x04 ^= rotl(x07 + x06 | 0, 13); x05 ^= rotl(x04 + x07 | 0, 18);
|
||||
x11 ^= rotl(x10 + x09 | 0, 7); x08 ^= rotl(x11 + x10 | 0, 9);
|
||||
x09 ^= rotl(x08 + x11 | 0, 13); x10 ^= rotl(x09 + x08 | 0, 18);
|
||||
x12 ^= rotl(x15 + x14 | 0, 7); x13 ^= rotl(x12 + x15 | 0, 9);
|
||||
x14 ^= rotl(x13 + x12 | 0, 13); x15 ^= rotl(x14 + x13 | 0, 18);
|
||||
}
|
||||
// Write output (salsa)
|
||||
out[oi++] = (y00 + x00) | 0; out[oi++] = (y01 + x01) | 0;
|
||||
out[oi++] = (y02 + x02) | 0; out[oi++] = (y03 + x03) | 0;
|
||||
out[oi++] = (y04 + x04) | 0; out[oi++] = (y05 + x05) | 0;
|
||||
out[oi++] = (y06 + x06) | 0; out[oi++] = (y07 + x07) | 0;
|
||||
out[oi++] = (y08 + x08) | 0; out[oi++] = (y09 + x09) | 0;
|
||||
out[oi++] = (y10 + x10) | 0; out[oi++] = (y11 + x11) | 0;
|
||||
out[oi++] = (y12 + x12) | 0; out[oi++] = (y13 + x13) | 0;
|
||||
out[oi++] = (y14 + x14) | 0; out[oi++] = (y15 + x15) | 0;
|
||||
}
|
||||
|
||||
function BlockMix(input: Uint32Array, ii: number, out: Uint32Array, oi: number, r: number) {
|
||||
// The block B is r 128-byte chunks (which is equivalent of 2r 64-byte chunks)
|
||||
let head = oi + 0;
|
||||
let tail = oi + 16 * r;
|
||||
for (let i = 0; i < 16; i++) out[tail + i] = input[ii + (2 * r - 1) * 16 + i]; // X ← B[2r−1]
|
||||
for (let i = 0; i < r; i++, head += 16, ii += 16) {
|
||||
// We write odd & even Yi at same time. Even: 0bXXXXX0 Odd: 0bXXXXX1
|
||||
XorAndSalsa(out, tail, input, ii, out, head); // head[i] = Salsa(blockIn[2*i] ^ tail[i-1])
|
||||
if (i > 0) tail += 16; // First iteration overwrites tmp value in tail
|
||||
XorAndSalsa(out, head, input, (ii += 16), out, tail); // tail[i] = Salsa(blockIn[2*i+1] ^ head[i])
|
||||
}
|
||||
}
|
||||
|
||||
export type ScryptOpts = {
|
||||
N: number; // cost factor
|
||||
r: number; // block size
|
||||
p: number; // parallelization
|
||||
dkLen?: number; // key length
|
||||
asyncTick?: number; // block execution max time
|
||||
maxmem?: number;
|
||||
onProgress?: (progress: number) => void;
|
||||
};
|
||||
|
||||
// Common prologue and epilogue for sync/async functions
|
||||
function scryptInit(password: KDFInput, salt: KDFInput, _opts?: ScryptOpts) {
|
||||
// Maxmem - 1GB+1KB by default
|
||||
const opts = checkOpts(
|
||||
{
|
||||
dkLen: 32,
|
||||
asyncTick: 10,
|
||||
maxmem: 1024 ** 3 + 1024,
|
||||
},
|
||||
_opts
|
||||
);
|
||||
const { N, r, p, dkLen, asyncTick, maxmem, onProgress } = opts;
|
||||
anumber(N);
|
||||
anumber(r);
|
||||
anumber(p);
|
||||
anumber(dkLen);
|
||||
anumber(asyncTick);
|
||||
anumber(maxmem);
|
||||
if (onProgress !== undefined && typeof onProgress !== 'function')
|
||||
throw new Error('progressCb should be function');
|
||||
const blockSize = 128 * r;
|
||||
const blockSize32 = blockSize / 4;
|
||||
|
||||
// Max N is 2^32 (Integrify is 32-bit). Real limit is 2^22: JS engines Uint8Array limit is 4GB in 2024.
|
||||
// Spec check `N >= 2^(blockSize / 8)` is not done for compat with popular libs,
|
||||
// which used incorrect r: 1, p: 8. Also, the check seems to be a spec error:
|
||||
// https://www.rfc-editor.org/errata_search.php?rfc=7914
|
||||
const pow32 = Math.pow(2, 32);
|
||||
if (N <= 1 || (N & (N - 1)) !== 0 || N > pow32) {
|
||||
throw new Error('Scrypt: N must be larger than 1, a power of 2, and less than 2^32');
|
||||
}
|
||||
if (p < 0 || p > ((pow32 - 1) * 32) / blockSize) {
|
||||
throw new Error(
|
||||
'Scrypt: p must be a positive integer less than or equal to ((2^32 - 1) * 32) / (128 * r)'
|
||||
);
|
||||
}
|
||||
if (dkLen < 0 || dkLen > (pow32 - 1) * 32) {
|
||||
throw new Error(
|
||||
'Scrypt: dkLen should be positive integer less than or equal to (2^32 - 1) * 32'
|
||||
);
|
||||
}
|
||||
const memUsed = blockSize * (N + p);
|
||||
if (memUsed > maxmem) {
|
||||
throw new Error(
|
||||
'Scrypt: memused is bigger than maxMem. Expected 128 * r * (N + p) > maxmem of ' + maxmem
|
||||
);
|
||||
}
|
||||
// [B0...Bp−1] ← PBKDF2HMAC-SHA256(Passphrase, Salt, 1, blockSize*ParallelizationFactor)
|
||||
// Since it has only one iteration there is no reason to use async variant
|
||||
const B = pbkdf2(sha256, password, salt, { c: 1, dkLen: blockSize * p });
|
||||
const B32 = u32(B);
|
||||
// Re-used between parallel iterations. Array(iterations) of B
|
||||
const V = u32(new Uint8Array(blockSize * N));
|
||||
const tmp = u32(new Uint8Array(blockSize));
|
||||
let blockMixCb = () => {};
|
||||
if (onProgress) {
|
||||
const totalBlockMix = 2 * N * p;
|
||||
// Invoke callback if progress changes from 10.01 to 10.02
|
||||
// Allows to draw smooth progress bar on up to 8K screen
|
||||
const callbackPer = Math.max(Math.floor(totalBlockMix / 10000), 1);
|
||||
let blockMixCnt = 0;
|
||||
blockMixCb = () => {
|
||||
blockMixCnt++;
|
||||
if (onProgress && (!(blockMixCnt % callbackPer) || blockMixCnt === totalBlockMix))
|
||||
onProgress(blockMixCnt / totalBlockMix);
|
||||
};
|
||||
}
|
||||
return { N, r, p, dkLen, blockSize32, V, B32, B, tmp, blockMixCb, asyncTick };
|
||||
}
|
||||
|
||||
function scryptOutput(
|
||||
password: KDFInput,
|
||||
dkLen: number,
|
||||
B: Uint8Array,
|
||||
V: Uint32Array,
|
||||
tmp: Uint32Array
|
||||
) {
|
||||
const res = pbkdf2(sha256, password, B, { c: 1, dkLen });
|
||||
clean(B, V, tmp);
|
||||
return res;
|
||||
}
|
||||
|
||||
/**
|
||||
* Scrypt KDF from RFC 7914.
|
||||
* @param password - pass
|
||||
* @param salt - salt
|
||||
* @param opts - parameters
|
||||
* - `N` is cpu/mem work factor (power of 2 e.g. 2**18)
|
||||
* - `r` is block size (8 is common), fine-tunes sequential memory read size and performance
|
||||
* - `p` is parallelization factor (1 is common)
|
||||
* - `dkLen` is output key length in bytes e.g. 32.
|
||||
* - `asyncTick` - (default: 10) max time in ms for which async function can block execution
|
||||
* - `maxmem` - (default: `1024 ** 3 + 1024` aka 1GB+1KB). A limit that the app could use for scrypt
|
||||
* - `onProgress` - callback function that would be executed for progress report
|
||||
* @returns Derived key
|
||||
* @example
|
||||
* scrypt('password', 'salt', { N: 2**18, r: 8, p: 1, dkLen: 32 });
|
||||
*/
|
||||
export function scrypt(password: KDFInput, salt: KDFInput, opts: ScryptOpts): Uint8Array {
|
||||
const { N, r, p, dkLen, blockSize32, V, B32, B, tmp, blockMixCb } = scryptInit(
|
||||
password,
|
||||
salt,
|
||||
opts
|
||||
);
|
||||
swap32IfBE(B32);
|
||||
for (let pi = 0; pi < p; pi++) {
|
||||
const Pi = blockSize32 * pi;
|
||||
for (let i = 0; i < blockSize32; i++) V[i] = B32[Pi + i]; // V[0] = B[i]
|
||||
for (let i = 0, pos = 0; i < N - 1; i++) {
|
||||
BlockMix(V, pos, V, (pos += blockSize32), r); // V[i] = BlockMix(V[i-1]);
|
||||
blockMixCb();
|
||||
}
|
||||
BlockMix(V, (N - 1) * blockSize32, B32, Pi, r); // Process last element
|
||||
blockMixCb();
|
||||
for (let i = 0; i < N; i++) {
|
||||
// First u32 of the last 64-byte block (u32 is LE)
|
||||
const j = B32[Pi + blockSize32 - 16] % N; // j = Integrify(X) % iterations
|
||||
for (let k = 0; k < blockSize32; k++) tmp[k] = B32[Pi + k] ^ V[j * blockSize32 + k]; // tmp = B ^ V[j]
|
||||
BlockMix(tmp, 0, B32, Pi, r); // B = BlockMix(B ^ V[j])
|
||||
blockMixCb();
|
||||
}
|
||||
}
|
||||
swap32IfBE(B32);
|
||||
return scryptOutput(password, dkLen, B, V, tmp);
|
||||
}
|
||||
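A worked example of the memory bound enforced in scryptInit (illustrative): scratch memory is roughly 128 * r * (N + p) bytes.

// Illustrative usage sketch — parameters chosen to stay under the default maxmem (1GB + 1KB).
import { scrypt } from '@noble/hashes/scrypt';

// 128 * 8 * (2**18 + 1) ≈ 268 MB of scratch memory for these parameters.
const key = scrypt('password', 'salt', { N: 2 ** 18, r: 8, p: 1, dkLen: 32 });
console.log(key.length); // 32
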
|
||||
/**
|
||||
* Scrypt KDF from RFC 7914. Async version.
|
||||
* @example
|
||||
* await scryptAsync('password', 'salt', { N: 2**18, r: 8, p: 1, dkLen: 32 });
|
||||
*/
|
||||
export async function scryptAsync(
|
||||
password: KDFInput,
|
||||
salt: KDFInput,
|
||||
opts: ScryptOpts
|
||||
): Promise<Uint8Array> {
|
||||
const { N, r, p, dkLen, blockSize32, V, B32, B, tmp, blockMixCb, asyncTick } = scryptInit(
|
||||
password,
|
||||
salt,
|
||||
opts
|
||||
);
|
||||
swap32IfBE(B32);
|
||||
for (let pi = 0; pi < p; pi++) {
|
||||
const Pi = blockSize32 * pi;
|
||||
for (let i = 0; i < blockSize32; i++) V[i] = B32[Pi + i]; // V[0] = B[i]
|
||||
let pos = 0;
|
||||
await asyncLoop(N - 1, asyncTick, () => {
|
||||
BlockMix(V, pos, V, (pos += blockSize32), r); // V[i] = BlockMix(V[i-1]);
|
||||
blockMixCb();
|
||||
});
|
||||
BlockMix(V, (N - 1) * blockSize32, B32, Pi, r); // Process last element
|
||||
blockMixCb();
|
||||
await asyncLoop(N, asyncTick, () => {
|
||||
// First u32 of the last 64-byte block (u32 is LE)
|
||||
const j = B32[Pi + blockSize32 - 16] % N; // j = Integrify(X) % iterations
|
||||
for (let k = 0; k < blockSize32; k++) tmp[k] = B32[Pi + k] ^ V[j * blockSize32 + k]; // tmp = B ^ V[j]
|
||||
BlockMix(tmp, 0, B32, Pi, r); // B = BlockMix(B ^ V[j])
|
||||
blockMixCb();
|
||||
});
|
||||
}
|
||||
swap32IfBE(B32);
|
||||
return scryptOutput(password, dkLen, B, V, tmp);
|
||||
}
|
||||
10
qwen/nodejs/node_modules/@noble/hashes/src/sha1.ts
generated
vendored
Normal file
@@ -0,0 +1,10 @@
/**
 * SHA1 (RFC 3174) legacy hash function.
 * @module
 * @deprecated
 */
import { SHA1 as SHA1n, sha1 as sha1n } from './legacy.ts';
/** @deprecated Use import from `noble/hashes/legacy` module */
export const SHA1: typeof SHA1n = SHA1n;
/** @deprecated Use import from `noble/hashes/legacy` module */
export const sha1: typeof sha1n = sha1n;
402
qwen/nodejs/node_modules/@noble/hashes/src/sha2.ts
generated
vendored
Normal file
@@ -0,0 +1,402 @@
|
||||
/**
|
||||
* SHA2 hash function. A.k.a. sha256, sha384, sha512, sha512_224, sha512_256.
|
||||
* SHA256 is the fastest hash implementable in JS, even faster than Blake3.
|
||||
* Check out [RFC 4634](https://datatracker.ietf.org/doc/html/rfc4634) and
|
||||
* [FIPS 180-4](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf).
|
||||
* @module
|
||||
*/
|
||||
import { Chi, HashMD, Maj, SHA224_IV, SHA256_IV, SHA384_IV, SHA512_IV } from './_md.ts';
|
||||
import * as u64 from './_u64.ts';
|
||||
import { type CHash, clean, createHasher, rotr } from './utils.ts';
|
||||
|
||||
/**
|
||||
* Round constants:
|
||||
* First 32 bits of fractional parts of the cube roots of the first 64 primes 2..311)
|
||||
*/
|
||||
// prettier-ignore
|
||||
const SHA256_K = /* @__PURE__ */ Uint32Array.from([
|
||||
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
|
||||
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
|
||||
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
|
||||
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
|
||||
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
|
||||
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
|
||||
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
|
||||
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
|
||||
]);
|
||||
|
||||
/** Reusable temporary buffer. "W" comes straight from spec. */
|
||||
const SHA256_W = /* @__PURE__ */ new Uint32Array(64);
|
||||
export class SHA256 extends HashMD<SHA256> {
|
||||
// We cannot use array here since array allows indexing by variable
|
||||
// which means optimizer/compiler cannot use registers.
|
||||
protected A: number = SHA256_IV[0] | 0;
|
||||
protected B: number = SHA256_IV[1] | 0;
|
||||
protected C: number = SHA256_IV[2] | 0;
|
||||
protected D: number = SHA256_IV[3] | 0;
|
||||
protected E: number = SHA256_IV[4] | 0;
|
||||
protected F: number = SHA256_IV[5] | 0;
|
||||
protected G: number = SHA256_IV[6] | 0;
|
||||
protected H: number = SHA256_IV[7] | 0;
|
||||
|
||||
constructor(outputLen: number = 32) {
|
||||
super(64, outputLen, 8, false);
|
||||
}
|
||||
protected get(): [number, number, number, number, number, number, number, number] {
|
||||
const { A, B, C, D, E, F, G, H } = this;
|
||||
return [A, B, C, D, E, F, G, H];
|
||||
}
|
||||
// prettier-ignore
|
||||
protected set(
|
||||
A: number, B: number, C: number, D: number, E: number, F: number, G: number, H: number
|
||||
): void {
|
||||
this.A = A | 0;
|
||||
this.B = B | 0;
|
||||
this.C = C | 0;
|
||||
this.D = D | 0;
|
||||
this.E = E | 0;
|
||||
this.F = F | 0;
|
||||
this.G = G | 0;
|
||||
this.H = H | 0;
|
||||
}
|
||||
protected process(view: DataView, offset: number): void {
|
||||
// Extend the first 16 words into the remaining 48 words w[16..63] of the message schedule array
|
||||
for (let i = 0; i < 16; i++, offset += 4) SHA256_W[i] = view.getUint32(offset, false);
|
||||
for (let i = 16; i < 64; i++) {
|
||||
const W15 = SHA256_W[i - 15];
|
||||
const W2 = SHA256_W[i - 2];
|
||||
const s0 = rotr(W15, 7) ^ rotr(W15, 18) ^ (W15 >>> 3);
|
||||
const s1 = rotr(W2, 17) ^ rotr(W2, 19) ^ (W2 >>> 10);
|
||||
SHA256_W[i] = (s1 + SHA256_W[i - 7] + s0 + SHA256_W[i - 16]) | 0;
|
||||
}
|
||||
// Compression function main loop, 64 rounds
|
||||
let { A, B, C, D, E, F, G, H } = this;
|
||||
for (let i = 0; i < 64; i++) {
|
||||
const sigma1 = rotr(E, 6) ^ rotr(E, 11) ^ rotr(E, 25);
|
||||
const T1 = (H + sigma1 + Chi(E, F, G) + SHA256_K[i] + SHA256_W[i]) | 0;
|
||||
const sigma0 = rotr(A, 2) ^ rotr(A, 13) ^ rotr(A, 22);
|
||||
const T2 = (sigma0 + Maj(A, B, C)) | 0;
|
||||
H = G;
|
||||
G = F;
|
||||
F = E;
|
||||
E = (D + T1) | 0;
|
||||
D = C;
|
||||
C = B;
|
||||
B = A;
|
||||
A = (T1 + T2) | 0;
|
||||
}
|
||||
// Add the compressed chunk to the current hash value
|
||||
A = (A + this.A) | 0;
|
||||
B = (B + this.B) | 0;
|
||||
C = (C + this.C) | 0;
|
||||
D = (D + this.D) | 0;
|
||||
E = (E + this.E) | 0;
|
||||
F = (F + this.F) | 0;
|
||||
G = (G + this.G) | 0;
|
||||
H = (H + this.H) | 0;
|
||||
this.set(A, B, C, D, E, F, G, H);
|
||||
}
|
||||
protected roundClean(): void {
|
||||
clean(SHA256_W);
|
||||
}
|
||||
destroy(): void {
|
||||
this.set(0, 0, 0, 0, 0, 0, 0, 0);
|
||||
clean(this.buffer);
|
||||
}
|
||||
}
|
||||
|
||||
export class SHA224 extends SHA256 {
|
||||
protected A: number = SHA224_IV[0] | 0;
|
||||
protected B: number = SHA224_IV[1] | 0;
|
||||
protected C: number = SHA224_IV[2] | 0;
|
||||
protected D: number = SHA224_IV[3] | 0;
|
||||
protected E: number = SHA224_IV[4] | 0;
|
||||
protected F: number = SHA224_IV[5] | 0;
|
||||
protected G: number = SHA224_IV[6] | 0;
|
||||
protected H: number = SHA224_IV[7] | 0;
|
||||
constructor() {
|
||||
super(28);
|
||||
}
|
||||
}
|
||||
|
||||
// SHA2-512 is slower than sha256 in js because u64 operations are slow.
|
||||
|
||||
// Round constants
|
||||
// First 32 bits of the fractional parts of the cube roots of the first 80 primes 2..409
|
||||
// prettier-ignore
|
||||
const K512 = /* @__PURE__ */ (() => u64.split([
|
||||
'0x428a2f98d728ae22', '0x7137449123ef65cd', '0xb5c0fbcfec4d3b2f', '0xe9b5dba58189dbbc',
|
||||
'0x3956c25bf348b538', '0x59f111f1b605d019', '0x923f82a4af194f9b', '0xab1c5ed5da6d8118',
|
||||
'0xd807aa98a3030242', '0x12835b0145706fbe', '0x243185be4ee4b28c', '0x550c7dc3d5ffb4e2',
|
||||
'0x72be5d74f27b896f', '0x80deb1fe3b1696b1', '0x9bdc06a725c71235', '0xc19bf174cf692694',
|
||||
'0xe49b69c19ef14ad2', '0xefbe4786384f25e3', '0x0fc19dc68b8cd5b5', '0x240ca1cc77ac9c65',
|
||||
'0x2de92c6f592b0275', '0x4a7484aa6ea6e483', '0x5cb0a9dcbd41fbd4', '0x76f988da831153b5',
|
||||
'0x983e5152ee66dfab', '0xa831c66d2db43210', '0xb00327c898fb213f', '0xbf597fc7beef0ee4',
|
||||
'0xc6e00bf33da88fc2', '0xd5a79147930aa725', '0x06ca6351e003826f', '0x142929670a0e6e70',
|
||||
'0x27b70a8546d22ffc', '0x2e1b21385c26c926', '0x4d2c6dfc5ac42aed', '0x53380d139d95b3df',
|
||||
'0x650a73548baf63de', '0x766a0abb3c77b2a8', '0x81c2c92e47edaee6', '0x92722c851482353b',
|
||||
'0xa2bfe8a14cf10364', '0xa81a664bbc423001', '0xc24b8b70d0f89791', '0xc76c51a30654be30',
|
||||
'0xd192e819d6ef5218', '0xd69906245565a910', '0xf40e35855771202a', '0x106aa07032bbd1b8',
|
||||
'0x19a4c116b8d2d0c8', '0x1e376c085141ab53', '0x2748774cdf8eeb99', '0x34b0bcb5e19b48a8',
|
||||
'0x391c0cb3c5c95a63', '0x4ed8aa4ae3418acb', '0x5b9cca4f7763e373', '0x682e6ff3d6b2b8a3',
|
||||
'0x748f82ee5defb2fc', '0x78a5636f43172f60', '0x84c87814a1f0ab72', '0x8cc702081a6439ec',
|
||||
'0x90befffa23631e28', '0xa4506cebde82bde9', '0xbef9a3f7b2c67915', '0xc67178f2e372532b',
|
||||
'0xca273eceea26619c', '0xd186b8c721c0c207', '0xeada7dd6cde0eb1e', '0xf57d4f7fee6ed178',
|
||||
'0x06f067aa72176fba', '0x0a637dc5a2c898a6', '0x113f9804bef90dae', '0x1b710b35131c471b',
|
||||
'0x28db77f523047d84', '0x32caab7b40c72493', '0x3c9ebe0a15c9bebc', '0x431d67c49c100d4c',
|
||||
'0x4cc5d4becb3e42b6', '0x597f299cfc657e2a', '0x5fcb6fab3ad6faec', '0x6c44198c4a475817'
|
||||
].map(n => BigInt(n))))();
|
||||
const SHA512_Kh = /* @__PURE__ */ (() => K512[0])();
|
||||
const SHA512_Kl = /* @__PURE__ */ (() => K512[1])();
|
||||
|
||||
// Reusable temporary buffers
|
||||
const SHA512_W_H = /* @__PURE__ */ new Uint32Array(80);
|
||||
const SHA512_W_L = /* @__PURE__ */ new Uint32Array(80);
|
||||
|
||||
export class SHA512 extends HashMD<SHA512> {
|
||||
// We cannot use array here since array allows indexing by variable
|
||||
// which means optimizer/compiler cannot use registers.
|
||||
// h -- high 32 bits, l -- low 32 bits
|
||||
protected Ah: number = SHA512_IV[0] | 0;
|
||||
protected Al: number = SHA512_IV[1] | 0;
|
||||
protected Bh: number = SHA512_IV[2] | 0;
|
||||
protected Bl: number = SHA512_IV[3] | 0;
|
||||
protected Ch: number = SHA512_IV[4] | 0;
|
||||
protected Cl: number = SHA512_IV[5] | 0;
|
||||
protected Dh: number = SHA512_IV[6] | 0;
|
||||
protected Dl: number = SHA512_IV[7] | 0;
|
||||
protected Eh: number = SHA512_IV[8] | 0;
|
||||
protected El: number = SHA512_IV[9] | 0;
|
||||
protected Fh: number = SHA512_IV[10] | 0;
|
||||
protected Fl: number = SHA512_IV[11] | 0;
|
||||
protected Gh: number = SHA512_IV[12] | 0;
|
||||
protected Gl: number = SHA512_IV[13] | 0;
|
||||
protected Hh: number = SHA512_IV[14] | 0;
|
||||
protected Hl: number = SHA512_IV[15] | 0;
|
||||
|
||||
constructor(outputLen: number = 64) {
|
||||
super(128, outputLen, 16, false);
|
||||
}
|
||||
// prettier-ignore
|
||||
protected get(): [
|
||||
number, number, number, number, number, number, number, number,
|
||||
number, number, number, number, number, number, number, number
|
||||
] {
|
||||
const { Ah, Al, Bh, Bl, Ch, Cl, Dh, Dl, Eh, El, Fh, Fl, Gh, Gl, Hh, Hl } = this;
|
||||
return [Ah, Al, Bh, Bl, Ch, Cl, Dh, Dl, Eh, El, Fh, Fl, Gh, Gl, Hh, Hl];
|
||||
}
|
||||
// prettier-ignore
|
||||
protected set(
|
||||
Ah: number, Al: number, Bh: number, Bl: number, Ch: number, Cl: number, Dh: number, Dl: number,
|
||||
Eh: number, El: number, Fh: number, Fl: number, Gh: number, Gl: number, Hh: number, Hl: number
|
||||
): void {
|
||||
this.Ah = Ah | 0;
|
||||
this.Al = Al | 0;
|
||||
this.Bh = Bh | 0;
|
||||
this.Bl = Bl | 0;
|
||||
this.Ch = Ch | 0;
|
||||
this.Cl = Cl | 0;
|
||||
this.Dh = Dh | 0;
|
||||
this.Dl = Dl | 0;
|
||||
this.Eh = Eh | 0;
|
||||
this.El = El | 0;
|
||||
this.Fh = Fh | 0;
|
||||
this.Fl = Fl | 0;
|
||||
this.Gh = Gh | 0;
|
||||
this.Gl = Gl | 0;
|
||||
this.Hh = Hh | 0;
|
||||
this.Hl = Hl | 0;
|
||||
}
|
||||
protected process(view: DataView, offset: number): void {
|
||||
// Extend the first 16 words into the remaining 64 words w[16..79] of the message schedule array
|
||||
for (let i = 0; i < 16; i++, offset += 4) {
|
||||
SHA512_W_H[i] = view.getUint32(offset);
|
||||
SHA512_W_L[i] = view.getUint32((offset += 4));
|
||||
}
|
||||
for (let i = 16; i < 80; i++) {
|
||||
// s0 := (w[i-15] rightrotate 1) xor (w[i-15] rightrotate 8) xor (w[i-15] rightshift 7)
|
||||
const W15h = SHA512_W_H[i - 15] | 0;
|
||||
const W15l = SHA512_W_L[i - 15] | 0;
|
||||
const s0h = u64.rotrSH(W15h, W15l, 1) ^ u64.rotrSH(W15h, W15l, 8) ^ u64.shrSH(W15h, W15l, 7);
|
||||
const s0l = u64.rotrSL(W15h, W15l, 1) ^ u64.rotrSL(W15h, W15l, 8) ^ u64.shrSL(W15h, W15l, 7);
|
||||
// s1 := (w[i-2] rightrotate 19) xor (w[i-2] rightrotate 61) xor (w[i-2] rightshift 6)
|
||||
const W2h = SHA512_W_H[i - 2] | 0;
|
||||
const W2l = SHA512_W_L[i - 2] | 0;
|
||||
const s1h = u64.rotrSH(W2h, W2l, 19) ^ u64.rotrBH(W2h, W2l, 61) ^ u64.shrSH(W2h, W2l, 6);
|
||||
const s1l = u64.rotrSL(W2h, W2l, 19) ^ u64.rotrBL(W2h, W2l, 61) ^ u64.shrSL(W2h, W2l, 6);
|
||||
// SHA256_W[i] = s0 + s1 + SHA256_W[i - 7] + SHA256_W[i - 16];
|
||||
const SUMl = u64.add4L(s0l, s1l, SHA512_W_L[i - 7], SHA512_W_L[i - 16]);
|
||||
const SUMh = u64.add4H(SUMl, s0h, s1h, SHA512_W_H[i - 7], SHA512_W_H[i - 16]);
|
||||
SHA512_W_H[i] = SUMh | 0;
|
||||
SHA512_W_L[i] = SUMl | 0;
|
||||
}
|
||||
let { Ah, Al, Bh, Bl, Ch, Cl, Dh, Dl, Eh, El, Fh, Fl, Gh, Gl, Hh, Hl } = this;
|
||||
// Compression function main loop, 80 rounds
|
||||
for (let i = 0; i < 80; i++) {
|
||||
// S1 := (e rightrotate 14) xor (e rightrotate 18) xor (e rightrotate 41)
|
||||
const sigma1h = u64.rotrSH(Eh, El, 14) ^ u64.rotrSH(Eh, El, 18) ^ u64.rotrBH(Eh, El, 41);
|
||||
const sigma1l = u64.rotrSL(Eh, El, 14) ^ u64.rotrSL(Eh, El, 18) ^ u64.rotrBL(Eh, El, 41);
|
||||
//const T1 = (H + sigma1 + Chi(E, F, G) + SHA256_K[i] + SHA256_W[i]) | 0;
|
||||
const CHIh = (Eh & Fh) ^ (~Eh & Gh);
|
||||
const CHIl = (El & Fl) ^ (~El & Gl);
|
||||
// T1 = H + sigma1 + Chi(E, F, G) + SHA512_K[i] + SHA512_W[i]
|
||||
// prettier-ignore
|
||||
const T1ll = u64.add5L(Hl, sigma1l, CHIl, SHA512_Kl[i], SHA512_W_L[i]);
|
||||
const T1h = u64.add5H(T1ll, Hh, sigma1h, CHIh, SHA512_Kh[i], SHA512_W_H[i]);
|
||||
const T1l = T1ll | 0;
|
||||
// S0 := (a rightrotate 28) xor (a rightrotate 34) xor (a rightrotate 39)
|
||||
const sigma0h = u64.rotrSH(Ah, Al, 28) ^ u64.rotrBH(Ah, Al, 34) ^ u64.rotrBH(Ah, Al, 39);
|
||||
const sigma0l = u64.rotrSL(Ah, Al, 28) ^ u64.rotrBL(Ah, Al, 34) ^ u64.rotrBL(Ah, Al, 39);
|
||||
const MAJh = (Ah & Bh) ^ (Ah & Ch) ^ (Bh & Ch);
|
||||
const MAJl = (Al & Bl) ^ (Al & Cl) ^ (Bl & Cl);
|
||||
Hh = Gh | 0;
|
||||
Hl = Gl | 0;
|
||||
Gh = Fh | 0;
|
||||
Gl = Fl | 0;
|
||||
Fh = Eh | 0;
|
||||
Fl = El | 0;
|
||||
({ h: Eh, l: El } = u64.add(Dh | 0, Dl | 0, T1h | 0, T1l | 0));
|
||||
Dh = Ch | 0;
|
||||
Dl = Cl | 0;
|
||||
Ch = Bh | 0;
|
||||
Cl = Bl | 0;
|
||||
Bh = Ah | 0;
|
||||
Bl = Al | 0;
|
||||
const All = u64.add3L(T1l, sigma0l, MAJl);
|
||||
Ah = u64.add3H(All, T1h, sigma0h, MAJh);
|
||||
Al = All | 0;
|
||||
}
|
||||
// Add the compressed chunk to the current hash value
|
||||
({ h: Ah, l: Al } = u64.add(this.Ah | 0, this.Al | 0, Ah | 0, Al | 0));
|
||||
({ h: Bh, l: Bl } = u64.add(this.Bh | 0, this.Bl | 0, Bh | 0, Bl | 0));
|
||||
({ h: Ch, l: Cl } = u64.add(this.Ch | 0, this.Cl | 0, Ch | 0, Cl | 0));
|
||||
({ h: Dh, l: Dl } = u64.add(this.Dh | 0, this.Dl | 0, Dh | 0, Dl | 0));
|
||||
({ h: Eh, l: El } = u64.add(this.Eh | 0, this.El | 0, Eh | 0, El | 0));
|
||||
({ h: Fh, l: Fl } = u64.add(this.Fh | 0, this.Fl | 0, Fh | 0, Fl | 0));
|
||||
({ h: Gh, l: Gl } = u64.add(this.Gh | 0, this.Gl | 0, Gh | 0, Gl | 0));
|
||||
({ h: Hh, l: Hl } = u64.add(this.Hh | 0, this.Hl | 0, Hh | 0, Hl | 0));
|
||||
this.set(Ah, Al, Bh, Bl, Ch, Cl, Dh, Dl, Eh, El, Fh, Fl, Gh, Gl, Hh, Hl);
|
||||
}
|
||||
protected roundClean(): void {
|
||||
clean(SHA512_W_H, SHA512_W_L);
|
||||
}
|
||||
destroy(): void {
|
||||
clean(this.buffer);
|
||||
this.set(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
|
||||
}
|
||||
}
|
||||
|
||||
export class SHA384 extends SHA512 {
|
||||
protected Ah: number = SHA384_IV[0] | 0;
|
||||
protected Al: number = SHA384_IV[1] | 0;
|
||||
protected Bh: number = SHA384_IV[2] | 0;
|
||||
protected Bl: number = SHA384_IV[3] | 0;
|
||||
protected Ch: number = SHA384_IV[4] | 0;
|
||||
protected Cl: number = SHA384_IV[5] | 0;
|
||||
protected Dh: number = SHA384_IV[6] | 0;
|
||||
protected Dl: number = SHA384_IV[7] | 0;
|
||||
protected Eh: number = SHA384_IV[8] | 0;
|
||||
protected El: number = SHA384_IV[9] | 0;
|
||||
protected Fh: number = SHA384_IV[10] | 0;
|
||||
protected Fl: number = SHA384_IV[11] | 0;
|
||||
protected Gh: number = SHA384_IV[12] | 0;
|
||||
protected Gl: number = SHA384_IV[13] | 0;
|
||||
protected Hh: number = SHA384_IV[14] | 0;
|
||||
protected Hl: number = SHA384_IV[15] | 0;
|
||||
|
||||
constructor() {
|
||||
super(48);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Truncated SHA512/256 and SHA512/224.
|
||||
* SHA512_IV is XORed with 0xa5a5a5a5a5a5a5a5, then used as "intermediary" IV of SHA512/t.
|
||||
* Then the string "SHA-512/t" (e.g. "SHA-512/256") is hashed with that intermediary IV to produce the resulting IV.
|
||||
* See `test/misc/sha2-gen-iv.js`.
|
||||
*/
|
||||
|
||||
/** SHA512/224 IV */
|
||||
const T224_IV = /* @__PURE__ */ Uint32Array.from([
|
||||
0x8c3d37c8, 0x19544da2, 0x73e19966, 0x89dcd4d6, 0x1dfab7ae, 0x32ff9c82, 0x679dd514, 0x582f9fcf,
|
||||
0x0f6d2b69, 0x7bd44da8, 0x77e36f73, 0x04c48942, 0x3f9d85a8, 0x6a1d36c8, 0x1112e6ad, 0x91d692a1,
|
||||
]);
|
||||
|
||||
/** SHA512/256 IV */
|
||||
const T256_IV = /* @__PURE__ */ Uint32Array.from([
|
||||
0x22312194, 0xfc2bf72c, 0x9f555fa3, 0xc84c64c2, 0x2393b86b, 0x6f53b151, 0x96387719, 0x5940eabd,
|
||||
0x96283ee2, 0xa88effe3, 0xbe5e1e25, 0x53863992, 0x2b0199fc, 0x2c85b8aa, 0x0eb72ddc, 0x81c52ca2,
|
||||
]);
|
||||
|
||||
export class SHA512_224 extends SHA512 {
|
||||
protected Ah: number = T224_IV[0] | 0;
|
||||
protected Al: number = T224_IV[1] | 0;
|
||||
protected Bh: number = T224_IV[2] | 0;
|
||||
protected Bl: number = T224_IV[3] | 0;
|
||||
protected Ch: number = T224_IV[4] | 0;
|
||||
protected Cl: number = T224_IV[5] | 0;
|
||||
protected Dh: number = T224_IV[6] | 0;
|
||||
protected Dl: number = T224_IV[7] | 0;
|
||||
protected Eh: number = T224_IV[8] | 0;
|
||||
protected El: number = T224_IV[9] | 0;
|
||||
protected Fh: number = T224_IV[10] | 0;
|
||||
protected Fl: number = T224_IV[11] | 0;
|
||||
protected Gh: number = T224_IV[12] | 0;
|
||||
protected Gl: number = T224_IV[13] | 0;
|
||||
protected Hh: number = T224_IV[14] | 0;
|
||||
protected Hl: number = T224_IV[15] | 0;
|
||||
|
||||
constructor() {
|
||||
super(28);
|
||||
}
|
||||
}
|
||||
|
||||
export class SHA512_256 extends SHA512 {
|
||||
protected Ah: number = T256_IV[0] | 0;
|
||||
protected Al: number = T256_IV[1] | 0;
|
||||
protected Bh: number = T256_IV[2] | 0;
|
||||
protected Bl: number = T256_IV[3] | 0;
|
||||
protected Ch: number = T256_IV[4] | 0;
|
||||
protected Cl: number = T256_IV[5] | 0;
|
||||
protected Dh: number = T256_IV[6] | 0;
|
||||
protected Dl: number = T256_IV[7] | 0;
|
||||
protected Eh: number = T256_IV[8] | 0;
|
||||
protected El: number = T256_IV[9] | 0;
|
||||
protected Fh: number = T256_IV[10] | 0;
|
||||
protected Fl: number = T256_IV[11] | 0;
|
||||
protected Gh: number = T256_IV[12] | 0;
|
||||
protected Gl: number = T256_IV[13] | 0;
|
||||
protected Hh: number = T256_IV[14] | 0;
|
||||
protected Hl: number = T256_IV[15] | 0;
|
||||
|
||||
constructor() {
|
||||
super(32);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* SHA2-256 hash function from RFC 4634.
|
||||
*
|
||||
* It is the fastest JS hash, even faster than Blake3.
|
||||
* To break sha256 using birthday attack, attackers need to try 2^128 hashes.
|
||||
* BTC network is doing 2^70 hashes/sec (2^95 hashes/year) as per 2025.
|
||||
*/
|
||||
export const sha256: CHash = /* @__PURE__ */ createHasher(() => new SHA256());
|
||||
/** SHA2-224 hash function from RFC 4634 */
|
||||
export const sha224: CHash = /* @__PURE__ */ createHasher(() => new SHA224());
|
||||
|
||||
/** SHA2-512 hash function from RFC 4634. */
|
||||
export const sha512: CHash = /* @__PURE__ */ createHasher(() => new SHA512());
|
||||
/** SHA2-384 hash function from RFC 4634. */
|
||||
export const sha384: CHash = /* @__PURE__ */ createHasher(() => new SHA384());
|
||||
|
||||
/**
|
||||
* SHA2-512/256 "truncated" hash function, with improved resistance to length extension attacks.
|
||||
* See the paper on [truncated SHA512](https://eprint.iacr.org/2010/548.pdf).
|
||||
*/
|
||||
export const sha512_256: CHash = /* @__PURE__ */ createHasher(() => new SHA512_256());
|
||||
/**
|
||||
* SHA2-512/224 "truncated" hash function, with improved resistance to length extension attacks.
|
||||
* See the paper on [truncated SHA512](https://eprint.iacr.org/2010/548.pdf).
|
||||
*/
|
||||
export const sha512_224: CHash = /* @__PURE__ */ createHasher(() => new SHA512_224());
|
||||
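A short sketch contrasting the truncated variants with plain sha512 (illustrative, not part of the vendored file):

// Illustrative usage sketch.
import { sha512, sha512_224, sha512_256 } from '@noble/hashes/sha2';
import { utf8ToBytes } from '@noble/hashes/utils';

const msg = utf8ToBytes('abc');
console.log(sha512(msg).length);     // 64 bytes
console.log(sha512_256(msg).length); // 32 bytes, resists length extension
console.log(sha512_224(msg).length); // 28 bytes, resists length extension
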
24
qwen/nodejs/node_modules/@noble/hashes/src/sha256.ts
generated
vendored
Normal file
@@ -0,0 +1,24 @@
/**
 * SHA2-256 a.k.a. sha256. In JS, it is the fastest hash, even faster than Blake3.
 *
 * To break sha256 using birthday attack, attackers need to try 2^128 hashes.
 * BTC network is doing 2^70 hashes/sec (2^95 hashes/year) as per 2025.
 *
 * Check out [FIPS 180-4](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf).
 * @module
 * @deprecated
 */
import {
  SHA224 as SHA224n,
  sha224 as sha224n,
  SHA256 as SHA256n,
  sha256 as sha256n,
} from './sha2.ts';
/** @deprecated Use import from `noble/hashes/sha2` module */
export const SHA256: typeof SHA256n = SHA256n;
/** @deprecated Use import from `noble/hashes/sha2` module */
export const sha256: typeof sha256n = sha256n;
/** @deprecated Use import from `noble/hashes/sha2` module */
export const SHA224: typeof SHA224n = SHA224n;
/** @deprecated Use import from `noble/hashes/sha2` module */
export const sha224: typeof sha224n = sha224n;
499
qwen/nodejs/node_modules/@noble/hashes/src/sha3-addons.ts
generated
vendored
Normal file
@@ -0,0 +1,499 @@
|
||||
/**
|
||||
* SHA3 (keccak) addons.
|
||||
*
|
||||
* * Full [NIST SP 800-185](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-185.pdf):
|
||||
* cSHAKE, KMAC, TupleHash, ParallelHash + XOF variants
|
||||
* * Reduced-round Keccak [(draft)](https://datatracker.ietf.org/doc/draft-irtf-cfrg-kangarootwelve/):
|
||||
* * 🦘 K12 aka KangarooTwelve
|
||||
* * M14 aka MarsupilamiFourteen
|
||||
* * TurboSHAKE
|
||||
* * KeccakPRG: Pseudo-random generator based on Keccak [(pdf)](https://keccak.team/files/CSF-0.1.pdf)
|
||||
* @module
|
||||
*/
|
||||
import { Keccak, type ShakeOpts } from './sha3.ts';
|
||||
import {
|
||||
abytes,
|
||||
anumber,
|
||||
type CHashO,
|
||||
type CHashXO,
|
||||
createOptHasher,
|
||||
createXOFer,
|
||||
Hash,
|
||||
type HashXOF,
|
||||
type Input,
|
||||
toBytes,
|
||||
u32,
|
||||
} from './utils.ts';
|
||||
|
||||
// cSHAKE && KMAC (NIST SP800-185)
|
||||
const _8n = BigInt(8);
|
||||
const _ffn = BigInt(0xff);
|
||||
|
||||
// NOTE: it is safe to use bigints here, since they used only for length encoding (not actual data).
|
||||
// We use bigints in sha256 for lengths too.
|
||||
function leftEncode(n: number | bigint): Uint8Array {
|
||||
n = BigInt(n);
|
||||
const res = [Number(n & _ffn)];
|
||||
n >>= _8n;
|
||||
for (; n > 0; n >>= _8n) res.unshift(Number(n & _ffn));
|
||||
res.unshift(res.length);
|
||||
return new Uint8Array(res);
|
||||
}
|
||||
|
||||
function rightEncode(n: number | bigint): Uint8Array {
|
||||
n = BigInt(n);
|
||||
const res = [Number(n & _ffn)];
|
||||
n >>= _8n;
|
||||
for (; n > 0; n >>= _8n) res.unshift(Number(n & _ffn));
|
||||
res.push(res.length);
|
||||
return new Uint8Array(res);
|
||||
}
|
||||
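Worked values for the two length encodings above, derived directly from the code (shown as byte arrays):

// leftEncode prefixes the big-endian value with its byte length:
//   leftEncode(0)   -> [1, 0]
//   leftEncode(256) -> [2, 1, 0]
// rightEncode appends the byte length instead:
//   rightEncode(0)   -> [0, 1]
//   rightEncode(256) -> [1, 0, 2]
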
|
||||
function chooseLen(opts: ShakeOpts, outputLen: number): number {
|
||||
return opts.dkLen === undefined ? outputLen : opts.dkLen;
|
||||
}
|
||||
|
||||
const abytesOrZero = (buf?: Input) => {
|
||||
if (buf === undefined) return Uint8Array.of();
|
||||
return toBytes(buf);
|
||||
};
|
||||
// NOTE: second modulo is necessary since we don't need to add padding if current element takes whole block
|
||||
const getPadding = (len: number, block: number) => new Uint8Array((block - (len % block)) % block);
|
||||
export type cShakeOpts = ShakeOpts & { personalization?: Input; NISTfn?: Input };
|
||||
|
||||
// Personalization
|
||||
function cshakePers(hash: Keccak, opts: cShakeOpts = {}): Keccak {
|
||||
if (!opts || (!opts.personalization && !opts.NISTfn)) return hash;
|
||||
// Encode and pad in place to avoid unnecessary memory copies/slices (so we don't need to zero them later)
|
||||
// bytepad(encode_string(N) || encode_string(S), 168)
|
||||
const blockLenBytes = leftEncode(hash.blockLen);
|
||||
const fn = abytesOrZero(opts.NISTfn);
|
||||
const fnLen = leftEncode(_8n * BigInt(fn.length)); // length in bits
|
||||
const pers = abytesOrZero(opts.personalization);
|
||||
const persLen = leftEncode(_8n * BigInt(pers.length)); // length in bits
|
||||
if (!fn.length && !pers.length) return hash;
|
||||
hash.suffix = 0x04;
|
||||
hash.update(blockLenBytes).update(fnLen).update(fn).update(persLen).update(pers);
|
||||
let totalLen = blockLenBytes.length + fnLen.length + fn.length + persLen.length + pers.length;
|
||||
hash.update(getPadding(totalLen, hash.blockLen));
|
||||
return hash;
|
||||
}
|
||||
|
||||
const gencShake = (suffix: number, blockLen: number, outputLen: number) =>
|
||||
createXOFer<Keccak, cShakeOpts>((opts: cShakeOpts = {}) =>
|
||||
cshakePers(new Keccak(blockLen, suffix, chooseLen(opts, outputLen), true), opts)
|
||||
);
|
||||
|
||||
// TODO: refactor
|
||||
export type ICShake = {
|
||||
(msg: Input, opts?: cShakeOpts): Uint8Array;
|
||||
outputLen: number;
|
||||
blockLen: number;
|
||||
create(opts: cShakeOpts): HashXOF<Keccak>;
|
||||
};
|
||||
export type ITupleHash = {
|
||||
(messages: Input[], opts?: cShakeOpts): Uint8Array;
|
||||
create(opts?: cShakeOpts): TupleHash;
|
||||
};
|
||||
export type IParHash = {
|
||||
(message: Input, opts?: ParallelOpts): Uint8Array;
|
||||
create(opts?: ParallelOpts): ParallelHash;
|
||||
};
|
||||
export const cshake128: ICShake = /* @__PURE__ */ (() => gencShake(0x1f, 168, 128 / 8))();
|
||||
export const cshake256: ICShake = /* @__PURE__ */ (() => gencShake(0x1f, 136, 256 / 8))();
|
||||
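A hedged usage sketch of cSHAKE with a personalization string (illustrative; when neither NISTfn nor personalization is given, cshakePers above returns the plain SHAKE hasher unchanged):

// Illustrative usage sketch.
import { cshake128 } from '@noble/hashes/sha3-addons';

const out = cshake128('payload', { personalization: 'my-app/v1', dkLen: 32 });
console.log(out.length); // 32
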
|
||||
export class KMAC extends Keccak implements HashXOF<KMAC> {
|
||||
constructor(
|
||||
blockLen: number,
|
||||
outputLen: number,
|
||||
enableXOF: boolean,
|
||||
key: Input,
|
||||
opts: cShakeOpts = {}
|
||||
) {
|
||||
super(blockLen, 0x1f, outputLen, enableXOF);
|
||||
cshakePers(this, { NISTfn: 'KMAC', personalization: opts.personalization });
|
||||
key = toBytes(key);
|
||||
abytes(key);
|
||||
// 1. newX = bytepad(encode_string(K), 168) || X || right_encode(L).
|
||||
const blockLenBytes = leftEncode(this.blockLen);
|
||||
const keyLen = leftEncode(_8n * BigInt(key.length));
|
||||
this.update(blockLenBytes).update(keyLen).update(key);
|
||||
const totalLen = blockLenBytes.length + keyLen.length + key.length;
|
||||
this.update(getPadding(totalLen, this.blockLen));
|
||||
}
|
||||
protected finish(): void {
|
||||
if (!this.finished) this.update(rightEncode(this.enableXOF ? 0 : _8n * BigInt(this.outputLen))); // outputLen in bits
|
||||
super.finish();
|
||||
}
|
||||
_cloneInto(to?: KMAC): KMAC {
|
||||
// Create new instance without calling constructor since key already in state and we don't know it.
|
||||
// Force "to" to be instance of KMAC instead of Sha3.
|
||||
if (!to) {
|
||||
to = Object.create(Object.getPrototypeOf(this), {}) as KMAC;
|
||||
to.state = this.state.slice();
|
||||
to.blockLen = this.blockLen;
|
||||
to.state32 = u32(to.state);
|
||||
}
|
||||
return super._cloneInto(to) as KMAC;
|
||||
}
|
||||
clone(): KMAC {
|
||||
return this._cloneInto();
|
||||
}
|
||||
}
|
||||
|
||||
function genKmac(blockLen: number, outputLen: number, xof = false) {
|
||||
const kmac = (key: Input, message: Input, opts?: cShakeOpts): Uint8Array =>
|
||||
kmac.create(key, opts).update(message).digest();
|
||||
kmac.create = (key: Input, opts: cShakeOpts = {}) =>
|
||||
new KMAC(blockLen, chooseLen(opts, outputLen), xof, key, opts);
|
||||
return kmac;
|
||||
}
|
||||
|
||||
export const kmac128: {
|
||||
(key: Input, message: Input, opts?: cShakeOpts): Uint8Array;
|
||||
create(key: Input, opts?: cShakeOpts): KMAC;
|
||||
} = /* @__PURE__ */ (() => genKmac(168, 128 / 8))();
|
||||
export const kmac256: {
|
||||
(key: Input, message: Input, opts?: cShakeOpts): Uint8Array;
|
||||
create(key: Input, opts?: cShakeOpts): KMAC;
|
||||
} = /* @__PURE__ */ (() => genKmac(136, 256 / 8))();
|
||||
export const kmac128xof: {
|
||||
(key: Input, message: Input, opts?: cShakeOpts): Uint8Array;
|
||||
create(key: Input, opts?: cShakeOpts): KMAC;
|
||||
} = /* @__PURE__ */ (() => genKmac(168, 128 / 8, true))();
|
||||
export const kmac256xof: {
|
||||
(key: Input, message: Input, opts?: cShakeOpts): Uint8Array;
|
||||
create(key: Input, opts?: cShakeOpts): KMAC;
|
||||
} = /* @__PURE__ */ (() => genKmac(136, 256 / 8, true))();
|
||||
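A hedged usage sketch of KMAC as a keyed MAC (illustrative, not part of the vendored file):

// Illustrative usage sketch.
import { kmac256 } from '@noble/hashes/sha3-addons';

const tag = kmac256('secret key', 'message to authenticate');
console.log(tag.length); // 32 bytes by default for kmac256
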
|
||||
// TupleHash
|
||||
// Usage: tuple(['ab', 'cd']) != tuple(['a', 'bcd'])
|
||||
export class TupleHash extends Keccak implements HashXOF<TupleHash> {
|
||||
constructor(blockLen: number, outputLen: number, enableXOF: boolean, opts: cShakeOpts = {}) {
|
||||
super(blockLen, 0x1f, outputLen, enableXOF);
|
||||
cshakePers(this, { NISTfn: 'TupleHash', personalization: opts.personalization });
|
||||
// Change update after cshake processed
|
||||
this.update = (data: Input) => {
|
||||
data = toBytes(data);
|
||||
abytes(data);
|
||||
super.update(leftEncode(_8n * BigInt(data.length)));
|
||||
super.update(data);
|
||||
return this;
|
||||
};
|
||||
}
|
||||
protected finish(): void {
|
||||
if (!this.finished)
|
||||
super.update(rightEncode(this.enableXOF ? 0 : _8n * BigInt(this.outputLen))); // outputLen in bits
|
||||
super.finish();
|
||||
}
|
||||
_cloneInto(to?: TupleHash): TupleHash {
|
||||
to ||= new TupleHash(this.blockLen, this.outputLen, this.enableXOF);
|
||||
return super._cloneInto(to) as TupleHash;
|
||||
}
|
||||
clone(): TupleHash {
|
||||
return this._cloneInto();
|
||||
}
|
||||
}
|
||||
|
||||
function genTuple(blockLen: number, outputLen: number, xof = false) {
|
||||
const tuple = (messages: Input[], opts?: cShakeOpts): Uint8Array => {
|
||||
const h = tuple.create(opts);
|
||||
for (const msg of messages) h.update(msg);
|
||||
return h.digest();
|
||||
};
|
||||
tuple.create = (opts: cShakeOpts = {}) =>
|
||||
new TupleHash(blockLen, chooseLen(opts, outputLen), xof, opts);
|
||||
return tuple;
|
||||
}
|
||||
|
||||
/** 128-bit TupleHASH. */
|
||||
export const tuplehash128: ITupleHash = /* @__PURE__ */ (() => genTuple(168, 128 / 8))();
|
||||
/** 256-bit TupleHASH. */
|
||||
export const tuplehash256: ITupleHash = /* @__PURE__ */ (() => genTuple(136, 256 / 8))();
|
||||
/** 128-bit TupleHASH XOF. */
|
||||
export const tuplehash128xof: ITupleHash = /* @__PURE__ */ (() => genTuple(168, 128 / 8, true))();
|
||||
/** 256-bit TupleHASH XOF. */
|
||||
export const tuplehash256xof: ITupleHash = /* @__PURE__ */ (() => genTuple(136, 256 / 8, true))();
|
||||
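A sketch of the boundary-unambiguity property noted above (illustrative):

// Illustrative usage sketch: each tuple element is length-prefixed, so regrouping
// the same bytes changes the digest.
import { tuplehash256 } from '@noble/hashes/sha3-addons';

const a = tuplehash256(['ab', 'cd']);
const b = tuplehash256(['a', 'bcd']);
// a and b are different digests, unlike a plain hash of the concatenation 'abcd'.
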
|
||||
// ParallelHash (same as K12/M14, but without the speedup for inputs smaller than 8KB, with a reduced number of rounds, and simpler)
|
||||
type ParallelOpts = cShakeOpts & { blockLen?: number };
|
||||
|
||||
export class ParallelHash extends Keccak implements HashXOF<ParallelHash> {
|
||||
private leafHash?: Hash<Keccak>;
|
||||
protected leafCons: () => Hash<Keccak>;
|
||||
private chunkPos = 0; // Position of current block in chunk
|
||||
private chunksDone = 0; // How many chunks we already have
|
||||
private chunkLen: number;
|
||||
constructor(
|
||||
blockLen: number,
|
||||
outputLen: number,
|
||||
leafCons: () => Hash<Keccak>,
|
||||
enableXOF: boolean,
|
||||
opts: ParallelOpts = {}
|
||||
) {
|
||||
super(blockLen, 0x1f, outputLen, enableXOF);
|
||||
cshakePers(this, { NISTfn: 'ParallelHash', personalization: opts.personalization });
|
||||
this.leafCons = leafCons;
|
||||
let { blockLen: B } = opts;
|
||||
B ||= 8;
|
||||
anumber(B);
|
||||
this.chunkLen = B;
|
||||
super.update(leftEncode(B));
|
||||
// Change update after cshake processed
|
||||
this.update = (data: Input) => {
|
||||
data = toBytes(data);
|
||||
abytes(data);
|
||||
const { chunkLen, leafCons } = this;
|
||||
for (let pos = 0, len = data.length; pos < len; ) {
|
||||
if (this.chunkPos == chunkLen || !this.leafHash) {
|
||||
if (this.leafHash) {
|
||||
super.update(this.leafHash.digest());
|
||||
this.chunksDone++;
|
||||
}
|
||||
this.leafHash = leafCons();
|
||||
this.chunkPos = 0;
|
||||
}
|
||||
const take = Math.min(chunkLen - this.chunkPos, len - pos);
|
||||
this.leafHash.update(data.subarray(pos, pos + take));
|
||||
this.chunkPos += take;
|
||||
pos += take;
|
||||
}
|
||||
return this;
|
||||
};
|
||||
}
|
||||
protected finish(): void {
|
||||
if (this.finished) return;
|
||||
if (this.leafHash) {
|
||||
super.update(this.leafHash.digest());
|
||||
this.chunksDone++;
|
||||
}
|
||||
super.update(rightEncode(this.chunksDone));
|
||||
super.update(rightEncode(this.enableXOF ? 0 : _8n * BigInt(this.outputLen))); // outputLen in bits
|
||||
super.finish();
|
||||
}
|
||||
_cloneInto(to?: ParallelHash): ParallelHash {
|
||||
to ||= new ParallelHash(this.blockLen, this.outputLen, this.leafCons, this.enableXOF);
|
||||
if (this.leafHash) to.leafHash = this.leafHash._cloneInto(to.leafHash as Keccak);
|
||||
to.chunkPos = this.chunkPos;
|
||||
to.chunkLen = this.chunkLen;
|
||||
to.chunksDone = this.chunksDone;
|
||||
return super._cloneInto(to) as ParallelHash;
|
||||
}
|
||||
destroy(): void {
|
||||
super.destroy.call(this);
|
||||
if (this.leafHash) this.leafHash.destroy();
|
||||
}
|
||||
clone(): ParallelHash {
|
||||
return this._cloneInto();
|
||||
}
|
||||
}
|
||||
|
||||
function genPrl(
|
||||
blockLen: number,
|
||||
outputLen: number,
|
||||
leaf: ReturnType<typeof gencShake>,
|
||||
xof = false
|
||||
) {
|
||||
const parallel = (message: Input, opts?: ParallelOpts): Uint8Array =>
|
||||
parallel.create(opts).update(message).digest();
|
||||
parallel.create = (opts: ParallelOpts = {}) =>
|
||||
new ParallelHash(
|
||||
blockLen,
|
||||
chooseLen(opts, outputLen),
|
||||
() => leaf.create({ dkLen: 2 * outputLen }),
|
||||
xof,
|
||||
opts
|
||||
);
|
||||
return parallel;
|
||||
}
|
||||
|
||||
/** 128-bit ParallelHash. In JS, it is not parallel. */
|
||||
export const parallelhash128: IParHash = /* @__PURE__ */ (() => genPrl(168, 128 / 8, cshake128))();
|
||||
/** 256-bit ParallelHash. In JS, it is not parallel. */
|
||||
export const parallelhash256: IParHash = /* @__PURE__ */ (() => genPrl(136, 256 / 8, cshake256))();
|
||||
/** 128-bit ParallelHash XOF. In JS, it is not parallel. */
|
||||
export const parallelhash128xof: IParHash = /* @__PURE__ */ (() =>
|
||||
genPrl(168, 128 / 8, cshake128, true))();
|
||||
/** 256-bit ParallelHash XOF. In JS, it is not parallel. */
|
||||
export const parallelhash256xof: IParHash = /* @__PURE__ */ (() =>
|
||||
genPrl(136, 256 / 8, cshake256, true))();
|
||||
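// Illustrative usage sketch (assumes the package's usual '@noble/hashes/sha3-addons'
// subpath export). Here `blockLen` is the ParallelHash chunk size B in bytes, an option
// of this wrapper, not the Keccak rate:
//
//   import { parallelhash256, parallelhash256xof } from '@noble/hashes/sha3-addons';
//   const digest = parallelhash256('some data', { blockLen: 8 });
//   // XOF flavour: read 64 bytes from the output stream
//   const stream = parallelhash256xof.create({ blockLen: 8 }).update('some data').xof(64);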
|
||||
// Should have been just 'SHAKE with 12 rounds', but no: we got a whole new spec, TurboSHAKE Pro MAX.
|
||||
export type TurboshakeOpts = ShakeOpts & {
|
||||
D?: number; // Domain separation byte
|
||||
};
|
||||
|
||||
const genTurboshake = (blockLen: number, outputLen: number) =>
|
||||
createXOFer<HashXOF<Keccak>, TurboshakeOpts>((opts: TurboshakeOpts = {}) => {
|
||||
const D = opts.D === undefined ? 0x1f : opts.D;
|
||||
// Section 2.1 of https://datatracker.ietf.org/doc/draft-irtf-cfrg-kangarootwelve/
|
||||
if (!Number.isSafeInteger(D) || D < 0x01 || D > 0x7f)
|
||||
throw new Error('invalid domain separation byte: must be 0x01..0x7f, got ' + D);
|
||||
return new Keccak(blockLen, D, opts.dkLen === undefined ? outputLen : opts.dkLen, true, 12);
|
||||
});
|
||||
|
||||
/** TurboSHAKE 128-bit: reduced 12-round keccak. */
|
||||
export const turboshake128: CHashXO = /* @__PURE__ */ genTurboshake(168, 256 / 8);
|
||||
/** TurboSHAKE 256-bit: reduced 12-round keccak. */
|
||||
export const turboshake256: CHashXO = /* @__PURE__ */ genTurboshake(136, 512 / 8);
|
||||
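// Illustrative usage sketch (assumes the package's usual '@noble/hashes/sha3-addons'
// subpath export). `D` is the domain-separation byte (0x01..0x7f, default 0x1f per the
// code above) and `dkLen` is the output length in bytes:
//
//   import { turboshake128 } from '@noble/hashes/sha3-addons';
//   const out32 = turboshake128('message');                         // default 32 bytes
//   const out64 = turboshake128('message', { dkLen: 64, D: 0x0b }); // custom domain byte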
|
||||
// Kangaroo
|
||||
// Same as NIST rightEncode, but returns [0] for n = 0
|
||||
function rightEncodeK12(n: number | bigint): Uint8Array {
|
||||
n = BigInt(n);
|
||||
const res: number[] = [];
|
||||
for (; n > 0; n >>= _8n) res.unshift(Number(n & _ffn));
|
||||
res.push(res.length);
|
||||
return Uint8Array.from(res);
|
||||
}
|
||||
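// Worked examples of the encoding above (derived by tracing the function):
//   rightEncodeK12(0)      -> Uint8Array [0x00]              (zero maps to a single 0 byte)
//   rightEncodeK12(0x1234) -> Uint8Array [0x12, 0x34, 0x02]  (big-endian bytes + length byte)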
|
||||
export type KangarooOpts = { dkLen?: number; personalization?: Input };
|
||||
const EMPTY_BUFFER = /* @__PURE__ */ Uint8Array.of();
|
||||
|
||||
export class KangarooTwelve extends Keccak implements HashXOF<KangarooTwelve> {
|
||||
readonly chunkLen = 8192;
|
||||
private leafHash?: Keccak;
|
||||
protected leafLen: number;
|
||||
private personalization: Uint8Array;
|
||||
private chunkPos = 0; // Position of current block in chunk
|
||||
private chunksDone = 0; // How many chunks we already have
|
||||
constructor(
|
||||
blockLen: number,
|
||||
leafLen: number,
|
||||
outputLen: number,
|
||||
rounds: number,
|
||||
opts: KangarooOpts
|
||||
) {
|
||||
super(blockLen, 0x07, outputLen, true, rounds);
|
||||
this.leafLen = leafLen;
|
||||
this.personalization = abytesOrZero(opts.personalization);
|
||||
}
|
||||
update(data: Input): this {
|
||||
data = toBytes(data);
|
||||
abytes(data);
|
||||
const { chunkLen, blockLen, leafLen, rounds } = this;
|
||||
for (let pos = 0, len = data.length; pos < len; ) {
|
||||
if (this.chunkPos == chunkLen) {
|
||||
if (this.leafHash) super.update(this.leafHash.digest());
|
||||
else {
|
||||
this.suffix = 0x06; // It's safe to change the suffix here since it's only used in digest()
|
||||
super.update(Uint8Array.from([3, 0, 0, 0, 0, 0, 0, 0]));
|
||||
}
|
||||
this.leafHash = new Keccak(blockLen, 0x0b, leafLen, false, rounds);
|
||||
this.chunksDone++;
|
||||
this.chunkPos = 0;
|
||||
}
|
||||
const take = Math.min(chunkLen - this.chunkPos, len - pos);
|
||||
const chunk = data.subarray(pos, pos + take);
|
||||
if (this.leafHash) this.leafHash.update(chunk);
|
||||
else super.update(chunk);
|
||||
this.chunkPos += take;
|
||||
pos += take;
|
||||
}
|
||||
return this;
|
||||
}
|
||||
protected finish(): void {
|
||||
if (this.finished) return;
|
||||
const { personalization } = this;
|
||||
this.update(personalization).update(rightEncodeK12(personalization.length));
|
||||
// Leaf hash
|
||||
if (this.leafHash) {
|
||||
super.update(this.leafHash.digest());
|
||||
super.update(rightEncodeK12(this.chunksDone));
|
||||
super.update(Uint8Array.from([0xff, 0xff]));
|
||||
}
|
||||
super.finish.call(this);
|
||||
}
|
||||
destroy(): void {
|
||||
super.destroy.call(this);
|
||||
if (this.leafHash) this.leafHash.destroy();
|
||||
// We cannot zero personalization buffer since it is user provided and we don't want to mutate user input
|
||||
this.personalization = EMPTY_BUFFER;
|
||||
}
|
||||
_cloneInto(to?: KangarooTwelve): KangarooTwelve {
|
||||
const { blockLen, leafLen, leafHash, outputLen, rounds } = this;
|
||||
to ||= new KangarooTwelve(blockLen, leafLen, outputLen, rounds, {});
|
||||
super._cloneInto(to);
|
||||
if (leafHash) to.leafHash = leafHash._cloneInto(to.leafHash);
|
||||
to.personalization.set(this.personalization);
|
||||
to.leafLen = this.leafLen;
|
||||
to.chunkPos = this.chunkPos;
|
||||
to.chunksDone = this.chunksDone;
|
||||
return to;
|
||||
}
|
||||
clone(): KangarooTwelve {
|
||||
return this._cloneInto();
|
||||
}
|
||||
}
|
||||
/** KangarooTwelve: reduced 12-round keccak. */
|
||||
export const k12: CHashO = /* @__PURE__ */ (() =>
|
||||
createOptHasher<KangarooTwelve, KangarooOpts>(
|
||||
(opts: KangarooOpts = {}) => new KangarooTwelve(168, 32, chooseLen(opts, 32), 12, opts)
|
||||
))();
|
||||
/** MarsupilamiFourteen: reduced 14-round keccak. */
|
||||
export const m14: CHashO = /* @__PURE__ */ (() =>
|
||||
createOptHasher<KangarooTwelve, KangarooOpts>(
|
||||
(opts: KangarooOpts = {}) => new KangarooTwelve(136, 64, chooseLen(opts, 64), 14, opts)
|
||||
))();
|
||||
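// Illustrative usage sketch (assumes the package's usual '@noble/hashes/sha3-addons'
// subpath export):
//
//   import { k12, m14 } from '@noble/hashes/sha3-addons';
//   const a = k12('hello');                                        // 32-byte digest
//   const b = k12('hello', { dkLen: 64, personalization: 'ctx' }); // longer output + context
//   const c = m14('hello');                                        // 64-byte, 14-round variant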
|
||||
/**
|
||||
* More at https://github.com/XKCP/XKCP/tree/master/lib/high/Keccak/PRG.
|
||||
*/
|
||||
export class KeccakPRG extends Keccak {
|
||||
protected rate: number;
|
||||
constructor(capacity: number) {
|
||||
anumber(capacity);
|
||||
// Rho should be full bytes
|
||||
if (capacity < 0 || capacity > 1600 - 10 || (1600 - capacity - 2) % 8)
|
||||
throw new Error('invalid capacity');
|
||||
// blockLen = rho in bytes
|
||||
super((1600 - capacity - 2) / 8, 0, 0, true);
|
||||
this.rate = 1600 - capacity;
|
||||
this.posOut = Math.floor((this.rate + 7) / 8);
|
||||
}
|
||||
keccak(): void {
|
||||
// Duplex padding
|
||||
this.state[this.pos] ^= 0x01;
|
||||
this.state[this.blockLen] ^= 0x02; // Rho is full bytes
|
||||
super.keccak();
|
||||
this.pos = 0;
|
||||
this.posOut = 0;
|
||||
}
|
||||
update(data: Input): this {
|
||||
super.update(data);
|
||||
this.posOut = this.blockLen;
|
||||
return this;
|
||||
}
|
||||
feed(data: Input): this {
|
||||
return this.update(data);
|
||||
}
|
||||
protected finish(): void {}
|
||||
digestInto(_out: Uint8Array): Uint8Array {
|
||||
throw new Error('digest is not allowed, use .fetch instead');
|
||||
}
|
||||
fetch(bytes: number): Uint8Array {
|
||||
return this.xof(bytes);
|
||||
}
|
||||
// Ensure irreversibility (even if the state leaks, previous outputs cannot be computed)
|
||||
forget(): void {
|
||||
if (this.rate < 1600 / 2 + 1) throw new Error('rate is too low to use .forget()');
|
||||
this.keccak();
|
||||
for (let i = 0; i < this.blockLen; i++) this.state[i] = 0;
|
||||
this.pos = this.blockLen;
|
||||
this.keccak();
|
||||
this.posOut = this.blockLen;
|
||||
}
|
||||
_cloneInto(to?: KeccakPRG): KeccakPRG {
|
||||
const { rate } = this;
|
||||
to ||= new KeccakPRG(1600 - rate);
|
||||
super._cloneInto(to);
|
||||
to.rate = rate;
|
||||
return to;
|
||||
}
|
||||
clone(): KeccakPRG {
|
||||
return this._cloneInto();
|
||||
}
|
||||
}
|
||||
|
||||
/** KeccakPRG: Pseudo-random generator based on Keccak. https://keccak.team/files/CSF-0.1.pdf */
|
||||
export const keccakprg = (capacity = 254): KeccakPRG => new KeccakPRG(capacity);
|
||||
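// Illustrative usage sketch (assumes the package's usual '@noble/hashes/sha3-addons' and
// '@noble/hashes/utils' subpath exports). The PRG is deterministic, so seed it with real
// entropy before use:
//
//   import { keccakprg } from '@noble/hashes/sha3-addons';
//   import { randomBytes } from '@noble/hashes/utils';
//   const prg = keccakprg();    // default capacity 254
//   prg.feed(randomBytes(32));  // absorb seed material
//   const out = prg.fetch(16);  // squeeze 16 pseudo-random bytes
//   prg.forget();               // optional: make previous outputs unrecoverable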
258
qwen/nodejs/node_modules/@noble/hashes/src/sha3.ts
generated
vendored
Normal file
@@ -0,0 +1,258 @@
|
||||
/**
|
||||
* SHA3 (keccak) hash function, based on a new "Sponge function" design.
|
||||
* Different from older hashes, the internal state is bigger than output size.
|
||||
*
|
||||
* Check out [FIPS-202](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf),
|
||||
* [Website](https://keccak.team/keccak.html),
|
||||
* [the differences between SHA-3 and Keccak](https://crypto.stackexchange.com/questions/15727/what-are-the-key-differences-between-the-draft-sha-3-standard-and-the-keccak-sub).
|
||||
*
|
||||
* Check out `sha3-addons` module for cSHAKE, k12, and others.
|
||||
* @module
|
||||
*/
|
||||
import { rotlBH, rotlBL, rotlSH, rotlSL, split } from './_u64.ts';
|
||||
// prettier-ignore
|
||||
import {
|
||||
abytes, aexists, anumber, aoutput,
|
||||
clean, createHasher, createXOFer, Hash,
|
||||
swap32IfBE,
|
||||
toBytes, u32,
|
||||
type CHash, type CHashXO, type HashXOF, type Input
|
||||
} from './utils.ts';
|
||||
|
||||
// No __PURE__ annotations in sha3 header:
|
||||
// EVERYTHING is in fact used on every export.
|
||||
// Various per round constants calculations
|
||||
const _0n = BigInt(0);
|
||||
const _1n = BigInt(1);
|
||||
const _2n = BigInt(2);
|
||||
const _7n = BigInt(7);
|
||||
const _256n = BigInt(256);
|
||||
const _0x71n = BigInt(0x71);
|
||||
const SHA3_PI: number[] = [];
|
||||
const SHA3_ROTL: number[] = [];
|
||||
const _SHA3_IOTA: bigint[] = [];
|
||||
for (let round = 0, R = _1n, x = 1, y = 0; round < 24; round++) {
|
||||
// Pi
|
||||
[x, y] = [y, (2 * x + 3 * y) % 5];
|
||||
SHA3_PI.push(2 * (5 * y + x));
|
||||
// Rotational
|
||||
SHA3_ROTL.push((((round + 1) * (round + 2)) / 2) % 64);
|
||||
// Iota
|
||||
let t = _0n;
|
||||
for (let j = 0; j < 7; j++) {
|
||||
R = ((R << _1n) ^ ((R >> _7n) * _0x71n)) % _256n;
|
||||
if (R & _2n) t ^= _1n << ((_1n << /* @__PURE__ */ BigInt(j)) - _1n);
|
||||
}
|
||||
_SHA3_IOTA.push(t);
|
||||
}
|
||||
const IOTAS = split(_SHA3_IOTA, true);
|
||||
const SHA3_IOTA_H = IOTAS[0];
|
||||
const SHA3_IOTA_L = IOTAS[1];
|
||||
|
||||
// Left rotation (without 0, 32, 64)
|
||||
const rotlH = (h: number, l: number, s: number) => (s > 32 ? rotlBH(h, l, s) : rotlSH(h, l, s));
|
||||
const rotlL = (h: number, l: number, s: number) => (s > 32 ? rotlBL(h, l, s) : rotlSL(h, l, s));
|
||||
|
||||
/** `keccakf1600` internal function, additionally allows to adjust round count. */
|
||||
export function keccakP(s: Uint32Array, rounds: number = 24): void {
|
||||
const B = new Uint32Array(5 * 2);
|
||||
// NOTE: all indices are x2 since we store the state as u32 instead of u64 (bigints are too slow in JS)
|
||||
for (let round = 24 - rounds; round < 24; round++) {
|
||||
// Theta θ
|
||||
for (let x = 0; x < 10; x++) B[x] = s[x] ^ s[x + 10] ^ s[x + 20] ^ s[x + 30] ^ s[x + 40];
|
||||
for (let x = 0; x < 10; x += 2) {
|
||||
const idx1 = (x + 8) % 10;
|
||||
const idx0 = (x + 2) % 10;
|
||||
const B0 = B[idx0];
|
||||
const B1 = B[idx0 + 1];
|
||||
const Th = rotlH(B0, B1, 1) ^ B[idx1];
|
||||
const Tl = rotlL(B0, B1, 1) ^ B[idx1 + 1];
|
||||
for (let y = 0; y < 50; y += 10) {
|
||||
s[x + y] ^= Th;
|
||||
s[x + y + 1] ^= Tl;
|
||||
}
|
||||
}
|
||||
// Rho (ρ) and Pi (π)
|
||||
let curH = s[2];
|
||||
let curL = s[3];
|
||||
for (let t = 0; t < 24; t++) {
|
||||
const shift = SHA3_ROTL[t];
|
||||
const Th = rotlH(curH, curL, shift);
|
||||
const Tl = rotlL(curH, curL, shift);
|
||||
const PI = SHA3_PI[t];
|
||||
curH = s[PI];
|
||||
curL = s[PI + 1];
|
||||
s[PI] = Th;
|
||||
s[PI + 1] = Tl;
|
||||
}
|
||||
// Chi (χ)
|
||||
for (let y = 0; y < 50; y += 10) {
|
||||
for (let x = 0; x < 10; x++) B[x] = s[y + x];
|
||||
for (let x = 0; x < 10; x++) s[y + x] ^= ~B[(x + 2) % 10] & B[(x + 4) % 10];
|
||||
}
|
||||
// Iota (ι)
|
||||
s[0] ^= SHA3_IOTA_H[round];
|
||||
s[1] ^= SHA3_IOTA_L[round];
|
||||
}
|
||||
clean(B);
|
||||
}
|
||||
|
||||
/** Keccak sponge function. */
|
||||
export class Keccak extends Hash<Keccak> implements HashXOF<Keccak> {
|
||||
protected state: Uint8Array;
|
||||
protected pos = 0;
|
||||
protected posOut = 0;
|
||||
protected finished = false;
|
||||
protected state32: Uint32Array;
|
||||
protected destroyed = false;
|
||||
|
||||
public blockLen: number;
|
||||
public suffix: number;
|
||||
public outputLen: number;
|
||||
protected enableXOF = false;
|
||||
protected rounds: number;
|
||||
|
||||
// NOTE: we accept arguments in bytes instead of bits here.
|
||||
constructor(
|
||||
blockLen: number,
|
||||
suffix: number,
|
||||
outputLen: number,
|
||||
enableXOF = false,
|
||||
rounds: number = 24
|
||||
) {
|
||||
super();
|
||||
this.blockLen = blockLen;
|
||||
this.suffix = suffix;
|
||||
this.outputLen = outputLen;
|
||||
this.enableXOF = enableXOF;
|
||||
this.rounds = rounds;
|
||||
// Can be passed from user as dkLen
|
||||
anumber(outputLen);
|
||||
// 1600 = 5x5 matrix of 64-bit words. 1600 bits === 200 bytes
|
||||
// 0 < blockLen < 200
|
||||
if (!(0 < blockLen && blockLen < 200))
|
||||
throw new Error('only keccak-f1600 function is supported');
|
||||
this.state = new Uint8Array(200);
|
||||
this.state32 = u32(this.state);
|
||||
}
|
||||
clone(): Keccak {
|
||||
return this._cloneInto();
|
||||
}
|
||||
protected keccak(): void {
|
||||
swap32IfBE(this.state32);
|
||||
keccakP(this.state32, this.rounds);
|
||||
swap32IfBE(this.state32);
|
||||
this.posOut = 0;
|
||||
this.pos = 0;
|
||||
}
|
||||
update(data: Input): this {
|
||||
aexists(this);
|
||||
data = toBytes(data);
|
||||
abytes(data);
|
||||
const { blockLen, state } = this;
|
||||
const len = data.length;
|
||||
for (let pos = 0; pos < len; ) {
|
||||
const take = Math.min(blockLen - this.pos, len - pos);
|
||||
for (let i = 0; i < take; i++) state[this.pos++] ^= data[pos++];
|
||||
if (this.pos === blockLen) this.keccak();
|
||||
}
|
||||
return this;
|
||||
}
|
||||
protected finish(): void {
|
||||
if (this.finished) return;
|
||||
this.finished = true;
|
||||
const { state, suffix, pos, blockLen } = this;
|
||||
// Do the padding
|
||||
state[pos] ^= suffix;
|
||||
if ((suffix & 0x80) !== 0 && pos === blockLen - 1) this.keccak();
|
||||
state[blockLen - 1] ^= 0x80;
|
||||
this.keccak();
|
||||
}
|
||||
protected writeInto(out: Uint8Array): Uint8Array {
|
||||
aexists(this, false);
|
||||
abytes(out);
|
||||
this.finish();
|
||||
const bufferOut = this.state;
|
||||
const { blockLen } = this;
|
||||
for (let pos = 0, len = out.length; pos < len; ) {
|
||||
if (this.posOut >= blockLen) this.keccak();
|
||||
const take = Math.min(blockLen - this.posOut, len - pos);
|
||||
out.set(bufferOut.subarray(this.posOut, this.posOut + take), pos);
|
||||
this.posOut += take;
|
||||
pos += take;
|
||||
}
|
||||
return out;
|
||||
}
|
||||
xofInto(out: Uint8Array): Uint8Array {
|
||||
// Using SHA-3/Keccak in XOF mode is probably a mistake; only SHAKE instances can do XOF
|
||||
if (!this.enableXOF) throw new Error('XOF is not possible for this instance');
|
||||
return this.writeInto(out);
|
||||
}
|
||||
xof(bytes: number): Uint8Array {
|
||||
anumber(bytes);
|
||||
return this.xofInto(new Uint8Array(bytes));
|
||||
}
|
||||
digestInto(out: Uint8Array): Uint8Array {
|
||||
aoutput(out, this);
|
||||
if (this.finished) throw new Error('digest() was already called');
|
||||
this.writeInto(out);
|
||||
this.destroy();
|
||||
return out;
|
||||
}
|
||||
digest(): Uint8Array {
|
||||
return this.digestInto(new Uint8Array(this.outputLen));
|
||||
}
|
||||
destroy(): void {
|
||||
this.destroyed = true;
|
||||
clean(this.state);
|
||||
}
|
||||
_cloneInto(to?: Keccak): Keccak {
|
||||
const { blockLen, suffix, outputLen, rounds, enableXOF } = this;
|
||||
to ||= new Keccak(blockLen, suffix, outputLen, enableXOF, rounds);
|
||||
to.state32.set(this.state32);
|
||||
to.pos = this.pos;
|
||||
to.posOut = this.posOut;
|
||||
to.finished = this.finished;
|
||||
to.rounds = rounds;
|
||||
// Suffix can change in cSHAKE
|
||||
to.suffix = suffix;
|
||||
to.outputLen = outputLen;
|
||||
to.enableXOF = enableXOF;
|
||||
to.destroyed = this.destroyed;
|
||||
return to;
|
||||
}
|
||||
}
|
||||
|
||||
const gen = (suffix: number, blockLen: number, outputLen: number) =>
|
||||
createHasher(() => new Keccak(blockLen, suffix, outputLen));
|
||||
|
||||
/** SHA3-224 hash function. */
|
||||
export const sha3_224: CHash = /* @__PURE__ */ (() => gen(0x06, 144, 224 / 8))();
|
||||
/** SHA3-256 hash function. Different from keccak-256. */
|
||||
export const sha3_256: CHash = /* @__PURE__ */ (() => gen(0x06, 136, 256 / 8))();
|
||||
/** SHA3-384 hash function. */
|
||||
export const sha3_384: CHash = /* @__PURE__ */ (() => gen(0x06, 104, 384 / 8))();
|
||||
/** SHA3-512 hash function. */
|
||||
export const sha3_512: CHash = /* @__PURE__ */ (() => gen(0x06, 72, 512 / 8))();
|
||||
|
||||
/** keccak-224 hash function. */
|
||||
export const keccak_224: CHash = /* @__PURE__ */ (() => gen(0x01, 144, 224 / 8))();
|
||||
/** keccak-256 hash function. Different from SHA3-256. */
|
||||
export const keccak_256: CHash = /* @__PURE__ */ (() => gen(0x01, 136, 256 / 8))();
|
||||
/** keccak-384 hash function. */
|
||||
export const keccak_384: CHash = /* @__PURE__ */ (() => gen(0x01, 104, 384 / 8))();
|
||||
/** keccak-512 hash function. */
|
||||
export const keccak_512: CHash = /* @__PURE__ */ (() => gen(0x01, 72, 512 / 8))();
|
||||
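// Illustrative usage sketch (assumes the package's usual '@noble/hashes/sha3' subpath
// export). SHA3-256 and keccak-256 differ only in the padding suffix (0x06 vs 0x01, as
// configured above), so they produce different digests for the same input:
//
//   import { sha3_256, keccak_256 } from '@noble/hashes/sha3';
//   const a = sha3_256('abc');   // NIST SHA3-256
//   const b = keccak_256('abc'); // pre-NIST Keccak-256 (as used by Ethereum)
//   // a and b are both 32 bytes, but not equal.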
|
||||
export type ShakeOpts = { dkLen?: number };
|
||||
|
||||
const genShake = (suffix: number, blockLen: number, outputLen: number) =>
|
||||
createXOFer<HashXOF<Keccak>, ShakeOpts>(
|
||||
(opts: ShakeOpts = {}) =>
|
||||
new Keccak(blockLen, suffix, opts.dkLen === undefined ? outputLen : opts.dkLen, true)
|
||||
);
|
||||
|
||||
/** SHAKE128 XOF with 128-bit security. */
|
||||
export const shake128: CHashXO = /* @__PURE__ */ (() => genShake(0x1f, 168, 128 / 8))();
|
||||
/** SHAKE256 XOF with 256-bit security. */
|
||||
export const shake256: CHashXO = /* @__PURE__ */ (() => genShake(0x1f, 136, 256 / 8))();
|
||||
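// Illustrative usage sketch (assumes the package's usual '@noble/hashes/sha3' subpath
// export). As XOFs, the SHAKEs take an arbitrary output length via `dkLen` (bytes):
//
//   import { shake256 } from '@noble/hashes/sha3';
//   const out = shake256('seed', { dkLen: 96 }); // 96 bytes of output
//   // Streaming form: same bytes, read incrementally.
//   const x = shake256.create({ dkLen: 96 }).update('seed');
//   const first32 = x.xof(32);
//   const next64 = x.xof(64);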
34
qwen/nodejs/node_modules/@noble/hashes/src/sha512.ts
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
/**
|
||||
 * SHA2-512, a.k.a. sha512 and sha384. It is slower than sha256 in JS because u64 operations are slow.
|
||||
*
|
||||
* Check out [RFC 4634](https://datatracker.ietf.org/doc/html/rfc4634) and
|
||||
* [the paper on truncated SHA512/256](https://eprint.iacr.org/2010/548.pdf).
|
||||
* @module
|
||||
* @deprecated
|
||||
*/
|
||||
import {
|
||||
SHA384 as SHA384n,
|
||||
sha384 as sha384n,
|
||||
sha512_224 as sha512_224n,
|
||||
SHA512_224 as SHA512_224n,
|
||||
sha512_256 as sha512_256n,
|
||||
SHA512_256 as SHA512_256n,
|
||||
SHA512 as SHA512n,
|
||||
sha512 as sha512n,
|
||||
} from './sha2.ts';
|
||||
/** @deprecated Use import from `noble/hashes/sha2` module */
|
||||
export const SHA512: typeof SHA512n = SHA512n;
|
||||
/** @deprecated Use import from `noble/hashes/sha2` module */
|
||||
export const sha512: typeof sha512n = sha512n;
|
||||
/** @deprecated Use import from `noble/hashes/sha2` module */
|
||||
export const SHA384: typeof SHA384n = SHA384n;
|
||||
/** @deprecated Use import from `noble/hashes/sha2` module */
|
||||
export const sha384: typeof sha384n = sha384n;
|
||||
/** @deprecated Use import from `noble/hashes/sha2` module */
|
||||
export const SHA512_224: typeof SHA512_224n = SHA512_224n;
|
||||
/** @deprecated Use import from `noble/hashes/sha2` module */
|
||||
export const sha512_224: typeof sha512_224n = sha512_224n;
|
||||
/** @deprecated Use import from `noble/hashes/sha2` module */
|
||||
export const SHA512_256: typeof SHA512_256n = SHA512_256n;
|
||||
/** @deprecated Use import from `noble/hashes/sha2` module */
|
||||
export const sha512_256: typeof sha512_256n = sha512_256n;
|
||||
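// Illustrative note: since this module only re-exports deprecated aliases, new code
// should import directly from the sha2 module, e.g.:
//
//   import { sha512, sha384 } from '@noble/hashes/sha2';
//   const digest = sha512('message');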
395
qwen/nodejs/node_modules/@noble/hashes/src/utils.ts
generated
vendored
Normal file
@@ -0,0 +1,395 @@
|
||||
/**
|
||||
* Utilities for hex, bytes, CSPRNG.
|
||||
* @module
|
||||
*/
|
||||
/*! noble-hashes - MIT License (c) 2022 Paul Miller (paulmillr.com) */
|
||||
|
||||
// We use WebCrypto aka globalThis.crypto, which exists in browsers and node.js 16+.
|
||||
// node.js versions earlier than v19 don't declare it in global scope.
|
||||
// For node.js, package.json#exports field mapping rewrites import
|
||||
// from `crypto` to `cryptoNode`, which imports native module.
|
||||
// This makes the utils un-importable in browsers without a bundler.
|
||||
// Once node.js 18 is deprecated (2025-04-30), we can just drop the import.
|
||||
import { crypto } from '@noble/hashes/crypto';
|
||||
|
||||
/** Checks if something is Uint8Array. Be careful: nodejs Buffer will return true. */
|
||||
export function isBytes(a: unknown): a is Uint8Array {
|
||||
return a instanceof Uint8Array || (ArrayBuffer.isView(a) && a.constructor.name === 'Uint8Array');
|
||||
}
|
||||
|
||||
/** Asserts something is a positive integer. */
|
||||
export function anumber(n: number): void {
|
||||
if (!Number.isSafeInteger(n) || n < 0) throw new Error('positive integer expected, got ' + n);
|
||||
}
|
||||
|
||||
/** Asserts something is Uint8Array. */
|
||||
export function abytes(b: Uint8Array | undefined, ...lengths: number[]): void {
|
||||
if (!isBytes(b)) throw new Error('Uint8Array expected');
|
||||
if (lengths.length > 0 && !lengths.includes(b.length))
|
||||
throw new Error('Uint8Array expected of length ' + lengths + ', got length=' + b.length);
|
||||
}
|
||||
|
||||
/** Asserts something is a hash. */
|
||||
export function ahash(h: IHash): void {
|
||||
if (typeof h !== 'function' || typeof h.create !== 'function')
|
||||
throw new Error('Hash should be wrapped by utils.createHasher');
|
||||
anumber(h.outputLen);
|
||||
anumber(h.blockLen);
|
||||
}
|
||||
|
||||
/** Asserts a hash instance has not been destroyed / finished */
|
||||
export function aexists(instance: any, checkFinished = true): void {
|
||||
if (instance.destroyed) throw new Error('Hash instance has been destroyed');
|
||||
if (checkFinished && instance.finished) throw new Error('Hash#digest() has already been called');
|
||||
}
|
||||
|
||||
/** Asserts output is properly-sized byte array */
|
||||
export function aoutput(out: any, instance: any): void {
|
||||
abytes(out);
|
||||
const min = instance.outputLen;
|
||||
if (out.length < min) {
|
||||
throw new Error('digestInto() expects output buffer of length at least ' + min);
|
||||
}
|
||||
}
|
||||
|
||||
/** Generic type encompassing 8/16/32-bit typed arrays - but not 64-bit. */
|
||||
// prettier-ignore
|
||||
export type TypedArray = Int8Array | Uint8ClampedArray | Uint8Array |
|
||||
Uint16Array | Int16Array | Uint32Array | Int32Array;
|
||||
|
||||
/** Cast u8 / u16 / u32 to u8. */
|
||||
export function u8(arr: TypedArray): Uint8Array {
|
||||
return new Uint8Array(arr.buffer, arr.byteOffset, arr.byteLength);
|
||||
}
|
||||
|
||||
/** Cast u8 / u16 / u32 to u32. */
|
||||
export function u32(arr: TypedArray): Uint32Array {
|
||||
return new Uint32Array(arr.buffer, arr.byteOffset, Math.floor(arr.byteLength / 4));
|
||||
}
|
||||
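// Illustrative note: these casts create *views* over the same buffer, not copies.
// A hedged sketch:
//   const bytes = new Uint8Array([1, 0, 0, 0, 2, 0, 0, 0]);
//   const words = u32(bytes); // Uint32Array [1, 2] on little-endian platforms
//   words[0] = 0xff;          // also mutates `bytes`, since the memory is shared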
|
||||
/** Zeroize a byte array. Warning: JS provides no guarantees. */
|
||||
export function clean(...arrays: TypedArray[]): void {
|
||||
for (let i = 0; i < arrays.length; i++) {
|
||||
arrays[i].fill(0);
|
||||
}
|
||||
}
|
||||
|
||||
/** Create DataView of an array for easy byte-level manipulation. */
|
||||
export function createView(arr: TypedArray): DataView {
|
||||
return new DataView(arr.buffer, arr.byteOffset, arr.byteLength);
|
||||
}
|
||||
|
||||
/** The rotate right (circular right shift) operation for uint32 */
|
||||
export function rotr(word: number, shift: number): number {
|
||||
return (word << (32 - shift)) | (word >>> shift);
|
||||
}
|
||||
|
||||
/** The rotate left (circular left shift) operation for uint32 */
|
||||
export function rotl(word: number, shift: number): number {
|
||||
return (word << shift) | ((word >>> (32 - shift)) >>> 0);
|
||||
}
|
||||
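// Worked examples (results shown as unsigned 32-bit values):
//   rotr(0x12345678, 8) >>> 0 === 0x78123456
//   rotl(0x12345678, 8) >>> 0 === 0x34567812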
|
||||
/** Is current platform little-endian? Most are. Big-Endian platform: IBM */
|
||||
export const isLE: boolean = /* @__PURE__ */ (() =>
|
||||
new Uint8Array(new Uint32Array([0x11223344]).buffer)[0] === 0x44)();
|
||||
|
||||
/** The byte swap operation for uint32 */
|
||||
export function byteSwap(word: number): number {
|
||||
return (
|
||||
((word << 24) & 0xff000000) |
|
||||
((word << 8) & 0xff0000) |
|
||||
((word >>> 8) & 0xff00) |
|
||||
((word >>> 24) & 0xff)
|
||||
);
|
||||
}
|
||||
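// Worked example:
//   byteSwap(0x11223344) === 0x44332211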
/** Conditionally byte swap if on a big-endian platform */
|
||||
export const swap8IfBE: (n: number) => number = isLE
|
||||
? (n: number) => n
|
||||
: (n: number) => byteSwap(n);
|
||||
|
||||
/** @deprecated */
|
||||
export const byteSwapIfBE: typeof swap8IfBE = swap8IfBE;
|
||||
/** In place byte swap for Uint32Array */
|
||||
export function byteSwap32(arr: Uint32Array): Uint32Array {
|
||||
for (let i = 0; i < arr.length; i++) {
|
||||
arr[i] = byteSwap(arr[i]);
|
||||
}
|
||||
return arr;
|
||||
}
|
||||
|
||||
export const swap32IfBE: (u: Uint32Array) => Uint32Array = isLE
|
||||
? (u: Uint32Array) => u
|
||||
: byteSwap32;
|
||||
|
||||
// Built-in hex conversion https://caniuse.com/mdn-javascript_builtins_uint8array_fromhex
|
||||
const hasHexBuiltin: boolean = /* @__PURE__ */ (() =>
|
||||
// @ts-ignore
|
||||
typeof Uint8Array.from([]).toHex === 'function' && typeof Uint8Array.fromHex === 'function')();
|
||||
|
||||
// Array where index 0xf0 (240) is mapped to string 'f0'
|
||||
const hexes = /* @__PURE__ */ Array.from({ length: 256 }, (_, i) =>
|
||||
i.toString(16).padStart(2, '0')
|
||||
);
|
||||
|
||||
/**
|
||||
* Convert byte array to hex string. Uses built-in function, when available.
|
||||
* @example bytesToHex(Uint8Array.from([0xca, 0xfe, 0x01, 0x23])) // 'cafe0123'
|
||||
*/
|
||||
export function bytesToHex(bytes: Uint8Array): string {
|
||||
abytes(bytes);
|
||||
// @ts-ignore
|
||||
if (hasHexBuiltin) return bytes.toHex();
|
||||
// pre-caching improves the speed 6x
|
||||
let hex = '';
|
||||
for (let i = 0; i < bytes.length; i++) {
|
||||
hex += hexes[bytes[i]];
|
||||
}
|
||||
return hex;
|
||||
}
|
||||
|
||||
// We use an optimized technique to convert a hex string to a byte array
|
||||
const asciis = { _0: 48, _9: 57, A: 65, F: 70, a: 97, f: 102 } as const;
|
||||
function asciiToBase16(ch: number): number | undefined {
|
||||
if (ch >= asciis._0 && ch <= asciis._9) return ch - asciis._0; // '2' => 50-48
|
||||
if (ch >= asciis.A && ch <= asciis.F) return ch - (asciis.A - 10); // 'B' => 66-(65-10)
|
||||
if (ch >= asciis.a && ch <= asciis.f) return ch - (asciis.a - 10); // 'b' => 98-(97-10)
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert hex string to byte array. Uses built-in function, when available.
|
||||
* @example hexToBytes('cafe0123') // Uint8Array.from([0xca, 0xfe, 0x01, 0x23])
|
||||
*/
|
||||
export function hexToBytes(hex: string): Uint8Array {
|
||||
if (typeof hex !== 'string') throw new Error('hex string expected, got ' + typeof hex);
|
||||
// @ts-ignore
|
||||
if (hasHexBuiltin) return Uint8Array.fromHex(hex);
|
||||
const hl = hex.length;
|
||||
const al = hl / 2;
|
||||
if (hl % 2) throw new Error('hex string expected, got unpadded hex of length ' + hl);
|
||||
const array = new Uint8Array(al);
|
||||
for (let ai = 0, hi = 0; ai < al; ai++, hi += 2) {
|
||||
const n1 = asciiToBase16(hex.charCodeAt(hi));
|
||||
const n2 = asciiToBase16(hex.charCodeAt(hi + 1));
|
||||
if (n1 === undefined || n2 === undefined) {
|
||||
const char = hex[hi] + hex[hi + 1];
|
||||
throw new Error('hex string expected, got non-hex character "' + char + '" at index ' + hi);
|
||||
}
|
||||
array[ai] = n1 * 16 + n2; // multiply first octet, e.g. 'a3' => 10*16+3 => 160 + 3 => 163
|
||||
}
|
||||
return array;
|
||||
}
|
||||
|
||||
/**
|
||||
 * There is no setImmediate in the browser, and setTimeout is slow.
 * Calling an async fn returns a Promise, which is fulfilled only on the
 * next scheduler queue processing step, and this is exactly what we need.
|
||||
*/
|
||||
export const nextTick = async (): Promise<void> => {};
|
||||
|
||||
/** Returns control to the event loop every 'tick' ms to avoid blocking. */
|
||||
export async function asyncLoop(
|
||||
iters: number,
|
||||
tick: number,
|
||||
cb: (i: number) => void
|
||||
): Promise<void> {
|
||||
let ts = Date.now();
|
||||
for (let i = 0; i < iters; i++) {
|
||||
cb(i);
|
||||
// Date.now() is not monotonic, so if the clock goes backwards we return control too
|
||||
const diff = Date.now() - ts;
|
||||
if (diff >= 0 && diff < tick) continue;
|
||||
await nextTick();
|
||||
ts += diff;
|
||||
}
|
||||
}
|
||||
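// Illustrative usage sketch: run a long CPU-bound loop while yielding to the event loop
// roughly every `tick` milliseconds, so the page or server stays responsive:
//
//   await asyncLoop(1_000_000, 10, (i) => {
//     // do one unit of work for iteration i
//   });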
|
||||
// Global symbols, but ts doesn't see them: https://github.com/microsoft/TypeScript/issues/31535
|
||||
declare const TextEncoder: any;
|
||||
declare const TextDecoder: any;
|
||||
|
||||
/**
|
||||
* Converts string to bytes using UTF8 encoding.
|
||||
* @example utf8ToBytes('abc') // Uint8Array.from([97, 98, 99])
|
||||
*/
|
||||
export function utf8ToBytes(str: string): Uint8Array {
|
||||
if (typeof str !== 'string') throw new Error('string expected');
|
||||
return new Uint8Array(new TextEncoder().encode(str)); // https://bugzil.la/1681809
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts bytes to string using UTF8 encoding.
|
||||
* @example bytesToUtf8(Uint8Array.from([97, 98, 99])) // 'abc'
|
||||
*/
|
||||
export function bytesToUtf8(bytes: Uint8Array): string {
|
||||
return new TextDecoder().decode(bytes);
|
||||
}
|
||||
|
||||
/** Accepted input of hash functions. Strings are converted to byte arrays. */
|
||||
export type Input = string | Uint8Array;
|
||||
/**
|
||||
* Normalizes (non-hex) string or Uint8Array to Uint8Array.
|
||||
 * Warning: when a Uint8Array is passed, it will NOT be copied.
|
||||
* Keep in mind for future mutable operations.
|
||||
*/
|
||||
export function toBytes(data: Input): Uint8Array {
|
||||
if (typeof data === 'string') data = utf8ToBytes(data);
|
||||
abytes(data);
|
||||
return data;
|
||||
}
|
||||
|
||||
/** KDFs can accept string or Uint8Array for user convenience. */
|
||||
export type KDFInput = string | Uint8Array;
|
||||
/**
|
||||
* Helper for KDFs: consumes uint8array or string.
|
||||
 * When a string is passed, it is UTF-8 encoded using TextEncoder.
|
||||
*/
|
||||
export function kdfInputToBytes(data: KDFInput): Uint8Array {
|
||||
if (typeof data === 'string') data = utf8ToBytes(data);
|
||||
abytes(data);
|
||||
return data;
|
||||
}
|
||||
|
||||
/** Copies several Uint8Arrays into one. */
|
||||
export function concatBytes(...arrays: Uint8Array[]): Uint8Array {
|
||||
let sum = 0;
|
||||
for (let i = 0; i < arrays.length; i++) {
|
||||
const a = arrays[i];
|
||||
abytes(a);
|
||||
sum += a.length;
|
||||
}
|
||||
const res = new Uint8Array(sum);
|
||||
for (let i = 0, pad = 0; i < arrays.length; i++) {
|
||||
const a = arrays[i];
|
||||
res.set(a, pad);
|
||||
pad += a.length;
|
||||
}
|
||||
return res;
|
||||
}
|
||||
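// Worked example:
//   concatBytes(Uint8Array.from([1, 2]), Uint8Array.from([3]))
//   // -> Uint8Array [1, 2, 3]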
|
||||
type EmptyObj = {};
|
||||
export function checkOpts<T1 extends EmptyObj, T2 extends EmptyObj>(
|
||||
defaults: T1,
|
||||
opts?: T2
|
||||
): T1 & T2 {
|
||||
if (opts !== undefined && {}.toString.call(opts) !== '[object Object]')
|
||||
throw new Error('options should be object or undefined');
|
||||
const merged = Object.assign(defaults, opts);
|
||||
return merged as T1 & T2;
|
||||
}
|
||||
|
||||
/** Hash interface. */
|
||||
export type IHash = {
|
||||
(data: Uint8Array): Uint8Array;
|
||||
blockLen: number;
|
||||
outputLen: number;
|
||||
create: any;
|
||||
};
|
||||
|
||||
/** For runtime check if class implements interface */
|
||||
export abstract class Hash<T extends Hash<T>> {
|
||||
abstract blockLen: number; // Bytes per block
|
||||
abstract outputLen: number; // Bytes in output
|
||||
abstract update(buf: Input): this;
|
||||
// Writes digest into buf
|
||||
abstract digestInto(buf: Uint8Array): void;
|
||||
abstract digest(): Uint8Array;
|
||||
/**
|
||||
* Resets internal state. Makes Hash instance unusable.
|
||||
 * Reset is impossible for keyed hashes if the key is consumed into state. If the digest is not
 * consumed by the user, they will need to manually call `destroy()` when zeroing is necessary.
|
||||
*/
|
||||
abstract destroy(): void;
|
||||
/**
|
||||
* Clones hash instance. Unsafe: doesn't check whether `to` is valid. Can be used as `clone()`
|
||||
* when no options are passed.
|
||||
* Reasons to use `_cloneInto` instead of clone: 1) performance 2) reuse instance => all internal
|
||||
* buffers are overwritten => causes buffer overwrite which is used for digest in some cases.
|
||||
* There are no guarantees for clean-up because it's impossible in JS.
|
||||
*/
|
||||
abstract _cloneInto(to?: T): T;
|
||||
// Safe version that clones internal state
|
||||
abstract clone(): T;
|
||||
}
|
||||
|
||||
/**
|
||||
* XOF: streaming API to read digest in chunks.
|
||||
 * Same as 'squeeze' in keccak/k12 and 'seek' in blake3, but a more generic name.
 * When a hash is used in XOF mode, it is up to the user to call '.destroy' afterwards, since we
 * cannot destroy the state automatically: the next call may require more bytes.
|
||||
*/
|
||||
export type HashXOF<T extends Hash<T>> = Hash<T> & {
|
||||
xof(bytes: number): Uint8Array; // Read 'bytes' bytes from digest stream
|
||||
xofInto(buf: Uint8Array): Uint8Array; // read buf.length bytes from digest stream into buf
|
||||
};
|
||||
|
||||
/** Hash function */
|
||||
export type CHash = ReturnType<typeof createHasher>;
|
||||
/** Hash function with output */
|
||||
export type CHashO = ReturnType<typeof createOptHasher>;
|
||||
/** XOF with output */
|
||||
export type CHashXO = ReturnType<typeof createXOFer>;
|
||||
|
||||
/** Wraps hash function, creating an interface on top of it */
|
||||
export function createHasher<T extends Hash<T>>(
|
||||
hashCons: () => Hash<T>
|
||||
): {
|
||||
(msg: Input): Uint8Array;
|
||||
outputLen: number;
|
||||
blockLen: number;
|
||||
create(): Hash<T>;
|
||||
} {
|
||||
const hashC = (msg: Input): Uint8Array => hashCons().update(toBytes(msg)).digest();
|
||||
const tmp = hashCons();
|
||||
hashC.outputLen = tmp.outputLen;
|
||||
hashC.blockLen = tmp.blockLen;
|
||||
hashC.create = () => hashCons();
|
||||
return hashC;
|
||||
}
|
||||
|
||||
export function createOptHasher<H extends Hash<H>, T extends Object>(
|
||||
hashCons: (opts?: T) => Hash<H>
|
||||
): {
|
||||
(msg: Input, opts?: T): Uint8Array;
|
||||
outputLen: number;
|
||||
blockLen: number;
|
||||
create(opts?: T): Hash<H>;
|
||||
} {
|
||||
const hashC = (msg: Input, opts?: T): Uint8Array => hashCons(opts).update(toBytes(msg)).digest();
|
||||
const tmp = hashCons({} as T);
|
||||
hashC.outputLen = tmp.outputLen;
|
||||
hashC.blockLen = tmp.blockLen;
|
||||
hashC.create = (opts?: T) => hashCons(opts);
|
||||
return hashC;
|
||||
}
|
||||
|
||||
export function createXOFer<H extends HashXOF<H>, T extends Object>(
|
||||
hashCons: (opts?: T) => HashXOF<H>
|
||||
): {
|
||||
(msg: Input, opts?: T): Uint8Array;
|
||||
outputLen: number;
|
||||
blockLen: number;
|
||||
create(opts?: T): HashXOF<H>;
|
||||
} {
|
||||
const hashC = (msg: Input, opts?: T): Uint8Array => hashCons(opts).update(toBytes(msg)).digest();
|
||||
const tmp = hashCons({} as T);
|
||||
hashC.outputLen = tmp.outputLen;
|
||||
hashC.blockLen = tmp.blockLen;
|
||||
hashC.create = (opts?: T) => hashCons(opts);
|
||||
return hashC;
|
||||
}
|
||||
export const wrapConstructor: typeof createHasher = createHasher;
|
||||
export const wrapConstructorWithOpts: typeof createOptHasher = createOptHasher;
|
||||
export const wrapXOFConstructorWithOpts: typeof createXOFer = createXOFer;
|
||||
|
||||
/** Cryptographically secure PRNG. Uses internal OS-level `crypto.getRandomValues`. */
|
||||
export function randomBytes(bytesLength = 32): Uint8Array {
|
||||
if (crypto && typeof crypto.getRandomValues === 'function') {
|
||||
return crypto.getRandomValues(new Uint8Array(bytesLength));
|
||||
}
|
||||
// Legacy Node.js compatibility
|
||||
if (crypto && typeof crypto.randomBytes === 'function') {
|
||||
return Uint8Array.from(crypto.randomBytes(bytesLength));
|
||||
}
|
||||
throw new Error('crypto.getRandomValues must be defined');
|
||||
}
|
||||
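// Illustrative usage sketch (assumes the package's usual '@noble/hashes/utils' subpath export):
//
//   import { randomBytes } from '@noble/hashes/utils';
//   const key = randomBytes(32); // 32 bytes from the OS CSPRNG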