/*! For license information please see handtrack.min.js.LICENSE.txt */
!function(e,t){"object"==typeof exports&&"object"==typeof module?module.exports=t():"function"==typeof define&&define.amd?define([],t):"object"==typeof exports?exports.handTrack=t():e.handTrack=t()}(self,(function(){return(()=>{var e={377:(e,t,n)=>{var s=n(832),a=n(652),r=n(801),i=n(30),o=n(618),l=n(49),u=n(971);u.alea=s,u.xor128=a,u.xorwow=r,u.xorshift7=i,u.xor4096=o,u.tychei=l,e.exports=u},832:function(e,t,n){var s;!function(e,a,r){function i(e){var t,n=this,s=(t=4022871197,function(e){e=e.toString();for(var n=0;n<e.length;n++){var s=.02519603282416938*(t+=e.charCodeAt(n));s-=t=s>>>0,t=(s*=t)>>>0,t+=4294967296*(s-=t)}return 2.3283064365386963e-10*(t>>>0)});n.next=function(){var e=2091639*n.s0+2.3283064365386963e-10*n.c;return n.s0=n.s1,n.s1=n.s2,n.s2=e-(n.c=0|e)},n.c=1,n.s0=s(" "),n.s1=s(" "),n.s2=s(" "),n.s0-=s(e),n.s0<0&&(n.s0+=1),n.s1-=s(e),n.s1<0&&(n.s1+=1),n.s2-=s(e),n.s2<0&&(n.s2+=1),s=null}function o(e,t){return t.c=e.c,t.s0=e.s0,t.s1=e.s1,t.s2=e.s2,t}function l(e,t){var n=new i(e),s=t&&t.state,a=n.next;return a.int32=function(){return 4294967296*n.next()|0},a.double=function(){return a()+11102230246251565e-32*(2097152*a()|0)},a.quick=a,s&&("object"==typeof s&&o(s,n),a.state=function(){return o(n,{})}),a}a&&a.exports?a.exports=l:n.amdD&&n.amdO?void 0===(s=function(){return l}.call(t,n,t,a))||(a.exports=s):this.alea=l}(0,e=n.nmd(e),n.amdD)},49:function(e,t,n){var s;!function(e,a,r){function i(e){var t=this,n="";t.next=function(){var e=t.b,n=t.c,s=t.d,a=t.a;return e=e<<25^e>>>7^n,n=n-s|0,s=s<<24^s>>>8^a,a=a-e|0,t.b=e=e<<20^e>>>12^n,t.c=n=n-s|0,t.d=s<<16^n>>>16^a,t.a=a-e|0},t.a=0,t.b=0,t.c=-1640531527,t.d=1367130551,e===Math.floor(e)?(t.a=e/4294967296|0,t.b=0|e):n+=e;for(var s=0;s<n.length+20;s++)t.b^=0|n.charCodeAt(s),t.next()}function o(e,t){return t.a=e.a,t.b=e.b,t.c=e.c,t.d=e.d,t}function l(e,t){var n=new i(e),s=t&&t.state,a=function(){return(n.next()>>>0)/4294967296};return a.double=function(){do{var e=((n.next()>>>11)+(n.next()>>>0)/4294967296)/(1<<21)}while(0===e);return e},a.int32=n.next,a.quick=a,s&&("object"==typeof s&&o(s,n),a.state=function(){return o(n,{})}),a}a&&a.exports?a.exports=l:n.amdD&&n.amdO?void 0===(s=function(){return l}.call(t,n,t,a))||(a.exports=s):this.tychei=l}(0,e=n.nmd(e),n.amdD)},652:function(e,t,n){var s;!function(e,a,r){function i(e){var t=this,n="";t.x=0,t.y=0,t.z=0,t.w=0,t.next=function(){var e=t.x^t.x<<11;return t.x=t.y,t.y=t.z,t.z=t.w,t.w^=t.w>>>19^e^e>>>8},e===(0|e)?t.x=e:n+=e;for(var s=0;s<n.length+64;s++)t.x^=0|n.charCodeAt(s),t.next()}function o(e,t){return t.x=e.x,t.y=e.y,t.z=e.z,t.w=e.w,t}function l(e,t){var n=new i(e),s=t&&t.state,a=function(){return(n.next()>>>0)/4294967296};return a.double=function(){do{var e=((n.next()>>>11)+(n.next()>>>0)/4294967296)/(1<<21)}while(0===e);return e},a.int32=n.next,a.quick=a,s&&("object"==typeof s&&o(s,n),a.state=function(){return o(n,{})}),a}a&&a.exports?a.exports=l:n.amdD&&n.amdO?void 0===(s=function(){return l}.call(t,n,t,a))||(a.exports=s):this.xor128=l}(0,e=n.nmd(e),n.amdD)},618:function(e,t,n){var s;!function(e,a,r){function i(e){var t=this;t.next=function(){var e,n,s=t.w,a=t.X,r=t.i;return t.w=s=s+1640531527|0,n=a[r+34&127],e=a[r=r+1&127],n^=n<<13,e^=e<<17,n^=n>>>15,e^=e>>>12,n=a[r]=n^e,t.i=r,n+(s^s>>>16)|0},function(e,t){var 
n,s,a,r,i,o=[],l=128;for(t===(0|t)?(s=t,t=null):(t+="\0",s=0,l=Math.max(l,t.length)),a=0,r=-32;r<l;++r)t&&(s^=t.charCodeAt((r+32)%t.length)),0===r&&(i=s),s^=s<<10,s^=s>>>15,s^=s<<4,s^=s>>>13,r>=0&&(i=i+1640531527|0,a=0==(n=o[127&r]^=s+i)?a+1:0);for(a>=128&&(o[127&(t&&t.length||0)]=-1),a=127,r=512;r>0;--r)s=o[a+34&127],n=o[a=a+1&127],s^=s<<13,n^=n<<17,s^=s>>>15,n^=n>>>12,o[a]=s^n;e.w=i,e.X=o,e.i=a}(t,e)}function o(e,t){return t.i=e.i,t.w=e.w,t.X=e.X.slice(),t}function l(e,t){null==e&&(e=+new Date);var n=new i(e),s=t&&t.state,a=function(){return(n.next()>>>0)/4294967296};return a.double=function(){do{var e=((n.next()>>>11)+(n.next()>>>0)/4294967296)/(1<<21)}while(0===e);return e},a.int32=n.next,a.quick=a,s&&(s.X&&o(s,n),a.state=function(){return o(n,{})}),a}a&&a.exports?a.exports=l:n.amdD&&n.amdO?void 0===(s=function(){return l}.call(t,n,t,a))||(a.exports=s):this.xor4096=l}(0,e=n.nmd(e),n.amdD)},30:function(e,t,n){var s;!function(e,a,r){function i(e){var t=this;t.next=function(){var e,n,s=t.x,a=t.i;return e=s[a],n=(e^=e>>>7)^e<<24,n^=(e=s[a+1&7])^e>>>10,n^=(e=s[a+3&7])^e>>>3,n^=(e=s[a+4&7])^e<<7,e=s[a+7&7],n^=(e^=e<<13)^e<<9,s[a]=n,t.i=a+1&7,n},function(e,t){var n,s=[];if(t===(0|t))s[0]=t;else for(t=""+t,n=0;n<t.length;++n)s[7&n]=s[7&n]<<15^t.charCodeAt(n)+s[n+1&7]<<13;for(;s.length<8;)s.push(0);for(n=0;n<8&&0===s[n];++n);for(8==n?s[7]=-1:s[n],e.x=s,e.i=0,n=256;n>0;--n)e.next()}(t,e)}function o(e,t){return t.x=e.x.slice(),t.i=e.i,t}function l(e,t){null==e&&(e=+new Date);var n=new i(e),s=t&&t.state,a=function(){return(n.next()>>>0)/4294967296};return a.double=function(){do{var e=((n.next()>>>11)+(n.next()>>>0)/4294967296)/(1<<21)}while(0===e);return e},a.int32=n.next,a.quick=a,s&&(s.x&&o(s,n),a.state=function(){return o(n,{})}),a}a&&a.exports?a.exports=l:n.amdD&&n.amdO?void 0===(s=function(){return l}.call(t,n,t,a))||(a.exports=s):this.xorshift7=l}(0,e=n.nmd(e),n.amdD)},801:function(e,t,n){var s;!function(e,a,r){function i(e){var t=this,n="";t.next=function(){var e=t.x^t.x>>>2;return t.x=t.y,t.y=t.z,t.z=t.w,t.w=t.v,(t.d=t.d+362437|0)+(t.v=t.v^t.v<<4^e^e<<1)|0},t.x=0,t.y=0,t.z=0,t.w=0,t.v=0,e===(0|e)?t.x=e:n+=e;for(var s=0;s<n.length+64;s++)t.x^=0|n.charCodeAt(s),s==n.length&&(t.d=t.x<<10^t.x>>>4),t.next()}function o(e,t){return t.x=e.x,t.y=e.y,t.z=e.z,t.w=e.w,t.v=e.v,t.d=e.d,t}function l(e,t){var n=new i(e),s=t&&t.state,a=function(){return(n.next()>>>0)/4294967296};return a.double=function(){do{var e=((n.next()>>>11)+(n.next()>>>0)/4294967296)/(1<<21)}while(0===e);return e},a.int32=n.next,a.quick=a,s&&("object"==typeof s&&o(s,n),a.state=function(){return o(n,{})}),a}a&&a.exports?a.exports=l:n.amdD&&n.amdO?void 0===(s=function(){return l}.call(t,n,t,a))||(a.exports=s):this.xorwow=l}(0,e=n.nmd(e),n.amdD)},971:(e,t,n)=>{var s;!function(a,r){var i,o=this,l=256,u=r.pow(l,6),c=r.pow(2,52),h=2*c,p=255;function d(e,t,n){var s=[],p=y(g((t=1==t?{entropy:!0}:t||{}).entropy?[e,b(a)]:null==e?function(){try{var e;return i&&(e=i.randomBytes)?e=e(l):(e=new Uint8Array(l),(o.crypto||o.msCrypto).getRandomValues(e)),b(e)}catch(e){var t=o.navigator,n=t&&t.plugins;return[+new Date,o,n,o.screen,b(a)]}}():e,3),s),d=new f(s),x=function(){for(var e=d.g(6),t=u,n=0;e<c;)e=(e+n)*l,t*=l,n=d.g(1);for(;e>=h;)e/=2,t/=2,n>>>=1;return(e+n)/t};return x.int32=function(){return 0|d.g(4)},x.quick=function(){return d.g(4)/4294967296},x.double=x,y(b(d.S),a),(t.pass||n||function(e,t,n,s){return s&&(s.S&&m(s,d),e.state=function(){return m(d,{})}),n?(r.random=e,t):e})(x,p,"global"in t?t.global:this==r,t.state)}function f(e){var 
t,n=e.length,s=this,a=0,r=s.i=s.j=0,i=s.S=[];for(n||(e=[n++]);a<l;)i[a]=a++;for(a=0;a<l;a++)i[a]=i[r=p&r+e[a%n]+(t=i[a])],i[r]=t;(s.g=function(e){for(var t,n=0,a=s.i,r=s.j,i=s.S;e--;)t=i[a=p&a+1],n=n*l+i[p&(i[a]=i[r=p&r+t])+(i[r]=t)];return s.i=a,s.j=r,n})(l)}function m(e,t){return t.i=e.i,t.j=e.j,t.S=e.S.slice(),t}function g(e,t){var n,s=[],a=typeof e;if(t&&"object"==a)for(n in e)try{s.push(g(e[n],t-1))}catch(e){}return s.length?s:"string"==a?e:e+"\0"}function y(e,t){for(var n,s=e+"",a=0;a<s.length;)t[p&a]=p&(n^=19*t[p&a])+s.charCodeAt(a++);return b(t)}function b(e){return String.fromCharCode.apply(0,e)}if(r.seedrandom=d,y(r.random(),a),e.exports){e.exports=d;try{i=n(42)}catch(e){}}else void 0===(s=function(){return d}.call(t,n,t,e))||(e.exports=s)}([],Math)},410:()=>{},628:()=>{},601:()=>{},792:()=>{},42:()=>{}},t={};function n(s){var a=t[s];if(void 0!==a)return a.exports;var r=t[s]={id:s,loaded:!1,exports:{}};return e[s].call(r.exports,r,r.exports,n),r.loaded=!0,r.exports}n.amdD=function(){throw new Error("define cannot be used indirect")},n.amdO={},n.d=(e,t)=>{for(var s in t)n.o(t,s)&&!n.o(e,s)&&Object.defineProperty(e,s,{enumerable:!0,get:t[s]})},n.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),n.o=(e,t)=>Object.prototype.hasOwnProperty.call(e,t),n.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.nmd=e=>(e.paths=[],e.children||(e.children=[]),e);var s={};return(()=>{"use strict";n.r(s),n.d(s,{ObjectDetection:()=>_D,colorMap:()=>ED,load:()=>RD,startVideo:()=>FD,stopVideo:()=>DD,version:()=>CD});var e={};n.r(e),n.d(e,{assertParamsValid:()=>rr,computeFlatOffset:()=>br,computeOutShape:()=>or,getNormalizedAxes:()=>hr,isSliceContinous:()=>yr,maskToAxes:()=>ir,parseSliceParams:()=>xr,sliceInfo:()=>wr,startForAxis:()=>mr,startIndicesWithElidedDims:()=>pr,stopForAxis:()=>gr,stopIndicesWithElidedDims:()=>dr,stridesForAxis:()=>fr,stridesWithElidedDims:()=>lr});var t={};n.r(t),n.d(t,{collectGatherOpShapeInfo:()=>zu,computeOutShape:()=>Lu,segOpComputeOptimalWindowSize:()=>Mu});var 
a={};n.r(a),n.d(a,{ERF_A1:()=>wu,ERF_A2:()=>ku,ERF_A3:()=>vu,ERF_A4:()=>Nu,ERF_A5:()=>Iu,ERF_P:()=>xu,PARALLELIZE_THRESHOLD:()=>ru,SELU_SCALE:()=>bu,SELU_SCALEALPHA:()=>yu,applyActivation:()=>el,assertAndGetBroadcastShape:()=>vi,assertAxesAreInnerMostDims:()=>Ji,assertParamsConsistent:()=>su,assignToTypedArray:()=>Fu,axesAreInnerMostDims:()=>Ki,calculateShapes:()=>gu,combineLocations:()=>qi,complexWithEvenIndex:()=>Eu,complexWithOddIndex:()=>Au,computeConv2DInfo:()=>Vr,computeConv3DInfo:()=>Ur,computeDefaultPad:()=>Gr,computeDilation2DInfo:()=>Pr,computeOptimalWindowSize:()=>iu,computeOutAndReduceShapes:()=>Xi,computeOutShape:()=>au,computePool2DInfo:()=>Br,computePool3DInfo:()=>Wr,convertConv2DDataFormat:()=>Jr,eitherStridesOrDilationsAreOne:()=>Yr,expandShapeToKeepDim:()=>Yi,exponent:()=>_u,exponents:()=>Du,fromStringArrayToUint8:()=>Bu,fromUint8ToStringArray:()=>Pu,getAxesPermutation:()=>Zi,getBroadcastDims:()=>wi,getComplexWithIndex:()=>Ru,getFusedBiasGradient:()=>Qo,getFusedDyActivation:()=>Zo,getImageCenter:()=>ou,getInnerMostAxes:()=>eo,getPermuted:()=>uu,getReductionAxes:()=>ki,getReshaped:()=>lu,getReshapedPermuted:()=>cu,getSliceBeginCoords:()=>hu,getSliceSize:()=>pu,getUndoAxesPermutation:()=>Qi,log:()=>Cu,mergeRealAndImagArrays:()=>Su,prepareAndValidate:()=>du,prepareSplitSize:()=>Ou,segment_util:()=>t,shouldFuse:()=>tl,slice_util:()=>e,splitRealAndImagArrays:()=>Tu,tupleValuesAreOne:()=>Xr,upcastType:()=>Vs,validateInput:()=>mu,validateUpdateShape:()=>fu,warn:()=>$u});var r={};n.r(r),n.d(r,{json:()=>Jy});var i={};n.r(i),n.d(i,{json:()=>Zy});var o={};n.r(o),n.d(o,{json:()=>Qy});var l={};n.r(l),n.d(l,{json:()=>eb});var u={};n.r(u),n.d(u,{json:()=>tb});var c={};n.r(c),n.d(c,{json:()=>nb});var h={};n.r(h),n.d(h,{json:()=>sb});var p={};n.r(p),n.d(p,{json:()=>ab});var d={};n.r(d),n.d(d,{json:()=>rb});var f={};n.r(f),n.d(f,{json:()=>ib});var m={};n.r(m),n.d(m,{json:()=>ob});var g={};n.r(g),n.d(g,{json:()=>lb});var y={};n.r(y),n.d(y,{json:()=>ub});var b={};n.r(b),n.d(b,{json:()=>cb});var x={};n.r(x),n.d(x,{json:()=>hb});var w={};n.r(w),n.d(w,{json:()=>pb});var k={};n.r(k),n.d(k,{json:()=>db});var v={};n.r(v),n.d(v,{addImpl:()=>pw,bincountImpl:()=>ak,bincountReduceImpl:()=>rk,ceilImpl:()=>lk,concatImpl:()=>fk,expImpl:()=>ev,expm1Impl:()=>rv,floorImpl:()=>Cv,gatherV2Impl:()=>Ov,greaterImpl:()=>Lv,lessImpl:()=>Yv,linSpaceImpl:()=>nN,logImpl:()=>aN,maxImpl:()=>xN,maximumImpl:()=>vN,minimumImpl:()=>_N,multiplyImpl:()=>lv,negImpl:()=>GN,notEqualImpl:()=>ZN,prodImpl:()=>hI,rangeImpl:()=>dI,rsqrtImpl:()=>$I,simpleAbsImpl:()=>kw,sliceImpl:()=>ek,squaredDifferenceImpl:()=>ZI,stridedSliceImpl:()=>s$,subImpl:()=>mv,tileImpl:()=>l$,topKImpl:()=>u$,transposeImpl:()=>Tw,uniqueImpl:()=>f$});class N{constructor(e,t){this.backend=e,this.dataMover=t,this.data=new WeakMap,this.dataIdsCount=0}get(e){return this.data.has(e)||this.dataMover.moveData(this.backend,e),this.data.get(e)}set(e,t){this.dataIdsCount++,this.data.set(e,t)}has(e){return this.data.has(e)}delete(e){return this.dataIdsCount--,this.data.delete(e)}numDataIds(){return this.dataIdsCount}}class I{refCount(e){return $("refCount")}incRef(e){return $("incRef")}timerAvailable(){return!0}time(e){return $("time")}read(e){return $("read")}readSync(e){return $("readSync")}numDataIds(){return $("numDataIds")}disposeData(e,t){return $("disposeData")}write(e,t,n){return $("write")}move(e,t,n,s,a){return $("move")}memory(){return $("memory")}floatPrecision(){return $("floatPrecision")}epsilon(){return 32===this.floatPrecision()?1e-7:1e-4}dispose(){return 
$("dispose")}}function $(e){throw new Error(`'${e}' not yet implemented or not found in the registry. This kernel may not be supported by the tfjs backend you have chosen`)}function C(e){let t=e.length,n=0,s=0;for(;t>0;)s=Math.random()*t|0,t--,n=e[t],e[t]=e[s],e[s]=n}function S(e,t,n){return Math.max(e,Math.min(t,n))}function T(e){return e%2==0?e:e+1}function E(e,t){if(!e)throw new Error("string"==typeof t?t:t())}function A(e,t,n=""){E(_(e,t),(()=>n+` Shapes ${e} and ${t} must match`))}function R(e){E(null!=e,(()=>"The input to the tensor constructor must be a non-null value."))}function F(e,t=[],n=!1){if(null==t&&(t=[]),Array.isArray(e)||H(e)&&!n)for(let s=0;s<e.length;++s)F(e[s],t,n);else t.push(e);return t}function D(e){if(0===e.length)return 1;let t=e[0];for(let n=1;n<e.length;n++)t*=e[n];return t}function _(e,t){if(e===t)return!0;if(null==e||null==t)return!1;if(e.length!==t.length)return!1;for(let n=0;n<e.length;n++)if(e[n]!==t[n])return!1;return!0}function O(e){return e%1==0}function M(e){const t=Math.ceil(Math.sqrt(e));return[t,Math.ceil(e/t)]}function L(e,t){return t<=e.length?e:e+" ".repeat(t-e.length)}function z(e,t=(e=>0),n){return new Promise(((s,a)=>{let r=0;const i=()=>{if(e())return void s();r++;const o=t(r);null!=n&&r>=n?a():setTimeout(i,o)};i()}))}function P(e,t){let n=1,s=-1;for(let t=0;t<e.length;++t)if(e[t]>=0)n*=e[t];else if(-1===e[t]){if(-1!==s)throw Error(`Shapes can only have 1 implicit size. Found -1 at dim ${s} and dim ${t}`);s=t}else if(e[t]<0)throw Error(`Shapes can not be < 0. Found ${e[t]} at dim ${t}`);if(-1===s){if(t>0&&t!==n)throw Error(`Size(${t}) must match the product of shape ${e}`);return e}if(0===n)throw Error(`Cannot infer the missing size in [${e}] when there are 0 elements`);if(t%n!=0)throw Error(`The implicit shape can't be a fractional number. 
Got ${t} / ${n}`);const a=e.slice();return a[s]=t/n,a}function B(e,t){const n=t.length;return E((e=null==e?t.map(((e,t)=>t)):[].concat(e)).every((e=>e>=-n&&e<n)),(()=>`All values in axis param must be in range [-${n}, ${n}) but got axis ${e}`)),E(e.every((e=>O(e))),(()=>`All values in axis param must be integers but got axis ${e}`)),e.map((e=>e<0?n+e:e))}function W(e,t){const n=[],s=[],a=null!=t&&Array.isArray(t)&&0===t.length,r=null==t||a?null:B(t,e).sort();let i=0;for(let t=0;t<e.length;++t){if(null!=r){if(r[i]===t&&1!==e[t])throw new Error(`Can't squeeze axis ${t} since its dim '${e[t]}' is not 1`);(null==r[i]||r[i]>t)&&1===e[t]&&(n.push(e[t]),s.push(t)),r[i]<=t&&i++}1!==e[t]&&(n.push(e[t]),s.push(t))}return{newShape:n,keptDims:s}}function V(e,t){let n=null;if(null==e||"float32"===e)n=new Float32Array(t);else if("int32"===e)n=new Int32Array(t);else{if("bool"!==e)throw new Error(`Unknown data type ${e}`);n=new Uint8Array(t)}return n}function U(e,t){let n=null;if(null==e||"float32"===e)n=new Float32Array(t);else if("int32"===e)n=new Int32Array(t);else if("bool"===e)n=new Uint8Array(t);else{if("string"!==e)throw new Error(`Unknown data type ${e}`);n=new Array(t)}return n}function G(e,t){return!("complex64"===t||"float32"===t&&"complex64"!==e||"int32"===t&&"float32"!==e&&"complex64"!==e||"bool"===t&&"bool"===e)}function H(e){return e instanceof Float32Array||e instanceof Int32Array||e instanceof Uint8Array}function j(e){if("float32"===e||"int32"===e)return 4;if("complex64"===e)return 8;if("bool"===e)return 1;throw new Error(`Unknown dtype ${e}`)}function K(e){return"string"==typeof e||e instanceof String}function q(e){return"number"==typeof e}function X(e){return Array.isArray(e)?X(e[0]):e instanceof Float32Array?"float32":e instanceof Int32Array||e instanceof Uint8Array?"int32":q(e)?"float32":K(e)?"string":"boolean"==typeof e?"bool":"float32"}function Y(e){return!!(e&&e.constructor&&e.call&&e.apply)}function J(e,t){for(let n=t;n<e;++n)if(e%n==0)return n;return e}function Z(e){const t=e.length;if(t<2)return[];const n=new Array(t-1);n[t-2]=e[t-1];for(let s=t-3;s>=0;--s)n[s]=n[s+1]*e[s+1];return n}function Q(e,t,n){const s=new Array;if(1===t.length){const a=t[0];for(let t=0;t<a;t++)s[t]=n[e+t]}else{const a=t[0],r=t.slice(1),i=r.reduce(((e,t)=>e*t));for(let t=0;t<a;t++)s[t]=Q(e+t*i,r,n)}return s}function ee(e,t){if(0===e.length)return t[0];const n=e.reduce(((e,t)=>e*t));if(0===n)return[];if(n!==t.length)throw new Error(`[${e}] does not match the input size ${t.length}.`);return Q(0,e,t)}function te(e,t){const n=ne(e,t);for(let e=0;e<n.length;e++)n[e]=1;return n}function ne(e,t){if(null==t||"float32"===t||"complex64"===t)return new Float32Array(e);if("int32"===t)return new Int32Array(e);if("bool"===t)return new Uint8Array(e);throw new Error(`Unknown data type ${t}`)}function se(e,t){const n=e.reduce(((e,t)=>e*t),1);if(null==t||"float32"===t)return ee(e,new Float32Array(n));if("int32"===t)return ee(e,new Int32Array(n));if("bool"===t)return ee(e,new Uint8Array(n));throw new Error(`Unknown data type ${t}`)}function ae(e){e.forEach((t=>{E(Number.isInteger(t)&&t>=0,(()=>`Tensor must have a shape comprised of positive integers but got shape [${e}].`))}))}function re(e,t,n){if(0===t)return 0;if(1===t)return e[0];let s=e[e.length-1];for(let t=0;t<e.length-1;++t)s+=n[t]*e[t];return s}function ie(e,t,n){if(0===t)return[];if(1===t)return[e];const s=new Array(t);for(let t=0;t<s.length-1;++t)s[t]=Math.floor(e/n[t]),e-=s[t]*n[t];return s[s.length-1]=e,s}function oe(e){return e&&e.then&&"function"==typeof 
e.then}class le{constructor(e){this.global=e,this.flags={},this.flagRegistry={},this.urlFlags={},this.populateURLFlags()}setPlatform(e,t){null!=this.platform&&console.warn(`Platform ${this.platformName} has already been set. Overwriting the platform with ${t}.`),this.platformName=e,this.platform=t}registerFlag(e,t,n){if(this.flagRegistry[e]={evaluationFn:t,setHook:n},null!=this.urlFlags[e]){const t=this.urlFlags[e];console.warn(`Setting feature override from URL ${e}: ${t}.`),this.set(e,t)}}async getAsync(e){return e in this.flags||(this.flags[e]=await this.evaluateFlag(e)),this.flags[e]}get(e){if(e in this.flags)return this.flags[e];const t=this.evaluateFlag(e);if(oe(t))throw new Error(`Flag ${e} cannot be synchronously evaluated. Please use getAsync() instead.`);return this.flags[e]=t,this.flags[e]}getNumber(e){return this.get(e)}getBool(e){return this.get(e)}getFlags(){return this.flags}get features(){return this.flags}set(e,t){if(null==this.flagRegistry[e])throw new Error(`Cannot set flag ${e} as it has not been registered.`);this.flags[e]=t,null!=this.flagRegistry[e].setHook&&this.flagRegistry[e].setHook(t)}evaluateFlag(e){if(null==this.flagRegistry[e])throw new Error(`Cannot evaluate flag '${e}': no evaluation function found.`);return this.flagRegistry[e].evaluationFn()}setFlags(e){this.flags=Object.assign({},e)}reset(){this.flags={},this.urlFlags={},this.populateURLFlags()}populateURLFlags(){if(void 0===this.global||void 0===this.global.location||void 0===this.global.location.search)return;const e=function(e){const t={};return e.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g,((e,...n)=>(function(e,t,n){e[decodeURIComponent(t)]=decodeURIComponent(n||"")}(t,n[0],n[1]),n.join("=")))),t}(this.global.location.search);"tfjsflags"in e&&e.tfjsflags.split(",").forEach((e=>{const[t,n]=e.split(":");this.urlFlags[t]=function(e,t){if("true"===(t=t.toLowerCase())||"false"===t)return"true"===t;if(""+ +t===t)return+t;throw new Error(`Could not parse value flag value ${t} for flag ${e}.`)}(t,n)}))}}function ue(){return he}let ce,he=null;function pe(){if(null==ce){let e;if("undefined"!=typeof window)e=window;else if(void 0!==n.g)e=n.g;else if("undefined"!=typeof process)e=process;else{if("undefined"==typeof self)throw new Error("Could not find a global object");e=self}ce=e}return ce}function de(e,t){const n=function(){const e=pe();return null==e._tfGlobals&&(e._tfGlobals=new Map),e._tfGlobals}();if(n.has(e))return n.get(e);{const s=t();return n.set(e,s),n.get(e)}}const 
fe="Abs",me="Acos",ge="Acosh",ye="Add",be="AddN",xe="All",we="Any",ke="ArgMax",ve="ArgMin",Ne="Asin",Ie="Asinh",$e="Atan",Ce="Atanh",Se="Atan2",Te="AvgPool",Ee="AvgPoolGrad",Ae="AvgPool3D",Re="AvgPool3DGrad",Fe="BatchMatMul",De="BatchToSpaceND",_e="Bincount",Oe="Cast",Me="Ceil",Le="ClipByValue",ze="Complex",Pe="ComplexAbs",Be="Concat",We="Conv2D",Ve="Conv2DBackpropFilter",Ue="Conv2DBackpropInput",Ge="Conv3D",He="Conv3DBackpropFilterV2",je="Conv3DBackpropInputV2",Ke="Cos",qe="Cosh",Xe="Cumsum",Ye="CropAndResize",Je="DenseBincount",Ze="DepthToSpace",Qe="DepthwiseConv2dNative",et="DepthwiseConv2dNativeBackpropFilter",tt="DepthwiseConv2dNativeBackpropInput",nt="Diag",st="Dilation2D",at="Dilation2DBackpropInput",rt="Dilation2DBackpropFilter",it="RealDiv",ot="Elu",lt="EluGrad",ut="Erf",ct="Equal",ht="Exp",pt="ExpandDims",dt="Expm1",ft="FFT",mt="Fill",gt="FlipLeftRight",yt="Floor",bt="FloorDiv",xt="FusedBatchNorm",wt="GatherV2",kt="GatherNd",vt="Greater",Nt="GreaterEqual",It="Identity",$t="IFFT",Ct="Imag",St="IsFinite",Tt="IsInf",Et="IsNan",At="LeakyRelu",Rt="Less",Ft="LessEqual",Dt="LinSpace",_t="Log",Ot="Log1p",Mt="LogicalAnd",Lt="LogicalNot",zt="LogicalOr",Pt="LRN",Bt="LRNGrad",Wt="Max",Vt="Maximum",Ut="MaxPool",Gt="MaxPoolGrad",Ht="MaxPool3D",jt="MaxPool3DGrad",Kt="MaxPoolWithArgmax",qt="Mean",Xt="Min",Yt="Minimum",Jt="MirrorPad",Zt="Mod",Qt="Multinomial",en="Multiply",tn="Neg",nn="NotEqual",sn="NonMaxSuppressionV3",an="NonMaxSuppressionV4",rn="NonMaxSuppressionV5",on="OnesLike",ln="OneHot",un="Pack",cn="PadV2",hn="Pow",pn="Prelu",dn="Prod",fn="Range",mn="Real",gn="Reciprocal",yn="Relu",bn="Reshape",xn="ResizeNearestNeighbor",wn="ResizeNearestNeighborGrad",kn="ResizeBilinear",vn="ResizeBilinearGrad",Nn="Relu6",In="Reverse",$n="Round",Cn="Rsqrt",Sn="ScatterNd",Tn="Select",En="Selu",An="Slice",Rn="Sin",Fn="Sinh",Dn="Sign",_n="Sigmoid",On="Softplus",Mn="Sqrt",Ln="Sum",zn="SpaceToBatchND",Pn="SplitV",Bn="Softmax",Wn="SquaredDifference",Vn="Square",Un="Sub",Gn="SparseToDense",Hn="StridedSlice",jn="Tan",Kn="Tanh",qn="Tile",Xn="TopK",Yn="Transform",Jn="Transpose",Zn="Unique",Qn="Unpack",es="UnsortedSegmentSum",ts="ZerosLike",ns="Step",ss="FromPixels",as="RotateWithOffset",rs="_FusedMatMul",is="FusedConv2D",os="FusedDepthwiseConv2D",ls=de("kernelRegistry",(()=>new Map)),us=de("gradRegistry",(()=>new Map));function cs(e,t){const n=ms(e,t);return ls.get(n)}function hs(e){return us.get(e)}function ps(e){const t=ls.entries(),n=[];for(;;){const{done:s,value:a}=t.next();if(s)break;const[r,i]=a,[o]=r.split("_");o===e&&n.push(i)}return n}function ds(e){const{kernelName:t,backendName:n}=e,s=ms(t,n);ls.has(s)&&console.warn(`The kernel '${t}' for backend '${n}' is already registered`),ls.set(s,e)}function fs(e){const{kernelName:t}=e;us.has(t)&&ue().getBool("DEBUG")&&console.warn(`Overriding the gradient for '${t}'`),us.set(t,e)}function ms(e,t){return`${t}_${e}`}function gs(e,t){return"string"===t?xs(e):ys([e],t)}function ys(e,t){if("string"===t)throw new Error("Cannot convert a string[] to a TypedArray");if(Array.isArray(e)&&(e=F(e)),ue().getBool("DEBUG")&&function(e,t){for(let n=0;n<e.length;n++){const s=e[n];if(isNaN(s)||!isFinite(s))throw Error(`A tensor of type ${t} being uploaded contains ${s}.`)}}(e,t),function(e,t){return e instanceof Float32Array&&"float32"===t||e instanceof Int32Array&&"int32"===t||e instanceof Uint8Array&&"bool"===t}(e,t))return e;if(null==t||"float32"===t||"complex64"===t)return new Float32Array(e);if("int32"===t)return new Int32Array(e);if("bool"===t){const t=new 
Uint8Array(e.length);for(let n=0;n<t.length;++n)0!==Math.round(e[n])&&(t[n]=1);return t}throw new Error(`Unknown data type ${t}`)}function bs(){return ue().platform.now()}function xs(e,t="utf-8"){return t=t||"utf-8",ue().platform.encode(e,t)}function ws(e,t="utf-8"){return t=t||"utf-8",ue().platform.decode(e,t)}class ks{constructor(e,t){this.backendTimer=e,this.logger=t,null==t&&(this.logger=new Ns)}profileKernel(e,t,n){let s;const a=()=>{s=n()};let r;const i=bs();if(this.backendTimer.timerAvailable())r=this.backendTimer.time(a);else{a();for(const e of s)e.dataSync();r=Promise.resolve({kernelMs:bs()-i})}if(ue().getBool("CHECK_COMPUTATION_FOR_ERRORS"))for(let t=0;t<s.length;t++){const n=s[t];n.data().then((t=>{vs(t,n.dtype,e)}))}return{kernelName:e,outputs:s,inputs:t,timeMs:r.then((e=>e.kernelMs)),extraInfo:r.then((e=>null!=e.getExtraProfileInfo?e.getExtraProfileInfo():""))}}logKernelProfile(e){const{kernelName:t,outputs:n,timeMs:s,inputs:a,extraInfo:r}=e;n.forEach((e=>{Promise.all([e.data(),s,r]).then((n=>{this.logger.logKernelProfile(t,e,n[0],n[1],a,n[2])}))}))}}function vs(e,t,n){if("float32"!==t)return!1;for(let t=0;t<e.length;t++){const s=e[t];if(isNaN(s)||!isFinite(s))return console.warn(`Found ${s} in the result of '${n}'`),!0}return!1}class Ns{logKernelProfile(e,t,n,s,a,r){const i="number"==typeof s?L(`${s}ms`,9):s.error,o=L(e,25),l=t.rank,u=t.size,c=L(t.shape.toString(),14);let h="";for(const e in a){const n=a[e];if(null!=n){const s=n.shape||t.shape,a=s.length;h+=`${e}: ${a}D ${a>0?s:""} `}}console.log(`%c${o}\t%c${i}\t%c${l}D ${c}\t%c${u}\t%c${h}\t%c${r}`,"font-weight:bold","color:red","color:blue","color: orange","color: green","color: steelblue")}}function Is(e,t,n,s){const a=Z(t),r=function(e,t,n,s){const a=D(t),r=s[s.length-1],i=new Array(r).fill(0),o=t.length,l="complex64"===n?Ts(e):e;if(o>1)for(let e=0;e<a/r;e++){const t=e*r;for(let e=0;e<r;e++)i[e]=Math.max(i[e],$s(l[t+e],0,n).length)}return i}(e,t,n,a),i=t.length,o=Ss(e,t,n,a,r),l=["Tensor"];return s&&(l.push(` dtype: ${n}`),l.push(` rank: ${i}`),l.push(` shape: [${t}]`),l.push(" values:")),l.push(o.map((e=>" "+e)).join("\n")),l.join("\n")}function $s(e,t,n){let s;return s=Array.isArray(e)?`${parseFloat(e[0].toFixed(7))} + ${parseFloat(e[1].toFixed(7))}j`:K(e)?`'${e}'`:"bool"===n?Cs(e):parseFloat(e.toFixed(7)).toString(),L(s,t)}function Cs(e){return 0===e?"false":"true"}function Ss(e,t,n,s,a,r=!0){const i="complex64"===n?2:1,o=t[0],l=t.length;if(0===l)return"complex64"===n?[$s(Ts(e)[0],0,n)]:"bool"===n?[Cs(e[0])]:[e[0].toString()];if(1===l){if(o>20){const t=3*i;let s=Array.from(e.slice(0,t)),r=Array.from(e.slice((o-3)*i,o*i));return"complex64"===n&&(s=Ts(s),r=Ts(r)),["["+s.map(((e,t)=>$s(e,a[t],n))).join(", ")+", ..., "+r.map(((e,t)=>$s(e,a[o-3+t],n))).join(", ")+"]"]}return["["+("complex64"===n?Ts(e):Array.from(e)).map(((e,t)=>$s(e,a[t],n))).join(", ")+"]"]}const u=t.slice(1),c=s.slice(1),h=s[0]*i,p=[];if(o>20){for(let t=0;t<3;t++){const s=t*h,r=s+h;p.push(...Ss(e.slice(s,r),u,n,c,a,!1))}p.push("...");for(let t=o-3;t<o;t++){const s=t*h,r=s+h;p.push(...Ss(e.slice(s,r),u,n,c,a,t===o-1))}}else for(let t=0;t<o;t++){const s=t*h,r=s+h;p.push(...Ss(e.slice(s,r),u,n,c,a,t===o-1))}const d=2===l?",":"";p[0]="["+p[0]+d;for(let e=1;e<p.length-1;e++)p[e]=" "+p[e]+d;let f=",\n";for(let e=2;e<l;e++)f+="\n";return p[p.length-1]=" "+p[p.length-1]+"]"+(r?"":f),p}function Ts(e){const t=[];for(let n=0;n<e.length;n+=2)t.push([e[n],e[n+1]]);return t}class Es{constructor(e,t,n){if(this.dtype=t,this.shape=e.slice(),this.size=D(e),null!=n){const 
e=n.length;E(e===this.size,(()=>`Length of values '${e}' does not match the size inferred by the shape '${this.size}'.`))}if("complex64"===t)throw new Error("complex64 dtype TensorBuffers are not supported. Please create a TensorBuffer for the real and imaginary parts separately and call tf.complex(real, imag).");this.values=n||U(t,this.size),this.strides=Z(e)}set(e,...t){0===t.length&&(t=[0]),E(t.length===this.rank,(()=>`The number of provided coordinates (${t.length}) must match the rank (${this.rank})`));const n=this.locToIndex(t);this.values[n]=e}get(...e){0===e.length&&(e=[0]);let t=0;for(const n of e){if(n<0||n>=this.shape[t]){const t=`Requested out of range element at ${e}. Buffer shape=${this.shape}`;throw new Error(t)}t++}let n=e[e.length-1];for(let t=0;t<e.length-1;++t)n+=this.strides[t]*e[t];return this.values[n]}locToIndex(e){if(0===this.rank)return 0;if(1===this.rank)return e[0];let t=e[e.length-1];for(let n=0;n<e.length-1;++n)t+=this.strides[n]*e[n];return t}indexToLoc(e){if(0===this.rank)return[];if(1===this.rank)return[e];const t=new Array(this.shape.length);for(let n=0;n<t.length-1;++n)t[n]=Math.floor(e/this.strides[n]),e-=t[n]*this.strides[n];return t[t.length-1]=e,t}get rank(){return this.shape.length}toTensor(){return As().makeTensor(this.values,this.shape,this.dtype)}}let As=null,Rs=null,Fs=null;class Ds{constructor(e,t,n,s){this.kept=!1,this.isDisposedInternal=!1,this.shape=e.slice(),this.dtype=t||"float32",this.size=D(e),this.strides=Z(e),this.dataId=n,this.id=s,this.rankType=this.rank<5?this.rank.toString():"higher"}get rank(){return this.shape.length}async buffer(){const e=await this.data();return Rs.buffer(this.shape,this.dtype,e)}bufferSync(){return Rs.buffer(this.shape,this.dtype,this.dataSync())}async array(){const e=await this.data();return ee(this.shape,e)}arraySync(){return ee(this.shape,this.dataSync())}async data(){this.throwIfDisposed();const e=As().read(this.dataId);if("string"===this.dtype){const t=await e;try{return t.map((e=>ws(e)))}catch(e){throw new Error("Failed to decode the string bytes into utf-8. To get the original bytes, call tensor.bytes().")}}return e}dataSync(){this.throwIfDisposed();const e=As().readSync(this.dataId);if("string"===this.dtype)try{return e.map((e=>ws(e)))}catch(e){throw new Error("Failed to decode the string bytes into utf-8. 
To get the original bytes, call tensor.bytes().")}return e}async bytes(){this.throwIfDisposed();const e=await As().read(this.dataId);return"string"===this.dtype?e:new Uint8Array(e.buffer)}dispose(){this.isDisposed||(As().disposeTensor(this),this.isDisposedInternal=!0)}get isDisposed(){return this.isDisposedInternal}throwIfDisposed(){if(this.isDisposed)throw new Error("Tensor is disposed.")}print(e=!1){return Rs.print(this,e)}clone(){return this.throwIfDisposed(),Rs.clone(this)}toString(e=!1){return Is(this.dataSync(),this.shape,this.dtype,e)}cast(e){return this.throwIfDisposed(),Rs.cast(this,e)}variable(e=!0,t,n){return this.throwIfDisposed(),As().makeVariable(this,e,t,n)}}function _s(){return de("Tensor",(()=>Ds))}Object.defineProperty(Ds,Symbol.hasInstance,{value:e=>!!e&&null!=e.data&&null!=e.dataSync&&null!=e.throwIfDisposed}),_s();class Os extends Ds{constructor(e,t,n,s){super(e.shape,e.dtype,e.dataId,s),this.trainable=t,this.name=n}assign(e){if(e.dtype!==this.dtype)throw new Error(`dtype of the new value (${e.dtype}) and previous value (${this.dtype}) must match`);if(!_(e.shape,this.shape))throw new Error(`shape of the new value (${e.shape}) and previous value (${this.shape}) must match`);As().disposeTensor(this),this.dataId=e.dataId,As().incRef(this,null)}dispose(){As().disposeVariable(this),this.isDisposedInternal=!0}}var Ms,Ls,zs,Ps,Bs;Object.defineProperty(Os,Symbol.hasInstance,{value:e=>e instanceof Ds&&null!=e.assign&&e.assign instanceof Function}),function(e){e.R0="R0",e.R1="R1",e.R2="R2",e.R3="R3",e.R4="R4",e.R5="R5",e.R6="R6"}(Ms||(Ms={})),function(e){e.float32="float32",e.int32="int32",e.bool="int32",e.complex64="complex64"}(Ls||(Ls={})),function(e){e.float32="float32",e.int32="int32",e.bool="bool",e.complex64="complex64"}(zs||(zs={})),function(e){e.float32="float32",e.int32="float32",e.bool="float32",e.complex64="complex64"}(Ps||(Ps={})),function(e){e.float32="complex64",e.int32="complex64",e.bool="complex64",e.complex64="complex64"}(Bs||(Bs={}));const Ws={float32:Ps,int32:Ls,bool:zs,complex64:Bs};function Vs(e,t){if("string"===e||"string"===t){if("string"===e&&"string"===t)return"string";throw new Error(`Can not upcast ${e} with ${t}`)}return Ws[e][t]}function Us(e){return Vs(e,"int32")}function Gs(e,t){if(e.dtype===t.dtype)return[e,t];const n=Vs(e.dtype,t.dtype);return[e.cast(n),t.cast(n)]}function Hs(e,t){return t.some((t=>t.id===e.id))}function js(e){const t=[];return Ks(e,t,new Set),t}function Ks(e,t,n){if(null==e)return;if(e instanceof Ds)return void t.push(e);if(s=e,!Array.isArray(s)&&"object"!=typeof s)return;var s;const a=e;for(const e in a){const s=a[e];n.has(s)||(n.add(s),Ks(s,t,n))}}function qs(e){return null!=e.kernelName}class Xs{constructor(){this.registeredVariables={},this.nextTapeNodeId=0,this.numBytes=0,this.numTensors=0,this.numStringTensors=0,this.numDataBuffers=0,this.gradientDepth=0,this.kernelDepth=0,this.scopeStack=[],this.numDataMovesStack=[],this.nextScopeId=0,this.tensorInfo=new WeakMap,this.profiling=!1,this.activeProfile={newBytes:0,newTensors:0,peakBytes:0,kernels:[],result:null,get kernelNames(){return Array.from(new Set(this.kernels.map((e=>e.name))))}}}dispose(){for(const e in this.registeredVariables)this.registeredVariables[e].dispose()}}class Ys{constructor(e){this.ENV=e,this.registry={},this.registryFactory={},this.pendingBackendInitId=0,this.state=new Xs}async ready(){if(null!=this.pendingBackendInit)return this.pendingBackendInit.then((()=>{}));if(null!=this.backendInstance)return;const e=this.getSortedBackends();for(let 
t=0;t<e.length;t++){const n=e[t];if(await this.initializeBackend(n).success)return void await this.setBackend(n)}throw new Error("Could not initialize any backends, all backend initializations failed.")}get backend(){if(null!=this.pendingBackendInit)throw new Error(`Backend '${this.backendName}' has not yet been initialized. Make sure to await tf.ready() or await tf.setBackend() before calling other methods`);if(null==this.backendInstance){const{name:e,asyncInit:t}=this.initializeBackendsAndReturnBest();if(t)throw new Error(`The highest priority backend '${e}' has not yet been initialized. Make sure to await tf.ready() or await tf.setBackend() before calling other methods`);this.setBackend(e)}return this.backendInstance}backendNames(){return Object.keys(this.registryFactory)}findBackend(e){if(!(e in this.registry)){if(!(e in this.registryFactory))return null;{const{asyncInit:t}=this.initializeBackend(e);if(t)return null}}return this.registry[e]}findBackendFactory(e){return e in this.registryFactory?this.registryFactory[e].factory:null}registerBackend(e,t,n=1){return e in this.registryFactory?(console.warn(`${e} backend was already registered. Reusing existing backend factory.`),!1):(this.registryFactory[e]={factory:t,priority:n},!0)}async setBackend(e){if(null==this.registryFactory[e])throw new Error(`Backend name '${e}' not found in registry`);if(this.backendName=e,null==this.registry[e]){this.backendInstance=null;const{success:t,asyncInit:n}=this.initializeBackend(e);if(!(n?await t:t))return!1}return this.backendInstance=this.registry[e],this.setupRegisteredKernels(),this.profiler=new ks(this.backendInstance),!0}setupRegisteredKernels(){ps(this.backendName).forEach((e=>{null!=e.setupFunc&&e.setupFunc(this.backendInstance)}))}disposeRegisteredKernels(e){ps(e).forEach((t=>{null!=t.disposeFunc&&t.disposeFunc(this.registry[e])}))}initializeBackend(e){const t=this.registryFactory[e];if(null==t)throw new Error(`Cannot initialize backend ${e}, no registration found.`);try{const n=t.factory();if(!n||n instanceof I||"function"!=typeof n.then)return this.registry[e]=n,{success:!0,asyncInit:!1};{const t=++this.pendingBackendInitId,s=n.then((n=>!(t<this.pendingBackendInitId||(this.registry[e]=n,this.pendingBackendInit=null,0)))).catch((n=>(t<this.pendingBackendInitId||(this.pendingBackendInit=null,console.warn(`Initialization of backend ${e} failed`),console.warn(n.stack||n.message)),!1)));return this.pendingBackendInit=s,{success:s,asyncInit:!0}}}catch(t){return console.warn(`Initialization of backend ${e} failed`),console.warn(t.stack||t.message),{success:!1,asyncInit:!1}}}removeBackend(e){if(!(e in this.registryFactory))throw new Error(`${e} backend not found in registry`);this.backendName===e&&null!=this.pendingBackendInit&&this.pendingBackendInitId++,e in this.registry&&(this.disposeRegisteredKernels(e),this.registry[e].dispose(),delete this.registry[e]),delete this.registryFactory[e],this.backendName===e&&(this.pendingBackendInit=null,this.backendName=null,this.backendInstance=null)}getSortedBackends(){if(0===Object.keys(this.registryFactory).length)throw new Error("No backend found in registry.");return Object.keys(this.registryFactory).sort(((e,t)=>this.registryFactory[t].priority-this.registryFactory[e].priority))}initializeBackendsAndReturnBest(){const e=this.getSortedBackends();for(let t=0;t<e.length;t++){const n=e[t],{success:s,asyncInit:a}=this.initializeBackend(n);if(a||s)return{name:n,asyncInit:a}}throw new Error("Could not initialize any backends, all backend initializations 
failed.")}moveData(e,t){const n=this.state.tensorInfo.get(t),s=n.backend,a=this.readSync(t),r=s.refCount(t);s.disposeData(t,!0),n.backend=e,e.move(t,a,n.shape,n.dtype,r),this.shouldCheckForMemLeaks()&&this.state.numDataMovesStack[this.state.numDataMovesStack.length-1]++}tidy(e,t){let n,s=null;if(null==t){if("function"!=typeof e)throw new Error("Please provide a function to tidy()");t=e}else{if("string"!=typeof e&&!(e instanceof String))throw new Error("When calling with two arguments, the first argument to tidy() must be a string");if("function"!=typeof t)throw new Error("When calling with two arguments, the 2nd argument to tidy() must be a function");s=e}return this.scopedRun((()=>this.startScope(s)),(()=>this.endScope(n)),(()=>(n=t(),n instanceof Promise&&console.error("Cannot return a Promise inside of tidy."),n)))}scopedRun(e,t,n){e();try{const e=n();return t(),e}catch(e){throw t(),e}}nextTensorId(){return Ys.nextTensorId++}nextVariableId(){return Ys.nextVariableId++}clone(e){const t=Zs.runKernel(It,{x:e}),n={x:e};return this.addTapeNode(this.state.activeScope.name,n,[t],(e=>({x:()=>{const t={x:e};return Zs.runKernel(Oe,t,{dtype:"float32"})}})),[],{}),t}runKernel(e,t,n){if(null==cs(e,this.backendName))throw new Error(`Kernel '${e}' not registered for backend '${this.backendName}'`);return this.runKernelFunc({kernelName:e,inputs:t,attrs:n})}shouldCheckForMemLeaks(){return this.ENV.getBool("IS_TEST")}checkKernelForMemLeak(e,t,n){const s=this.backend.numDataIds();let a=0;n.forEach((e=>{a+="complex64"===e.dtype?3:1}));const r=this.state.numDataMovesStack[this.state.numDataMovesStack.length-1],i=s-t-a-r;if(i>0)throw new Error(`Backend '${this.backendName}' has an internal memory leak (${i} data ids) after running '${e}'`)}runKernelFunc(e){let t,n=[];const s=this.isTapeOn(),a=this.state.numBytes,r=this.state.numTensors;let i,o;this.shouldCheckForMemLeaks()&&this.state.numDataMovesStack.push(0),null==this.backendName&&this.backend;const l=qs(e)?e.kernelName:null!=this.state.activeScope?this.state.activeScope.name:"";if(qs(e)){const{kernelName:t,inputs:a,attrs:r}=e;null==this.backendName&&this.backend;const l=cs(t,this.backendName);E(null!=l,(()=>`Cannot find registered kernel '${t}' for backend '${this.backendName}'`)),i=()=>{const e=this.backend.numDataIds();o=l.kernelFunc({inputs:a,attrs:r,backend:this.backend});const i=Array.isArray(o)?o:[o];this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(t,e,i);const u=i.map((e=>{if(null!=e.rank)return e;const{dataId:t,shape:n,dtype:s}=e;return this.makeTensorFromDataId(t,n,s)}));if(s){const e=this.getTensorsForGradient(t,a,u);n=this.saveTensorsForBackwardMode(e)}return u}}else{const{forwardFunc:t}=e,a=e=>{s&&(n=e.map((e=>this.keep(this.clone(e)))))};i=()=>{const e=this.backend.numDataIds();o=this.tidy((()=>t(this.backend,a)));const n=Array.isArray(o)?o:[o];return this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(l,e,n),n}}const{inputs:u,attrs:c}=e,h=qs(e)?null:e.backwardsFunc;let p;return 
this.scopedRun((()=>this.state.kernelDepth++),(()=>this.state.kernelDepth--),(()=>{this.ENV.getBool("DEBUG")||this.state.profiling?(p=this.profiler.profileKernel(l,u,(()=>i())),this.ENV.getBool("DEBUG")&&this.profiler.logKernelProfile(p),t=p.outputs):t=i()})),s&&this.addTapeNode(l,u,t,h,n,c),this.state.profiling&&this.state.activeProfile.kernels.push({name:l,bytesAdded:this.state.numBytes-a,totalBytesSnapshot:this.state.numBytes,tensorsAdded:this.state.numTensors-r,totalTensorsSnapshot:this.state.numTensors,inputShapes:Object.keys(u).map((e=>null!=u[e]?u[e].shape:null)),outputShapes:t.map((e=>e.shape)),kernelTimeMs:p.timeMs,extraInfo:p.extraInfo}),Array.isArray(o)?t:t[0]}saveTensorsForBackwardMode(e){return e.map((e=>this.keep(this.clone(e))))}getTensorsForGradient(e,t,n){const s=hs(e);if(null!=s){const e=s.inputsToSave||[],a=s.outputsToSave||[];let r;s.saveAllInputs?(E(Array.isArray(t),(()=>"saveAllInputs is true, expected inputs to be an array.")),r=Object.keys(t).map((e=>t[e]))):r=e.map((e=>t[e]));const i=n.filter(((e,t)=>a[t]));return r.concat(i)}return[]}makeTensor(e,t,n,s){if(null==e)throw new Error("Values passed to engine.makeTensor() are null");n=n||"float32",s=s||this.backend;let a=e;"string"===n&&K(e[0])&&(a=e.map((e=>xs(e))));const r=s.write(a,t,n),i=new Ds(t,n,r,this.nextTensorId());if(this.trackTensor(i,s),"string"===n){const e=this.state.tensorInfo.get(r),t=function(e){if(null==e)return 0;let t=0;return e.forEach((e=>t+=e.length)),t}(a);this.state.numBytes+=t-e.bytes,e.bytes=t}return i}makeTensorFromDataId(e,t,n,s){const a=new Ds(t,n=n||"float32",e,this.nextTensorId());return this.trackTensor(a,s),a}makeVariable(e,t=!0,n,s){n=n||this.nextVariableId().toString(),null!=s&&s!==e.dtype&&(e=e.cast(s));const a=new Os(e,t,n,this.nextTensorId());if(null!=this.state.registeredVariables[a.name])throw new Error(`Variable with name ${a.name} was already registered`);return this.state.registeredVariables[a.name]=a,this.incRef(a,this.backend),a}trackTensor(e,t){this.state.numTensors++,"string"===e.dtype&&this.state.numStringTensors++;let n=0;"complex64"!==e.dtype&&"string"!==e.dtype&&(n=e.size*j(e.dtype)),this.state.numBytes+=n,this.state.tensorInfo.has(e.dataId)||(this.state.numDataBuffers++,this.state.tensorInfo.set(e.dataId,{backend:t||this.backend,dtype:e.dtype,shape:e.shape,bytes:n})),e instanceof Os||this.track(e)}incRef(e,t){this.trackTensor(e,t),this.backend.incRef(e.dataId)}removeDataId(e,t){this.state.tensorInfo.has(e)&&this.state.tensorInfo.get(e).backend===t&&(this.state.tensorInfo.delete(e),this.state.numDataBuffers--)}disposeTensor(e){if(!this.state.tensorInfo.has(e.dataId))return;const t=this.state.tensorInfo.get(e.dataId);if(this.state.numTensors--,"string"===e.dtype&&(this.state.numStringTensors--,this.state.numBytes-=t.bytes),"complex64"!==e.dtype&&"string"!==e.dtype){const t=e.size*j(e.dtype);this.state.numBytes-=t}t.backend.disposeData(e.dataId)&&this.removeDataId(e.dataId,t.backend)}disposeVariables(){for(const e in this.state.registeredVariables){const t=this.state.registeredVariables[e];this.disposeVariable(t)}}disposeVariable(e){this.disposeTensor(e),null!=this.state.registeredVariables[e.name]&&delete this.state.registeredVariables[e.name]}memory(){const e=this.backend.memory();return e.numTensors=this.state.numTensors,e.numDataBuffers=this.state.numDataBuffers,e.numBytes=this.state.numBytes,this.state.numStringTensors>0&&(e.unreliable=!0,null==e.reasons&&(e.reasons=[]),e.reasons.push("Memory usage by string tensors is approximate (2 bytes per 
character)")),e}async profile(e){this.state.profiling=!0;const t=this.state.numBytes,n=this.state.numTensors;this.state.activeProfile.kernels=[],this.state.activeProfile.result=await e(),this.state.profiling=!1,this.state.activeProfile.peakBytes=Math.max(...this.state.activeProfile.kernels.map((e=>e.totalBytesSnapshot))),this.state.activeProfile.newBytes=this.state.numBytes-t,this.state.activeProfile.newTensors=this.state.numTensors-n;for(const e of this.state.activeProfile.kernels)e.kernelTimeMs=await e.kernelTimeMs,e.extraInfo=await e.extraInfo;return this.state.activeProfile}isTapeOn(){return this.state.gradientDepth>0&&0===this.state.kernelDepth}addTapeNode(e,t,n,s,a,r){const i={id:this.state.nextTapeNodeId++,kernelName:e,inputs:t,outputs:n,saved:a},o=hs(e);null!=o&&(s=o.gradFunc),null!=s&&(i.gradient=e=>(e=e.map(((e,t)=>{if(null==e){const e=n[t],s=ne(e.size,e.dtype);return this.makeTensor(s,e.shape,e.dtype)}return e})),s(e.length>1?e:e[0],a,r))),this.state.activeTape.push(i)}keep(e){return e.kept=!0,e}startTape(){0===this.state.gradientDepth&&(this.state.activeTape=[]),this.state.gradientDepth++}endTape(){this.state.gradientDepth--}startScope(e){const t={track:[],name:"unnamed scope",id:this.state.nextScopeId++};e&&(t.name=e),this.state.scopeStack.push(t),this.state.activeScope=t}endScope(e){const t=js(e),n=new Set(t.map((e=>e.id)));for(let e=0;e<this.state.activeScope.track.length;e++){const t=this.state.activeScope.track[e];t.kept||n.has(t.id)||t.dispose()}const s=this.state.scopeStack.pop();this.state.activeScope=0===this.state.scopeStack.length?null:this.state.scopeStack[this.state.scopeStack.length-1],t.forEach((e=>{e.kept||e.scopeId!==s.id||this.track(e)}))}gradients(e,t,n,s=!1){if(E(t.length>0,(()=>"gradients() received an empty list of xs.")),null!=n&&"float32"!==n.dtype)throw new Error(`dy must have 'float32' dtype, but has '${n.dtype}'`);const a=this.scopedRun((()=>this.startTape()),(()=>this.endTape()),(()=>this.tidy("forward",e)));E(a instanceof Ds,(()=>"The result y returned by f() must be a tensor."));const r=function(e,t,n){const s={},a={};for(let e=0;e<t.length;e++)s[t[e].id]=!0;for(let n=0;n<e.length;n++){const r=e[n],i=r.inputs;for(const e in i){const n=i[e];let o=!1;for(let e=0;e<t.length;e++)if(s[n.id]){r.outputs.forEach((e=>s[e.id]=!0)),o=!0,a[r.id]=!0;break}if(o)break}}const r={};r[n.id]=!0;const i={};for(let t=e.length-1;t>=0;t--){const n=e[t],s=n.inputs;for(let e=0;e<n.outputs.length;e++)if(r[n.outputs[e].id]){for(const e in s)r[s[e].id]=!0,i[n.id]=!0;break}}const o=[];for(let t=0;t<e.length;t++){const n=e[t];if(a[n.id]&&i[n.id]){const e={};for(const t in n.inputs){const a=n.inputs[t];s[a.id]&&(e[t]=a)}const t=Object.assign({},n);t.inputs=e,t.outputs=n.outputs,o.push(t)}}return o}(this.state.activeTape,t,a);if(!s&&0===r.length&&t.length>0)throw new Error("Cannot compute gradient of y=f(x) with respect to x. Make sure that the f you passed encloses all operations that lead from x to y.");return this.tidy("backward",(()=>{const e={};e[a.id]=null==n?function(e){const t=te(D(e),"float32");return Zs.makeTensor(t,e,"float32")}(a.shape):n,function(e,t,n,s){for(let a=t.length-1;a>=0;a--){const r=t[a],i=[];if(r.outputs.forEach((t=>{const n=e[t.id];null!=n?i.push(n):i.push(null)})),null==r.gradient)throw new Error(`Cannot compute gradient: gradient function not found for ${r.kernelName}.`);const o=r.gradient(i);for(const t in r.inputs){if(!(t in o))throw new Error(`Cannot backprop through input ${t}. 
Available gradients found: ${Object.keys(o)}.`);const a=n((()=>o[t]()));if("float32"!==a.dtype)throw new Error(`Error in gradient for op ${r.kernelName}. The gradient of input ${t} must have 'float32' dtype, but has '${a.dtype}'`);const i=r.inputs[t];if(!_(a.shape,i.shape))throw new Error(`Error in gradient for op ${r.kernelName}. The gradient of input '${t}' has shape '${a.shape}', which does not match the shape of the input '${i.shape}'`);if(null==e[i.id])e[i.id]=a;else{const t=e[i.id];e[i.id]=s(t,a),t.dispose()}}}}(e,r,(e=>this.tidy(e)),Qs);const s=t.map((t=>e[t.id]));return 0===this.state.gradientDepth&&(this.state.activeTape.forEach((e=>{for(const t of e.saved)t.dispose()})),this.state.activeTape=null),{value:a,grads:s}}))}customGrad(e){return E(Y(e),(()=>"The f passed in customGrad(f) must be a function.")),(...t)=>{let n;E(t.every((e=>e instanceof Ds)),(()=>"The args passed in customGrad(f)(x1, x2,...) must all be tensors"));const s={};return t.forEach(((e,t)=>{s[t]=e})),this.runKernelFunc({forwardFunc:(s,a)=>(n=e(...t,a),E(n.value instanceof Ds,(()=>"The function f passed in customGrad(f) must return an object where `obj.value` is a tensor")),E(Y(n.gradFunc),(()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function.")),n.value),backwardsFunc:(e,s)=>{const a=n.gradFunc(e,s),r=Array.isArray(a)?a:[a];E(r.length===t.length,(()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns the same number of tensors as inputs passed to f(...).")),E(r.every((e=>e instanceof Ds)),(()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns a list of only tensors."));const i={};return r.forEach(((e,t)=>{i[t]=()=>e})),i},inputs:s})}}readSync(e){return this.state.tensorInfo.get(e).backend.readSync(e)}read(e){return this.state.tensorInfo.get(e).backend.read(e)}async time(e){const t=bs(),n=await this.backend.time(e);return n.wallMs=bs()-t,n}track(e){return null!=this.state.activeScope&&(e.scopeId=this.state.activeScope.id,this.state.activeScope.track.push(e)),e}get registeredVariables(){return this.state.registeredVariables}reset(){this.pendingBackendInitId++,this.state.dispose(),this.ENV.reset(),this.state=new Xs;for(const e in this.registry)this.disposeRegisteredKernels(e),this.registry[e].dispose(),delete this.registry[e];this.backendName=null,this.backendInstance=null,this.pendingBackendInit=null}}function Js(){const e=pe();if(null==e._tfengine){const t=new le(e);e._tfengine=new Ys(t)}var t;return t=e._tfengine.ENV,he=t,As=()=>e._tfengine,e._tfengine}Ys.nextTensorId=0,Ys.nextVariableId=0;const Zs=Js();function Qs(e,t){const n={a:e,b:t};return Zs.runKernel(ye,n)}function ea(){if("undefined"!=typeof navigator&&null!=navigator){const e=navigator.userAgent||navigator.vendor||window.opera;return/(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows ce|xda|xiino/i.test(e)||/1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\-m|r |s 
)|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\-(n|u)|c55\/|capi|ccwa|cdm\-|cell|chtm|cldc|cmd\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\-s|devi|dica|dmob|do(c|p)o|ds(12|\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\-|_)|g1 u|g560|gene|gf\-5|g\-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd\-(m|p|t)|hei\-|hi(pt|ta)|hp( i|ip)|hs\-c|ht(c(\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\-(20|go|ma)|i230|iac( |\-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc\-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|\-[a-w])|libw|lynx|m1\-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\-2|po(ck|rt|se)|prox|psio|pt\-g|qa\-a|qc(07|12|21|32|60|\-[2-7]|i\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\-|oo|p\-)|sdk\/|se(c(\-|0|1)|47|mc|nd|ri)|sgh\-|shar|sie(\-|m)|sk\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\-|v\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\-|tdg\-|tel(i|m)|tim\-|t\-mo|to(pl|sh)|ts(70|m\-|m3|m5)|tx\-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\-|your|zeto|zte\-/i.test(e.substr(0,4))}return!1}function ta(){return"undefined"!=typeof window&&null!=window.document||"undefined"!=typeof WorkerGlobalScope}const na=ue();function sa(e,t){let n=e;if(H(e))return"string"===t?[]:[e.length];if(!Array.isArray(e))return[];const s=[];for(;Array.isArray(n)||H(n)&&"string"!==t;)s.push(n.length),n=n[0];return Array.isArray(e)&&ue().getBool("TENSORLIKE_CHECK_SHAPE_CONSISTENCY")&&aa(e,s,[]),s}function aa(e,t,n){if(n=n||[],!Array.isArray(e)&&!H(e))return void E(0===t.length,(()=>`Element arr[${n.join("][")}] is a primitive, but should be an array/TypedArray of ${t[0]} elements`));E(t.length>0,(()=>`Element arr[${n.join("][")}] should be a primitive, but is an array of ${e.length} elements`)),E(e.length===t[0],(()=>`Element arr[${n.join("][")}] should have ${t[0]} elements, but has ${e.length} elements`));const s=t.slice(1);for(let t=0;t<e.length;++t)aa(e[t],s,n.concat(t))}function ra(e,t,n,s){if("string_or_numeric"!==e){if(null==e)throw new Error("Expected dtype cannot be null.");if("numeric"!==e&&e!==t||"numeric"===e&&"string"===t)throw new Error(`Argument '${n}' passed to '${s}' must be ${e} tensor, but got ${t} tensor`)}}function ia(e,t,n,s="numeric"){if(e instanceof Ds)return ra(s,e.dtype,t,n),e;let a=X(e);if("string"!==a&&["bool","int32","float32"].indexOf(s)>=0&&(a=s),ra(s,a,t,n),null==e||!H(e)&&!Array.isArray(e)&&"number"!=typeof e&&"boolean"!=typeof e&&"string"!=typeof e){const s=null==e?"null":e.constructor.name;throw new Error(`Argument '${t}' passed to '${n}' must be a Tensor or TensorLike, but got '${s}'`)}const r=sa(e,a);H(e)||Array.isArray(e)||(e=[e]);const i="string"!==a?ys(e,a):F(e,[],!0);return Zs.makeTensor(i,r,a)}function oa(e,t,n,s="numeric"){if(!Array.isArray(e))throw new Error(`Argument ${t} passed to ${n} must be a \`Tensor[]\` or \`TensorLike[]\``);return e.map(((e,a)=>ia(e,`${t}[${a}]`,n,s)))}function la(e){const t=Object.keys(e);if(1!==t.length)throw new Error(`Please provide an object with a single key (operation name) mapping to a function. 
Got an object with ${t.length} keys.`);let n=t[0];const s=e[n];n.endsWith("_")&&(n=n.substring(0,n.length-1)),n+="__op";const a=(...e)=>{Zs.startScope(n);try{const t=s(...e);return oe(t)&&console.error("Cannot return a Promise inside of tidy."),Zs.endScope(t),t}catch(e){throw Zs.endScope(null),e}};return Object.defineProperty(a,"name",{value:n,configurable:!0}),a}na.registerFlag("DEBUG",(()=>!1),(e=>{e&&console.warn("Debugging mode is ON. The output of every math call will be downloaded to CPU and checked for NaNs. This significantly impacts performance.")})),na.registerFlag("IS_BROWSER",(()=>ta())),na.registerFlag("IS_NODE",(()=>"undefined"!=typeof process&&void 0!==process.versions&&void 0!==process.versions.node)),na.registerFlag("IS_CHROME",(()=>"undefined"!=typeof navigator&&null!=navigator&&null!=navigator.userAgent&&/Chrome/.test(navigator.userAgent)&&/Google Inc/.test(navigator.vendor))),na.registerFlag("PROD",(()=>!1)),na.registerFlag("TENSORLIKE_CHECK_SHAPE_CONSISTENCY",(()=>na.getBool("DEBUG"))),na.registerFlag("DEPRECATION_WARNINGS_ENABLED",(()=>!0)),na.registerFlag("IS_TEST",(()=>!1)),na.registerFlag("CHECK_COMPUTATION_FOR_ERRORS",(()=>!0)),na.registerFlag("WRAP_TO_IMAGEBITMAP",(()=>!1));const ua=la({complex_:function(e,t){const n=ia(e,"real","complex"),s=ia(t,"imag","complex");A(n.shape,s.shape,`real and imag shapes, ${n.shape} and ${s.shape}, must match in call to tf.complex().`);const a={real:n,imag:s};return Zs.runKernel(ze,a)}});function ca(e,t,n,s){if(null==s&&(s=X(e)),"complex64"===s)throw new Error("Cannot construct a complex64 tensor directly. Please use tf.complex(real, imag).");if(!H(e)&&!Array.isArray(e)&&"number"!=typeof e&&"boolean"!=typeof e&&"string"!=typeof e)throw new Error("values passed to tensor(values) must be a number/boolean/string or an array of numbers/booleans/strings, or a TypedArray");if(null!=t){ae(t);const e=D(t),s=D(n);E(e===s,(()=>`Based on the provided shape, [${t}], the tensor should have ${e} values but has ${s}`));for(let e=0;e<n.length;++e){const s=n[e],a=e!==n.length-1||s!==D(t.slice(e));E(n[e]===t[e]||!a,(()=>`Error creating a new Tensor. Inferred shape (${n}) does not match the provided shape (${t}). 
`))}}return H(e)||Array.isArray(e)||(e=[e]),t=t||n,e="string"!==s?ys(e,s):F(e,[],!0),Zs.makeTensor(e,t,s)}function ha(e,t,n){return ca(e,t,sa(e,n),n)}const pa={float32:4,float16:2,int32:4,uint16:2,uint8:1,bool:1,complex64:8};async function da(e,t){const n=[],s=[],a=Array.isArray(e)?e.map((e=>e.name)):Object.keys(e);for(let r=0;r<a.length;++r){const i=a[r],o=Array.isArray(e)?e[r].tensor:e[i];if("float32"!==o.dtype&&"int32"!==o.dtype&&"bool"!==o.dtype&&"string"!==o.dtype&&"complex64"!==o.dtype)throw new Error(`Unsupported dtype in weight '${i}': ${o.dtype}`);const l={name:i,shape:o.shape,dtype:o.dtype};if("string"===o.dtype){const e=new Promise((async e=>{const t=await o.bytes(),n=t.reduce(((e,t)=>e+t.length),0)+4*t.length,s=new Uint8Array(n);let a=0;for(let e=0;e<t.length;e++){const n=t[e],r=new Uint8Array(new Uint32Array([n.length]).buffer);s.set(r,a),a+=4,s.set(n,a),a+=n.length}e(s)}));s.push(e)}else s.push(o.data());null!=t&&(l.group=t),n.push(l)}return{data:fa(await Promise.all(s)),specs:n}}function fa(e){if(null===e)throw new Error(`Invalid input value: ${JSON.stringify(e)}`);let t=0;const n=[];e.forEach((e=>{if(t+=e.byteLength,n.push(e.byteLength===e.buffer.byteLength?e:new e.constructor(e)),!(e instanceof Float32Array||e instanceof Int32Array||e instanceof Uint8Array))throw new Error(`Unsupported TypedArray subtype: ${e.constructor.name}`)}));const s=new Uint8Array(t);let a=0;return n.forEach((e=>{s.set(new Uint8Array(e.buffer),a),a+=e.byteLength})),s.buffer}const ma="undefined"!=typeof Buffer&&("undefined"==typeof Blob||"undefined"==typeof atob||"undefined"==typeof btoa);function ga(e){return ma?Buffer.byteLength(e):new Blob([e]).size}function ya(e){if(1===e.length)return e[0];let t=0;e.forEach((e=>{t+=e.byteLength}));const n=new Uint8Array(t);let s=0;return e.forEach((e=>{n.set(new Uint8Array(e),s),s+=e.byteLength})),n.buffer}function ba(e){if(e.modelTopology instanceof ArrayBuffer)throw new Error("Expected JSON model topology, received ArrayBuffer.");return{dateSaved:new Date,modelTopologyType:"JSON",modelTopologyBytes:null==e.modelTopology?0:ga(JSON.stringify(e.modelTopology)),weightSpecsBytes:null==e.weightSpecs?0:ga(JSON.stringify(e.weightSpecs)),weightDataBytes:null==e.weightData?0:e.weightData.byteLength}}function xa(){const e=function(){const e=e=>{let t=e<<13,n=0;for(;0==(8388608&t);)n-=8388608,t<<=1;return t&=-8388609,n+=947912704,t|n},t=new Uint32Array(2048);t[0]=0;for(let n=1;n<1024;n++)t[n]=e(n);for(let e=1024;e<2048;e++)t[e]=939524096+(e-1024<<13);return t}(),t=function(){const e=new Uint32Array(64);e[0]=0,e[31]=1199570944,e[32]=2147483648,e[63]=3347054592;for(let t=1;t<31;t++)e[t]=t<<23;for(let t=33;t<63;t++)e[t]=2147483648+(t-32<<23);return e}(),n=function(){const e=new Uint32Array(64);for(let t=0;t<64;t++)e[t]=1024;return e[0]=e[32]=0,e}();return s=>{const a=new ArrayBuffer(4*s.length),r=new Uint32Array(a);for(let a=0;a<s.length;a++){const i=s[a],o=e[n[i>>10]+(1023&i)]+t[i>>10];r[a]=o}return new Float32Array(a)}}class wa{constructor(){this.saveRouters=[],this.loadRouters=[]}static getInstance(){return null==wa.instance&&(wa.instance=new wa),wa.instance}static registerSaveRouter(e){wa.getInstance().saveRouters.push(e)}static registerLoadRouter(e){wa.getInstance().loadRouters.push(e)}static getSaveHandlers(e){return wa.getHandlers(e,"save")}static getLoadHandlers(e,t){return wa.getHandlers(e,"load",t)}static getHandlers(e,t,n){const s=[];return("load"===t?wa.getInstance().loadRouters:wa.getInstance().saveRouters).forEach((t=>{const 
a=t(e,n);null!==a&&s.push(a)})),s}}const ka=e=>wa.getSaveHandlers(e),va="tensorflowjs",Na="models_store",Ia="model_info_store";function $a(){if(!ue().getBool("IS_BROWSER"))throw new Error("Failed to obtain IndexedDB factory because the current environmentis not a web browser.");const e="undefined"==typeof window?self:window,t=e.indexedDB||e.mozIndexedDB||e.webkitIndexedDB||e.msIndexedDB||e.shimIndexedDB;if(null==t)throw new Error("The current browser does not appear to support IndexedDB.");return t}function Ca(e){const t=e.result;t.createObjectStore(Na,{keyPath:"modelPath"}),t.createObjectStore(Ia,{keyPath:"modelPath"})}class Sa{constructor(e){if(this.indexedDB=$a(),null==e||!e)throw new Error("For IndexedDB, modelPath must not be null, undefined or empty.");this.modelPath=e}async save(e){if(e.modelTopology instanceof ArrayBuffer)throw new Error("BrowserLocalStorage.save() does not support saving model topology in binary formats yet.");return this.databaseAction(this.modelPath,e)}async load(){return this.databaseAction(this.modelPath)}databaseAction(e,t){return new Promise(((e,n)=>{const s=this.indexedDB.open(va,1);s.onupgradeneeded=()=>Ca(s),s.onsuccess=()=>{const a=s.result;if(null==t){const t=a.transaction(Na,"readonly"),s=t.objectStore(Na).get(this.modelPath);s.onsuccess=()=>{if(null==s.result)return a.close(),n(new Error(`Cannot find model with path '${this.modelPath}' in IndexedDB.`));e(s.result.modelArtifacts)},s.onerror=e=>(a.close(),n(s.error)),t.oncomplete=()=>a.close()}else{const s=ba(t),r=a.transaction(Ia,"readwrite");let i=r.objectStore(Ia);const o=i.put({modelPath:this.modelPath,modelArtifactsInfo:s});let l;o.onsuccess=()=>{l=a.transaction(Na,"readwrite");const o=l.objectStore(Na).put({modelPath:this.modelPath,modelArtifacts:t,modelArtifactsInfo:s});o.onsuccess=()=>e({modelArtifactsInfo:s}),o.onerror=e=>{i=r.objectStore(Ia);const t=i.delete(this.modelPath);t.onsuccess=()=>(a.close(),n(o.error)),t.onerror=e=>(a.close(),n(o.error))}},o.onerror=e=>(a.close(),n(o.error)),r.oncomplete=()=>{null==l?a.close():l.oncomplete=()=>a.close()}}},s.onerror=e=>n(s.error)}))}}Sa.URL_SCHEME="indexeddb://";const Ta=e=>{return ue().getBool("IS_BROWSER")&&!Array.isArray(e)&&e.startsWith(Sa.URL_SCHEME)?(t=e.slice(Sa.URL_SCHEME.length),new Sa(t)):null;var t};wa.registerSaveRouter(Ta),wa.registerLoadRouter(Ta);class Ea{constructor(){this.indexedDB=$a()}async listModels(){return new Promise(((e,t)=>{const n=this.indexedDB.open(va,1);n.onupgradeneeded=()=>Ca(n),n.onsuccess=()=>{const s=n.result,a=s.transaction(Ia,"readonly"),r=a.objectStore(Ia).getAll();r.onsuccess=()=>{const t={};for(const e of r.result)t[e.modelPath]=e.modelArtifactsInfo;e(t)},r.onerror=e=>(s.close(),t(r.error)),a.oncomplete=()=>s.close()},n.onerror=e=>t(n.error)}))}async removeModel(e){var t;return e=(t=e).startsWith(Sa.URL_SCHEME)?t.slice(Sa.URL_SCHEME.length):t,new Promise(((t,n)=>{const s=this.indexedDB.open(va,1);s.onupgradeneeded=()=>Ca(s),s.onsuccess=()=>{const a=s.result,r=a.transaction(Ia,"readwrite"),i=r.objectStore(Ia),o=i.get(e);let l;o.onsuccess=()=>{if(null==o.result)return a.close(),n(new Error(`Cannot find model with path '${e}' in IndexedDB.`));{const s=i.delete(e),r=()=>{l=a.transaction(Na,"readwrite");const s=l.objectStore(Na).delete(e);s.onsuccess=()=>t(o.result.modelArtifactsInfo),s.onerror=e=>n(o.error)};s.onsuccess=r,s.onerror=e=>(r(),a.close(),n(o.error))}},o.onerror=e=>(a.close(),n(o.error)),r.oncomplete=()=>{null==l?a.close():l.oncomplete=()=>a.close()}},s.onerror=e=>n(s.error)}))}}const 
Aa="/",Ra="tensorflowjs_models",Fa="info",Da="model_topology",_a="weight_specs",Oa="weight_data",Ma="model_metadata";function La(e){return{info:[Ra,e,Fa].join(Aa),topology:[Ra,e,Da].join(Aa),weightSpecs:[Ra,e,_a].join(Aa),weightData:[Ra,e,Oa].join(Aa),modelMetadata:[Ra,e,Ma].join(Aa)}}function za(e){const t=e.split(Aa);if(t.length<3)throw new Error(`Invalid key format: ${e}`);return t.slice(1,t.length-1).join(Aa)}class Pa{constructor(e){if(!ue().getBool("IS_BROWSER")||"undefined"==typeof window||void 0===window.localStorage)throw new Error("The current environment does not support local storage.");if(this.LS=window.localStorage,null==e||!e)throw new Error("For local storage, modelPath must not be null, undefined or empty.");this.modelPath=e,this.keys=La(this.modelPath)}async save(e){if(e.modelTopology instanceof ArrayBuffer)throw new Error("BrowserLocalStorage.save() does not support saving model topology in binary formats yet.");{const t=JSON.stringify(e.modelTopology),n=JSON.stringify(e.weightSpecs),s=ba(e);try{this.LS.setItem(this.keys.info,JSON.stringify(s)),this.LS.setItem(this.keys.topology,t),this.LS.setItem(this.keys.weightSpecs,n),this.LS.setItem(this.keys.weightData,function(e){if(ma)return Buffer.from(e).toString("base64");const t=new Uint8Array(e);let n="";for(let e=0,s=t.length;e<s;e++)n+=String.fromCharCode(t[e]);return btoa(n)}(e.weightData));const a={format:e.format,generatedBy:e.generatedBy,convertedBy:e.convertedBy};return null!=e.signature&&(a.signature=e.signature),null!=e.userDefinedMetadata&&(a.userDefinedMetadata=e.userDefinedMetadata),null!=e.modelInitializer&&(a.modelInitializer=e.modelInitializer),this.LS.setItem(this.keys.modelMetadata,JSON.stringify(a)),{modelArtifactsInfo:s}}catch(e){throw this.LS.removeItem(this.keys.info),this.LS.removeItem(this.keys.topology),this.LS.removeItem(this.keys.weightSpecs),this.LS.removeItem(this.keys.weightData),this.LS.removeItem(this.keys.modelMetadata),new Error(`Failed to save model '${this.modelPath}' to local storage: size quota being exceeded is a possible cause of this failure: modelTopologyBytes=${s.modelTopologyBytes}, weightSpecsBytes=${s.weightSpecsBytes}, weightDataBytes=${s.weightDataBytes}.`)}}}async load(){const e=JSON.parse(this.LS.getItem(this.keys.info));if(null==e)throw new Error(`In local storage, there is no model with name '${this.modelPath}'`);if("JSON"!==e.modelTopologyType)throw new Error("BrowserLocalStorage does not support loading non-JSON model topology yet.");const t={},n=JSON.parse(this.LS.getItem(this.keys.topology));if(null==n)throw new Error(`In local storage, the topology of model '${this.modelPath}' is missing.`);t.modelTopology=n;const s=JSON.parse(this.LS.getItem(this.keys.weightSpecs));if(null==s)throw new Error(`In local storage, the weight specs of model '${this.modelPath}' are missing.`);t.weightSpecs=s;const a=this.LS.getItem(this.keys.modelMetadata);if(null!=a){const e=JSON.parse(a);t.format=e.format,t.generatedBy=e.generatedBy,t.convertedBy=e.convertedBy,null!=e.signature&&(t.signature=e.signature),null!=e.userDefinedMetadata&&(t.userDefinedMetadata=e.userDefinedMetadata),null!=e.modelInitializer&&(t.modelInitializer=e.modelInitializer)}const r=this.LS.getItem(this.keys.weightData);if(null==r)throw new Error(`In local storage, the binary weight values of model '${this.modelPath}' are missing.`);return t.weightData=function(e){if(ma){const t=Buffer.from(e,"base64");return t.buffer.slice(t.byteOffset,t.byteOffset+t.byteLength)}const t=atob(e),n=new Uint8Array(t.length);for(let 
e=0;e<t.length;++e)n.set([t.charCodeAt(e)],e);return n.buffer}(r),t}}Pa.URL_SCHEME="localstorage://";const Ba=e=>{return ue().getBool("IS_BROWSER")&&!Array.isArray(e)&&e.startsWith(Pa.URL_SCHEME)?(t=e.slice(Pa.URL_SCHEME.length),new Pa(t)):null;var t};wa.registerSaveRouter(Ba),wa.registerLoadRouter(Ba);class Wa{constructor(){E(ue().getBool("IS_BROWSER"),(()=>"Current environment is not a web browser")),E("undefined"==typeof window||void 0!==window.localStorage,(()=>"Current browser does not appear to support localStorage")),this.LS=window.localStorage}async listModels(){const e={},t=Ra+Aa,n=Aa+Fa;for(let s=0;s<this.LS.length;++s){const a=this.LS.key(s);a.startsWith(t)&&a.endsWith(n)&&(e[za(a)]=JSON.parse(this.LS.getItem(a)))}return e}async removeModel(e){var t;const n=La(e=(t=e).startsWith(Pa.URL_SCHEME)?t.slice(Pa.URL_SCHEME.length):t);if(null==this.LS.getItem(n.info))throw new Error(`Cannot find model at path '${e}'`);const s=JSON.parse(this.LS.getItem(n.info));return this.LS.removeItem(n.info),this.LS.removeItem(n.topology),this.LS.removeItem(n.weightSpecs),this.LS.removeItem(n.weightData),s}}class Va{constructor(){this.managers={}}static getInstance(){return null==Va.instance&&(Va.instance=new Va),Va.instance}static registerManager(e,t){E(null!=e,(()=>"scheme must not be undefined or null.")),e.endsWith("://")&&(e=e.slice(0,e.indexOf("://"))),E(e.length>0,(()=>"scheme must not be an empty string."));const n=Va.getInstance();E(null==n.managers[e],(()=>`A model store manager is already registered for scheme '${e}'.`)),n.managers[e]=t}static getManager(e){const t=this.getInstance().managers[e];if(null==t)throw new Error(`Cannot find model manager for scheme '${e}'`);return t}static getSchemes(){return Object.keys(this.getInstance().managers)}}class Ua{fetch(e,t){return fetch(e,t)}now(){return performance.now()}encode(e,t){if("utf-8"!==t&&"utf8"!==t)throw new Error(`Browser's encoder only supports utf-8, but got ${t}`);return null==this.textEncoder&&(this.textEncoder=new TextEncoder),this.textEncoder.encode(e)}decode(e,t){return new TextDecoder(t).decode(e)}}if(ue().get("IS_BROWSER")){ue().setPlatform("browser",new Ua);try{Va.registerManager(Pa.URL_SCHEME,new Wa)}catch(e){}try{Va.registerManager(Sa.URL_SCHEME,new Ea)}catch(e){}}let Ga;function Ha(e,t="float32",n){return t=t||"float32",ae(e),new Es(e,t,n)}ue().get("IS_NODE")&&ue().setPlatform("node",new class{constructor(){this.util=n(628),this.textEncoder=new this.util.TextEncoder}fetch(e,t){return null!=ue().global.fetch?ue().global.fetch(e,t):(null==Ga&&(Ga=n(410)),Ga(e,t))}now(){const e=process.hrtime();return 1e3*e[0]+e[1]/1e6}encode(e,t){if("utf-8"!==t&&"utf8"!==t)throw new Error(`Node built-in encoder only supports utf-8, but got ${t}`);return this.textEncoder.encode(e)}decode(e,t){return 0===e.length?"":new this.util.TextDecoder(t).decode(e)}});const ja=la({cast_:function(e,t){const n=ia(e,"x","cast");if(!function(e){return"bool"===e||"complex64"===e||"float32"===e||"int32"===e||"string"===e}(t))throw new Error(`Failed to cast to unknown dtype ${t}`);if("string"===t&&"string"!==n.dtype||"string"!==t&&"string"===n.dtype)throw new Error("Only strings can be casted to strings");const s={x:n},a={dtype:t};return Zs.runKernel(Oe,s,a)}}),Ka=la({clone_:function(e){const t={x:ia(e,"x","clone","string_or_numeric")};return Zs.runKernel(It,t)}});function qa(e){return new Promise((e=>setTimeout(e))).then(e)}Js(),Rs={buffer:Ha,cast:ja,clone:Ka,print:function(e,t=!1){console.log(e.toString(t))}};class 
Xa{constructor(e){if(!ue().getBool("IS_BROWSER"))throw new Error("browserDownloads() cannot proceed because the current environment is not a browser.");e.startsWith(Xa.URL_SCHEME)&&(e=e.slice(Xa.URL_SCHEME.length)),null!=e&&0!==e.length||(e="model"),this.modelTopologyFileName=e+".json",this.weightDataFileName=e+".weights.bin"}async save(e){if("undefined"==typeof document)throw new Error("Browser downloads are not supported in this environment since `document` is not present");const t=window.URL.createObjectURL(new Blob([e.weightData],{type:"application/octet-stream"}));if(e.modelTopology instanceof ArrayBuffer)throw new Error("BrowserDownloads.save() does not support saving model topology in binary formats yet.");{const n=[{paths:["./"+this.weightDataFileName],weights:e.weightSpecs}],s={modelTopology:e.modelTopology,format:e.format,generatedBy:e.generatedBy,convertedBy:e.convertedBy,weightsManifest:n};null!=e.signature&&(s.signature=e.signature),null!=e.userDefinedMetadata&&(s.userDefinedMetadata=e.userDefinedMetadata),null!=e.modelInitializer&&(s.modelInitializer=e.modelInitializer);const a=window.URL.createObjectURL(new Blob([JSON.stringify(s)],{type:"application/json"})),r=null==this.jsonAnchor?document.createElement("a"):this.jsonAnchor;if(r.download=this.modelTopologyFileName,r.href=a,await qa((()=>r.dispatchEvent(new MouseEvent("click")))),null!=e.weightData){const e=null==this.weightDataAnchor?document.createElement("a"):this.weightDataAnchor;e.download=this.weightDataFileName,e.href=t,await qa((()=>e.dispatchEvent(new MouseEvent("click"))))}return{modelArtifactsInfo:ba(e)}}}}function Ya(e,t,n,s){!function(e){E(null!=e&&Array.isArray(e)&&e.length>0,(()=>"promises must be a none empty array"))}(e),function(e,t){E(e>=0&&e<=1,(()=>`Progress fraction must be in range [0, 1], but got startFraction ${e}`)),E(t>=0&&t<=1,(()=>`Progress fraction must be in range [0, 1], but got endFraction ${t}`)),E(t>=e,(()=>`startFraction must be no more than endFraction, but got startFraction ${e} and endFraction ${t}`))}(n=null==n?0:n,s=null==s?1:s);let a=0;return Promise.all(e.map((r=>(r.then((r=>{const i=n+ ++a/e.length*(s-n);return t(i),r})),r))))}async function Ja(e,t){null==t&&(t={});const n=null==t.fetchFunc?ue().platform.fetch:t.fetchFunc,s=e.map((e=>n(e,t.requestInit,{isBinary:!0}))),a=(null==t.onProgress?await Promise.all(s):await Ya(s,t.onProgress,0,.5)).map((e=>e.arrayBuffer()));return null==t.onProgress?await Promise.all(a):await Ya(a,t.onProgress,.5,1)}Xa.URL_SCHEME="downloads://",wa.registerSaveRouter((e=>ue().getBool("IS_BROWSER")&&!Array.isArray(e)&&e.startsWith(Xa.URL_SCHEME)?function(e="model"){return new Xa(e)}(e.slice(Xa.URL_SCHEME.length)):null));class Za{constructor(e,t){if(this.DEFAULT_METHOD="POST",null==t&&(t={}),this.weightPathPrefix=t.weightPathPrefix,this.onProgress=t.onProgress,this.weightUrlConverter=t.weightUrlConverter,null!=t.fetchFunc?(E("function"==typeof t.fetchFunc,(()=>"Must pass a function that matches the signature of `fetch` (see https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)")),this.fetch=t.fetchFunc):this.fetch=ue().platform.fetch,E(null!=e&&e.length>0,(()=>"URL path for http must not be null, undefined or empty.")),Array.isArray(e)&&E(2===e.length,(()=>`URL paths for http must have a length of 2, (actual length is ${e.length}).`)),this.path=e,null!=t.requestInit&&null!=t.requestInit.body)throw new Error("requestInit is expected to have no pre-existing body, but has one.");this.requestInit=t.requestInit||{}}async save(e){if(e.modelTopology 
instanceof ArrayBuffer)throw new Error("BrowserHTTPRequest.save() does not support saving model topology in binary formats yet.");const t=Object.assign({method:this.DEFAULT_METHOD},this.requestInit);t.body=new FormData;const n=[{paths:["./model.weights.bin"],weights:e.weightSpecs}],s={modelTopology:e.modelTopology,format:e.format,generatedBy:e.generatedBy,convertedBy:e.convertedBy,weightsManifest:n};null!=e.signature&&(s.signature=e.signature),null!=e.userDefinedMetadata&&(s.userDefinedMetadata=e.userDefinedMetadata),null!=e.modelInitializer&&(s.modelInitializer=e.modelInitializer),t.body.append("model.json",new Blob([JSON.stringify(s)],{type:"application/json"}),"model.json"),null!=e.weightData&&t.body.append("model.weights.bin",new Blob([e.weightData],{type:"application/octet-stream"}),"model.weights.bin");const a=await this.fetch(this.path,t);if(a.ok)return{modelArtifactsInfo:ba(e),responses:[a]};throw new Error(`BrowserHTTPRequest.save() failed due to HTTP response status ${a.status}.`)}async load(){const e=await this.fetch(this.path,this.requestInit);if(!e.ok)throw new Error(`Request to ${this.path} failed with status code ${e.status}. Please verify this URL points to the model JSON of the model to load.`);let t;try{t=await e.json()}catch(e){let t=`Failed to parse model JSON of response from ${this.path}.`;throw this.path.endsWith(".pb")?t+=" Your path contains a .pb file extension. Support for .pb models have been removed in TensorFlow.js 1.0 in favor of .json models. You can re-convert your Python TensorFlow model using the TensorFlow.js 1.0 conversion scripts or you can convert your.pb models with the 'pb2json'NPM script in the tensorflow/tfjs-converter repository.":t+=" Please make sure the server is serving valid JSON for this request.",new Error(t)}const n=t.modelTopology,s=t.weightsManifest,a=t.generatedBy,r=t.convertedBy,i=t.format,o=t.signature,l=t.userDefinedMetadata;if(null==n&&null==s)throw new Error(`The JSON from HTTP path ${this.path} contains neither model topology or manifest for weights.`);let u,c;if(null!=s){const e=await this.loadWeights(s);[u,c]=e}const h={modelTopology:n,weightSpecs:u,weightData:c,generatedBy:a,convertedBy:r,format:i};null!=o&&(h.signature=o),null!=l&&(h.userDefinedMetadata=l);const p=t.modelInitializer;return p&&(h.modelInitializer=p),h}async loadWeights(e){const t=Array.isArray(this.path)?this.path[1]:this.path,[n,s]=function(e){const t=e.lastIndexOf("/"),n=e.lastIndexOf("?");return[e.substring(0,t)+"/",n>t?e.substring(n):""]}(t),a=this.weightPathPrefix||n,r=[];for(const t of e)r.push(...t.weights);const i=[],o=[];for(const t of e)for(const e of t.paths)null!=this.weightUrlConverter?o.push(this.weightUrlConverter(e)):i.push(a+e+s);return this.weightUrlConverter&&i.push(...await Promise.all(o)),[r,ya(await Ja(i,{requestInit:this.requestInit,fetchFunc:this.fetch,onProgress:this.onProgress}))]}}function Qa(e){return null!=e.match(Za.URL_SCHEME_REGEX)}Za.URL_SCHEME_REGEX=/^https?:\/\//;const er=(e,t)=>{if("undefined"==typeof fetch&&(null==t||null==t.fetchFunc))return null;{let n=!0;if(n=Array.isArray(e)?e.every((e=>Qa(e))):Qa(e),n)return tr(e,t)}return null};function tr(e,t){return new Za(e,t)}function nr(e,t){return tr(e,t)}let sr;wa.registerSaveRouter(er),wa.registerLoadRouter(er);const ar=la({fromPixels_:function(e,t=3){if(t>4)throw new Error("Cannot construct Tensor with more than 4 channels from pixels.");if(null==e)throw new Error("pixels passed to tf.browser.fromPixels() can not be null");let n=!1,s=!1,a=!1,r=!1,i=!1,o=!1;if(e.data 
instanceof Uint8Array)n=!0;else if("undefined"!=typeof ImageData&&e instanceof ImageData)s=!0;else if("undefined"!=typeof HTMLVideoElement&&e instanceof HTMLVideoElement)a=!0;else if("undefined"!=typeof HTMLImageElement&&e instanceof HTMLImageElement)r=!0;else if(null!=e.getContext)i=!0;else{if(!("undefined"!=typeof ImageBitmap&&e instanceof ImageBitmap))throw new Error(`pixels passed to tf.browser.fromPixels() must be either an HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData in browser, or OffscreenCanvas, ImageData in webworker or {data: Uint32Array, width: number, height: number}, but was ${e.constructor.name}`);o=!0}if(a){const t=2;if(a&&e.readyState<t)throw new Error("The video element has not loaded data yet. Please wait for `loadeddata` event on the <video> element.")}if(null!=cs(ss,Zs.backendName)){const n={pixels:e},s={numChannels:t};return Zs.runKernel(ss,n,s)}const[l,u]=a?[e.videoWidth,e.videoHeight]:[e.width,e.height];let c,h;if(i?c=e.getContext("2d").getImageData(0,0,l,u).data:s||n?c=e.data:(r||a||o)&&(null==sr&&(sr=document.createElement("canvas").getContext("2d")),sr.canvas.width=l,sr.canvas.height=u,sr.drawImage(e,0,0,l,u),c=sr.getImageData(0,0,l,u).data),4===t)h=new Int32Array(c);else{const e=l*u;h=new Int32Array(e*t);for(let n=0;n<e;n++)for(let e=0;e<t;++e)h[n*t+e]=c[4*n+e]}return function(e,t,n){if(R(e),null!=t&&3!==t.length)throw new Error("tensor3d() requires shape to have three numbers");const s=sa(e,n);if(3!==s.length&&1!==s.length)throw new Error("tensor3d() requires values to be number[][][] or flat/TypedArray");if(1===s.length&&null==t)throw new Error("tensor3d() requires shape to be provided when `values` are a flat array");return ca(e,t,s,n)}(h,[u,l,t],"int32")}});function rr(e,t,n){const s=e.shape.length;E(s===t.length,(()=>`Error in slice${s}D: Length of begin ${t} must match the rank of the array (${s}).`)),E(s===n.length,(()=>`Error in slice${s}D: Length of size ${n} must match the rank of the array (${s}).`));for(let a=0;a<s;++a)E(t[a]+n[a]<=e.shape[a],(()=>`Error in slice${s}D: begin[${a}] + size[${a}] (${t[a]+n[a]}) would overflow input.shape[${a}] (${e.shape[a]})`))}function ir(e){const t=[];let n=0;for(;e>0;)1&e&&t.push(n),e/=2,n++;return t}function or(e,t,n){const s=[];for(let a=0;a<e.length;a++)s[a]=Math.ceil((t[a]-e[a])/n[a]);return s}function lr(e,t,n,s){const a=[...e];for(let e=a.length;e<s.length;e++)a.push(1);for(let e=0;e<n;e++)0===e?a[t]=1:(a.splice(t,0,1),a.pop());return a}function ur(e,t,n){return n<=e?n:n-(t-1)}function cr(e,t){const n=[];for(let s=0;s<e;s++)n.push(t+s);return n}function hr(e,t,n,s,a,r,i,o,l){const u=e.length;let c=new Array(u),h=new Array(u),p=new Array(u);if(t.length&&n>0){const l=t[0],u=n+1;c=pr(i,l,u,s,e),h=dr(o,l,u,a,e),p=lr(r,l,u,e)}else for(let t=0;t<u;t++)c[t]=mr(i,s,r,e,t,l),h[t]=gr(o,a,r,e,t,l),p[t]=fr(r,t,l);return{begin:c,end:h,strides:p}}function pr(e,t,n,s,a){const r=[...a],i=cr(n,t);for(let a=0;a<r.length;a++)if(i.indexOf(a)>-1)r[a]=0;else{const i=ur(t,n,a);let o=s[i];e&1<<i&&(o=0),r[a]=o}return r}function dr(e,t,n,s,a){const r=[...a],i=cr(n,t);for(let a=0;a<r.length;a++)if(i.indexOf(a)>-1)r[a]=Number.MAX_SAFE_INTEGER;else{const i=ur(t,n,a);let o=s[i];e&1<<i&&(o=Number.MAX_SAFE_INTEGER),r[a]=o}for(let e=0;e<r.length;e++){const t=a[e];r[e]<0&&(r[e]+=t),r[e]=S(0,r[e],a[e])}return r}function fr(e,t,n){let s=e[t];return(n&1<<t||null==s)&&(s=1),s}function mr(e,t,n,s,a,r){let i=t[a];const o=n[a]||1;(e&1<<a||r&1<<a||null==i)&&(i=o>0?Number.MIN_SAFE_INTEGER:Number.MAX_SAFE_INTEGER);const l=s[a];return 
i<0&&(i+=l),i=S(0,i,l-1),i}function gr(e,t,n,s,a,r){let i=t[a];const o=n[a]||1;(e&1<<a||r&1<<a||null==i)&&(i=o>0?Number.MAX_SAFE_INTEGER:Number.MIN_SAFE_INTEGER);const l=s[a];return i<0&&(i+=l),i=o>0?S(0,i,l):S(-1,i,l-1),i}function yr(e,t,n){let s=n.length;for(let e=0;e<n.length;e++)if(n[e]>1){s=e;break}for(let a=s+1;a<n.length;a++)if(t[a]>0||n[a]!==e[a])return!1;return!0}function br(e,t){let n=e.length>0?e[e.length-1]:1;for(let s=0;s<e.length-1;s++)n+=e[s]*t[s];return n}function xr(e,t,n){let s;const a=e.shape.length;let r;return s="number"==typeof t?[t,...new Array(a-1).fill(0)]:t.length<a?t.concat(new Array(a-t.length).fill(0)):t.slice(),s.forEach((e=>{E(-1!==e,(()=>"slice() does not support negative begin indexing."))})),r=null==n?new Array(a).fill(-1):"number"==typeof n?[n,...new Array(a-1).fill(-1)]:n.length<a?n.concat(new Array(a-n.length).fill(-1)):n,r=r.map(((t,n)=>t>=0?t:(E(-1===t,(()=>`Negative size values should be exactly -1 but got ${t} for the slice() size at index ${n}.`)),e.shape[n]-s[n]))),[s,r]}function wr(e,t,n,s,a,r,i,o,l){let u=t.slice(),c=n.slice(),h=s;null==s&&(h=new Array(u.length));const p=ir(i);if(p.length>1)throw new Error("Multiple ellipses in slice is not allowed.");if(0!==i&&0!==o)throw new Error("Using both ellipsisMask and newAxisMask is not yet supported.");if(0!==i&&0!==l)throw new Error("Using both ellipsisMask and shrinkAxisMask is not yet supported.");const d=e.length-u.length,f=ir(o),m=e.slice();f.forEach((e=>{u[e]=0,c[e]=1,m.splice(e,0,1)}));const{begin:g,end:y,strides:b}=hr(m,p,d,u,c,h,a,r,i);u=g,c=y,h=b;const x=ir(l);x.forEach((e=>{c[e]=u[e]+1,h[e]=1}));const w=or(u,c,h),k=w.filter(((e,t)=>-1===x.indexOf(t)));return{nonStrided:h.every((e=>1===e)),$begin:u,$end:c,$strides:h,size:w,newShape:m,outShape:k}}class kr{getClassName(){return this.constructor.className}static fromConfig(e,t){return new e(t)}}class vr{constructor(){this.classNameMap={}}static getMap(){return null==vr.instance&&(vr.instance=new vr),vr.instance}static register(e){vr.getMap().classNameMap[e.className]=[e,e.fromConfig]}}function Nr(e){E(null!=e.className,(()=>"Class being registered does not have the static className property defined.")),E("string"==typeof e.className,(()=>"className is required to be a string, but got type "+typeof e.className)),E(e.className.length>0,(()=>"Class being registered has an empty-string as its className, which is disallowed.")),vr.register(e)}function Ir(){return Zs}function $r(){return Zs.memory()}function Cr(e,t){return Zs.tidy(e,t)}function Sr(e){js(e).forEach((e=>e.dispose()))}function Tr(e){return Zs.keep(e)}function Er(e){return Zs.setBackend(e)}function Ar(e,t,n=1){return Zs.registerBackend(e,t,n)}function Rr(e){return Zs.customGrad(e)}function Fr(e,t){if((H(e)&&"string"!==t||Array.isArray(e))&&"complex64"!==t)throw new Error("Error creating a new Scalar: value must be a primitive (number|boolean|string)");if("string"===t&&H(e)&&!(e instanceof Uint8Array))throw new Error("When making a scalar from encoded string, the value must be `Uint8Array`.");return ca(e,[],[],t)}Fs=function(e){ue().getBool("DEPRECATION_WARNINGS_ENABLED")&&console.warn(e+" You can disable deprecation warnings with tf.disableDeprecationWarnings().")};class Dr extends kr{minimize(e,t=!1,n){const{value:s,grads:a}=this.computeGradients(e,n);if(null!=n){const e=n.map((e=>({name:e.name,tensor:a[e.name]})));this.applyGradients(e)}else this.applyGradients(a);return Sr(a),t?s:(s.dispose(),null)}get iterations(){return 
null==this.iterations_&&(this.iterations_=0),this.iterations_}incrementIterations(){this.iterations_=this.iterations+1}computeGradients(e,t){return function(e,t){E(Y(e),(()=>"The f passed in variableGrads(f) must be a function")),E(null==t||Array.isArray(t)&&t.every((e=>e instanceof Os)),(()=>"The varList passed in variableGrads(f, varList) must be an array of variables"));const n=null!=t;if(!n){t=[];for(const e in Zs.registeredVariables)t.push(Zs.registeredVariables[e])}const s=n?t.filter((e=>!e.trainable)):null,a=t.length;E((t=t.filter((e=>e.trainable))).length>0,(()=>`variableGrads() expects at least one of the input variables to be trainable, but none of the ${a} variables is trainable.`));const{value:r,grads:i}=Zs.gradients(e,t,null,!0);E(i.some((e=>null!=e)),(()=>"Cannot find a connection between any variable and the result of the loss function y=f(x). Please make sure the operations that use variables are inside the function f passed to minimize().")),E(0===r.rank,(()=>`The f passed in variableGrads(f) must return a scalar, but it returned a rank-${r.rank} tensor`));const o={};return t.forEach(((e,t)=>{null!=i[t]&&(o[e.name]=i[t])})),null!=s&&s.forEach((e=>o[e.name]=null)),{value:r,grads:o}}(e,t)}dispose(){null!=this.iterations_&&Sr(this.iterations_)}async saveIterations(){return null==this.iterations_&&(this.iterations_=0),{name:"iter",tensor:Fr(this.iterations_,"int32")}}async getWeights(){throw new Error("getWeights() is not implemented for this optimizer yet.")}async setWeights(e){throw new Error(`setWeights() is not implemented for this optimizer class ${this.getClassName()}`)}async extractIterations(e){return this.iterations_=(await e[0].tensor.data())[0],e.slice(1)}}Object.defineProperty(Dr,Symbol.hasInstance,{value:e=>null!=e.minimize&&null!=e.computeGradients&&null!=e.applyGradients});const _r=la({abs_:function(e){const t=ia(e,"x","abs");if("complex64"===t.dtype){const e={x:t};return Zs.runKernel(Pe,e)}{const e={x:t};return Zs.runKernel(fe,e)}}}),Or=la({add_:function(e,t){let n=ia(e,"a","add"),s=ia(t,"b","add");[n,s]=Gs(n,s);const a={a:n,b:s};return Zs.runKernel(ye,a)}}),Mr=la({all_:function(e,t=null,n=!1){const s={x:ia(e,"x","all","bool")},a={axis:t,keepDims:n};return Zs.runKernel(xe,s,a)}}),Lr=la({any_:function(e,t=null,n=!1){const s={x:ia(e,"x","any","bool")},a={axis:t,keepDims:n};return Zs.runKernel(we,s,a)}}),zr=la({argMax_:function(e,t=0){const n={x:ia(e,"x","argMax")},s={axis:t};return Zs.runKernel(ke,n,s)}});function Pr(e,t,n,s,a="NHWC",r){return Vr(e,[...t,e[3]],n,r,s,null,null,Jr(a))}function Br(e,t,n,s,a,r,i="channelsLast"){const[o,l]=Hr(t);let u;if("channelsLast"===i)u=[o,l,e[3],e[3]];else{if("channelsFirst"!==i)throw new Error(`Unknown dataFormat ${i}`);u=[o,l,e[1],e[1]]}return Vr(e,u,n,s,a,r,!1,i)}function Wr(e,t,n,s,a,r,i="NDHWC"){const[o,l,u]=jr(t);let c,h;if("NDHWC"===i)h="channelsLast",c=[o,l,u,e[4],e[4]];else{if("NCDHW"!==i)throw new Error(`Unknown dataFormat ${i}`);h="channelsFirst",c=[o,l,u,e[1],e[1]]}return Ur(e,c,n,s,a,!1,h,r)}function Vr(e,t,n,s,a,r,i=!1,o="channelsLast"){let[l,u,c,h]=[-1,-1,-1,-1];if("channelsLast"===o)[l,u,c,h]=e;else{if("channelsFirst"!==o)throw new Error(`Unknown dataFormat ${o}`);[l,h,u,c]=e}const[p,d,,f]=t,[m,g]=Hr(n),[y,b]=Hr(s),x=Kr(p,y),w=Kr(d,b),{padInfo:k,outHeight:v,outWidth:N}=function(e,t,n,s,a,r,i,o,l){let u,c,h;if("number"==typeof e){u={top:e,bottom:e,left:e,right:e,type:0===e?"VALID":"NUMBER"};const a=function(e,t,n,s,a){null==s&&(s=Gr(e,t,n));const 
r=e[1];return[qr((e[0]-t+2*s)/n+1,a),qr((r-t+2*s)/n+1,a)]}([t,n],r,s,e,o);c=a[0],h=a[1]}else if("same"===e){c=Math.ceil(t/s),h=Math.ceil(n/a);const e=Math.max(0,(c-1)*s+r-t),o=Math.max(0,(h-1)*a+i-n),l=Math.floor(e/2),p=e-l,d=Math.floor(o/2);u={top:l,bottom:p,left:d,right:o-d,type:"SAME"}}else if("valid"===e)u={top:0,bottom:0,left:0,right:0,type:"VALID"},c=Math.ceil((t-r+1)/s),h=Math.ceil((n-i+1)/a);else{if("object"!=typeof e)throw Error(`Unknown padding parameter: ${e}`);{const p="channelsLast"===l?e[1][0]:e[2][0],d="channelsLast"===l?e[1][1]:e[2][1],f="channelsLast"===l?e[2][0]:e[3][0],m="channelsLast"===l?e[2][1]:e[3][1];u={top:p,bottom:d,left:f,right:m,type:0===p&&0===d&&0===f&&0===m?"VALID":"EXPLICIT"},c=qr((t-r+p+d)/s+1,o),h=qr((n-i+f+m)/a+1,o)}}return{padInfo:u,outHeight:c,outWidth:h}}(a,u,c,m,g,x,w,r,o),I=i?f*h:f;let $;return"channelsFirst"===o?$=[l,I,v,N]:"channelsLast"===o&&($=[l,v,N,I]),{batchSize:l,dataFormat:o,inHeight:u,inWidth:c,inChannels:h,outHeight:v,outWidth:N,outChannels:I,padInfo:k,strideHeight:m,strideWidth:g,filterHeight:p,filterWidth:d,effectiveFilterHeight:x,effectiveFilterWidth:w,dilationHeight:y,dilationWidth:b,inShape:e,outShape:$,filterShape:t}}function Ur(e,t,n,s,a,r=!1,i="channelsLast",o){let[l,u,c,h,p]=[-1,-1,-1,-1,-1];if("channelsLast"===i)[l,u,c,h,p]=e;else{if("channelsFirst"!==i)throw new Error(`Unknown dataFormat ${i}`);[l,p,u,c,h]=e}const[d,f,m,,g]=t,[y,b,x]=jr(n),[w,k,v]=jr(s),N=Kr(d,w),I=Kr(f,k),$=Kr(m,v),{padInfo:C,outDepth:S,outHeight:T,outWidth:E}=function(e,t,n,s,a,r,i,o,l,u,c){let h,p,d,f;if("number"==typeof e){h={top:e,bottom:e,left:e,right:e,front:e,back:e,type:0===e?"VALID":"NUMBER"};const r=function(e,t,n,s,a,r){null==a&&(a=Gr(e,t,s));const i=e[1],o=e[2];return[qr((e[0]-t+2*a)/s+1,r),qr((i-t+2*a)/s+1,r),qr((o-t+2*a)/s+1,r),1]}([t,n,s,1],o,0,a,e,c);p=r[0],d=r[1],f=r[2]}else if("same"===e){p=Math.ceil(t/a),d=Math.ceil(n/r),f=Math.ceil(s/i);const e=(p-1)*a+o-t,c=(d-1)*r+l-n,m=(f-1)*i+u-s,g=Math.floor(e/2),y=e-g,b=Math.floor(c/2),x=c-b,w=Math.floor(m/2);h={top:b,bottom:x,left:w,right:m-w,front:g,back:y,type:"SAME"}}else{if("valid"!==e)throw Error(`Unknown padding parameter: ${e}`);h={top:0,bottom:0,left:0,right:0,front:0,back:0,type:"VALID"},p=Math.ceil((t-o+1)/a),d=Math.ceil((n-l+1)/r),f=Math.ceil((s-u+1)/i)}return{padInfo:h,outDepth:p,outHeight:d,outWidth:f}}(a,u,c,h,y,b,x,N,I,$,o),A=r?g*p:g;let R;return"channelsFirst"===i?R=[l,A,S,T,E]:"channelsLast"===i&&(R=[l,S,T,E,A]),{batchSize:l,dataFormat:i,inDepth:u,inHeight:c,inWidth:h,inChannels:p,outDepth:S,outHeight:T,outWidth:E,outChannels:A,padInfo:C,strideDepth:y,strideHeight:b,strideWidth:x,filterDepth:d,filterHeight:f,filterWidth:m,effectiveFilterDepth:N,effectiveFilterHeight:I,effectiveFilterWidth:$,dilationDepth:w,dilationHeight:k,dilationWidth:v,inShape:e,outShape:R,filterShape:t}}function Gr(e,t,n,s=1){const a=Kr(t,s);return Math.floor((e[0]*(n-1)-n+a)/2)}function Hr(e){return"number"==typeof e?[e,e,e]:2===e.length?[e[0],e[1],1]:e}function jr(e){return"number"==typeof e?[e,e,e]:e}function Kr(e,t){return t<=1?e:e+(e-1)*(t-1)}function qr(e,t){if(!t)return Math.trunc(e);switch(t){case"round":return Math.round(e);case"ceil":return Math.ceil(e);case"floor":return Math.floor(e);default:throw new Error(`Unknown roundingMode ${t}`)}}function Xr(e){const[t,n,s]=Hr(e);return 1===t&&1===n&&1===s}function Yr(e,t){return Xr(e)||Xr(t)}function Jr(e){if("NHWC"===e)return"channelsLast";if("NCHW"===e)return"channelsFirst";throw new Error(`Unknown dataFormat ${e}`)}const Zr=la({reshape_:function(e,t){const 
n={x:ia(e,"x","reshape","string_or_numeric")},s={shape:t};return Zs.runKernel(bn,n,s)}}),Qr=la({avgPool_:function(e,t,n,s,a){const r=ia(e,"x","avgPool","float32");E(Yr(n,1),(()=>`Error in avgPool: Either strides or dilations must be 1. Got strides ${n} and dilations '1'`));let i=r,o=!1;3===r.rank&&(o=!0,i=Zr(r,[1,r.shape[0],r.shape[1],r.shape[2]])),E(4===i.rank,(()=>`Error in avgPool: x must be rank 4 but got rank ${i.rank}.`)),null!=a&&E(O(s),(()=>`Error in avgPool: pad must be an integer when using, dimRoundingMode ${a} but got pad ${s}.`));const l={x:i},u={filterSize:t,strides:n,pad:s,dimRoundingMode:a};let c=Zs.runKernel(Te,l,u);return c=ja(c,r.dtype),o?Zr(c,[c.shape[1],c.shape[2],c.shape[3]]):c}}),ei=la({avgPool3d_:function(e,t,n,s,a,r="NDHWC"){const i=ia(e,"x","avgPool3d","float32");let o=i,l=!1;4===i.rank&&(l=!0,o=Zr(i,[1,i.shape[0],i.shape[1],i.shape[2],i.shape[3]])),E(5===o.rank,(()=>`Error in avgPool3d: x must be rank 5 but got rank ${o.rank}.`)),E("NDHWC"===r,(()=>`Error in avgPool3d: Only NDHWC is currently supported, but got dataFormat of ${r}`)),null!=a&&E(O(s),(()=>`Error in avgPool3d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${s}.`));const u={x:o},c={filterSize:t,strides:n,pad:s,dimRoundingMode:a,dataFormat:r};let h=Zs.runKernel(Ae,u,c);return h=ja(h,o.dtype),l?Zr(h,[h.shape[1],h.shape[2],h.shape[3],h.shape[4]]):h}}),ti=la({batchNorm_:function(e,t,n,s,a,r){null==r&&(r=.001);const i=ia(e,"x","batchNorm"),o=ia(t,"mean","batchNorm"),l=ia(n,"variance","batchNorm");let u,c;null!=a&&(u=ia(a,"scale","batchNorm")),null!=s&&(c=ia(s,"offset","batchNorm")),E(o.rank===l.rank,(()=>"Batch normalization gradient requires mean and variance to have equal ranks.")),E(null==c||o.rank===c.rank,(()=>"Batch normalization gradient requires mean and offset to have equal ranks.")),E(null==u||o.rank===u.rank,(()=>"Batch normalization gradient requires mean and scale to have equal ranks."));const h={x:function(e){let t;return t=0===e.rank||1===e.rank?Zr(e,[1,1,1,e.size]):2===e.rank?Zr(e,[1,1,e.shape[0],e.shape[1]]):3===e.rank?Zr(e,[1,e.shape[0],e.shape[1],e.shape[2]]):e,t}(i),scale:u,offset:c,mean:o,variance:l},p={varianceEpsilon:r},d=Zs.runKernel(xt,h,p);return Zr(d,i.shape)}}),ni=la({batchNorm2d_:function(e,t,n,s,a,r){const i=ia(e,"x","batchNorm"),o=ia(t,"mean","batchNorm"),l=ia(n,"variance","batchNorm");let u,c;return null!=a&&(u=ia(a,"scale","batchNorm")),null!=s&&(c=ia(s,"offset","batchNorm")),E(2===i.rank,(()=>`Error in batchNorm2D: x must be rank 2 but got rank ${i.rank}.`)),E(2===o.rank||1===o.rank,(()=>`Error in batchNorm2D: mean must be rank 2 or rank 1 but got rank ${o.rank}.`)),E(2===l.rank||1===l.rank,(()=>`Error in batchNorm2D: variance must be rank 2 or rank 1 but got rank ${l.rank}.`)),null!=u&&E(2===u.rank||1===u.rank,(()=>`Error in batchNorm2D: scale must be rank 2 or rank 1 but got rank ${u.rank}.`)),null!=c&&E(2===c.rank||1===c.rank,(()=>`Error in batchNorm2D: offset must be rank 2 or rank 1 but got rank ${c.rank}.`)),ti(i,o,l,c,u,r)}}),si=la({batchNorm3d_:function(e,t,n,s,a,r){const i=ia(e,"x","batchNorm"),o=ia(t,"mean","batchNorm"),l=ia(n,"variance","batchNorm");let u,c;return null!=a&&(u=ia(a,"scale","batchNorm")),null!=s&&(c=ia(s,"offset","batchNorm")),E(3===i.rank,(()=>`Error in batchNorm3D: x must be rank 3 but got rank ${i.rank}.`)),E(3===o.rank||1===o.rank,(()=>`Error in batchNorm3D: mean must be rank 3 or rank 1 but got rank ${o.rank}.`)),E(3===l.rank||1===l.rank,(()=>`Error in batchNorm3D: variance must be rank 3 or rank 1 but got rank 
${l.rank}.`)),null!=u&&E(3===u.rank||1===u.rank,(()=>`Error in batchNorm3D: scale must be rank 3 or rank 1 but got rank ${u.rank}.`)),null!=c&&E(3===c.rank||1===c.rank,(()=>`Error in batchNorm3D: offset must be rank 3 or rank 1 but got rank ${c.rank}.`)),ti(i,o,l,c,u,r)}}),ai=la({batchNorm4d_:function(e,t,n,s,a,r){const i=ia(e,"x","batchNorm"),o=ia(t,"mean","batchNorm"),l=ia(n,"variance","batchNorm");let u,c;return null!=a&&(u=ia(a,"scale","batchNorm")),null!=s&&(c=ia(s,"offset","batchNorm")),E(4===i.rank,(()=>`Error in batchNorm4D: x must be rank 4 but got rank ${i.rank}.`)),E(4===o.rank||1===o.rank,(()=>`Error in batchNorm4D: mean must be rank 4 or rank 1 but got rank ${o.rank}.`)),E(4===l.rank||1===l.rank,(()=>`Error in batchNorm4D: variance must be rank 4 or rank 1 but got rank ${l.rank}.`)),null!=u&&E(4===u.rank||1===u.rank,(()=>`Error in batchNorm4D: scale must be rank 4 or rank 1 but got rank ${u.rank}.`)),null!=c&&E(4===c.rank||1===c.rank,(()=>`Error in batchNorm4D: offset must be rank 4 or rank 1 but got rank ${c.rank}.`)),ti(i,o,l,c,u,r)}}),ri=la({clipByValue_:function(e,t,n){const s=ia(e,"x","clipByValue");E(t<=n,(()=>`Error in clip: min (${t}) must be less than or equal to max (${n}).`));const a={x:s},r={clipValueMin:t,clipValueMax:n};return Zs.runKernel(Le,a,r)}}),ii=la({concat_:function(e,t=0){E(e.length>=1,(()=>"Pass at least one tensor to concat"));const n=oa(e,"tensors","concat","string_or_numeric");if("complex64"===n[0].dtype&&n.forEach((e=>{if("complex64"!==e.dtype)throw new Error(`Cannot concatenate complex64 tensors with a tensor\n with dtype ${e.dtype}. `)})),1===n.length)return Ka(n[0]);const s=n,a={axis:t};return Zs.runKernel(Be,s,a)}}),oi=la({concat1d_:function(e){return ii(e,0)}}),li=la({concat2d_:function(e,t){return ii(e,t)}}),ui=la({concat3d_:function(e,t){return ii(e,t)}}),ci=la({concat4d_:function(e,t){return ii(e,t)}}),hi=la({conv2d_:function(e,t,n,s,a="NHWC",r=[1,1],i){const o=ia(e,"x","conv2d"),l=ia(t,"filter","conv2d");let u=o,c=!1;3===o.rank&&(c=!0,u=Zr(o,[1,o.shape[0],o.shape[1],o.shape[2]])),E(4===u.rank,(()=>`Error in conv2d: input must be rank 4, but got rank ${u.rank}.`)),E(4===l.rank,(()=>`Error in conv2d: filter must be rank 4, but got rank ${l.rank}.`)),null!=i&&E(O(s),(()=>`Error in conv2d: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`));const h="NHWC"===a?u.shape[3]:u.shape[1];E(h===l.shape[2],(()=>`Error in conv2d: depth of input (${h}) must match input depth for filter ${l.shape[2]}.`)),E(Yr(n,r),(()=>`Error in conv2D: Either strides or dilations must be 1. Got strides ${n} and dilations '${r}'`));const p={x:u,filter:l},d={strides:n,pad:s,dataFormat:a,dilations:r,dimRoundingMode:i},f=Zs.runKernel(We,p,d);return c?Zr(f,[f.shape[1],f.shape[2],f.shape[3]]):f}}),pi=la({conv1d_:function(e,t,n,s,a="NWC",r=1,i){const o=ia(e,"x","conv1d"),l=ia(t,"filter","conv1d");let u=o,c=!1;2===o.rank&&(c=!0,u=Zr(o,[1,o.shape[0],o.shape[1]])),E(3===u.rank,(()=>`Error in conv1d: input must be rank 3, but got rank ${u.rank}.`)),E(3===l.rank,(()=>`Error in conv1d: filter must be rank 3, but got rank ${l.rank}.`)),null!=i&&E(O(s),(()=>`Error in conv1d: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`)),E(u.shape[2]===l.shape[1],(()=>`Error in conv1d: depth of input (${u.shape[2]}) must match input depth for filter ${l.shape[1]}.`)),E(Yr(n,r),(()=>`Error in conv1D: Either stride or dilation must be 1. 
Got stride ${n} and dilation '${r}'`)),E("NWC"===a,(()=>`Error in conv1d: got dataFormat of ${a} but only NWC is currently supported.`));const h=Zr(l,[1,l.shape[0],l.shape[1],l.shape[2]]),p=Zr(u,[u.shape[0],1,u.shape[1],u.shape[2]]),d=hi(p,h,[1,n],s,"NHWC",[1,r],i);return Zr(d,c?[d.shape[2],d.shape[3]]:[d.shape[0],d.shape[2],d.shape[3]])}}),di=la({conv2DBackpropInput_:function(e,t,n,s,a,r="NHWC",i){E(e.length===t.rank,(()=>`Length of inShape (${e.length}) and rank of dy (${t.rank}) must match`));let o=e,l=t,u=!1;3===t.rank&&(u=!0,l=Zr(t,[1,t.shape[0],t.shape[1],t.shape[2]]),o=[1,e[0],e[1],e[2]]),E(4===o.length,(()=>`Error in conv2dDerInput: inShape must be length 4, but got length ${o.length}.`)),E(4===l.rank,(()=>`Error in conv2dDerInput: dy must be rank 4, but got rank ${l.rank}`)),E(4===n.rank,(()=>`Error in conv2dDerInput: filter must be rank 4, but got rank ${n.rank}`));const c="NHWC"===r?o[3]:o[1],h="NHWC"===r?l.shape[3]:l.shape[1];E(c===n.shape[2],(()=>`Error in conv2dDerInput: depth of input (${c}) must match input depth for filter ${n.shape[2]}.`)),E(h===n.shape[3],(()=>`Error in conv2dDerInput: depth of output (${h}) must match output depth for filter ${n.shape[3]}.`)),null!=i&&E(O(a),(()=>`Error in conv2dDerInput: pad must be an integer when using, dimRoundingMode ${i} but got pad ${a}.`));const p={dy:l,filter:n},d={strides:s,pad:a,dataFormat:r,dimRoundingMode:i,inputShape:o},f=Zs.runKernel(Ue,p,d);return u?Zr(f,[f.shape[1],f.shape[2],f.shape[3]]):f}}),fi=la({conv2dTranspose_:function(e,t,n,s,a,r){const i=ia(e,"x","conv2dTranspose"),o=ia(t,"filter","conv2dTranspose");return di(n,i,o,s,a,"NHWC",r)}}),mi=la({conv3d_:function(e,t,n,s,a="NDHWC",r=[1,1,1]){const i=ia(e,"x","conv3d"),o=ia(t,"filter","conv3d");let l=i,u=!1;4===i.rank&&(u=!0,l=Zr(i,[1,i.shape[0],i.shape[1],i.shape[2],i.shape[3]])),E(5===l.rank,(()=>`Error in conv3d: input must be rank 5, but got rank ${l.rank}.`)),E(5===o.rank,(()=>`Error in conv3d: filter must be rank 5, but got rank ${o.rank}.`)),E(l.shape[4]===o.shape[3],(()=>`Error in conv3d: depth of input (${l.shape[4]}) must match input depth for filter ${o.shape[3]}.`)),E(Yr(n,r),(()=>`Error in conv3D: Either strides or dilations must be 1. 
Got strides ${n} and dilations '${r}'`)),E("NDHWC"===a,(()=>`Error in conv3d: got dataFormat of ${a} but only NDHWC is currently supported.`));const c={x:l,filter:o},h={strides:n,pad:s,dataFormat:a,dilations:r},p=Zs.runKernel(Ge,c,h);return u?Zr(p,[p.shape[1],p.shape[2],p.shape[3],p.shape[4]]):p}}),gi=la({depthwiseConv2d_:function(e,t,n,s,a="NHWC",r=[1,1],i){const o=ia(e,"x","depthwiseConv2d"),l=ia(t,"filter","depthwiseConv2d");let u=o,c=!1;3===o.rank&&(c=!0,u=Zr(o,[1,o.shape[0],o.shape[1],o.shape[2]])),E(4===u.rank,(()=>`Error in depthwiseConv2d: input must be rank 4, but got rank ${u.rank}.`)),E(4===l.rank,(()=>`Error in depthwiseConv2d: filter must be rank 4, but got rank ${l.rank}.`)),E(u.shape[3]===l.shape[2],(()=>`Error in depthwiseConv2d: number of input channels (${u.shape[3]}) must match the inChannels dimension in filter ${l.shape[2]}.`)),null!=i&&E(O(s),(()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`));const h={x:u,filter:l},p={strides:n,pad:s,dataFormat:a,dilations:r,dimRoundingMode:i},d=Zs.runKernel(Qe,h,p);return c?Zr(d,[d.shape[1],d.shape[2],d.shape[3]]):d}}),yi=la({floorDiv_:function(e,t){let n=ia(e,"a","floorDiv"),s=ia(t,"b","floorDiv");[n,s]=Gs(n,s);const a={a:n,b:s};return Zs.runKernel(bt,a)}}),bi=la({div_:function(e,t){let n=ia(e,"a","div"),s=ia(t,"b","div");if([n,s]=Gs(n,s),"int32"===n.dtype&&"int32"===s.dtype)return yi(n,s);const a={a:n,b:s};return Zs.runKernel(it,a,{})}}),xi=la({elu_:function(e){const t={x:ia(e,"x","elu")};return Zs.runKernel(ot,t)}});function wi(e,t){const n=e.length,s=[];for(let a=0;a<n;a++){const r=n-1-a,i=e[r]||1;(t[t.length-1-a]||1)>1&&1===i&&s.unshift(r)}return s}function ki(e,t){const n=[];for(let s=0;s<t.length;s++){const a=e[e.length-s-1],r=t.length-s-1,i=t[r];(null==a||1===a&&i>1)&&n.unshift(r)}return n}function vi(e,t){const n=[],s=Math.max(e.length,t.length);for(let a=0;a<s;a++){let s=e[e.length-a-1];null==s&&(s=1);let r=t[t.length-a-1];if(null==r&&(r=1),1===s)n.unshift(r);else if(1===r)n.unshift(s);else{if(s!==r)throw Error(`Operands could not be broadcast together with shapes ${e} and ${t}.`);n.unshift(s)}}return n}const Ni=la({equal_:function(e,t){let n=ia(e,"a","equal"),s=ia(t,"b","equal");[n,s]=Gs(n,s),vi(n.shape,s.shape);const a={a:n,b:s};return Zs.runKernel(ct,a)}}),Ii=la({expandDims_:function(e,t=0){const n=ia(e,"x","expandDims","string_or_numeric");E(t<=n.rank,(()=>"Axis must be <= rank of the tensor"));const s={input:n},a={dim:t};return Zs.runKernel(pt,s,a)}}),$i=la({tile_:function(e,t){const n=ia(e,"x","tile","string_or_numeric");E(n.rank===t.length,(()=>`Error in transpose: rank of input ${n.rank} must match length of reps ${t}.`));const s={x:n},a={reps:t};return Zs.runKernel(qn,s,a)}}),Ci=la({eye_:function(e,t,n,s="float32"){null==t&&(t=e);const a=Ha([e,t],s),r=e<=t?e:t;for(let e=0;e<r;++e)a.set(1,e,e);const i=Zr(a.toTensor(),[e,t]);if(null==n)return i;if(1===n.length)return $i(Ii(i,0),[n[0],1,1]);if(2===n.length)return $i(Ii(Ii(i,0),0),[n[0],n[1],1,1]);if(3===n.length)return $i(Ii(Ii(Ii(i,0),0),0),[n[0],n[1],n[2],1,1]);throw new Error(`eye() currently supports only 1D and 2D batchShapes, but received ${n.length}D.`)}});function Si(e,t,n){const s={shape:e,value:t,dtype:n};return Zs.runKernel(mt,{},s)}const Ti=la({floor_:function(e){const t={x:ia(e,"x","floor")};return Zs.runKernel(yt,t)}}),Ei=la({gather_:function(e,t,n=0,s=0){const a={x:ia(e,"x","gather"),indices:ia(t,"indices","gather","int32")},r={axis:n,batchDims:s};return 
Zs.runKernel(wt,a,r)}}),Ai=la({greater_:function(e,t){let n=ia(e,"a","greater"),s=ia(t,"b","greater");[n,s]=Gs(n,s),vi(n.shape,s.shape);const a={a:n,b:s};return Zs.runKernel(vt,a)}}),Ri=la({greaterEqual_:function(e,t){let n=ia(e,"a","greaterEqual"),s=ia(t,"b","greaterEqual");[n,s]=Gs(n,s),vi(n.shape,s.shape);const a={a:n,b:s};return Zs.runKernel(Nt,a)}}),Fi=la({leakyRelu_:function(e,t=.2){const n={x:ia(e,"x","leakyRelu")},s={alpha:t};return Zs.runKernel(At,n,s)}}),Di=la({log_:function(e){const t={x:ia(e,"x","log")};return Zs.runKernel(_t,t)}}),_i=la({exp_:function(e){const t={x:ia(e,"x","exp")};return Zs.runKernel(ht,t)}}),Oi=la({max_:function(e,t=null,n=!1){const s={x:ia(e,"x","max")},a={reductionIndices:t,keepDims:n};return Zs.runKernel(Wt,s,a)}}),Mi=la({mul_:function(e,t){let n=ia(e,"a","mul"),s=ia(t,"b","mul");[n,s]=Gs(n,s);const a={a:n,b:s};return Zs.runKernel(en,a)}}),Li=la({sub_:function(e,t){let n=ia(e,"a","sub"),s=ia(t,"b","sub");[n,s]=Gs(n,s);const a={a:n,b:s};return Zs.runKernel(Un,a)}}),zi=la({sum_:function(e,t=null,n=!1){let s=ia(e,"x","sum");"bool"===s.dtype&&(s=ja(s,"int32"));const a={x:s},r={axis:t,keepDims:n};return Zs.runKernel(Ln,a,r)}}),Pi=la({logSoftmax_:function(e,t=-1){const n=ia(e,"logits","logSoftmax");if(-1===t&&(t=n.rank-1),t!==n.rank-1)throw Error(`Log Softmax along a non-last dimension is not yet supported. Logits was rank ${n.rank} and axis was ${t}`);return Rr(((e,n)=>{const s=Oi(e,t,!0),a=Li(e,s),r=Li(ja(a,"float32"),Di(zi(_i(a),t,!0)));return n([r]),{value:r,gradFunc:(e,n)=>{const[s]=n,a=_i(s);return Li(e,Mi(zi(e,t,!0),a))}}}))(n)}}),Bi=la({logicalAnd_:function(e,t){const n=ia(e,"a","logicalAnd","bool"),s=ia(t,"b","logicalAnd","bool");vi(n.shape,s.shape);const a={a:n,b:s};return Zs.runKernel(Mt,a)}}),Wi=la({maxPool_:function(e,t,n,s,a){const r=ia(e,"x","maxPool");let i=r,o=!1;3===r.rank&&(o=!0,i=Zr(r,[1,r.shape[0],r.shape[1],r.shape[2]])),E(4===i.rank,(()=>`Error in maxPool: input must be rank 4 but got rank ${i.rank}.`)),E(Yr(n,1),(()=>`Error in maxPool: Either strides or dilations must be 1. 
Got strides ${n} and dilations '1'`)),null!=a&&E(O(s),(()=>`Error in maxPool: pad must be an integer when using, dimRoundingMode ${a} but got pad ${s}.`));const l={x:i},u={filterSize:t,strides:n,pad:s,dimRoundingMode:a},c=Zs.runKernel(Ut,l,u);return o?Zr(c,[c.shape[1],c.shape[2],c.shape[3]]):c}}),Vi=la({maxPool3d_:function(e,t=[1,1,1],n,s,a,r="NDHWC"){const i=ia(e,"x","maxPool3d");let o=i,l=!1;4===i.rank&&(l=!0,o=Zr(i,[1,i.shape[0],i.shape[1],i.shape[2],i.shape[3]])),E(5===o.rank,(()=>`Error in maxPool3d: x must be rank 5 but got rank ${o.rank}.`)),E("NDHWC"===r,(()=>`Error in maxPool3d: Only NDHWC is currently supported, but got dataFormat of ${r}`)),null!=a&&E(O(s),(()=>`Error in maxPool3d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${s}.`));const u={x:o},c={filterSize:t,strides:n,pad:s,dimRoundingMode:a,dataFormat:r},h=Zs.runKernel(Ht,u,c);return l?Zr(h,[h.shape[1],h.shape[2],h.shape[3],h.shape[4]]):h}}),Ui=la({maximum_:function(e,t){let n=ia(e,"a","maximum"),s=ia(t,"b","maximum");[n,s]=Gs(n,s),"bool"===n.dtype&&(n=ja(n,"int32"),s=ja(s,"int32")),vi(n.shape,s.shape);const a={a:n,b:s};return Zs.runKernel(Vt,a)}}),Gi=la({mean_:function(e,t=null,n=!1){const s={x:ia(e,"x","mean")},a={axis:t,keepDims:n};return Zs.runKernel(qt,s,a)}}),Hi=la({min_:function(e,t=null,n=!1){const s={x:ia(e,"x","min")},a={axis:t,keepDims:n};return Zs.runKernel(Xt,s,a)}}),ji=la({minimum_:function(e,t){let n=ia(e,"a","minimum"),s=ia(t,"b","minimum");[n,s]=Gs(n,s),"bool"===n.dtype&&(n=ja(n,"int32"),s=ja(s,"int32")),vi(n.shape,s.shape);const a={a:n,b:s};return Zs.runKernel(Yt,a)}});function Ki(e,t){for(let n=0;n<e.length;++n)if(e[e.length-n-1]!==t-1-n)return!1;return!0}function qi(e,t,n){const s=e.length+t.length,a=[];let r=0,i=0;for(let o=0;o<s;o++)-1===n.indexOf(o)?a.push(e[r++]):a.push(t[i++]);return a}function Xi(e,t){const n=[],s=e.length;for(let a=0;a<s;a++)-1===t.indexOf(a)&&n.push(e[a]);return[n,t.map((t=>e[t]))]}function Yi(e,t){return qi(e,t.map((e=>1)),t)}function Ji(e,t,n){E(Ki(t,n),(()=>`${e} supports only inner-most axes for now. 
Got axes ${t} and rank-${n} input.`))}function Zi(e,t){if(Ki(e,t))return null;const n=[];for(let s=0;s<t;++s)-1===e.indexOf(s)&&n.push(s);return e.forEach((e=>n.push(e))),n}function Qi(e){return e.map(((e,t)=>[t,e])).sort(((e,t)=>e[1]-t[1])).map((e=>e[0]))}function eo(e,t){const n=[];for(let s=t-e;s<t;++s)n.push(s);return n}const to=la({square_:function(e){const t=ia(e,"x","square");return Zs.runKernel("Square",{x:t},{})}}),no=la({moments_:function(e,t=null,n=!1){const s=B(t,(e=ia(e,"x","moments")).shape),a=Gi(e,s,n);let r=a.shape;n||(r=Yi(a.shape,s));const i=to(Li(ja(e,"float32"),Zr(a,r)));return{mean:a,variance:Gi(i,s,n)}}}),so=la({neg_:function(e){const t={x:ia(e,"x","neg")};return Zs.runKernel(tn,t)}}),ao=la({notEqual_:function(e,t){let n=ia(e,"a","notEqual"),s=ia(t,"b","notEqual");[n,s]=Gs(n,s),vi(n.shape,s.shape);const a={a:n,b:s};return Zs.runKernel(nn,a)}}),ro=la({oneHot_:function(e,t,n=1,s=0){if(t<2)throw new Error(`Error in oneHot: depth must be >=2, but it is ${t}`);const a={indices:ia(e,"indices","oneHot","int32")},r={depth:t,onValue:n,offValue:s};return Zs.runKernel(ln,a,r)}});function io(e,t="float32"){if("complex64"===t){const t=io(e,"float32"),n=io(e,"float32");return ua(t,n)}const n=ne(D(e),t);return Zs.makeTensor(n,e,t)}function oo(e,t="float32"){if("complex64"===t){const t=oo(e,"float32"),n=io(e,"float32");return ua(t,n)}const n=te(D(e),t);return Zs.makeTensor(n,e,t)}const lo=la({onesLike_:function(e){const t={x:ia(e,"x","onesLike")};return Zs.runKernel(on,t)}}),uo=la({pad_:function(e,t,n=0){const s=ia(e,"x","pad");if(0===s.rank)throw new Error("pad(scalar) is not defined. Pass non-scalar to pad");const a={paddings:t,constantValue:n},r={x:s};return Zs.runKernel(cn,r,a)}}),co=la({prelu_:function(e,t){const n={x:ia(e,"x","prelu"),alpha:ia(t,"alpha","prelu")};return Zs.runKernel(pn,n)}});var ho=n(377);class po{constructor(e,t,n,s,a){this.mean=e,this.stdDev=t,this.dtype=n,this.nextVal=NaN,this.truncated=s,this.truncated&&(this.upper=this.mean+2*this.stdDev,this.lower=this.mean-2*this.stdDev);const r=a||Math.random();this.random=ho.alea(r.toString())}nextValue(){if(!isNaN(this.nextVal)){const e=this.nextVal;return this.nextVal=NaN,e}let e,t,n=!1;for(;!n;){let s,a,r;do{s=2*this.random()-1,a=2*this.random()-1,r=s*s+a*a}while(r>=1||0===r);const i=Math.sqrt(-2*Math.log(r)/r);e=this.mean+this.stdDev*s*i,t=this.mean+this.stdDev*a*i,this.truncated&&!this.isValidTruncated(e)||(n=!0)}return this.truncated&&!this.isValidTruncated(t)||(this.nextVal=this.convertValue(t)),this.convertValue(e)}convertValue(e){return null==this.dtype||"float32"===this.dtype?e:Math.round(e)}isValidTruncated(e){return e<=this.upper&&e>=this.lower}}class fo{constructor(e=0,t=1,n,s){if(this.canReturnFloat=()=>null==this.dtype||"float32"===this.dtype,this.min=e,this.range=t-e,this.dtype=n,null==s&&(s=Math.random()),"number"==typeof s&&(s=s.toString()),!this.canReturnFloat()&&this.range<=1)throw new Error(`The difference between ${e} - ${t} <= 1 and dtype is not float`);this.random=ho.alea(s)}convertValue(e){return this.canReturnFloat()?e:Math.round(e)}nextValue(){return this.convertValue(this.min+this.range*this.random())}}const mo=la({randomNormal_:function(e,t=0,n=1,s,a){if(null!=s&&"bool"===s)throw new Error(`Unsupported data type ${s}`);const r=new po(t,n,s,!1,a),i=Ha(e,s);for(let e=0;e<i.values.length;e++)i.values[e]=r.nextValue();return i.toTensor()}}),go=la({randomUniform_:function(e,t=0,n=1,s="float32",a){const r=Ha(e,s),i=new fo(t,n,null,a);for(let 
e=0;e<r.values.length;e++)r.values[e]=i.nextValue();return r.toTensor()}}),yo=la({relu_:function(e){const t={x:ia(e,"x","relu")};return Zs.runKernel(yn,t)}}),bo=la({reverse_:function(e,t){const n={x:ia(e,"x","reverse")},s={dims:t};return Zs.runKernel(In,n,s)}}),xo=la({selu_:function(e){const t={x:ia(e,"x","selu")};return Zs.runKernel(En,t)}}),wo=la({separableConv2d_:function(e,t,n,s,a,r=[1,1],i="NHWC"){const o=ia(e,"x","separableConv2d"),l=ia(t,"depthwiseFilter","separableConv2d"),u=ia(n,"pointwiseFilter","separableConv2d");let c=o,h=!1;if(3===o.rank&&(h=!0,c=Zr(o,[1,o.shape[0],o.shape[1],o.shape[2]])),"NCHW"===i)throw new Error("separableConv2d currently does not support dataFormat NCHW; only NHWC is supported");E(4===c.rank,(()=>`Error in separableConv2d: input must be rank 4, but got rank ${c.rank}.`)),E(4===l.rank,(()=>`Error in separableConv2d: depthwise filter must be rank 4, but got rank ${l.rank}.`)),E(4===u.rank,(()=>`Error in separableConv2d: pointwise filter must be rank 4, but got rank ${l.rank}.`)),E(1===u.shape[0],(()=>`Error in separableConv2d: the first dimension of pointwise filter must be 1, but got ${u.shape[0]}.`)),E(1===u.shape[1],(()=>`Error in separableConv2d: the second dimension of pointwise filter must be 1, but got ${u.shape[1]}.`));const p=l.shape[2],d=l.shape[3];E(u.shape[2]===p*d,(()=>`Error in separableConv2d: the third dimension of pointwise filter must be ${p*d}, but got ${u.shape[2]}.`));const f=gi(c,l,s,a,i,r),m=hi(f,u,1,"valid",i);return h?Zr(m,[m.shape[1],m.shape[2],m.shape[3]]):m}}),ko=la({sigmoid_:function(e){const t={x:ia(e,"x","sigmoid")};return Zs.runKernel(_n,t)}}),vo=la({slice_:function(e,t,n){const s=ia(e,"x","slice","string_or_numeric");if(0===s.rank)throw new Error("Slicing scalar is not possible");const a={x:s},r={begin:t,size:n};return Zs.runKernel(An,a,r)}}),No=la({slice1d_:function(e,t,n){const s=ia(e,"x","slice1d");return E(1===s.rank,(()=>`slice1d expects a rank-1 tensor, but got a rank-${s.rank} tensor`)),vo(s,[t],[n])}}),Io=la({slice2d_:function(e,t,n){const s=ia(e,"x","slice2d");return E(2===s.rank,(()=>`slice2d expects a rank-2 tensor, but got a rank-${s.rank} tensor`)),vo(s,t,n)}}),$o=la({slice3d_:function(e,t,n){const s=ia(e,"x","slice3d");return E(3===s.rank,(()=>`slice3d expects a rank-3 tensor, but got a rank-${s.rank} tensor`)),vo(s,t,n)}}),Co=la({slice4d_:function(e,t,n){const s=ia(e,"x","slice4d");return E(4===s.rank,(()=>`slice4d expects a rank-4 tensor, but got a rank-${s.rank} tensor`)),vo(s,t,n)}}),So=la({softmax_:function(e,t=-1){const n=ia(e,"logits","softmax","float32");if(-1===t&&(t=n.rank-1),t!==n.rank-1)throw Error(`Softmax along a non-last dimension is not yet supported. 
Logits was rank ${n.rank} and dim was ${t}`);const s={logits:n},a={dim:t};return Zs.runKernel(Bn,s,a)}}),To=la({softplus_:function(e){const t={x:ia(e,"x","softplus")};return Zs.runKernel(On,t)}}),Eo=la({split_:function(e,t,n=0){const s={x:ia(e,"x","split")},a={numOrSizeSplits:t,axis:n};return Zs.runKernel(Pn,s,a)}}),Ao=la({sqrt_:function(e){const t={x:ia(e,"x","sqrt")};return Zs.runKernel(Mn,t)}}),Ro=la({squeeze_:function(e,t){const n=ia(e,"x","squeeze");return Zr(n,W(n.shape,t).newShape)}}),Fo=la({stack_:function(e,t=0){const n=oa(e,"tensors","stack","string_or_numeric");E(n.length>=1,(()=>"Pass at least one tensor to tf.stack")),n.length>0&&E(t<=n[0].rank,(()=>"Axis must be <= rank of the tensor"));const s=n,a={axis:t};return Zs.runKernel(un,s,a)}}),Do=la({tanh_:function(e){const t={x:ia(e,"x","tanh")};return Zs.runKernel(Kn,t)}});function _o(e,t){R(e);const n=sa(e,t);if(1!==n.length)throw new Error("tensor1d() requires values to be a flat/TypedArray");return ca(e,null,n,t)}function Oo(e,t,n){if(R(e),null!=t&&2!==t.length)throw new Error("tensor2d() requires shape to have two numbers");const s=sa(e,n);if(2!==s.length&&1!==s.length)throw new Error("tensor2d() requires values to be number[][] or flat/TypedArray");if(1===s.length&&null==t)throw new Error("tensor2d() requires shape to be provided when `values` are a flat/TypedArray");return ca(e,t,s,n)}const Mo=la({truncatedNormal_:function(e,t=0,n=1,s,a){if(null!=s&&"bool"===s)throw new Error("Unsupported data type $ { dtype }");const r=new po(t,n,s,!0,a),i=Ha(e,s);for(let e=0;e<i.values.length;e++)i.values[e]=r.nextValue();return i.toTensor()}}),Lo=la({unstack_:function(e,t=0){const n=ia(e,"x","unstack","string_or_numeric");E(t>=-n.shape.length&&t<n.shape.length,(()=>`Axis = ${t} is not in [-${n.shape.length}, ${n.shape.length})`));const s={value:n},a={axis:t};return Zs.runKernel(Qn,s,a)}}),zo=la({broadcastTo_:function(e,t){let n=ia(e,"broadcastTo","x");const s=n.shape;if(t.some((e=>!(e>0)||e%1!=0)))throw new Error(`broadcastTo(): Invalid broadcast shape [${t}].`);if(t.length<n.rank)throw new Error(`broadcastTo(): shape.length=${t.length} < input.rank=${n.rank}.`);if(t.length>n.rank){const e=n.shape.slice();for(;e.length<t.length;)e.unshift(1);n=Zr(n,e)}const a=n.shape,r=Array.from(t);for(let e=t.length-1;e>=0;e--)if(a[e]===t[e])r[e]=1;else if(1!==n.shape[e])throw new Error(`broadcastTo(): [${s}] cannot be broadcast to [${t}].`);if(0===r.map(((e,t)=>e>1?t:-1)).filter((e=>e>=0)).length)return Ka(n);const i={x:n},o={reps:r};return Zs.runKernel(qn,i,o)}}),Po=la({where_:function(e,t,n){const s=ia(t,"a","where"),a=ia(n,"b","where"),r=ia(e,"condition","where","bool"),i=vi(s.shape,a.shape),o=zo(s,i),l=zo(a,i);1===r.rank&&E(r.shape[0]===s.shape[0],(()=>"The first dimension of `a` must match the size of `condition`.")),1!==r.rank&&A(r.shape,l.shape,"Error in where: ");const u={condition:r,t:o,e:l};return Zs.runKernel(Tn,u)}}),Bo=la({zerosLike_:function(e){const t={x:ia(e,"x","zerosLike")};return Zs.runKernel(ts,t)}}),Wo=la({transpose_:function(e,t){const n=ia(e,"x","transpose");if(null==t&&(t=n.shape.map(((e,t)=>t)).reverse()),E(n.rank===t.length,(()=>`Error in transpose: rank of input ${n.rank} must match length of perm ${t}.`)),t.forEach((e=>{E(e>=0&&e<n.rank,(()=>"All entries in 'perm' must be between 0 and "+(n.rank-1)+` but got ${t}`))})),n.rank<=1)return n.clone();const s={x:n},a={perm:t};return Zs.runKernel(Jn,s,a)}}),Vo=la({dropout_:function(e,t,n,s){const a=ia(e,"x","dropout");if(E("float32"===a.dtype,(()=>`x has to be a floating point 
tensor since it's going to be scaled, but got a ${a.dtype} tensor instead.`)),E(t>=0&&t<1,(()=>`rate must be a float in the range [0, 1), but got ${t}.`)),0===t)return e instanceof Ds?a.clone():a;const r=function(e,t){if(null==t)return e.shape.slice();if(_(e.shape,t))return t;if(e.shape.length===t.length){const n=[];for(let s=0;s<e.shape.length;s++)null==t[s]&&null!=e.shape[s]?n.push(e.shape[s]):n.push(t[s]);return n}return t}(a,n),i=1-t,o=bi(Ti(Or(go(r,0,1,"float32",s),i)),i);return Mi(a,o)}}),Uo=la({imag_:function(e){const t={input:ia(e,"input","imag")};return Zs.runKernel(Ct,t)}}),Go=la({real_:function(e){const t={input:ia(e,"input","real")};return Zs.runKernel(mn,t)}}),Ho=la({fft_:function(e){E("complex64"===e.dtype,(()=>`The dtype for tf.spectral.fft() must be complex64 but got ${e.dtype}.`));const t={input:e};return Zs.runKernel(ft,t)}}),jo=la({rfft_:function(e,t){E("float32"===e.dtype,(()=>`The dtype for rfft() must be real value but got ${e.dtype}`));let n=e.shape[e.shape.length-1];const s=e.size/n;let a;if(null!=t&&t<n){const s=e.shape.map((e=>0)),r=e.shape.map((e=>e));r[e.shape.length-1]=t,a=vo(e,s,r),n=t}else if(null!=t&&t>n){const s=e.shape.map((e=>e));s[e.shape.length-1]=t-n,a=ii([e,io(s)],e.shape.length-1),n=t}else a=e;const r=Bo(a),i=Zr(ua(a,r),[s,n]),o=Ho(i),l=Math.floor(n/2)+1,u=Go(o),c=Uo(o),h=Eo(u,[l,n-l],u.shape.length-1),p=Eo(c,[l,n-l],c.shape.length-1),d=a.shape.slice();return d[a.shape.length-1]=l,Zr(ua(h[0],p[0]),d)}}),Ko=la({ifft_:function(e){E("complex64"===e.dtype,(()=>`The dtype for tf.spectral.ifft() must be complex64 but got ${e.dtype}.`));const t={input:e};return Zs.runKernel($t,t)}}),qo=la({irfft_:function(e){const t=e.shape[e.shape.length-1],n=e.size/t;let s;if(t<=2){const a=Zr(e,[n,t]);s=Ko(a)}else{const a=[n,2*(t-1)],r=Zr(Go(e),[n,t]),i=Zr(Uo(e),[n,t]),o=bo(vo(r,[0,1],[n,t-2]),1),l=Mi(bo(vo(i,[0,1],[n,t-2]),1),Fr(-1)),u=ii([r,o],1),c=ii([i,l],1),h=Zr(ua(u,c),[a[0],a[1]]);s=Ko(h)}if(s=Go(s),3===e.rank&&0!==e.shape[0]){const t=s,n=e.shape[0];s=Zr(s,[n,s.shape[0]/n,s.shape[1]]),t.dispose()}return s}}),Xo=la({conv2DBackpropFilter_:function(e,t,n,s,a,r="NHWC",i){let o=e;3===e.rank&&(o=Zr(e,[1,e.shape[0],e.shape[1],e.shape[2]]));let l=t;3===l.rank&&(l=Zr(t,[1,t.shape[0],t.shape[1],t.shape[2]])),E(4===o.rank,(()=>`Error in conv2dDerFilter: input must be rank 4, but got shape ${o.shape}.`)),E(4===l.rank,(()=>`Error in conv2dDerFilter: dy must be rank 4, but got shape ${l.shape}.`)),E(4===n.length,(()=>`Error in conv2dDerFilter: filterShape must be length 4, but got ${n}.`));const u="NHWC"===r?o.shape[3]:o.shape[1],c="NHWC"===r?l.shape[3]:l.shape[1];E(u===n[2],(()=>`Error in conv2dDerFilter: depth of input ${u}) must match input depth in filter (${n[2]}.`)),E(c===n[3],(()=>`Error in conv2dDerFilter: depth of dy (${c}) must match output depth for filter (${n[3]}).`)),null!=i&&E(O(a),(()=>`Error in conv2dDerFilter: pad must be an integer when using, dimRoundingMode ${i} but got pad ${a}.`));const h={x:o,dy:l},p={strides:s,pad:a,dataFormat:r,dimRoundingMode:i,filterShape:n};return Zs.runKernel(Ve,h,p)}}),Yo=la({relu6_:function(e){const t={x:ia(e,"x","relu6")};return Zs.runKernel(Nn,t)}}),Jo=la({step_:function(e,t=0){const n={x:ia(e,"x","step")},s={alpha:t};return Zs.runKernel(ns,n,s)}});function Zo(e,t,n){if(null==n||"linear"===n)return e;if("relu"===n)return Mi(e,Jo(t));throw new Error(`Cannot compute gradient for fused activation ${n}.`)}function Qo(e,t){let n=t;const s=ki(e.shape,t.shape);return s.length>0&&(n=zi(n,s)),Zr(n,e.shape)}function 
el(e,t,n,s){if("linear"===t)return e;if("relu"===t)return yo(e);if("elu"===t)return xi(e);if("relu6"===t)return Yo(e);if("prelu"===t)return co(e,n);if("leakyrelu"===t)return Fi(e,s);throw new Error(`Unknown fused activation ${t}.`)}const tl=(e,t)=>!(e>0)||"linear"===t,nl=la({fusedConv2d_:function({x:e,filter:t,strides:n,pad:s,dataFormat:a="NHWC",dilations:r=[1,1],dimRoundingMode:i,bias:o,activation:l="linear",preluActivationWeights:u,leakyreluAlpha:c}){if(l=l||"linear",!1===tl(Zs.state.gradientDepth,l)){let h=hi(e,t,n,s,a,r,i);return null!=o&&(h=Or(h,o)),el(h,l,u,c)}const h=ia(e,"x","conv2d"),p=ia(t,"filter","conv2d");let d=h,f=!1;3===h.rank&&(f=!0,d=Zr(h,[1,h.shape[0],h.shape[1],h.shape[2]])),E(4===d.rank,(()=>`Error in fused conv2d: input must be rank 4, but got rank ${d.rank}.`)),E(4===p.rank,(()=>`Error in fused conv2d: filter must be rank 4, but got rank ${p.rank}.`)),null!=i&&E(O(s),(()=>`Error in fused conv2d: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`)),E(d.shape[3]===p.shape[2],(()=>`Error in conv2d: depth of input (${d.shape[3]}) must match input depth for filter ${p.shape[2]}.`)),E(Yr(n,r),(()=>`Error in conv2D: Either strides or dilations must be 1. Got strides ${n} and dilations '${r}'`)),E("NHWC"===a,(()=>`Error in conv2d: got dataFormat of ${a} but only NHWC is currently supported.`));const m=Vr(d.shape,p.shape,n,r,s,i);let g,y;null!=o&&(g=ia(o,"bias","fused conv2d"),[g]=Gs(g,h),vi(m.outShape,g.shape)),null!=u&&(y=ia(u,"prelu weights","fused conv2d"));const b=(e,t)=>{const[a,i,o,u]=t,c=Zo(e,o,l);E(Xr(r),(()=>`Error in gradient of fused conv2D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${r}'`));const h=[di(i.shape,c,a,n,s),Xo(i,c,a.shape,n,s)];if(null!=u){const e=Qo(u,c);h.push(e)}return h},x={x:d,filter:p,bias:g,preluActivationWeights:y},w={strides:n,pad:s,dataFormat:a,dilations:r,dimRoundingMode:i,activation:l,leakyreluAlpha:c};return null==o?Rr(((e,t,n)=>{let s=Zs.runKernel(is,x,w);return n([t,e,s]),f&&(s=Zr(s,[s.shape[1],s.shape[2],s.shape[3]])),{value:s,gradFunc:b}}))(d,p):Rr(((e,t,n,s)=>{let a=Zs.runKernel(is,x,w);return s([t,e,a,n]),f&&(a=Zr(a,[a.shape[1],a.shape[2],a.shape[3]])),{value:a,gradFunc:b}}))(d,p,g)}}),sl=la({depthwiseConv2dNativeBackpropFilter_:function(e,t,n,s,a,r=[1,1],i){let o=e;3===e.rank&&(o=Zr(e,[1,e.shape[0],e.shape[1],e.shape[2]]));let l=t;3===l.rank&&(l=Zr(t,[1,t.shape[0],t.shape[1],t.shape[2]]));const u={x:o,dy:l},c={strides:s,pad:a,dimRoundingMode:i,dilations:r,filterShape:n};return Zs.runKernel(et,u,c)}}),al=la({depthwiseConv2dNativeBackpropInput_:function(e,t,n,s,a,r=[1,1],i){let o=t,l=!1;3===t.rank&&(l=!0,o=Zr(t,[1,t.shape[0],t.shape[1],t.shape[2]]));const u={dy:o,filter:n},c={strides:s,pad:a,dimRoundingMode:i,dilations:r,inputShape:e},h=Zs.runKernel(tt,u,c);return l?Zr(h,[h.shape[1],h.shape[2],h.shape[3]]):h}}),rl=la({fusedDepthwiseConv2d_:function({x:e,filter:t,strides:n,pad:s,dataFormat:a="NHWC",dilations:r=[1,1],dimRoundingMode:i,bias:o,activation:l="linear",preluActivationWeights:u,leakyreluAlpha:c}){if(!1===tl(Zs.state.gradientDepth,l)){let h=gi(e,t,n,s,a,r,i);return null!=o&&(h=Or(h,o)),el(h,l,u,c)}const h=ia(e,"x","depthwiseConv2d"),p=ia(t,"filter","depthwiseConv2d");let d=h,f=!1;3===h.rank&&(f=!0,d=Zr(h,[1,h.shape[0],h.shape[1],h.shape[2]])),E(4===d.rank,(()=>`Error in fused depthwiseConv2d: input must be rank 4, but got rank ${d.rank}.`)),E(4===p.rank,(()=>`Error in fused depthwiseConv2d: filter must be rank 4, but got rank 
${p.rank}.`)),E(d.shape[3]===p.shape[2],(()=>`Error in fused depthwiseConv2d: number of input channels (${d.shape[3]}) must match the inChannels dimension in filter ${p.shape[2]}.`)),null==r&&(r=[1,1]),E(Yr(n,r),(()=>`Error in fused depthwiseConv2d: Either strides or dilations must be 1. Got strides ${n} and dilations '${r}'`)),null!=i&&E(O(s),(()=>`Error in fused depthwiseConv2d: pad must be an integer when using dimRoundingMode ${i} but got pad ${s}.`));const m=Vr(d.shape,p.shape,n,r,s,i,!0);let g,y;null!=o&&(g=ia(o,"bias","fused conv2d"),[g]=Gs(g,h),vi(m.outShape,g.shape)),null!=u&&(y=ia(u,"prelu weights","fused depthwiseConv2d"));const b=(e,t)=>{E(Xr(r),(()=>`Error in gradient of fused depthwiseConv2d: dilation rates greater than 1 are not yet supported. Got dilations '${r}'`));const[a,o,u,c]=t,h=Zo(e,u,l),p=al(o.shape,h,a,n,s,r,i),d=sl(o,h,a.shape,n,s,r,i);return null!=c?[p,d,Qo(g,h)]:[p,d]},x={x:d,filter:p,bias:g,preluActivationWeights:y},w={strides:n,pad:s,dataFormat:a,dilations:r,dimRoundingMode:i,activation:l,leakyreluAlpha:c};return null==o?Rr(((e,t,n)=>{let s=Zs.runKernel(os,x,w);return n([t,e,s]),f&&(s=Zr(s,[s.shape[1],s.shape[2],s.shape[3]])),{value:s,gradFunc:b}}))(d,p):Rr(((e,t,n,s)=>{let a=Zs.runKernel(os,x,w);return s([t,e,a,n]),f&&(a=Zr(a,[a.shape[1],a.shape[2],a.shape[3]])),{value:a,gradFunc:b}}))(d,p,g)}}),il=la({matMul_:function(e,t,n=!1,s=!1){let a=ia(e,"a","matMul"),r=ia(t,"b","matMul");[a,r]=Gs(a,r);const i={a,b:r},o={transposeA:n,transposeB:s};return Zs.runKernel(Fe,i,o)}}),ol=la({fusedMatMul_:function({a:e,b:t,transposeA:n=!1,transposeB:s=!1,bias:a,activation:r="linear",preluActivationWeights:i,leakyreluAlpha:o}){if(!1===tl(Zs.state.gradientDepth,r)){let l=il(e,t,n,s);return null!=a&&(l=Or(l,a)),el(l,r,i,o)}let l=ia(e,"a","fused matMul"),u=ia(t,"b","fused matMul");[l,u]=Gs(l,u);const c=n?l.shape[l.rank-2]:l.shape[l.rank-1],h=s?u.shape[u.rank-1]:u.shape[u.rank-2],p=n?l.shape[l.rank-1]:l.shape[l.rank-2],d=s?u.shape[u.rank-2]:u.shape[u.rank-1],f=l.shape.slice(0,-2),m=u.shape.slice(0,-2),g=D(f),y=D(m);E(l.rank>=2&&u.rank>=2&&l.rank===u.rank,(()=>`Error in fused matMul: inputs must have the same rank of at least 2, got ranks ${l.rank} and ${u.rank}.`)),E(_(f,m),(()=>`Error in fused matMul: outer dimensions (${f}) and (${m}) of Tensors with shapes ${l.shape} and ${u.shape} must match.`)),E(c===h,(()=>`Error in fused matMul: inner shapes (${c}) and (${h}) of Tensors with shapes ${l.shape} and ${u.shape} and transposeA=${n} and transposeB=${s} must match.`));const b=l.shape.slice(0,-2).concat([p,d]),x=Zr(l,n?[g,c,p]:[g,p,c]),w=Zr(u,s?[y,d,h]:[y,h,d]);let k,v;null!=a&&(k=ia(a,"bias","fused matMul"),[k]=Gs(k,l),vi(b,k.shape)),null!=i&&(v=ia(i,"prelu weights","fused matMul"));const N=(e,t)=>{const[i,o,l,u]=t,c=Zo(Zr(e,l.shape),l,r);let h,p;return n||s?!n&&s?(h=il(c,o,!1,!1),p=il(c,i,!0,!1)):n&&!s?(h=il(o,c,!1,!0),p=il(i,c,!1,!1)):(h=il(o,c,!0,!0),p=il(c,i,!0,!0)):(h=il(c,o,!1,!0),p=il(i,c,!0,!1)),null!=a?[h,p,Qo(u,c)]:[h,p]},I={a:x,b:w,bias:k,preluActivationWeights:v},$={transposeA:n,transposeB:s,activation:r,leakyreluAlpha:o};return null==a?Rr(((e,t,n)=>{const s=Zs.runKernel(rs,I,$);return n([e,t,s]),{value:Zr(s,b),gradFunc:N}}))(x,w):Rr(((e,t,n,s)=>{const a=Zs.runKernel(rs,I,$);return s([e,t,a,n]),{value:Zr(a,b),gradFunc:N}}))(x,w,k)}});function ll(e,t,n){const s=1-e%2,a=new Float32Array(e);for(let r=0;r<e;++r){const i=2*Math.PI*r/(e+s-1);a[r]=t-n*Math.cos(i)}return _o(a,"float32")}la({hammingWindow_:function(e){return ll(e,.54,.46)}});const 
ul=la({hannWindow_:function(e){return ll(e,.5,.5)}}),cl=la({frame_:function(e,t,n,s=!1,a=0){let r=0;const i=[];for(;r+t<=e.size;)i.push(vo(e,r,t)),r+=n;if(s)for(;r<e.size;){const s=r+t-e.size,o=ii([vo(e,r,t-s),Si([s],a)]);i.push(o),r+=n}return 0===i.length?Oo([],[0,t]):Zr(ii(i),[i.length,t])}});la({stft_:function(e,t,n,s,a=ul){var r;null==s&&(r=t,s=Math.floor(Math.pow(2,Math.ceil(Math.log(r)/Math.log(2)))));const i=cl(e,t,n),o=Mi(i,a(t)),l=[];for(let e=0;e<i.shape[0];e++)l.push(jo(vo(o,[e,0],[1,t]),s));return ii(l)}});const hl=la({cropAndResize_:function(e,t,n,s,a="bilinear",r=0){const i=ia(e,"image","cropAndResize"),o=ia(t,"boxes","cropAndResize","float32"),l=ia(n,"boxInd","cropAndResize","int32"),u=o.shape[0];E(4===i.rank,(()=>`Error in cropAndResize: image must be rank 4,but got rank ${i.rank}.`)),E(2===o.rank&&4===o.shape[1],(()=>`Error in cropAndResize: boxes must be have size [${u},4] but had shape ${o.shape}.`)),E(1===l.rank&&l.shape[0]===u,(()=>`Error in cropAndResize: boxInd must be have size [${u}] but had shape ${o.shape}.`)),E(2===s.length,(()=>`Error in cropAndResize: cropSize must be of length 2, but got length ${s.length}.`)),E(s[0]>=1&&s[1]>=1,(()=>`cropSize must be atleast [1,1], but was ${s}`)),E("bilinear"===a||"nearest"===a,(()=>`method must be bilinear or nearest, but was ${a}`));const c={image:i,boxes:o,boxInd:l},h={method:a,extrapolationValue:r,cropSize:s};return Zs.runKernel(Ye,c,h)}}),pl=la({flipLeftRight_:function(e){const t=ia(e,"image","flipLeftRight","float32");E(4===t.rank,(()=>`Error in flipLeftRight: image must be rank 4,but got rank ${t.rank}.`));const n={image:t};return Zs.runKernel(gt,n,{})}}),dl=la({rotateWithOffset_:function(e,t,n=0,s=.5){const a=ia(e,"image","rotateWithOffset","float32");E(4===a.rank,(()=>`Error in rotateWithOffset: image must be rank 4,but got rank ${a.rank}.`));const r={image:a},i={radians:t,fillValue:n,center:s};return Zs.runKernel(as,r,i)}});function fl(e,t,n,s,a,r){null==s&&(s=.5),null==a&&(a=Number.NEGATIVE_INFINITY),null==r&&(r=0);const i=e.shape[0];return n=Math.min(n,i),E(0<=s&&s<=1,(()=>`iouThreshold must be in [0, 1], but was '${s}'`)),E(2===e.rank,(()=>`boxes must be a 2D tensor, but was of rank '${e.rank}'`)),E(4===e.shape[1],(()=>`boxes must have 4 columns, but 2nd dimension was ${e.shape[1]}`)),E(1===t.rank,(()=>"scores must be a 1D tensor")),E(t.shape[0]===i,(()=>`scores has incompatible shape with boxes. 
Expected ${i}, but was ${t.shape[0]}`)),E(0<=r&&r<=1,(()=>`softNmsSigma must be in [0, 1], but was '${r}'`)),{maxOutputSize:n,iouThreshold:s,scoreThreshold:a,softNmsSigma:r}}const ml=la({nonMaxSuppression_:function(e,t,n,s=.5,a=Number.NEGATIVE_INFINITY){const r=ia(e,"boxes","nonMaxSuppression"),i=ia(t,"scores","nonMaxSuppression"),o=fl(r,i,n,s,a),l={maxOutputSize:n=o.maxOutputSize,iouThreshold:s=o.iouThreshold,scoreThreshold:a=o.scoreThreshold};return Zs.runKernel(sn,{boxes:r,scores:i},l)}});function gl(e,t,n){const s=function(e,t,n){return function(e,t,n){let s=0,a=e.length,r=0,i=!1;for(;s<a;){r=s+(a-s>>>1);const o=n(t,e[r]);o>0?s=r+1:(a=r,i=!o)}return i?s:-s-1}(e,t,n||yl)}(e,t,n),a=s<0?-(s+1):s;e.splice(a,0,t)}function yl(e,t){return e>t?1:e<t?-1:0}function bl(e,t,n,s,a){return kl(e,t,n,s,a,0)}function xl(e,t,n,s,a,r){return kl(e,t,n,s,a,0,!1,r,!0)}function wl(e,t,n,s,a,r){return kl(e,t,n,s,a,r,!0)}function kl(e,t,n,s,a,r,i=!1,o=!1,l=!1){const u=[];for(let e=0;e<t.length;e++)t[e]>a&&u.push({score:t[e],boxIndex:e,suppressBeginIndex:0});u.sort(Il);const c=r>0?-.5/r:0,h=[],p=[];for(;h.length<n&&u.length>0;){const t=u.pop(),{score:n,boxIndex:r,suppressBeginIndex:i}=t;if(n<a)break;let o=!1;for(let n=h.length-1;n>=i;--n){const i=vl(e,r,h[n]);if(i>=s){o=!0;break}if(t.score=t.score*Nl(s,c,i),t.score<=a)break}t.suppressBeginIndex=h.length,o||(t.score===n?(h.push(r),p.push(t.score)):t.score>a&&gl(u,t,Il))}const d=h.length,f=n-d;o&&f>0&&(h.push(...new Array(f).fill(0)),p.push(...new Array(f).fill(0)));const m={selectedIndices:h};return i&&(m.selectedScores=p),l&&(m.validOutputs=d),m}function vl(e,t,n){const s=e.subarray(4*t,4*t+4),a=e.subarray(4*n,4*n+4),r=Math.min(s[0],s[2]),i=Math.min(s[1],s[3]),o=Math.max(s[0],s[2]),l=Math.max(s[1],s[3]),u=Math.min(a[0],a[2]),c=Math.min(a[1],a[3]),h=Math.max(a[0],a[2]),p=Math.max(a[1],a[3]),d=(o-r)*(l-i),f=(h-u)*(p-c);if(d<=0||f<=0)return 0;const m=Math.max(r,u),g=Math.max(i,c),y=Math.min(o,h),b=Math.min(l,p),x=Math.max(y-m,0)*Math.max(b-g,0);return x/(d+f-x)}function Nl(e,t,n){const s=Math.exp(t*n*n);return n<=e?s:0}function Il(e,t){return e.score-t.score||e.score===t.score&&t.boxIndex-e.boxIndex}const $l=la({nonMaxSuppressionWithScore_:function(e,t,n,s=.5,a=Number.NEGATIVE_INFINITY,r=0){const i=ia(e,"boxes","nonMaxSuppression"),o=ia(t,"scores","nonMaxSuppression"),l=fl(i,o,n,s,a,r),u={boxes:i,scores:o},c={maxOutputSize:n=l.maxOutputSize,iouThreshold:s=l.iouThreshold,scoreThreshold:a=l.scoreThreshold,softNmsSigma:r=l.softNmsSigma},h=Zs.runKernel(rn,u,c);return{selectedIndices:h[0],selectedScores:h[1]}}}),Cl=la({nonMaxSuppressionPadded_:function(e,t,n,s=.5,a=Number.NEGATIVE_INFINITY,r=!1){const i=ia(e,"boxes","nonMaxSuppression"),o=ia(t,"scores","nonMaxSuppression"),l=fl(i,o,n,s,a,null),u={boxes:i,scores:o},c={maxOutputSize:l.maxOutputSize,iouThreshold:l.iouThreshold,scoreThreshold:l.scoreThreshold,padToMaxOutputSize:r},h=Zs.runKernel(an,u,c);return{selectedIndices:h[0],validOutputs:h[1]}}}),Sl=la({resizeBilinear_:function(e,t,n=!1,s=!1){const a=ia(e,"images","resizeBilinear");E(3===a.rank||4===a.rank,(()=>`Error in resizeBilinear: x must be rank 3 or 4, but got rank ${a.rank}.`)),E(2===t.length,(()=>`Error in resizeBilinear: new shape must 2D, but got shape ${t}.`)),E(!1===s||!1===n,(()=>"Error in resizeBilinear: If halfPixelCenters is true, alignCorners must be false."));let r=a,i=!1;3===a.rank&&(i=!0,r=Zr(a,[1,a.shape[0],a.shape[1],a.shape[2]]));const[]=t,o={images:r},l={alignCorners:n,halfPixelCenters:s,size:t},u=Zs.runKernel(kn,o,l);return 
i?Zr(u,[u.shape[1],u.shape[2],u.shape[3]]):u}}),Tl=la({resizeNearestNeighbor_:function(e,t,n=!1,s=!1){const a=ia(e,"images","resizeNearestNeighbor");E(3===a.rank||4===a.rank,(()=>`Error in resizeNearestNeighbor: x must be rank 3 or 4, but got rank ${a.rank}.`)),E(2===t.length,(()=>`Error in resizeNearestNeighbor: new shape must 2D, but got shape ${t}.`)),E("float32"===a.dtype||"int32"===a.dtype,(()=>"`images` must have `int32` or `float32` as dtype")),E(!1===s||!1===n,(()=>"Error in resizeNearestNeighbor: If halfPixelCenters is true, alignCorners must be false."));let r=a,i=!1;3===a.rank&&(i=!0,r=Zr(a,[1,a.shape[0],a.shape[1],a.shape[2]]));const[]=t,o={images:r},l={alignCorners:n,halfPixelCenters:s,size:t},u=Zs.runKernel(xn,o,l);return i?Zr(u,[u.shape[1],u.shape[2],u.shape[3]]):u}}),El=la({transform_:function(e,t,n="nearest",s="constant",a=0,r){const i=ia(e,"image","transform","float32"),o=ia(t,"transforms","transform","float32");E(4===i.rank,(()=>`Error in transform: image must be rank 4,but got rank ${i.rank}.`)),E(2===o.rank&&(o.shape[0]===i.shape[0]||1===o.shape[0])&&8===o.shape[1],(()=>"Error in transform: Input transform should be batch x 8 or 1 x 8")),E(null==r||2===r.length,(()=>`Error in transform: outputShape must be [height, width] or null, but got ${r}.`));const l={image:i,transforms:o},u={interpolation:n,fillMode:s,fillValue:a,outputShape:r};return Zs.runKernel(Yn,l,u)}}),Al=la({lessEqual_:function(e,t){let n=ia(e,"a","lessEqual"),s=ia(t,"b","lessEqual");[n,s]=Gs(n,s),vi(n.shape,s.shape);const a={a:n,b:s};return Zs.runKernel(Ft,a)}});function Rl(e,t,n=1,s="float32"){if(0===n)throw new Error("Cannot have a step of zero");const a={start:e,stop:t,step:n,dtype:s};return Zs.runKernel(fn,{},a)}const Fl=la({bandPart_:function(e,t,n){E(t%1==0,(()=>`bandPart(): numLower must be an integer, got ${t}.`)),E(n%1==0,(()=>`bandPart(): numUpper must be an integer, got ${n}.`));const s=ia(e,"a","bandPart");E(s.rank>=2,(()=>`bandPart(): Rank must be at least 2, got ${s.rank}.`));const a=s.shape,[r,i]=s.shape.slice(-2);if(!(t<=r))throw new Error(`bandPart(): numLower (${t}) must not be greater than the number of rows (${r}).`);if(!(n<=i))throw new Error(`bandPart(): numUpper (${n}) must not be greater than the number of columns (${i}).`);t<0&&(t=r),n<0&&(n=i);const o=Zr(Rl(0,r,1,"int32"),[-1,1]),l=Rl(0,i,1,"int32"),u=Li(o,l),c=Bi(Al(u,Fr(+t,"int32")),Ri(u,Fr(-n,"int32"))),h=io([r,i],s.dtype);return Zr(Fo(Lo(Zr(s,[-1,r,i])).map((e=>Po(c,e,h)))),a)}}),Dl=la({pow_:function(e,t){let n=ia(e,"base","pow"),s=ia(t,"exp","pow");[n,s]=Gs(n,s);const a={a:n,b:s};return Zs.runKernel(hn,a)}});function _l(e,t,n=null){if(0===e.rank)return _r(e);if(1!==e.rank&&null===n)return _l(Zr(e,[-1]),t,n);if(1===e.rank||"number"==typeof n||Array.isArray(n)&&1===n.length){if(1===t)return zi(_r(e),n);if(t===1/0)return Oi(_r(e),n);if(t===-1/0)return Hi(_r(e),n);if("euclidean"===t||2===t)return Ao(zi(Dl(_r(e),Fr(2,"int32")),n));throw new Error(`Error in norm: invalid ord value: ${t}`)}if(Array.isArray(n)&&2===n.length){if(1===t)return Oi(zi(_r(e),n[0]),n[1]-1);if(t===1/0)return Oi(zi(_r(e),n[1]),n[0]);if(t===-1/0)return Hi(zi(_r(e),n[1]),n[0]);if("fro"===t||"euclidean"===t)return Ao(zi(to(e),n));throw new Error(`Error in norm: invalid ord value: ${t}`)}throw new Error(`Error in norm: invalid axis: ${n}`)}const Ol=la({norm_:function(e,t="euclidean",n=null,s=!1){const a=_l(e=ia(e,"x","norm"),t,n);let r=a.shape;if(s){const t=B(n,e.shape);r=Yi(a.shape,t)}return Zr(a,r)}}),Ml=la({gramSchmidt_:function(e){let 
t;if(Array.isArray(e)){t=!1,E(null!=e&&e.length>0,(()=>"Gram-Schmidt process: input must not be null, undefined, or empty"));const n=e[0].shape[0];for(let t=1;t<e.length;++t)E(e[t].shape[0]===n,(()=>`Gram-Schmidt: Non-unique lengths found in the input vectors: (${e[t].shape[0]} vs. ${n})`))}else t=!0,e=Eo(e,e.shape[0],0).map((e=>Ro(e,[0])));E(e.length<=e[0].shape[0],(()=>`Gram-Schmidt: Number of vectors (${e.length}) exceeds number of dimensions (${e[0].shape[0]}).`));const n=[],s=e;for(let t=0;t<e.length;++t)n.push(Zs.tidy((()=>{let e=s[t];if(t>0)for(let s=0;s<t;++s){const t=Mi(zi(Mi(n[s],e)),n[s]);e=Li(e,t)}return bi(e,Ol(e,"euclidean"))})));return t?Fo(n,0):n}});function Ll(e,t=!1){return Zs.tidy((()=>{E(2===e.shape.length,(()=>`qr2d() requires a 2D Tensor, but got a ${e.shape.length}D Tensor.`));const n=e.shape[0],s=e.shape[1];let a=Ci(n),r=Ka(e);const i=Oo([[1]],[1,1]);let o=Ka(i);const l=n>=s?s:n;for(let e=0;e<l;++e){const t=r,l=o,u=a;[o,r,a]=Zs.tidy((()=>{const t=vo(r,[e,e],[n-e,1]),l=Ol(t),u=vo(r,[e,e],[1,1]),c=Po(Ai(u,0),Oo([[-1]]),Oo([[1]])),h=Li(u,Mi(c,l)),p=bi(t,h);o=1===p.shape[0]?Ka(i):ii([i,vo(p,[1,0],[p.shape[0]-1,p.shape[1]])],0);const d=so(bi(il(c,h),l)),f=vo(r,[e,0],[n-e,s]),m=Mi(d,o),g=Wo(o);if(0===e)r=Li(f,il(m,il(g,f)));else{const t=Li(f,il(m,il(g,f)));r=ii([vo(r,[0,0],[e,s]),t],0)}const y=Wo(m),b=vo(a,[0,e],[n,a.shape[1]-e]);if(0===e)a=Li(b,il(il(b,o),y));else{const t=Li(b,il(il(b,o),y));a=ii([vo(a,[0,0],[n,e]),t],1)}return[o,r,a]})),Sr([t,l,u])}return!t&&n>s&&(a=vo(a,[0,0],[n,s]),r=vo(r,[0,0],[s,s])),[a,r]}))}const zl=la({qr_:function(e,t=!1){if(E(e.rank>=2,(()=>`qr() requires input tensor to have a rank >= 2, but got rank ${e.rank}`)),2===e.rank)return Ll(e,t);{const n=e.shape.slice(0,e.shape.length-2).reduce(((e,t)=>e*t)),s=Lo(Zr(e,[n,e.shape[e.shape.length-2],e.shape[e.shape.length-1]]),0),a=[],r=[];return s.forEach((e=>{const[n,s]=Ll(e,t);a.push(n),r.push(s)})),[Zr(Fo(a,0),e.shape),Zr(Fo(r,0),e.shape)]}}});var Pl;!function(e){e[e.NONE=0]="NONE",e[e.MEAN=1]="MEAN",e[e.SUM=2]="SUM",e[e.SUM_BY_NONZERO_WEIGHTS=3]="SUM_BY_NONZERO_WEIGHTS"}(Pl||(Pl={}));const Bl=la({computeWeightedLoss_:function(e,t,n=Pl.SUM_BY_NONZERO_WEIGHTS){const s=ia(e,"losses","computeWeightedLoss");let a=null;null!=t&&(a=ia(t,"weights","computeWeightedLoss"));const r=null==a?s:Mi(s,a);if(n===Pl.NONE)return r;if(n===Pl.SUM)return zi(r);if(n===Pl.MEAN){if(null==a)return Gi(r);{const e=s.size/a.size,t=bi(zi(r),zi(a));return e>1?bi(t,Fr(e)):t}}if(n===Pl.SUM_BY_NONZERO_WEIGHTS){if(null==a)return bi(zi(r),Fr(s.size));{const e=Mi(a,oo(s.shape)),t=ja(zi(ao(e,Fr(0))),"float32");return bi(zi(r),t)}}throw Error(`Unknown reduction: ${n}`)}});la({absoluteDifference_:function(e,t,n,s=Pl.SUM_BY_NONZERO_WEIGHTS){const a=ia(e,"labels","absoluteDifference"),r=ia(t,"predictions","absoluteDifference");let i=null;null!=n&&(i=ia(n,"weights","absoluteDifference")),A(a.shape,r.shape,"Error in absoluteDifference: ");const o=_r(Li(a,r));return Bl(o,i,s)}}),la({cosineDistance_:function(e,t,n,s,a=Pl.SUM_BY_NONZERO_WEIGHTS){const r=ia(e,"labels","cosineDistance"),i=ia(t,"predictions","cosineDistance");let o=null;null!=s&&(o=ia(s,"weights","cosineDistance")),A(r.shape,i.shape,"Error in cosineDistance: ");const l=Fr(1),u=Li(l,zi(Mi(r,i),n,!0));return Bl(u,o,a)}}),la({hingeLoss_:function(e,t,n,s=Pl.SUM_BY_NONZERO_WEIGHTS){let a=ia(e,"labels","hingeLoss");const r=ia(t,"predictions","hingeLoss");let i=null;null!=n&&(i=ia(n,"weights","hingeLoss")),A(a.shape,r.shape,"Error in hingeLoss: ");const o=Fr(1);a=Li(Mi(Fr(2),a),o);const 
l=yo(Li(o,Mi(a,r)));return Bl(l,i,s)}}),la({huberLoss_:function(e,t,n,s=1,a=Pl.SUM_BY_NONZERO_WEIGHTS){const r=ia(e,"labels","huberLoss"),i=ia(t,"predictions","huberLoss");let o=null;null!=n&&(o=ia(n,"weights","huberLoss")),A(r.shape,i.shape,"Error in huberLoss: ");const l=Fr(s),u=_r(Li(i,r)),c=ji(u,l),h=Li(u,c),p=Or(Mi(Fr(.5),to(c)),Mi(l,h));return Bl(p,o,a)}}),la({logLoss_:function(e,t,n,s=1e-7,a=Pl.SUM_BY_NONZERO_WEIGHTS){const r=ia(e,"labels","logLoss"),i=ia(t,"predictions","logLoss");let o=null;null!=n&&(o=ia(n,"weights","logLoss")),A(r.shape,i.shape,"Error in logLoss: ");const l=Fr(1),u=Fr(s),c=so(Mi(r,Di(Or(i,u)))),h=Mi(Li(l,r),Di(Or(Li(l,i),u))),p=Li(c,h);return Bl(p,o,a)}});const Wl=la({squaredDifference_:function(e,t){let n=ia(e,"a","squaredDifference"),s=ia(t,"b","squaredDifference");[n,s]=Gs(n,s),vi(n.shape,s.shape);const a={a:n,b:s};return Zs.runKernel(Wn,a,{})}});la({meanSquaredError_:function(e,t,n,s=Pl.SUM_BY_NONZERO_WEIGHTS){const a=ia(e,"labels","meanSquaredError"),r=ia(t,"predictions","meanSquaredError");let i=null;null!=n&&(i=ia(n,"weights","meanSquaredError")),A(a.shape,r.shape,"Error in meanSquaredError: ");const o=Wl(a,r);return Bl(o,i,s)}});const Vl=la({log1p_:function(e){const t={x:ia(e,"x","log1p")};return Zs.runKernel(Ot,t)}});la({sigmoidCrossEntropy_:function(e,t,n,s=0,a=Pl.SUM_BY_NONZERO_WEIGHTS){let r=ia(e,"multiClassLabels","sigmoidCrossEntropy");const i=ia(t,"logits","sigmoidCrossEntropy");let o=null;if(null!=n&&(o=ia(n,"weights","sigmoidCrossEntropy")),A(r.shape,i.shape,"Error in sigmoidCrossEntropy: "),s>0){const e=Fr(s),t=Fr(1),n=Fr(.5);r=Or(Mi(r,Li(t,e)),Mi(n,e))}const l=function(e,t){const n=ia(e,"labels","sigmoidCrossEntropyWithLogits"),s=ia(t,"logits","sigmoidCrossEntropyWithLogits");A(n.shape,s.shape,"Error in sigmoidCrossEntropyWithLogits: ");const a=yo(s),r=Mi(s,n),i=Vl(_i(so(_r(s))));return Or(Li(a,r),i)}(r,i);return Bl(l,o,a)}});const Ul=la({logSumExp_:function(e,t=null,n=!1){const s=ia(e,"x","logSumExp"),a=B(t,s.shape),r=Oi(s,a,!0),i=Li(s,r),o=_i(i),l=zi(o,a),u=Di(l),c=Or(Zr(r,u.shape),u);if(n){const e=Yi(c.shape,a);return Zr(c,e)}return c}});la({softmaxCrossEntropy_:function(e,t,n,s=0,a=Pl.SUM_BY_NONZERO_WEIGHTS){let r=ia(e,"onehotLabels","softmaxCrossEntropy");const i=ia(t,"logits","softmaxCrossEntropy");let o=null;if(null!=n&&(o=ia(n,"weights","softmaxCrossEntropy")),A(r.shape,i.shape,"Error in softmaxCrossEntropy: "),s>0){const e=Fr(s),t=Fr(1),n=Fr(r.shape[1]);r=Or(Mi(r,Li(t,e)),bi(e,n))}const l=function(e,t,n=-1){if(-1===n&&(n=t.rank-1),n!==t.rank-1)throw Error(`Softmax cross entropy along a non-last dimension is not yet supported. 
Labels / logits was rank ${t.rank} and dim was ${n}`);return Rr(((e,t,s)=>{const a=Ul(t,[n],!0),r=Li(ja(t,"float32"),a);s([e,r]);const i=so(Mi(r,e));return{value:zi(i,[n]),gradFunc:(e,t)=>{const[s,a]=t,r=Yi(e.shape,[n]);return[Mi(Zr(e,r),Li(ja(s,"float32"),_i(a))),Mi(Zr(e,r),Li(_i(a),ja(s,"float32")))]}}}))(e,t)}(r,i);return Bl(l,o,a)}});const Gl={flipLeftRight:pl,resizeNearestNeighbor:Tl,resizeBilinear:Sl,rotateWithOffset:dl,cropAndResize:hl,nonMaxSuppression:ml,nonMaxSuppressionAsync:async function(e,t,n,s=.5,a=Number.NEGATIVE_INFINITY){const r=ia(e,"boxes","nonMaxSuppressionAsync"),i=ia(t,"scores","nonMaxSuppressionAsync"),o=fl(r,i,n,s,a);n=o.maxOutputSize,s=o.iouThreshold,a=o.scoreThreshold;const l=await Promise.all([r.data(),i.data()]),u=l[0],c=l[1],{selectedIndices:h}=bl(u,c,n,s,a);return r!==e&&r.dispose(),i!==t&&i.dispose(),_o(h,"int32")},nonMaxSuppressionWithScore:$l,nonMaxSuppressionWithScoreAsync:async function(e,t,n,s=.5,a=Number.NEGATIVE_INFINITY,r=0){const i=ia(e,"boxes","nonMaxSuppressionAsync"),o=ia(t,"scores","nonMaxSuppressionAsync"),l=fl(i,o,n,s,a,r);n=l.maxOutputSize,s=l.iouThreshold,a=l.scoreThreshold,r=l.softNmsSigma;const u=await Promise.all([i.data(),o.data()]),c=u[0],h=u[1],{selectedIndices:p,selectedScores:d}=wl(c,h,n,s,a,r);return i!==e&&i.dispose(),o!==t&&o.dispose(),{selectedIndices:_o(p,"int32"),selectedScores:_o(d)}},nonMaxSuppressionPadded:Cl,nonMaxSuppressionPaddedAsync:async function(e,t,n,s=.5,a=Number.NEGATIVE_INFINITY,r=!1){const i=ia(e,"boxes","nonMaxSuppressionAsync"),o=ia(t,"scores","nonMaxSuppressionAsync"),l=fl(i,o,n,s,a,null),u=l.maxOutputSize,c=l.iouThreshold,h=l.scoreThreshold,[p,d]=await Promise.all([i.data(),o.data()]),{selectedIndices:f,validOutputs:m}=xl(p,d,u,c,h,r);return i!==e&&i.dispose(),o!==t&&o.dispose(),{selectedIndices:_o(f,"int32"),validOutputs:Fr(m,"int32")}},transform:El},Hl={bandPart:Fl,gramSchmidt:Ml,qr:zl};class jl extends Dr{constructor(e,t,n=null){super(),this.learningRate=e,this.rho=t,this.epsilon=n,this.accumulatedGrads=[],this.accumulatedUpdates=[],null==n&&(this.epsilon=Zs.backend.epsilon())}applyGradients(e){(Array.isArray(e)?e.map((e=>e.name)):Object.keys(e)).forEach(((t,n)=>{const s=Zs.registeredVariables[t];null==this.accumulatedGrads[n]&&(this.accumulatedGrads[n]={originalName:`${t}/accum_grad`,variable:Cr((()=>Bo(s).variable(!1)))}),null==this.accumulatedUpdates[n]&&(this.accumulatedUpdates[n]={originalName:`${t}/accum_var`,variable:Cr((()=>Bo(s).variable(!1)))});const a=Array.isArray(e)?e[n].tensor:e[t];if(null==a)return;const r=this.accumulatedGrads[n].variable,i=this.accumulatedUpdates[n].variable;Cr((()=>{const e=Or(Mi(r,this.rho),Mi(to(a),1-this.rho)),t=Mi(bi(Ao(Or(i,this.epsilon)),Ao(Or(r,this.epsilon))),a),n=Or(Mi(i,this.rho),Mi(to(t),1-this.rho));r.assign(e),i.assign(n);const o=Or(Mi(t,-this.learningRate),s);s.assign(o)}))})),this.incrementIterations()}dispose(){null!=this.accumulatedUpdates&&(Sr(this.accumulatedGrads.map((e=>e.variable))),Sr(this.accumulatedUpdates.map((e=>e.variable))))}async getWeights(){const e=[...this.accumulatedGrads,...this.accumulatedUpdates];return[await this.saveIterations()].concat(e.map((e=>({name:e.originalName,tensor:e.variable}))))}async setWeights(e){const t=(e=await 
this.extractIterations(e)).length/2;this.accumulatedGrads=e.slice(0,t).map((e=>({originalName:e.name,variable:e.tensor.variable(!1)}))),this.accumulatedUpdates=e.slice(t,2*t).map((e=>({originalName:e.name,variable:e.tensor.variable(!1)})))}getConfig(){return{learningRate:this.learningRate,rho:this.rho,epsilon:this.epsilon}}static fromConfig(e,t){return new e(t.learningRate,t.rho,t.epsilon)}}jl.className="Adadelta",Nr(jl);class Kl extends Dr{constructor(e,t=.1){super(),this.learningRate=e,this.initialAccumulatorValue=t,this.accumulatedGrads=[]}applyGradients(e){(Array.isArray(e)?e.map((e=>e.name)):Object.keys(e)).forEach(((t,n)=>{const s=Zs.registeredVariables[t];if(null==this.accumulatedGrads[n]){const e=!1;this.accumulatedGrads[n]={originalName:`${t}/accumulator`,variable:Cr((()=>Si(s.shape,this.initialAccumulatorValue).variable(e)))}}const a=Array.isArray(e)?e[n].tensor:e[t];if(null==a)return;const r=this.accumulatedGrads[n].variable;Cr((()=>{const e=Or(r,to(a));r.assign(e);const t=Or(Mi(bi(a,Ao(Or(e,Zs.backend.epsilon()))),-this.learningRate),s);s.assign(t)}))})),this.incrementIterations()}dispose(){null!=this.accumulatedGrads&&Sr(this.accumulatedGrads.map((e=>e.variable)))}async getWeights(){return[await this.saveIterations()].concat(this.accumulatedGrads.map((e=>({name:e.originalName,tensor:e.variable}))))}async setWeights(e){e=await this.extractIterations(e),this.accumulatedGrads=e.map((e=>({originalName:e.name,variable:e.tensor.variable(!1)})))}getConfig(){return{learningRate:this.learningRate,initialAccumulatorValue:this.initialAccumulatorValue}}static fromConfig(e,t){return new e(t.learningRate,t.initialAccumulatorValue)}}Kl.className="Adagrad",Nr(Kl);class ql extends Dr{constructor(e,t,n,s=null){super(),this.learningRate=e,this.beta1=t,this.beta2=n,this.epsilon=s,this.accumulatedFirstMoment=[],this.accumulatedSecondMoment=[],Cr((()=>{this.accBeta1=Fr(t).variable(),this.accBeta2=Fr(n).variable()})),null==s&&(this.epsilon=Zs.backend.epsilon())}applyGradients(e){const t=Array.isArray(e)?e.map((e=>e.name)):Object.keys(e);Cr((()=>{const n=Li(1,this.accBeta1),s=Li(1,this.accBeta2);t.forEach(((t,a)=>{const r=Zs.registeredVariables[t];null==this.accumulatedFirstMoment[a]&&(this.accumulatedFirstMoment[a]={originalName:`${t}/m`,variable:Cr((()=>Bo(r).variable(!1)))}),null==this.accumulatedSecondMoment[a]&&(this.accumulatedSecondMoment[a]={originalName:`${t}/v`,variable:Cr((()=>Bo(r).variable(!1)))});const i=Array.isArray(e)?e[a].tensor:e[t];if(null==i)return;const o=this.accumulatedFirstMoment[a].variable,l=this.accumulatedSecondMoment[a].variable,u=Or(Mi(o,this.beta1),Mi(i,1-this.beta1)),c=Or(Mi(l,this.beta2),Mi(to(i),1-this.beta2)),h=bi(u,n),p=bi(c,s);o.assign(u),l.assign(c);const d=Or(Mi(bi(h,Or(Ao(p),this.epsilon)),-this.learningRate),r);r.assign(d)})),this.accBeta1.assign(Mi(this.accBeta1,this.beta1)),this.accBeta2.assign(Mi(this.accBeta2,this.beta2))})),this.incrementIterations()}dispose(){this.accBeta1.dispose(),this.accBeta2.dispose(),null!=this.accumulatedFirstMoment&&Sr(this.accumulatedFirstMoment.map((e=>e.variable))),null!=this.accumulatedSecondMoment&&Sr(this.accumulatedSecondMoment.map((e=>e.variable)))}async getWeights(){const e=[...this.accumulatedFirstMoment,...this.accumulatedSecondMoment];return[await this.saveIterations()].concat(e.map((e=>({name:e.originalName,tensor:e.variable}))))}async setWeights(e){e=await this.extractIterations(e),Cr((()=>{this.accBeta1.assign(Dl(this.beta1,this.iterations_+1)),this.accBeta2.assign(Dl(this.beta2,this.iterations_+1))}));const 
t=e.length/2;this.accumulatedFirstMoment=e.slice(0,t).map((e=>({originalName:e.name,variable:e.tensor.variable(!1)}))),this.accumulatedSecondMoment=e.slice(t,2*t).map((e=>({originalName:e.name,variable:e.tensor.variable(!1)})))}getConfig(){return{learningRate:this.learningRate,beta1:this.beta1,beta2:this.beta2,epsilon:this.epsilon}}static fromConfig(e,t){return new e(t.learningRate,t.beta1,t.beta2,t.epsilon)}}ql.className="Adam",Nr(ql);class Xl extends Dr{constructor(e,t,n,s=null,a=0){super(),this.learningRate=e,this.beta1=t,this.beta2=n,this.epsilon=s,this.decay=a,this.accumulatedFirstMoment=[],this.accumulatedWeightedInfNorm=[],Cr((()=>{this.iteration=Fr(0).variable(),this.accBeta1=Fr(t).variable()})),null==s&&(this.epsilon=Zs.backend.epsilon())}applyGradients(e){const t=Array.isArray(e)?e.map((e=>e.name)):Object.keys(e);Cr((()=>{const n=Li(1,this.accBeta1),s=bi(-this.learningRate,Or(Mi(this.iteration,this.decay),1));t.forEach(((t,a)=>{const r=Zs.registeredVariables[t];null==this.accumulatedFirstMoment[a]&&(this.accumulatedFirstMoment[a]={originalName:`${t}/m`,variable:Bo(r).variable(!1)}),null==this.accumulatedWeightedInfNorm[a]&&(this.accumulatedWeightedInfNorm[a]={originalName:`${t}/v`,variable:Bo(r).variable(!1)});const i=Array.isArray(e)?e[a].tensor:e[t];if(null==i)return;const o=this.accumulatedFirstMoment[a].variable,l=this.accumulatedWeightedInfNorm[a].variable,u=Or(Mi(o,this.beta1),Mi(i,1-this.beta1)),c=Mi(l,this.beta2),h=_r(i),p=Ui(c,h);o.assign(u),l.assign(p);const d=Or(Mi(bi(s,n),bi(u,Or(p,this.epsilon))),r);r.assign(d)})),this.iteration.assign(Or(this.iteration,1)),this.accBeta1.assign(Mi(this.accBeta1,this.beta1))})),this.incrementIterations()}dispose(){this.accBeta1.dispose(),this.iteration.dispose(),null!=this.accumulatedFirstMoment&&Sr(this.accumulatedFirstMoment.map((e=>e.variable))),null!=this.accumulatedWeightedInfNorm&&Sr(this.accumulatedWeightedInfNorm.map((e=>e.variable)))}async getWeights(){throw new Error("getWeights() is not implemented for Adamax yet.")}async setWeights(e){throw new Error("setWeights() is not implemented for Adamax yet.")}getConfig(){return{learningRate:this.learningRate,beta1:this.beta1,beta2:this.beta2,epsilon:this.epsilon,decay:this.decay}}static fromConfig(e,t){return new e(t.learningRate,t.beta1,t.beta2,t.epsilon,t.decay)}}Xl.className="Adamax",Nr(Xl);class Yl extends Dr{constructor(e){super(),this.learningRate=e,this.setLearningRate(e)}applyGradients(e){(Array.isArray(e)?e.map((e=>e.name)):Object.keys(e)).forEach(((t,n)=>{const s=Array.isArray(e)?e[n].tensor:e[t];if(null==s)return;const a=Zs.registeredVariables[t];Cr((()=>{const e=Or(Mi(this.c,s),a);a.assign(e)}))})),this.incrementIterations()}setLearningRate(e){this.learningRate=e,null!=this.c&&this.c.dispose(),this.c=Tr(Fr(-e))}dispose(){this.c.dispose()}async getWeights(){return[await this.saveIterations()]}async setWeights(e){if(0!==(e=await this.extractIterations(e)).length)throw new Error("SGD optimizer does not have settable weights.")}getConfig(){return{learningRate:this.learningRate}}static fromConfig(e,t){return new e(t.learningRate)}}Yl.className="SGD",Nr(Yl);class Jl extends Yl{constructor(e,t,n=!1){super(e),this.learningRate=e,this.momentum=t,this.useNesterov=n,this.accumulations=[],this.m=Fr(this.momentum)}applyGradients(e){(Array.isArray(e)?e.map((e=>e.name)):Object.keys(e)).forEach(((t,n)=>{const s=Zs.registeredVariables[t];if(null==this.accumulations[n]){const e=!1;this.accumulations[n]={originalName:`${t}/momentum`,variable:Cr((()=>Bo(s).variable(e)))}}const 
a=this.accumulations[n].variable,r=Array.isArray(e)?e[n].tensor:e[t];null!=r&&Cr((()=>{let e;const t=Or(Mi(this.m,a),r);e=this.useNesterov?Or(Mi(this.c,Or(r,Mi(t,this.m))),s):Or(Mi(this.c,t),s),a.assign(t),s.assign(e)}))})),this.incrementIterations()}dispose(){this.m.dispose(),null!=this.accumulations&&Sr(this.accumulations.map((e=>e.variable)))}setMomentum(e){this.momentum=e}async getWeights(){return[await this.saveIterations()].concat(this.accumulations.map((e=>({name:e.originalName,tensor:e.variable}))))}async setWeights(e){e=await this.extractIterations(e),this.accumulations=e.map((e=>({originalName:e.name,variable:e.tensor.variable(!1)})))}getConfig(){return{learningRate:this.learningRate,momentum:this.momentum,useNesterov:this.useNesterov}}static fromConfig(e,t){return new e(t.learningRate,t.momentum,t.useNesterov)}}Jl.className="Momentum",Nr(Jl);class Zl extends Dr{constructor(e,t=.9,n=0,s=null,a=!1){if(super(),this.learningRate=e,this.decay=t,this.momentum=n,this.epsilon=s,this.accumulatedMeanSquares=[],this.accumulatedMoments=[],this.accumulatedMeanGrads=[],this.centered=a,null==s&&(this.epsilon=Zs.backend.epsilon()),null==e)throw new Error("learningRate for RMSPropOptimizer must be defined.")}applyGradients(e){(Array.isArray(e)?e.map((e=>e.name)):Object.keys(e)).forEach(((t,n)=>{const s=Zs.registeredVariables[t],a=!1;null==this.accumulatedMeanSquares[n]&&(this.accumulatedMeanSquares[n]={originalName:`${t}/rms`,variable:Cr((()=>Bo(s).variable(a)))}),null==this.accumulatedMoments[n]&&(this.accumulatedMoments[n]={originalName:`${t}/momentum`,variable:Cr((()=>Bo(s).variable(a)))}),null==this.accumulatedMeanGrads[n]&&this.centered&&(this.accumulatedMeanGrads[n]={originalName:`${t}/mg`,variable:Cr((()=>Bo(s).variable(a)))});const r=Array.isArray(e)?e[n].tensor:e[t];if(null==r)return;const i=this.accumulatedMeanSquares[n].variable,o=this.accumulatedMoments[n].variable;Cr((()=>{const e=Or(Mi(i,this.decay),Mi(to(r),1-this.decay));if(this.centered){const t=this.accumulatedMeanGrads[n].variable,a=Or(Mi(t,this.decay),Mi(r,1-this.decay)),l=bi(Mi(r,this.learningRate),Ao(Li(e,Or(to(a),this.epsilon)))),u=Or(Mi(o,this.momentum),l);i.assign(e),t.assign(a),o.assign(u);const c=Li(s,u);s.assign(c)}else{const e=Or(Mi(i,this.decay),Mi(to(r),1-this.decay)),t=Or(Mi(o,this.momentum),bi(Mi(r,this.learningRate),Ao(Or(e,this.epsilon))));i.assign(e),o.assign(t);const n=Li(s,t);s.assign(n)}}))})),this.incrementIterations()}dispose(){null!=this.accumulatedMeanSquares&&Sr(this.accumulatedMeanSquares.map((e=>e.variable))),null!=this.accumulatedMeanGrads&&this.centered&&Sr(this.accumulatedMeanGrads.map((e=>e.variable))),null!=this.accumulatedMoments&&Sr(this.accumulatedMoments.map((e=>e.variable)))}async getWeights(){const e=[...this.accumulatedMeanSquares,...this.accumulatedMoments];return this.centered&&e.push(...this.accumulatedMeanGrads),[await this.saveIterations()].concat(e.map((e=>({name:e.originalName,tensor:e.variable}))))}async setWeights(e){e=await this.extractIterations(e);const t=this.centered?e.length/3:e.length/2,n=!1;this.accumulatedMeanSquares=e.slice(0,t).map((e=>({originalName:e.name,variable:e.tensor.variable(n)}))),this.accumulatedMoments=e.slice(t,2*t).map((e=>({originalName:e.name,variable:e.tensor.variable(n)}))),this.centered&&(this.accumulatedMeanGrads=e.slice(2*t,3*t).map((e=>({originalName:e.name,variable:e.tensor.variable(n)}))))}getConfig(){return{learningRate:this.learningRate,decay:this.decay,momentum:this.momentum,epsilon:this.epsilon,centered:this.centered}}static 
fromConfig(e,t){return new e(t.learningRate,t.decay,t.momentum,t.epsilon,t.centered)}}Zl.className="RMSProp",Nr(Zl);class Ql{static sgd(e){return new Yl(e)}static momentum(e,t,n=!1){return new Jl(e,t,n)}static rmsprop(e,t=.9,n=0,s=null,a=!1){return new Zl(e,t,n,s,a)}static adam(e=.001,t=.9,n=.999,s=null){return new ql(e,t,n,s)}static adadelta(e=.001,t=.95,n=null){return new jl(e,t,n)}static adamax(e=.002,t=.9,n=.999,s=null,a=0){return new Xl(e,t,n,s,a)}static adagrad(e,t=.1){return new Kl(e,t)}}const eu={sgd:Ql.sgd,momentum:Ql.momentum,adadelta:Ql.adadelta,adagrad:Ql.adagrad,rmsprop:Ql.rmsprop,adamax:Ql.adamax,adam:Ql.adam},tu="undefined"!=typeof requestAnimationFrame?requestAnimationFrame:"undefined"!=typeof setImmediate?setImmediate:e=>e();function nu(){return new Promise((e=>tu((()=>e()))))}function su(e,t){const n=e[0].length;e.forEach(((e,t)=>{E(e.length===n,(()=>`Error in concat${n}D: rank of tensors[${t}] must be the same as the rank of the rest (${n})`))})),E(t>=0&&t<n,(()=>`Error in concat${n}D: axis must be between 0 and ${n-1}.`));const s=e[0];e.forEach(((e,a)=>{for(let r=0;r<n;r++)E(r===t||e[r]===s[r],(()=>`Error in concat${n}D: Shape of tensors[${a}] (${e}) does not match the shape of the rest (${s}) along the non-concatenated axis ${a}.`))}))}function au(e,t){const n=e[0].slice();for(let s=1;s<e.length;s++)n[t]+=e[s][t];return n}const ru=30;function iu(e){return e<=ru?e:J(e,Math.floor(Math.sqrt(e)))}function ou(e,t,n){return[n*("number"==typeof e?e:e[0]),t*("number"==typeof e?e:e[1])]}function lu(e,t,n,s=!0){let a=[];if(s)a=a.concat(t.slice(0)),a.push(e[0]/n),a=a.concat(e.slice(1));else{a=a.concat(e[0]);const n=t.length;for(let s=0;s<n;++s)a=a.concat([e[s+1]/t[s],t[s]]);a=a.concat(e.slice(n+1))}return a}function uu(e,t,n=!0){const s=[];if(n){s.push(t);for(let n=t+1;n<e;++n)n<=2*t?(s.push(n),s.push(n-(t+1))):s.push(n)}else{const n=[],a=[];for(let s=1;s<e;++s)s>=2*t+1||s%2==1?a.push(s):n.push(s);s.push(...n),s.push(0),s.push(...a)}return s}function cu(e,t,n,s=!0){const a=[];s?a.push(e[0]/n):a.push(e[0]*n);for(let n=1;n<e.length;++n)n<=t.length?s?a.push(t[n-1]*e[n]):a.push(e[n]/t[n-1]):a.push(e[n]);return a}function hu(e,t){const n=[0];for(let s=0;s<t;++s)n.push(e[s][0]);return n}function pu(e,t,n){const s=e.slice(0,1);for(let a=0;a<n;++a)s.push(e[a+1]-t[a][0]-t[a][1]);return s}function du(e,t){const n=e.shape.length,s=t.shape.length;if(n<1)throw new Error(`tf.gatherND() expects the input to be rank 1 or higher, but the rank was ${n}.`);if(s<1)throw new Error(`tf.gatherND() expects the indices to be rank 1 or higher, but the rank was ${s}.`);if("int32"!==t.dtype)throw new Error(`tf.gatherND() expects the indices to be int32 type, but the dtype was ${t.dtype}.`);if(t.shape[s-1]>n)throw new Error(`index innermost dimension length must be <= tensor rank; saw: ${t.shape[s-1]} vs. ${n}`);if(0===D(e.shape))throw new Error(`Requested more than 0 entries, but input is empty. Input shape: ${e.shape}.`);const a=t.shape,r=a[a.length-1];let i=1;for(let e=0;e<a.length-1;++e)i*=a[e];const o=e.shape,l=a.slice();l.pop();let u=1;for(let e=r;e<n;++e)u*=o[e],l.push(o[e]);const c=[...Z(e.shape).map((e=>e/u)),1].slice(0,r);return[l,i,u,c]}function fu(e,t,n){const s=t.rank>1?t.shape[t.rank-1]:1,a=t.rank>1?t.rank-1:1,r=`Must have updates.shape = indices.shape[:batchDim] + shape[sliceDim:], got updates.shape: ${n.shape}, indices.shape: ${t.shape}, shape: ${e}, sliceDim: ${s}, and batchDim: ${a}.`;if(n.rank<a)throw new Error(r+` update.rank < ${a}. 
`);if(e.length<s+(n.rank-a))throw new Error(r+` Output shape length < ${s+(n.rank-a)}`);if(n.rank!==a+e.length-s)throw new Error(r+" update.rank != "+(a+e.length-s));for(let e=0;e<a;++e)if(n.shape[e]!==t.shape[e])throw new Error(r+` updates.shape[${e}] (${n.shape[e]}) != indices.shape[${e}] (${t.shape[e]}).`);for(let t=0;t<n.rank-a;++t)if(n.shape[t+a]!==e[t+s])throw new Error(r+` updates.shape[${t+a}] (${n.shape[t+a]}) != shape[${t+a}] (${e[t+a]})`)}function mu(e,t,n){if(t.rank<1)throw new Error(`tf.scatterND() expects the indices to be rank 1 or higher, but the rank was ${t.rank}.`);if(e.rank<1)throw new Error(`tf.scatterND() expects the updates to be rank 1 or higher, but the rank was ${e.rank}.`);if("int32"!==t.dtype)throw new Error(`The dtype of 'indices' should be int32, but got dtype: ${t.dtype}`);if(n.length<1)throw new Error(`Output rank must be greater or equal to 1, but got shape: ${n}`);if(0===n.length){if(0===t.size)throw new Error(`Indices specified for empty output. indices shape: ${t.shape}`);if(0===e.size)throw new Error(`Updates specified for empty output. updates shape: ${e.shape}`)}fu(n,t,e)}function gu(e,t,n){const s=t.shape.length,a=s>1?t.shape[s-1]:1,r=n.length;let i=1;for(let e=a;e<r;++e)i*=n[e];const o=a<1?1:a;return{sliceRank:a,numUpdates:D(t.shape)/o,sliceSize:i,strides:[...Z(n.slice(0,a)),1],outputSize:D(n)}}const yu=1.7580993408473768,bu=1.0507009873554805,xu=.3275911,wu=.254829592,ku=-.284496736,vu=1.421413741,Nu=-1.453152027,Iu=1.061405429;function $u(...e){ue().getBool("IS_TEST")||console.warn(...e)}function Cu(...e){ue().getBool("IS_TEST")||console.log(...e)}function Su(e,t){if(e.length!==t.length)throw new Error(`Cannot merge real and imag arrays of different lengths. real:${e.length}, imag: ${t.length}.`);const n=new Float32Array(2*e.length);for(let s=0;s<n.length;s+=2)n[s]=e[s/2],n[s+1]=t[s/2];return n}function Tu(e){const t=new Float32Array(e.length/2),n=new Float32Array(e.length/2);for(let s=0;s<e.length;s+=2)t[s/2]=e[s],n[s/2]=e[s+1];return{real:t,imag:n}}function Eu(e){const t=Math.ceil(e.length/4),n=new Float32Array(t),s=new Float32Array(t);for(let t=0;t<e.length;t+=4)n[Math.floor(t/4)]=e[t],s[Math.floor(t/4)]=e[t+1];return{real:n,imag:s}}function Au(e){const t=Math.floor(e.length/4),n=new Float32Array(t),s=new Float32Array(t);for(let t=2;t<e.length;t+=4)n[Math.floor(t/4)]=e[t],s[Math.floor(t/4)]=e[t+1];return{real:n,imag:s}}function Ru(e,t){return{real:e[2*t],imag:e[2*t+1]}}function Fu(e,t,n,s){e[2*s]=t,e[2*s+1]=n}function Du(e,t){const n=new Float32Array(e/2),s=new Float32Array(e/2);for(let a=0;a<Math.ceil(e/2);a++){const r=(t?2:-2)*Math.PI*(a/e);n[a]=Math.cos(r),s[a]=Math.sin(r)}return{real:n,imag:s}}function _u(e,t,n){const s=(n?2:-2)*Math.PI*(e/t);return{real:Math.cos(s),imag:Math.sin(s)}}function Ou(e,t,n=0){let s=[];if("number"==typeof t)E(e.shape[n]%t==0,(()=>"Number of splits must evenly divide the axis.")),s=new Array(t).fill(e.shape[n]/t);else{E(t.reduce(((e,t)=>(-1===t&&(e+=1),e)),0)<=1,(()=>"There should be only one negative value in split array."));const a=t.indexOf(-1);if(-1!==a){const s=t.reduce(((e,t)=>t>0?e+t:e));t[a]=e.shape[n]-s}E(e.shape[n]===t.reduce(((e,t)=>e+t)),(()=>"The sum of sizes must match the size of the axis dimension.")),s=t}return s}function Mu(e,t){let n,s=!1;for(e<=ru?(n=e,s=!0):n=J(e,Math.floor(Math.sqrt(e)));!s;)n>t||n===e?s=!0:n=J(e,n+1);return n}function Lu(e,t,n){const s=[],a=e.length;for(let r=0;r<a;r++)r!==t?s.push(e[r]):s.push(n);return s}function zu(e,t,n,s){const 
a=t.shape.length,r=e.shape.length;if(0!==s&&(s<-a||s>a))throw new Error(`Expect batchDims in the range of [-${a}, ${a}], but got ${s}`);if(s<0&&(s+=a),s>r)throw new Error(`batchDims (${s}) must be less than rank(x) (\n ${r}).`);if(n<s)throw new Error(`batchDims (${s}) must be less than or equal to axis (${n}).`);for(let n=0;n<s;++n)if(e.shape[n]!==t.shape[n])throw new Error(`x.shape[${n}]: ${e.shape[n]} should be equal to indices.shape[${n}]: ${t.shape[n]}.`);const i=e.shape[n],o=[];let l=1,u=1,c=1;for(let t=0;t<s;++t)o.push(e.shape[t]),l*=e.shape[t];for(let t=s;t<n;t++)o.push(e.shape[t]),u*=e.shape[t];for(let e=s;e<a;e++)o.push(t.shape[e]);for(let t=n+1;t<r;t++)o.push(e.shape[t]),c*=e.shape[t];return{batchSize:l,sliceSize:c,outerSize:u,dimSize:i,outputShape:o}}function Pu(e){try{return e.map((e=>ws(e)))}catch(e){throw new Error(`Failed to decode encoded string bytes into utf-8, error: ${e}`)}}function Bu(e){return e.map((e=>xs(e)))}function Wu(e,t){const n=[];for(let e=0;e<t.length;e++)t[e]&&n.push(e);const s=Ha(e,"int32"),a=Ha([n.length,e.length],"int32");for(let t=0;t<n.length;t++){const r=s.indexToLoc(n[t]),i=t*e.length;a.values.set(r,i)}return a.toTensor()}const Vu={kernelName:fe,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Mi(e,Jo(ja(n,"float32"),-1))}}},Uu={kernelName:me,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const t=to(ja(n,"float32")),s=Ao(Li(Fr(1),t));return so(bi(e,s))}}}},Gu={kernelName:ge,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const t=Ao(Li(to(ja(n,"float32")),1));return bi(e,t)}}}},Hu={kernelName:ye,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,a=vi(n.shape,s.shape);return{a:()=>{let t=e;const s=ki(n.shape,a);return s.length>0&&(t=zi(t,s)),Zr(t,n.shape)},b:()=>{let t=e;const n=ki(s.shape,a);return n.length>0&&(t=zi(t,n)),Zr(t,s.shape)}}}},ju={kernelName:be,saveAllInputs:!0,gradFunc:(e,t)=>{const n={};return t.forEach(((t,s)=>{n[s]=()=>e.clone()})),n}},Ku={kernelName:ke,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Bo(n)}}},qu={kernelName:ve,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Bo(n)}}},Xu={kernelName:Ne,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>bi(e,Ao(Li(Fr(1),to(ja(n,"float32")))))}}},Yu={kernelName:Ie,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const t=Ao(Or(Fr(1),to(ja(n,"float32"))));return bi(e,t)}}}},Ju={kernelName:Se,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,a=vi(n.shape,s.shape);return{a:()=>{const t=Or(to(n),to(s));let r=Mi(e,bi(s,t));const i=ki(n.shape,a);return i.length>0&&(r=zi(r,i)),Zr(r,n.shape)},b:()=>{const t=Or(to(n),to(s));let r=so(Mi(e,bi(n,t)));const i=ki(s.shape,a);return i.length>0&&(r=zi(r,i)),Zr(r,s.shape)}}}},Zu={kernelName:$e,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>bi(e,Or(to(ja(n,"float32")),1))}}},Qu={kernelName:Ce,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>bi(e,Li(Fr(1),to(ja(n,"float32"))))}}},ec=la({avgPool3dGrad_:function(e,t,n,s,a,r){const i=ia(e,"dy","avgPool3dGrad"),o=ia(t,"input","avgPool3dGrad");let l=i,u=o,c=!1;4===o.rank&&(c=!0,l=Zr(i,[1,i.shape[0],i.shape[1],i.shape[2],i.shape[3]]),u=Zr(o,[1,o.shape[0],o.shape[1],o.shape[2],o.shape[3]])),E(5===l.rank,(()=>`Error in avgPool3dGrad: dy must be rank 5 but got rank ${l.rank}.`)),E(5===u.rank,(()=>`Error in avgPool3dGrad: input must be rank 5 but got rank ${u.rank}.`)),null!=r&&E(O(a),(()=>`Error in avgPool3dGrad: pad must be an integer when using, dimRoundingMode ${r} but got pad ${a}.`));const 
h={dy:l,input:u},p={filterSize:n,strides:s,pad:a,dimRoundingMode:r},d=Zs.runKernel(Re,h,p);return c?Zr(d,[d.shape[1],d.shape[2],d.shape[3],d.shape[4]]):d}}),tc={kernelName:Ae,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{filterSize:a,strides:r,pad:i,dimRoundingMode:o}=n;return{x:()=>ec(e,s,a,r,i,o)}}},nc=la({avgPoolGrad_:function(e,t,n,s,a){const r=ia(e,"dy","avgPoolGrad"),i=ia(t,"input","avgPoolGrad");E(i.rank===r.rank,(()=>`Rank of input (${i.rank}) does not match rank of dy (${r.rank})`));let o=i,l=r,u=!1;3===i.rank&&(u=!0,o=Zr(i,[1,i.shape[0],i.shape[1],i.shape[2]]),l=Zr(r,[1,r.shape[0],r.shape[1],r.shape[2]])),E(4===l.rank,(()=>`Error in avgPoolGrad: dy must be rank 4 but got rank ${l.rank}.`)),E(4===o.rank,(()=>`Error in avgPoolGrad: input must be rank 4 but got rank ${o.rank}.`));const c={dy:l,input:o},h={filterSize:n,strides:s,pad:a},p=Zs.runKernel(Ee,c,h);return u?Zr(p,[p.shape[1],p.shape[2],p.shape[3]]):p}}),sc={kernelName:Te,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{filterSize:a,strides:r,pad:i}=n;return{x:()=>nc(e,s,a,r,i)}}},ac={kernelName:Fe,inputsToSave:["a","b"],gradFunc:(e,t,n)=>{const[s,a]=t,{transposeA:r,transposeB:i}=n;return r||i?!r&&i?{a:()=>il(e,a,!1,!1),b:()=>il(e,s,!0,!1)}:r&&!i?{a:()=>il(a,e,!1,!0),b:()=>il(s,e,!1,!1)}:{a:()=>il(a,e,!0,!0),b:()=>il(e,s,!0,!0)}:{a:()=>il(e,a,!1,!0),b:()=>il(s,e,!0,!1)}}},rc=la({spaceToBatchND_:function(e,t,n){const s=ia(e,"x","spaceToBatchND");E(s.rank>=1+t.length,(()=>`input rank ${s.rank} should be > than [blockShape] ${t.length}`)),E(n.length===t.length,(()=>`paddings.shape[0] ${n.length} must be equal to [blockShape] ${t.length}`)),E(s.shape.reduce(((e,s,a)=>a>0&&a<=t.length?e&&(s+n[a-1][0]+n[a-1][1])%t[a-1]==0:e),!0),(()=>`input spatial dimensions ${s.shape.slice(1)} with paddings ${n.toString()} must be divisible by blockShapes ${t.toString()}`));const a={x:s},r={blockShape:t,paddings:n};return Zs.runKernel(zn,a,r)}}),ic={kernelName:De,gradFunc:(e,t,n)=>{const{blockShape:s,crops:a}=n;return{x:()=>rc(e,s,a)}}},oc={kernelName:"BroadcastTo",gradFunc:(e,t,n)=>{const s=n,a=s.inputShape,r=s.shape,i=Array.from(r);for(let e=a.length-1;e>=0;e--)if(a[e]===r[e])i[e]=1;else if(1!==a[e])throw new Error(`broadcastTo(): [${a}] cannot be broadcast to [${r}].`);const o=[];for(let e=0;e<i.length;e++)i[e]>1&&o.push(e);return{x:()=>zi(e,o,!0)}}},lc={kernelName:Oe,gradFunc:e=>({x:()=>e.clone()})},uc={kernelName:Me,gradFunc:e=>({x:()=>Bo(e)})},cc={kernelName:Le,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{clipValueMin:a,clipValueMax:r}=n;return{x:()=>Po(Bi(Ri(s,a),Al(s,r)),e,Bo(e))}}},hc={kernelName:Pe,inputsToSave:["x"],gradFunc:Vu.gradFunc},pc={kernelName:Be,saveAllInputs:!0,gradFunc:(e,t,n)=>{const s=t.map((e=>e.shape)),{axis:a}=n,r=B(a,t[0].shape)[0],i=s.map((e=>e[r]));return Eo(e,i,r).map((e=>()=>e))}},dc={kernelName:We,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const[s,a]=t,{dilations:r,strides:i,pad:o,dataFormat:l}=n;return E(Xr(r),(()=>`Error in gradient of conv2D: dilation rates greater than 1 are not yet supported in gradients. 
Got dilations '${r}'`)),{x:()=>di(s.shape,e,a,i,o,l),filter:()=>Xo(s,e,a.shape,i,o,l)}}},fc={kernelName:Ue,inputsToSave:["dy","filter"],gradFunc:(e,t,n)=>{const[s,a]=t,{strides:r,pad:i,dataFormat:o,dimRoundingMode:l}=n;return{dy:()=>hi(e,a,r,i,o,1,l),filter:()=>Xo(e,s,a.shape,r,i,o,l)}}},mc=la({conv3DBackpropFilter_:function(e,t,n,s,a){let r=e;4===e.rank&&(r=Zr(e,[1,e.shape[0],e.shape[1],e.shape[2],e.shape[3]]));let i=t;4===i.rank&&(i=Zr(t,[1,t.shape[0],t.shape[1],t.shape[2],t.shape[3]])),E(5===r.rank,(()=>`Error in conv3dDerFilter: input must be rank 5, but got shape ${r.shape}.`)),E(5===i.rank,(()=>`Error in conv3dDerFilter: dy must be rank 5, but got shape ${i.shape}.`)),E(5===n.length,(()=>`Error in conv3dDerFilter: filterShape must be length 5, but got ${n}.`)),E(r.shape[4]===n[3],(()=>`Error in conv3dDerFilter: depth of input ${r.shape[4]}) must match input depth in filter (${n[3]}.`)),E(i.shape[4]===n[4],(()=>`Error in conv3dDerFilter: depth of dy (${i.shape[4]}) must match output depth for filter (${n[4]}).`));const o={x:r,dy:i},l={strides:s,pad:a,filterShape:n};return Zs.runKernel(He,o,l)}}),gc=la({conv3DBackpropInput_:function(e,t,n,s,a){E(e.length===t.rank,(()=>`Length of inShape (${e.length}) and rank of dy (${t.rank}) must match`));let r=e,i=t,o=!1;4===t.rank&&(o=!0,i=Zr(t,[1,t.shape[0],t.shape[1],t.shape[2],t.shape[3]]),r=[1,e[0],e[1],e[2],e[3]]);const l=r[4],u=i.shape[4];E(5===r.length,(()=>`Error in conv3dDerInput: inShape must be length 5, but got length ${r.length}.`)),E(5===i.rank,(()=>`Error in conv3dDerInput: dy must be rank 5, but got rank ${i.rank}`)),E(5===n.rank,(()=>`Error in conv3dDerInput: filter must be rank 5, but got rank ${n.rank}`)),E(l===n.shape[3],(()=>`Error in conv3dDerInput: depth of input (${l}) must match input depth for filter ${n.shape[3]}.`)),E(u===n.shape[4],(()=>`Error in conv3dDerInput: depth of output (${u}) must match output depth for filter ${n.shape[4]}.`));const c={dy:i,filter:n},h={pad:a,strides:s,inputShape:r},p=Zs.runKernel(je,c,h);return o?Zr(p,[p.shape[1],p.shape[2],p.shape[3],p.shape[4]]):p}}),yc={kernelName:Ge,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const{dilations:s,strides:a,pad:r}=n;E(Xr(s),(()=>`Error in gradient of conv3D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${s}'`));const[i,o]=t;return{x:()=>gc(i.shape,e,o,a,r),filter:()=>mc(i,e,o.shape,a,r)}}},bc=la({sin_:function(e){const t={x:ia(e,"x","sin")};return Zs.runKernel(Rn,t)}}),xc={kernelName:Ke,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Mi(so(bc(ja(n,"float32"))),e)}}},wc=la({sinh_:function(e){const t={x:ia(e,"x","sinh")};return Zs.runKernel(Fn,t)}}),kc={kernelName:qe,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Mi(wc(ja(n,"float32")),e)}}},vc=la({cumsum_:function(e,t=0,n=!1,s=!1){const a={x:ia(e,"x","cumsum")},r={axis:t,exclusive:n,reverse:s};return Zs.runKernel(Xe,a,r)}}),Nc={kernelName:Xe,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{axis:a,exclusive:r,reverse:i}=n;return{x:()=>{const t=Zi([a],s.rank);let n=vc(e,a,r,!i);return null!=t&&(n=Wo(n,t)),n}}}},Ic={kernelName:Qe,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const{dilations:s,strides:a,pad:r,dimRoundingMode:i}=n,o=null==s?[1,1]:s;E(Xr(o),(()=>`Error in gradient of depthwiseConv2dNative: dilation rates greater than 1 are not yet supported. 
Got dilations '${o}'`));const[l,u]=t;return E(4===l.rank,(()=>`Error in gradient of depthwiseConv2dNative: input must be rank 4, but got rank ${l.rank}.`)),E(4===u.rank,(()=>`Error in gradient of depthwiseConv2dNative: filter must be rank 4, but got rank ${u.rank}.`)),E(l.shape[3]===u.shape[2],(()=>`Error in gradient of depthwiseConv2d: number of input channels (${l.shape[3]}) must match the inChannels dimension in filter ${u.shape[2]}.`)),E(Yr(a,o),(()=>`Error in gradient of depthwiseConv2d: Either strides or dilations must be 1. Got strides ${a} and dilations '${o}'.`)),null!=i&&E(O(r),(()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${i} but got pad ${r}.`)),{x:()=>al(l.shape,e,u,a,r,s,i),filter:()=>sl(l,e,u.shape,a,r,s,i)}}},$c={kernelName:st,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const[s,a]=t,r={x:s,filter:a,dy:e},i={x:s,filter:a,dy:e};return{x:()=>Zs.runKernel(at,r,n),filter:()=>Zs.runKernel(rt,i,n)}}},Cc={kernelName:ot,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t,s={dy:e,y:n};return{x:()=>Zs.runKernel(lt,s)}}},Sc={kernelName:ut,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t,s=Mi(_i(so(to(n))),2/Math.sqrt(Math.PI));return{x:()=>Mi(e,s)}}},Tc={kernelName:ht,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Mi(e,n)}}},Ec={kernelName:pt,inputsToSave:["input"],gradFunc:(e,t)=>{const[n]=t;return{input:()=>Zr(e,n.shape)}}},Ac={kernelName:dt,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Mi(e,_i(n))}}},Rc={kernelName:yt,gradFunc:e=>({x:()=>Bo(e)})},Fc={kernelName:bt,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,a=vi(n.shape,s.shape);return{a:()=>{const t=bi(e,ja(s,"float32")),r=ki(n.shape,a);return r.length>0?Zr(zi(t,r),n.shape):t},b:()=>{let t=Mi(e,ja(n,"float32"));const r=ki(s.shape,a);r.length>0&&(t=Zr(zi(t,r),s.shape));const i=to(s);return so(bi(t,ja(i,"float32")))}}}},Dc=la({rsqrt_:function(e){const t={x:ia(e,"x","rsqrt")};return Zs.runKernel(Cn,t)}}),_c={kernelName:xt,inputsToSave:["x","mean","variance","scale"],gradFunc:(e,t,n)=>{const{varianceEpsilon:s}=n,[a,r,i,o]=t,l=null==o?Fr(1):o,u=ki(r.shape,a.shape),c=[];if(1===r.rank){for(let e=0;e<a.shape.length-1;++e)c.push(a.shape[e]);c.push(1)}const h=Li(a,r),p=Mi(e,l),d=Dc(Or(i,Fr(s))),f=Mi(Mi(Mi(d,d),d),Fr(-.5));return{x:()=>1===r.rank?Zr(Mi(Mi(e,$i(Zr(d,[1,1,1,r.shape[0]]),c)),l),a.shape):Zr(Mi(Mi(e,d),l),a.shape),mean:()=>{let e=Mi(Mi(d,Fr(-1)),p);return 1===r.rank&&(e=zi(e,u)),Zr(e,r.shape)},variance:()=>{let e=Mi(Mi(f,h),p);return 1===r.rank&&(e=zi(e,u)),Zr(e,r.shape)},scale:()=>{const t=Mi(h,d);let n=Mi(e,t);return 1===r.rank&&(n=zi(n,u)),Zr(n,r.shape)},offset:()=>{let t=e;return 1===r.rank&&(t=zi(t,u)),Zr(t,r.shape)}}}},Oc=la({unsortedSegmentSum_:function(e,t,n){const s=ia(e,"x","unsortedSegmentSum"),a=ia(t,"segmentIds","unsortedSegmentSum","int32");E(O(n),(()=>"numSegments must be of dtype int"));const r={x:s,segmentIds:a},i={numSegments:n};return Zs.runKernel(es,r,i)}}),Mc={kernelName:wt,inputsToSave:["x","indices"],gradFunc:(e,t,n)=>{const[s,a]=t,{axis:r}=n,i=B(r,s.shape)[0];return{x:()=>{const t=s.shape,n=a.size,o=t.slice(0,i),l=o.length,u=t.slice(r,t.length).slice(1),c=u.length,h=Lc(0,l),p=Lc(l+1,l+1+c),d=zc([o,[n],u]),f=Zr(e,d),m=Zr(a,[n]),g=zc([[l],h,p]),y=Wo(f,g);let b=Oc(y,m,s.shape[i]);const x=Qi(g);return b=Wo(b,x),b},indices:()=>a}}};function Lc(e,t){const n=[];for(let s=e;s<t;++s)n.push(s);return n}function zc(e){const t=[];for(let n=0;n<e.length;++n)for(let s=0;s<e[n].length;++s)t.push(e[n][s]);return t}const 
Pc={kernelName:Nt,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t;return{a:()=>Bo(n),b:()=>Bo(s)}}},Bc={kernelName:It,gradFunc:e=>({x:()=>ja(e,"float32")})},Wc={kernelName:St,gradFunc:e=>({x:()=>Bo(e)})},Vc={kernelName:Tt,gradFunc:e=>({x:()=>Bo(e)})},Uc={kernelName:Et,gradFunc:e=>({x:()=>Bo(e)})},Gc={kernelName:At,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{alpha:a}=n,r=Ai(s,0);return{x:()=>Po(r,e,Mi(e,a))}}},Hc={kernelName:Ot,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>bi(e,Or(n,1))}}},jc={kernelName:_t,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>bi(e,ja(n,"float32"))}}},Kc={kernelName:"LogSoftmax",inputsToSave:[],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s]=t,{axis:a}=n;return{logits:()=>{const t=_i(s);return Li(e,Mi(zi(e,a,!0),t))}}}},qc=la({localResponseNormalizationBackprop_:function(e,t,n,s=5,a=1,r=1,i=.5){const o={x:e,y:t,dy:n},l={depthRadius:s,bias:a,alpha:r,beta:i};return Zs.runKernel(Bt,o,l)}}),Xc={kernelName:Pt,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s,a]=t,{depthRadius:r,bias:i,alpha:o,beta:l}=n;return{x:()=>qc(s,a,e,r,i,o,l)}}};function Yc(e,t,n,s){return t.rank<n.rank&&(t=Zr(t,Yi(t.shape,s))),e.rank<n.rank&&(e=Zr(e,Yi(e.shape,s))),{x:()=>Mi(e,ja(Ni(n,t),e.dtype))}}const Jc={kernelName:Wt,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const s=n,{reductionIndices:a}=s,r=t[0],i=Yc(e,t[1],r,B(a,r.shape));return{x:()=>i.x()}}},Zc=la({less_:function(e,t){let n=ia(e,"a","less"),s=ia(t,"b","less");[n,s]=Gs(n,s),vi(n.shape,s.shape);const a={a:n,b:s};return Zs.runKernel(Rt,a)}}),Qc={kernelName:Vt,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t;return{a:()=>Mi(e,ja(Ri(n,s),"float32")),b:()=>Mi(e,ja(Zc(n,s),"float32"))}}},eh=la({maxPool3dGrad_:function(e,t,n,s,a,r,i){const o=ia(e,"dy","maxPool3dGrad"),l=ia(t,"input","maxPool3dGrad"),u=ia(n,"output","maxPool3dGrad");let c=o,h=l,p=u,d=!1;4===l.rank&&(d=!0,c=Zr(o,[1,o.shape[0],o.shape[1],o.shape[2],o.shape[3]]),h=Zr(l,[1,l.shape[0],l.shape[1],l.shape[2],l.shape[3]]),p=Zr(u,[1,u.shape[0],u.shape[1],u.shape[2],u.shape[3]])),E(5===c.rank,(()=>`Error in maxPool3dGrad: dy must be rank 5 but got rank ${c.rank}.`)),E(5===h.rank,(()=>`Error in maxPool3dGrad: input must be rank 5 but got rank ${h.rank}.`)),E(5===p.rank,(()=>`Error in maxPool3dGrad: output must be rank 5 but got rank ${p.rank}.`)),null!=i&&E(O(r),(()=>`Error in maxPool3dGrad: pad must be an integer when using, dimRoundingMode ${i} but got pad ${r}.`));const f={dy:c,input:h,output:p},m={filterSize:s,strides:a,pad:r,dimRoundingMode:i},g=Zs.runKernel(jt,f,m);return d?Zr(g,[g.shape[1],g.shape[2],g.shape[3],g.shape[4]]):g}}),th={kernelName:Ht,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s,a]=t,{filterSize:r,strides:i,pad:o,dimRoundingMode:l}=n;return{x:()=>eh(e,s,a,r,i,o,l)}}},nh=la({maxPoolGrad_:function(e,t,n,s,a,r,i){const o=ia(e,"dy","maxPoolGrad"),l=ia(t,"input","maxPoolGrad"),u=ia(n,"output","maxPoolGrad");E(l.rank===o.rank,(()=>`Rank of input (${l.rank}) does not match rank of dy (${o.rank})`)),E(4===o.rank,(()=>`Error in maxPoolGrad: dy must be rank 4 but got rank ${o.rank}.`)),E(4===l.rank,(()=>`Error in maxPoolGrad: input must be rank 4 but got rank ${l.rank}.`)),null!=i&&E(O(r),(()=>`Error in maxPoolGrad: pad must be an integer when using, dimRoundingMode ${i} but got pad ${r}.`));const c={dy:o,input:l,output:u},h={filterSize:s,strides:a,pad:r,dimRoundingMode:i};return 
Zs.runKernel(Gt,c,h)}}),sh={kernelName:Ut,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s,a]=t,{filterSize:r,strides:i,pad:o}=n;return{x:()=>nh(e,s,a,r,i,o)}}},ah={kernelName:qt,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{axis:a}=n,r=B(a,s.shape),i=D(Xi(s.shape,r)[1]);return{x:()=>{const t=s.shape.slice();r.forEach((e=>{t[e]=1}));const n=Zr(e,t);return bi(Mi(n,oo(s.shape,"float32")),i)}}}},rh={kernelName:Xt,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const s=n,{axis:a}=s,[r,i]=t,o=Yc(e,i,r,B(a,r.shape));return{x:()=>o.x()}}},ih={kernelName:Yt,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t;return{a:()=>Mi(e,ja(Al(n,s),"float32")),b:()=>Mi(e,ja(Ai(n,s),"float32"))}}},oh={kernelName:Jt,inputsToSave:["x"],gradFunc:(e,t,n)=>{const s=t[0],{paddings:a}=n,r=a.map((e=>e[0]));return{x:()=>vo(e,r,s.shape)}}},lh={kernelName:Zt,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,a=vi(n.shape,s.shape);return{a:()=>{const t=ki(n.shape,a);return t.length>0?Zr(zi(e,t),n.shape):e},b:()=>{const t=Mi(e,so(Ti(bi(n,s)))),r=ki(s.shape,a);return r.length>0?Zr(zi(t,r),s.shape):t}}}},uh={kernelName:en,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,a=vi(n.shape,s.shape);return{a:()=>{const t=Mi(e,ja(s,"float32")),r=ki(n.shape,a);return r.length>0?Zr(zi(t,r),n.shape):t},b:()=>{const t=Mi(e,ja(n,"float32")),r=ki(s.shape,a);return r.length>0?Zr(zi(t,r),s.shape):t}}}},ch={kernelName:tn,gradFunc:e=>({x:()=>so(e)})},hh={kernelName:ln,inputsToSave:["indices"],gradFunc:(e,t)=>{const n=t[0];return{indices:()=>io(n.shape,"float32")}}},ph={kernelName:on,gradFunc:e=>({x:()=>Bo(e)})},dh={kernelName:un,saveAllInputs:!0,gradFunc:(e,t,n)=>{const{axis:s}=n;return Lo(e,s).map((e=>()=>e))}},fh={kernelName:cn,inputsToSave:["x"],gradFunc:(e,t,n)=>{const s=t[0],{paddings:a}=n,r=a.map((e=>e[0]));return{x:()=>vo(e,r,s.shape)}}},mh={kernelName:hn,inputsToSave:["a","b"],outputsToSave:[!0],gradFunc:(e,t)=>{const[n,s,a]=t,r=n,i=s,o=vi(r.shape,i.shape);return{a:()=>{const t=ja(i,"float32");let n=Mi(e,Mi(t,Dl(r,Li(t,Fr(1)))));const s=ki(r.shape,o);return s.length>0&&(n=zi(n,s)),Zr(n,r.shape)},b:()=>{const t=Ai(r,0),n=Po(t,Di(r),Bo(r));let s=Mi(e,Mi(a,n));const l=ki(i.shape,o);return l.length>0&&(s=zi(s,l)),Zr(s,i.shape)}}}},gh={kernelName:pn,inputsToSave:["x","alpha"],gradFunc:(e,t)=>{const[n,s]=t,a=Ai(n,0);return{x:()=>Po(a,e,Mi(e,s)),alpha:()=>{let t=Po(a,Bo(e),Mi(e,n));const r=ki(s.shape,e.shape);return r.length>0&&(t=zi(t,r)),Zr(t,s.shape)}}}},yh={kernelName:it,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,a=vi(n.shape,s.shape);return{a:()=>{const t=bi(e,ja(s,"float32")),r=ki(n.shape,a);return r.length>0?Zr(zi(t,r),n.shape):t},b:()=>{let t=Mi(e,ja(n,"float32"));const r=ki(s.shape,a);r.length>0&&(t=Zr(zi(t,r),s.shape));const i=to(s);return 
so(bi(t,ja(i,"float32")))}}}},bh={kernelName:gn,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>bi(e,so(to(n)))}}},xh={kernelName:Nn,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t,s=Mi(Al(n,6),Jo(n));return{x:()=>Mi(e,ja(s,"float32"))}}},wh={kernelName:yn,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Mi(e,ja(Jo(n),"float32"))}}},kh={kernelName:bn,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Zr(e,n.shape)}}},vh={kernelName:kn,inputsToSave:["images"],gradFunc:(e,t,n)=>{const[s]=t,a={dy:e,images:s};return{images:()=>Zs.runKernel(vn,a,n)}}},Nh={kernelName:xn,inputsToSave:["images"],gradFunc:(e,t,n)=>{const[s]=t,a={dy:e,images:s};return{images:()=>Zs.runKernel(wn,a,n)}}},Ih={kernelName:In,gradFunc:(e,t,n)=>{const{dims:s}=n,a=B(s,e.shape);return{x:()=>bo(e,a)}}},$h={kernelName:$n,gradFunc:e=>({x:()=>Bo(e)})},Ch={kernelName:Cn,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>so(bi(e,Mi(Dl(n,1.5),2)))}}},Sh=la({logicalNot_:function(e){const t={x:ia(e,"x","logicalNot","bool")};return Zs.runKernel(Lt,t)}}),Th={kernelName:Tn,inputsToSave:["condition"],gradFunc:(e,t)=>{const[n]=t;return{condition:()=>ja(Bo(n),"float32"),t:()=>Mi(e,ja(n,e.dtype)),e:()=>Mi(e,ja(Sh(n),e.dtype))}}},Eh={kernelName:En,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const t=Ai(n,Fr(0)),s=Fr(yu),a=Fr(bu),r=Mi(e,a),i=Mi(Mi(e,s),_i(ja(n,"float32")));return Po(t,r,i)}}}},Ah={kernelName:_n,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Mi(e,Mi(n,Li(Fr(1),n)))}}},Rh={kernelName:Dn,gradFunc:e=>({x:()=>Bo(e)})},Fh=la({cos_:function(e){const t={x:ia(e,"x","cos")};return Zs.runKernel(Ke,t)}}),Dh={kernelName:Rn,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Mi(Fh(ja(n,"float32")),e)}}},_h=la({cosh_:function(e){const t={x:ia(e,"x","cosh")};return Zs.runKernel(qe,t)}}),Oh={kernelName:Fn,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Mi(_h(ja(n,"float32")),e)}}},Mh={kernelName:An,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{begin:a,size:r}=n,i=s.shape,[o,l]=xr(s,a,r),u=[];for(let t=0;t<e.rank;t++)u.push([o[t],i[t]-o[t]-l[t]]);return{x:()=>uo(e,u)}}},Lh={kernelName:Bn,outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s]=t,{dim:a}=n,r=Mi(e,s);return{logits:()=>Li(r,Mi(zi(r,[a],!0),s))}}},zh={kernelName:On,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Mi(e,ko(n))}}},Ph=la({batchToSpaceND_:function(e,t,n){const s=ia(e,"x","batchToSpaceND"),a=t.reduce(((e,t)=>e*t));E(s.rank>=1+t.length,(()=>`input rank is ${s.rank} but should be > than blockShape.length ${t.length}`)),E(n.length===t.length,(()=>`crops.length is ${n.length} but should be equal to blockShape.length ${t.length}`)),E(s.shape[0]%a==0,(()=>`input tensor batch is ${s.shape[0]} but is not divisible by the product of the elements of blockShape ${t.join(" * ")} === ${a}`));const r={x:s},i={blockShape:t,crops:n};return 
Zs.runKernel(De,r,i)}}),Bh={kernelName:zn,gradFunc:(e,t,n)=>{const{blockShape:s,paddings:a}=n;return{x:()=>Ph(e,s,a)}}},Wh={kernelName:Pn,gradFunc:(e,t,n)=>{const{axis:s}=n;return{x:()=>ii(e,s)}}},Vh=[Vu,Uu,Gu,Hu,ju,Ku,qu,Xu,Yu,Ju,Zu,Qu,tc,sc,ac,ic,oc,lc,uc,cc,hc,pc,fc,dc,yc,xc,kc,Nc,Ic,$c,yh,Cc,Sc,Tc,Ec,Ac,Fc,Rc,_c,Mc,Pc,Bc,Wc,Vc,Uc,Gc,Hc,jc,Kc,Xc,Jc,Jc,Qc,th,sh,ah,rh,ih,oh,lh,uh,ch,hh,ph,dh,fh,fh,mh,gh,bh,xh,wh,kh,vh,Nh,Ih,$h,Ch,Th,Eh,Ah,Rh,Dh,Oh,Mh,Lh,zh,Bh,Bh,Wh,Wh,{kernelName:Mn,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>bi(e,Mi(Ao(ja(n,"float32")),2))}}},{kernelName:Wn,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,a=Fr(2);return{a:()=>Mi(e,Mi(a,Li(n,s))),b:()=>Mi(e,Mi(a,Li(s,n)))}}},{kernelName:Vn,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Mi(e,Mi(ja(n,"float32"),2))}}},{kernelName:ns,gradFunc:e=>({x:()=>Bo(e)})},{kernelName:Un,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,a=vi(n.shape,s.shape);return{a:()=>{let t=e;const s=ki(n.shape,a);return s.length>0&&(t=zi(t,s)),Zr(t,n.shape)},b:()=>{let t=e;const n=ki(s.shape,a);return n.length>0&&(t=zi(t,n)),Zr(so(t),s.shape)}}}},{kernelName:Ln,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,a=s.shape.slice(),{axis:r}=n;B(r,s.shape).forEach((e=>{a[e]=1}));const i=Zr(e,a),o=Mi(i,oo(s.shape,"float32"));return{x:()=>o}}},{kernelName:jn,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>bi(e,to(Fh(n)))}}},{kernelName:Kn,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Mi(Li(Fr(1),to(n)),e)}}},{kernelName:qn,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{reps:a}=n;return{x:()=>{let t=Bo(s);if(1===s.rank)for(let n=0;n<a[0];++n)t=Or(t,vo(e,[n*s.shape[0]],[s.shape[0]]));else if(2===s.rank)for(let n=0;n<a[0];++n)for(let r=0;r<a[1];++r)t=Or(t,vo(e,[n*s.shape[0],r*s.shape[1]],[s.shape[0],s.shape[1]]));else if(3===s.rank)for(let n=0;n<a[0];++n)for(let r=0;r<a[1];++r)for(let i=0;i<a[2];++i)t=Or(t,vo(e,[n*s.shape[0],r*s.shape[1],i*s.shape[2]],[s.shape[0],s.shape[1],s.shape[2]]));else{if(4!==s.rank)throw new Error(`Gradient for tile operation is not implemented for rank-${s.rank} tensors yet.`);for(let n=0;n<a[0];++n)for(let r=0;r<a[1];++r)for(let i=0;i<a[2];++i)for(let o=0;o<a[3];++o)t=Or(t,vo(e,[n*s.shape[0],r*s.shape[1],i*s.shape[2],o*s.shape[3]],[s.shape[0],s.shape[1],s.shape[2],s.shape[3]]))}return t}}}},{kernelName:Jn,gradFunc:(e,t,n)=>{const s=n,{perm:a}=s,r=Qi(a);return{x:()=>Wo(e,r)}}},{kernelName:Qn,gradFunc:(e,t,n)=>{const s=n,{axis:a}=s;return{value:()=>Fo(e,a)}}},{kernelName:es,inputsToSave:["segmentIds"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>function(e,t){const n=Ui(t,Bo(t)),s=Ei(e,n);let a=Ri(t,Fr(0,"int32"));const r=s.rank-a.rank;for(let e=0;e<r;++e)a=Ii(a,e+1);a=Bi(a,oo(s.shape,"bool"));const i=Bo(s);return Po(a,s,i)}(e,n)}}},{kernelName:ts,gradFunc:e=>({x:()=>Bo(e)})}];for(const e of Vh)fs(e);_s().prototype.abs=function(){return this.throwIfDisposed(),_r(this)};const Uh=la({acos_:function(e){const t={x:ia(e,"x","acos")};return Zs.runKernel(me,t)}});_s().prototype.acos=function(){return this.throwIfDisposed(),Uh(this)};const Gh=la({acosh_:function(e){const t={x:ia(e,"x","acosh")};return Zs.runKernel(ge,t)}});_s().prototype.acosh=function(){return this.throwIfDisposed(),Gh(this)},_s().prototype.add=function(e){return this.throwIfDisposed(),Or(this,e)},_s().prototype.all=function(e,t){return this.throwIfDisposed(),Mr(this,e,t)},_s().prototype.any=function(e,t){return this.throwIfDisposed(),Lr(this,e,t)},_s().prototype.argMax=function(e){return 
this.throwIfDisposed(),zr(this,e)};const Hh=la({argMin_:function(e,t=0){const n={x:ia(e,"x","argMin")},s={axis:t};return Zs.runKernel(ve,n,s)}});_s().prototype.argMin=function(e){return this.throwIfDisposed(),Hh(this,e)},_s().prototype.asScalar=function(){return this.throwIfDisposed(),E(1===this.size,(()=>"The array must have only 1 element.")),Zr(this,[])},_s().prototype.asType=function(e){return this.throwIfDisposed(),ja(this,e)},_s().prototype.as1D=function(){return this.throwIfDisposed(),Zr(this,[this.size])},_s().prototype.as2D=function(e,t){return this.throwIfDisposed(),Zr(this,[e,t])},_s().prototype.as3D=function(e,t,n){return this.throwIfDisposed(),Zr(this,[e,t,n])},_s().prototype.as4D=function(e,t,n,s){return this.throwIfDisposed(),Zr(this,[e,t,n,s])},_s().prototype.as5D=function(e,t,n,s,a){return this.throwIfDisposed(),Zr(this,[e,t,n,s,a])};const jh=la({asin_:function(e){const t={x:ia(e,"x","asin")};return Zs.runKernel(Ne,t)}});_s().prototype.asin=function(){return this.throwIfDisposed(),jh(this)};const Kh=la({asinh_:function(e){const t={x:ia(e,"x","asinh")};return Zs.runKernel(Ie,t)}});_s().prototype.asinh=function(){return this.throwIfDisposed(),Kh(this)};const qh=la({atan_:function(e){const t={x:ia(e,"x","atan")};return Zs.runKernel($e,t)}});_s().prototype.atan=function(){return this.throwIfDisposed(),qh(this)};const Xh=la({atan2_:function(e,t){let n=ia(e,"a","atan2"),s=ia(t,"b","atan2");[n,s]=Gs(n,s);const a={a:n,b:s};return Zs.runKernel(Se,a)}});_s().prototype.atan2=function(e){return this.throwIfDisposed(),Xh(this,e)};const Yh=la({atanh_:function(e){const t={x:ia(e,"x","atanh")};return Zs.runKernel(Ce,t)}});_s().prototype.atanh=function(){return this.throwIfDisposed(),Yh(this)},_s().prototype.avgPool=function(e,t,n,s){return this.throwIfDisposed(),Qr(this,e,t,n,s)},_s().prototype.batchToSpaceND=function(e,t){return this.throwIfDisposed(),Ph(this,e,t)},_s().prototype.batchNorm=function(e,t,n,s,a){return this.throwIfDisposed(),ti(this,e,t,n,s,a)},_s().prototype.broadcastTo=function(e){return this.throwIfDisposed(),zo(this,e)},_s().prototype.cast=function(e){return this.throwIfDisposed(),ja(this,e)};const Jh=la({ceil_:function(e){const t={x:ia(e,"x","ceil")};return Zs.runKernel(Me,t)}});_s().prototype.ceil=function(){return this.throwIfDisposed(),Jh(this)},_s().prototype.clipByValue=function(e,t){return this.throwIfDisposed(),ri(this,e,t)},_s().prototype.concat=function(e,t){return this.throwIfDisposed(),e instanceof Ds&&(e=[e]),ii([this,...e],t)},_s().prototype.conv1d=function(e,t,n,s,a,r){return this.throwIfDisposed(),pi(this,e,t,n,s,a,r)},_s().prototype.conv2dTranspose=function(e,t,n,s,a){return this.throwIfDisposed(),fi(this,e,t,n,s,a)},_s().prototype.conv2d=function(e,t,n,s,a,r){return this.throwIfDisposed(),hi(this,e,t,n,s,a,r)},_s().prototype.cos=function(){return this.throwIfDisposed(),Fh(this)},_s().prototype.cosh=function(){return this.throwIfDisposed(),_h(this)},_s().prototype.cumsum=function(e,t,n){return this.throwIfDisposed(),vc(this,e,t,n)};const Zh=la({depthToSpace_:function(e,t,n="NHWC"){const s=ia(e,"x","depthToSpace"),a="NHWC"===n?s.shape[1]:s.shape[2],r="NHWC"===n?s.shape[2]:s.shape[3],i="NHWC"===n?s.shape[3]:s.shape[1];E(a*t>=0,(()=>`Negative dimension size caused by overflow when multiplying\n ${a} and ${t} for depthToSpace with input shape\n ${s.shape}`)),E(r*t>=0,(()=>`Negative dimension size caused by overflow when multiplying\n ${r} and ${t} for depthToSpace with input shape\n ${s.shape}`)),E(i%(t*t)==0,(()=>`Dimension size must be evenly divisible by 
${t*t} but is ${i} for depthToSpace with input shape ${s.shape}`));const o={x:s},l={blockSize:t,dataFormat:n};return Zs.runKernel(Ze,o,l)}});_s().prototype.depthToSpace=function(e,t){return this.throwIfDisposed(),Zh(this,e,t)},_s().prototype.depthwiseConv2d=function(e,t,n,s,a,r){return this.throwIfDisposed(),gi(this,e,t,n,s,a,r)};const Qh=la({dilation2d_:function(e,t,n,s,a=[1,1],r="NHWC"){const i=ia(e,"x","dilation2d"),o=ia(t,"filter","dilation2d");E(3===i.rank||4===i.rank,(()=>`Error in dilation2d: input must be rank 3 or 4, but got rank ${i.rank}.`)),E(3===o.rank,(()=>`Error in dilation2d: filter must be rank 3, but got rank ${o.rank}.`)),E("NHWC"===r,(()=>`Error in dilation2d: Only NHWC is currently supported, but got dataFormat of ${r}`));let l=i,u=!1;3===i.rank&&(l=Zr(i,[1,i.shape[0],i.shape[1],i.shape[2]]),u=!0);const c={x:l,filter:o},h={strides:n,pad:s,dilations:a},p=Zs.runKernel(st,c,h);return u?Zr(p,[p.shape[1],p.shape[2],p.shape[3]]):p}});_s().prototype.dilation2d=function(e,t,n,s,a){return this.throwIfDisposed(),Qh(this,e,t,n,s,a)};const ep=la({divNoNan_:function(e,t){let n=ia(e,"a","div"),s=ia(t,"b","div");[n,s]=Gs(n,s);const a=bi(n,s),r=Bo(a),i=Ni(s,r);return Po(i,r,a)}});_s().prototype.divNoNan=function(e){return this.throwIfDisposed(),ep(this,e)},_s().prototype.div=function(e){return this.throwIfDisposed(),bi(this,e)};const tp=la({dot_:function(e,t){const n=ia(e,"t1","dot"),s=ia(t,"t2","dot");E(!(1!==n.rank&&2!==n.rank||1!==s.rank&&2!==s.rank),(()=>`Error in dot: inputs must all be rank 1 or 2, but got ranks ${n.rank} and ${s.rank}.`));const a=1===n.rank?n.size:n.shape[1],r=1===s.rank?s.size:s.shape[0];if(E(a===r,(()=>`Error in dot: inner dimensions of inputs must match, but got ${a} and ${r}.`)),1===n.rank&&1===s.rank){const e=Zr(n,[1,-1]),t=Zr(s,[-1,1]),a=il(e,t);return Zr(a,[])}if(1===n.rank&&2===s.rank){const e=Zr(n,[1,-1]),t=Zr(s,[s.shape[0],s.shape[1]]),a=il(e,t);return Zr(a,[a.size])}if(2===n.rank&&1===s.rank){const e=Zr(s,[-1,1]),t=il(n,e);return Zr(t,[t.size])}{const e=Zr(s,[s.shape[0],s.shape[1]]);return il(n,e)}}});_s().prototype.dot=function(e){return this.throwIfDisposed(),tp(this,e)},_s().prototype.elu=function(){return this.throwIfDisposed(),xi(this)},_s().prototype.equal=function(e){return this.throwIfDisposed(),Ni(this,e)};const np=la({erf_:function(e){let t=ia(e,"x","erf");E("int32"===t.dtype||"float32"===t.dtype,(()=>"Input dtype must be `int32` or `float32`.")),"int32"===t.dtype&&(t=ja(t,"float32"));const n={x:t};return Zs.runKernel(ut,n)}});_s().prototype.erf=function(){return this.throwIfDisposed(),np(this)},_s().prototype.exp=function(){return this.throwIfDisposed(),_i(this)},_s().prototype.expandDims=function(e){return this.throwIfDisposed(),Ii(this,e)};const sp=la({expm1_:function(e){const t={x:ia(e,"x","expm1")};return Zs.runKernel(dt,t)}});_s().prototype.expm1=function(){return this.throwIfDisposed(),sp(this)},_s().prototype.fft=function(){return this.throwIfDisposed(),Ho(this)},_s().prototype.flatten=function(){return this.throwIfDisposed(),Zr(this,[this.size])},_s().prototype.floor=function(){return this.throwIfDisposed(),Ti(this)},_s().prototype.floorDiv=function(e){return this.throwIfDisposed(),yi(this,e)},_s().prototype.gather=function(e,t){return this.throwIfDisposed(),Ei(this,e,t)},_s().prototype.greaterEqual=function(e){return this.throwIfDisposed(),Ri(this,e)},_s().prototype.greater=function(e){return this.throwIfDisposed(),Ai(this,e)},_s().prototype.ifft=function(){return 
this.throwIfDisposed(),Ko(this)},_s().prototype.irfft=function(){return this.throwIfDisposed(),qo(this)};const ap=la({isFinite_:function(e){const t={x:ia(e,"x","isFinite")};return Zs.runKernel(St,t)}});_s().prototype.isFinite=function(){return this.throwIfDisposed(),ap(this)};const rp=la({isInf_:function(e){const t={x:ia(e,"x","isInf")};return Zs.runKernel(Tt,t)}});_s().prototype.isInf=function(){return this.throwIfDisposed(),rp(this)};const ip=la({isNaN_:function(e){const t={x:ia(e,"x","isNaN")};return Zs.runKernel(Et,t)}});_s().prototype.isNaN=function(){return this.throwIfDisposed(),ip(this)},_s().prototype.leakyRelu=function(e){return this.throwIfDisposed(),Fi(this,e)},_s().prototype.lessEqual=function(e){return this.throwIfDisposed(),Al(this,e)},_s().prototype.less=function(e){return this.throwIfDisposed(),Zc(this,e)};const op=la({localResponseNormalization_:function(e,t=5,n=1,s=1,a=.5){const r=ia(e,"x","localResponseNormalization");E(4===r.rank||3===r.rank,(()=>`Error in localResponseNormalization: x must be rank 3 or 4 but got\n rank ${r.rank}.`)),E(O(t),(()=>`Error in localResponseNormalization: depthRadius must be an integer but got depthRadius ${t}.`));let i=r,o=!1;3===r.rank&&(o=!0,i=Zr(r,[1,r.shape[0],r.shape[1],r.shape[2]]));const l={x:i},u={depthRadius:t,bias:n,alpha:s,beta:a},c=Zs.runKernel(Pt,l,u);return o?Zr(c,[c.shape[1],c.shape[2],c.shape[3]]):c}});_s().prototype.localResponseNormalization=function(e,t,n,s){return this.throwIfDisposed(),op(this,e,t,n,s)};const lp=la({logSigmoid_:function(e){const t=ia(e,"x","logSigmoid");return Rr((e=>({value:so(To(so(e))),gradFunc:t=>Mi(t,ko(so(e)))})))(t)}});_s().prototype.logSigmoid=function(){return this.throwIfDisposed(),lp(this)},_s().prototype.logSoftmax=function(e){return this.throwIfDisposed(),Pi(this,e)},_s().prototype.logSumExp=function(e,t){return this.throwIfDisposed(),Ul(this,e,t)},_s().prototype.log=function(){return this.throwIfDisposed(),Di(this)},_s().prototype.log1p=function(){return this.throwIfDisposed(),Vl(this)},_s().prototype.logicalAnd=function(e){return this.throwIfDisposed(),Bi(this,e)},_s().prototype.logicalNot=function(){return this.throwIfDisposed(),Sh(this)};const up=la({logicalOr_:function(e,t){const n=ia(e,"a","logicalOr","bool"),s=ia(t,"b","logicalOr","bool");vi(n.shape,s.shape);const a={a:n,b:s};return Zs.runKernel(zt,a)}});_s().prototype.logicalOr=function(e){return this.throwIfDisposed(),up(this,e)};const cp=la({logicalXor_:function(e,t){const n=ia(e,"a","logicalXor","bool"),s=ia(t,"b","logicalXor","bool");return vi(n.shape,s.shape),Bi(up(e,t),Sh(Bi(e,t)))}});_s().prototype.logicalXor=function(e){return this.throwIfDisposed(),cp(this,e)},_s().prototype.matMul=function(e,t,n){return this.throwIfDisposed(),il(this,e,t,n)},_s().prototype.maxPool=function(e,t,n,s){return this.throwIfDisposed(),Wi(this,e,t,n,s)},_s().prototype.max=function(e,t){return this.throwIfDisposed(),Oi(this,e,t)},_s().prototype.maximum=function(e){return this.throwIfDisposed(),Ui(this,e)},_s().prototype.mean=function(e,t){return this.throwIfDisposed(),Gi(this,e,t)},_s().prototype.min=function(e,t){return this.throwIfDisposed(),Hi(this,e,t)},_s().prototype.minimum=function(e){return this.throwIfDisposed(),ji(this,e)};const hp=la({mirrorPad_:function(e,t,n){E("reflect"===n||"symmetric"===n,(()=>`Invalid mode. Mode must be either reflect or symmetric. Got ${n}.`));const s=ia(e,"x","mirrorPad");if(0===s.rank)throw new Error("mirrorPad(scalar) is not defined. 
Pass non-scalar to mirrorPad");E(t.length===s.rank,(()=>`Padding doesn't match input. Must be ${s.rank}. Got ${t.length}.`));const a="reflect"===n?1:0;for(let e=0;e<s.rank;e++)E(2===t[e].length,(()=>"Invalid number of paddings. Must be length of 2 each.")),E(t[e][0]>=0&&t[e][0]<=s.shape[e]-a&&t[e][1]>=0&&t[e][1]<=s.shape[e]-a,(()=>`Padding in dimension ${e} cannot be greater than or equal to ${s.shape[e]-a} or less than 0 for input of shape ${s.shape}`));const r={paddings:t,mode:n},i={x:s};return Zs.runKernel(Jt,i,r)}});_s().prototype.mirrorPad=function(e,t){return this.throwIfDisposed(),hp(this,e,t)};const pp=la({mod_:function(e,t){let n=ia(e,"a","mod"),s=ia(t,"b","mod");[n,s]=Gs(n,s);const a={a:n,b:s};return Zs.runKernel(Zt,a)}});_s().prototype.mod=function(e){return this.throwIfDisposed(),pp(this,e)},_s().prototype.mul=function(e){return this.throwIfDisposed(),Mi(this,e)},_s().prototype.neg=function(){return this.throwIfDisposed(),so(this)},_s().prototype.norm=function(e,t,n){return this.throwIfDisposed(),Ol(this,e,t,n)},_s().prototype.notEqual=function(e){return this.throwIfDisposed(),ao(this,e)},_s().prototype.oneHot=function(e,t=1,n=0){return this.throwIfDisposed(),ro(this,e,t,n)},_s().prototype.onesLike=function(){return this.throwIfDisposed(),lo(this)},_s().prototype.pad=function(e,t){return this.throwIfDisposed(),uo(this,e,t)};const dp=la({pool_:function(e,t,n,s,a,r){null==a&&(a=[1,1]),null==r&&(r=1),0===s&&(s="valid");const i=ia(e,"x","maxPool");let o=i,l=!1;3===i.rank&&(l=!0,o=Zr(i,[1,i.shape[0],i.shape[1],i.shape[2]])),E(Yr(r,a),(()=>`Error in pool: Either strides or dilations must be 1. Got strides ${r} and dilations '${a}'`));const u=Br(o.shape,t,r,a,s),c=[u.dilationHeight,u.dilationWidth];let h;h="same"===s?function(e,t){const n=e.map(((e,n)=>e+(e-1)*(t[n]-1))).map((e=>e-1)),s=n.map((e=>Math.floor(e/2))),a=n.map(((e,t)=>e-s[t]));return n.map(((e,t)=>[s[t],a[t]]))}([u.filterHeight,u.filterWidth],c):[[0,0],[0,0]];const p=1===c[0]&&1===c[1],[d,f]=function(e,t,n){const s=n.map((e=>e[0])),a=n.map((e=>e[1])),r=e.concat(s,a),i=t.map(((e,t)=>(e-r[t]%e)%e)),o=a.map(((e,t)=>e+i[t]));return[t.map(((e,t)=>[s[t],o[t]])),t.map(((e,t)=>[0,i[t]]))]}([u.inHeight,u.inWidth],c,h),m=p?s:"valid",g=p?o:rc(o,c,d),y=("avg"===n?()=>Qr(g,t,r,m):()=>Wi(g,t,r,m))(),b=p?y:Ph(y,c,f);return l?Zr(b,[b.shape[1],b.shape[2],b.shape[3]]):b}});_s().prototype.pool=function(e,t,n,s,a){return this.throwIfDisposed(),dp(this,e,t,n,s,a)},_s().prototype.pow=function(e){return this.throwIfDisposed(),Dl(this,e)},_s().prototype.prelu=function(e){return this.throwIfDisposed(),co(this,e)};const fp=la({prod_:function(e,t=null,n=!1){let s=ia(e,"x","prod");"bool"===s.dtype&&(s=ja(s,"int32"));const a={x:s},r={axis:t,keepDims:n};return Zs.runKernel(dn,a,r)}});_s().prototype.prod=function(e,t){return this.throwIfDisposed(),fp(this,e,t)};const mp=la({reciprocal_:function(e){const t={x:ia(e,"x","reciprocal")};return Zs.runKernel(gn,t)}});_s().prototype.reciprocal=function(){return this.throwIfDisposed(),mp(this)},_s().prototype.relu=function(){return this.throwIfDisposed(),yo(this)},_s().prototype.relu6=function(){return this.throwIfDisposed(),Yo(this)},_s().prototype.reshapeAs=function(e){return this.throwIfDisposed(),Zr(this,e.shape)},_s().prototype.reshape=function(e){return this.throwIfDisposed(),Zr(this,e)},_s().prototype.resizeBilinear=function(e,t,n){return this.throwIfDisposed(),Sl(this,e,t,n)},_s().prototype.resizeNearestNeighbor=function(e,t,n){return 
this.throwIfDisposed(),Tl(this,e,t,n)},_s().prototype.reverse=function(e){return this.throwIfDisposed(),bo(this,e)},_s().prototype.rfft=function(){return this.throwIfDisposed(),jo(this)};const gp=la({round_:function(e){const t={x:ia(e,"x","round")};return Zs.runKernel($n,t)}});_s().prototype.round=function(){return this.throwIfDisposed(),gp(this)},_s().prototype.rsqrt=function(){return this.throwIfDisposed(),Dc(this)},_s().prototype.selu=function(){return this.throwIfDisposed(),xo(this)},_s().prototype.separableConv2d=function(e,t,n,s,a,r){return this.throwIfDisposed(),wo(this,e,t,n,s,a,r)},_s().prototype.sigmoid=function(){return this.throwIfDisposed(),ko(this)};const yp=la({sign_:function(e){const t={x:ia(e,"x","sign")};return Zs.runKernel(Dn,t)}});_s().prototype.sign=function(){return this.throwIfDisposed(),yp(this)},_s().prototype.sin=function(){return this.throwIfDisposed(),bc(this)},_s().prototype.sinh=function(){return this.throwIfDisposed(),wc(this)},_s().prototype.slice=function(e,t){return this.throwIfDisposed(),vo(this,e,t)},_s().prototype.softmax=function(e){return this.throwIfDisposed(),So(this,e)},_s().prototype.softplus=function(){return this.throwIfDisposed(),To(this)},_s().prototype.spaceToBatchND=function(e,t){return this.throwIfDisposed(),rc(this,e,t)},_s().prototype.split=function(e,t){return this.throwIfDisposed(),Eo(this,e,t)},_s().prototype.sqrt=function(){return this.throwIfDisposed(),Ao(this)},_s().prototype.square=function(){return this.throwIfDisposed(),to(this)},_s().prototype.squaredDifference=function(e){return this.throwIfDisposed(),Wl(this,e)},_s().prototype.squeeze=function(e){return this.throwIfDisposed(),Ro(this,e)},_s().prototype.stack=function(e,t){this.throwIfDisposed();const n=e instanceof Ds?[this,e]:[this,...e];return Fo(n,t)},_s().prototype.step=function(e){return this.throwIfDisposed(),Jo(this,e)};const bp=la({stridedSlice_:function(e,t,n,s,a=0,r=0,i=0,o=0,l=0){const u={x:ia(e,"x","stridedSlice")},c={begin:t,end:n,strides:s,beginMask:a,endMask:r,ellipsisMask:i,newAxisMask:o,shrinkAxisMask:l};return Zs.runKernel(Hn,u,c)}});_s().prototype.stridedSlice=function(e,t,n,s,a,r,i,o){return this.throwIfDisposed(),bp(this,e,t,n,s,a,r,i,o)},_s().prototype.sub=function(e){return this.throwIfDisposed(),Li(this,e)},_s().prototype.sum=function(e,t){return this.throwIfDisposed(),zi(this,e,t)};const xp=la({tan_:function(e){const t={x:ia(e,"x","tan")};return Zs.runKernel(jn,t)}});_s().prototype.tan=function(){return this.throwIfDisposed(),xp(this)},_s().prototype.tanh=function(){return this.throwIfDisposed(),Do(this)},_s().prototype.tile=function(e){return this.throwIfDisposed(),$i(this,e)},_s().prototype.toBool=function(){return this.throwIfDisposed(),ja(this,"bool")},_s().prototype.toFloat=function(){return this.throwIfDisposed(),ja(this,"float32")},_s().prototype.toInt=function(){return this.throwIfDisposed(),ja(this,"int32")};const wp=la({topk_:function(e,t=1,n=!0){const s=ia(e,"x","topk");if(0===s.rank)throw new Error("topk() expects the input to be of rank 1 or higher");const a=s.shape[s.shape.length-1];if(t>a)throw new Error(`'k' passed to topk() must be <= the last dimension (${a}) but got ${t}`);const r={x:s},i={k:t,sorted:n},[o,l]=Zs.runKernel(Xn,r,i);return{values:o,indices:l}}});_s().prototype.topk=function(e,t){return this.throwIfDisposed(),wp(this,e,t)},_s().prototype.transpose=function(e){return this.throwIfDisposed(),Wo(this,e)};const kp=la({unique_:function(e,t=0){const n=ia(e,"x","unique","string_or_numeric");E(n.rank>0,(()=>"The input tensor must 
be at least 1D"));const s={x:n},a={axis:t},[r,i]=Zs.runKernel(Zn,s,a);return{values:r,indices:i}}});let vp;function Np(){return null==vp&&(vp=Zs.backend.epsilon()),vp}_s().prototype.unique=function(e){return this.throwIfDisposed(),kp(this,e)},_s().prototype.unsortedSegmentSum=function(e,t){return this.throwIfDisposed(),Oc(this,e,t)},_s().prototype.unstack=function(e){return this.throwIfDisposed(),Lo(this,e)},_s().prototype.where=function(e,t){return this.throwIfDisposed(),Po(e,this,t)},_s().prototype.zerosLike=function(){return this.throwIfDisposed(),Bo(this)};class Ip extends Error{constructor(e){super(e),Object.setPrototypeOf(this,Ip.prototype)}}class $p extends Error{constructor(e){super(e),Object.setPrototypeOf(this,$p.prototype)}}class Cp extends Error{constructor(e){super(e),Object.setPrototypeOf(this,Cp.prototype)}}class Sp extends Error{constructor(e){super(e),Object.setPrototypeOf(this,Sp.prototype)}}class Tp extends Error{constructor(e){super(e),Object.setPrototypeOf(this,Tp.prototype)}}function Ep(e,t){if(Array.isArray(e)){let n=[];for(let s=0;s<t;s++)n=n.concat(e);return n}{const n=new Array(t);return n.fill(e),n}}function Ap(e,t){if(!e)throw new Tp(t)}function Rp(e,t){let n=0;for(const s of e)s===t&&n++;return n}function Fp(e){return 1===e.length?e[0]:e}function Dp(e){return Array.isArray(e)?e:[e]}function _p(e){const t=e.replace(/(.)([A-Z][a-z0-9]+)/g,"$1_$2").replace(/([a-z])([A-Z])/g,"$1_$2").toLowerCase();return"_"!==t[0]?t:"private"+t}function Op(e){return e.length<=1||-1===e.indexOf("_")?e:e.replace(/[_]+(\w|$)/g,((e,t)=>t.toUpperCase()))}Error;let Mp={};function Lp(e){if(null==e)return null;const t={};return t.className=e.getClassName(),t.config=e.getConfig(),t}function zp(e){if(null!=e&&"object"==typeof e)if(Array.isArray(e))e.forEach((e=>zp(e)));else{const t=Object.keys(e);for(const n of t){const t=e[n];null!=t&&"object"==typeof t&&(Array.isArray(t)||"ndarray"!==t.type||"number"!=typeof t.value?zp(t):e[n]=t.value)}}}function Pp(e,t={},n={},s="object",a=!1){if("string"==typeof e){const a=e;let r;if(a in n)r=n[a];else if(a in Mp)r=Mp[a];else if(r=t[a],null==r)throw new Cp(`Unknown ${s}: ${e}. This may be due to one of the following reasons:\n1. The ${s} is defined in Python, in which case it needs to be ported to TensorFlow.js or your JavaScript code.\n2. The custom ${s} is defined in JavaScript, but is not registered properly with tf.serialization.registerClass().`);return r}{const r=e;if(null==r.className||null==r.config)throw new Cp(`${s}: Improper config format: ${JSON.stringify(r)}.\n'className' and 'config' must set.`);const i=r.className;let o,l;if(i in n?[o,l]=n[i]:i in Mp?[o,l]=Mp.className:i in t&&([o,l]=t[i]),null==o)throw new Cp(`Unknown ${s}: ${i}. This may be due to one of the following reasons:\n1. The ${s} is defined in Python, in which case it needs to be ported to TensorFlow.js or your JavaScript code.\n2. 
The custom ${s} is defined in JavaScript, but is not registered properly with tf.serialization.registerClass().`);if(null!=l){const e={};for(const t of Object.keys(Mp))e[t]=Mp[t];for(const t of Object.keys(n))e[t]=n[t];r.config.customObjects=e;const t=Object.assign({},Mp);for(const e of Object.keys(n))Mp[e]=n[e];zp(r.config);const s=l(o,r.config,n,a);return Mp=Object.assign({},t),s}{const e=Object.assign({},Mp);for(const e of Object.keys(n))Mp[e]=n[e];const t=new o(r.config);return Mp=Object.assign({},e),t}}}function Bp(e,t){return-1*function(e,t){return e<t?-1:e>t?1:0}(e,t)}function Wp(e){if(null==e)return e;const t=[];for(const n of e)-1===t.indexOf(n)&&t.push(n);return t}function Vp(e){if(null==e)throw new Cp(`Invalid value in obj: ${JSON.stringify(e)}`);for(const t in e)if(e.hasOwnProperty(t))return!1;return!0}function Up(e,t,n){if(null!=n&&e.indexOf(n)<0)throw new Cp(`${n} is not a valid ${t}. Valid values are ${e} or null/undefined.`)}function Gp(e,t,n=0,s=1/0){return Ap(n>=0),Ap(s>=n),Array.isArray(e)&&e.length>=n&&e.length<=s&&e.every((e=>typeof e===t))}function Hp(e,t){Array.isArray(e)?(E(e.length>0,(()=>`${t} is unexpectedly an empty array.`)),e.forEach(((e,n)=>Hp(e,`element ${n+1} of ${t}`)))):E(Number.isInteger(e)&&e>0,(()=>`Expected ${t} to be a positive integer, but got ${jp(e)}.`))}function jp(e){return null===e?"null":Array.isArray(e)?"["+e.map((e=>jp(e))).join(",")+"]":"string"==typeof e?`"${e}"`:`${e}`}function Kp(e){return"relu"===e?"relu":"linear"===e?"linear":"elu"===e?"elu":null}function qp(e,t){return Cr((()=>Ao(zi(Mi(e,e),t,!0))))}class Xp extends kr{getConfig(){return{}}}class Yp extends Xp{constructor(e){super(),this.defaultMaxValue=2,this.defaultAxis=0,this.maxValue=null!=e.maxValue?e.maxValue:this.defaultMaxValue,this.axis=null!=e.axis?e.axis:this.defaultAxis}apply(e){return Cr((()=>{const t=qp(e,this.axis),n=ri(t,0,this.maxValue);return Mi(e,bi(n,Or(Np(),t)))}))}getConfig(){return{maxValue:this.maxValue,axis:this.axis}}}Yp.className="MaxNorm",Nr(Yp);class Jp extends Xp{constructor(e){super(),this.defaultAxis=0,this.axis=null!=e.axis?e.axis:this.defaultAxis}apply(e){return Cr((()=>bi(e,Or(Np(),qp(e,this.axis)))))}getConfig(){return{axis:this.axis}}}Jp.className="UnitNorm",Nr(Jp);class Zp extends Xp{apply(e){return yo(e)}}Zp.className="NonNeg",Nr(Zp);class Qp extends Xp{constructor(e){super(),this.defaultMinValue=0,this.defaultMaxValue=1,this.defaultRate=1,this.defaultAxis=0,this.minValue=null!=e.minValue?e.minValue:this.defaultMinValue,this.maxValue=null!=e.maxValue?e.maxValue:this.defaultMaxValue,this.rate=null!=e.rate?e.rate:this.defaultRate,this.axis=null!=e.axis?e.axis:this.defaultAxis}apply(e){return Cr((()=>{const t=qp(e,this.axis),n=Or(Mi(this.rate,ri(t,this.minValue,this.maxValue)),Mi(1-this.rate,t));return Mi(e,bi(n,Or(Np(),t)))}))}getConfig(){return{minValue:this.minValue,maxValue:this.maxValue,rate:this.rate,axis:this.axis}}}Qp.className="MinMaxNorm",Nr(Qp);const ed={maxNorm:"MaxNorm",minMaxNorm:"MinMaxNorm",nonNeg:"NonNeg",unitNorm:"UnitNorm"};function td(e){return Lp(e)}function nd(e,t={}){return Pp(e,vr.getMap().classNameMap,t,"constraint")}function sd(e){return null==e?null:"string"==typeof e?nd({className:e in ed?ed[e]:e,config:{}}):e instanceof Xp?e:nd(e)}const ad=["channelsFirst","channelsLast"],rd=["nearest","bilinear"],id=["valid","same","causal"],od=["max","avg"],ld=["sum","mul","concat","ave"],ud=new Map;function cd(e){Up(ad,"DataFormat",e)}function hd(e){Up(id,"PaddingMode",e)}function pd(e){Up(od,"PoolMode",e)}const dd=[];function 
fd(e,t){dd.push(e);try{const e=t();return dd.pop(),e}catch(e){throw dd.pop(),e}}function md(e){if(!bd(e))throw new Error("Not a valid tensor name: '"+e+"'");return(0===dd.length?"":dd.join("/")+"/")+e}function gd(e){if(!bd(e))throw new Error("Not a valid tensor name: '"+e+"'");ud.has(e)||ud.set(e,0);const t=ud.get(e);if(ud.set(e,ud.get(e)+1),t>0){const n=`${e}_${t}`;return ud.set(n,1),n}return e}const yd=new RegExp(/^[A-Za-z0-9][-A-Za-z0-9\._\/]*$/);function bd(e){return!!e.match(yd)}function xd(e,t,n){null==t&&(t=0),null==n&&(n=e.length);let s=1;for(let a=t;a<n;++a)s*=e[a];return s}function wd(e){return _o(e=Array.isArray(e)?new Float32Array(e):e)}function kd(e){return Hi(wd(e)).dataSync()[0]}function vd(e){return Oi(wd(e)).dataSync()[0]}function Nd(e,t){if(t<e)throw new Cp(`end (${t}) < begin (${e}) is forbidden.`);const n=[];for(let s=e;s<t;++s)n.push(s);return n}function Id(e,t){return e.asType(t)}function $d(e,t=-1){const n=e.shape.slice();return t<0&&(t=n.length+t+1),n.splice(t,0,1),e.reshape(n)}function Cd(e,t,n){return Cr((()=>{switch(e.rank){case 1:return No(e,t,n);case 2:return Io(e,[t,0],[n,e.shape[1]]);case 3:return $o(e,[t,0,0],[n,e.shape[1],e.shape[2]]);case 4:return Co(e,[t,0,0,0],[n,e.shape[1],e.shape[2],e.shape[3]]);case 5:return vo(e,[t,0,0,0,0],[n,e.shape[1],e.shape[2],e.shape[3],e.shape[4]]);case 6:return vo(e,[t,0,0,0,0,0],[n,e.shape[1],e.shape[2],e.shape[3],e.shape[4],e.shape[5]]);default:throw new Cp(`sliceAlongFirstAxis() received an unsupported tensor rank: ${e.rank}`)}}))}function Sd(e,t,n){return Cr((()=>{switch(e.rank){case 1:return No(e,t,n);case 2:return Io(e,[0,t],[e.shape[0],n]);case 3:return $o(e,[0,0,t],[e.shape[0],e.shape[1],n]);case 4:return Co(e,[0,0,0,t],[e.shape[0],e.shape[1],e.shape[2],n]);default:throw new Cp(`sliceAlongLastAxis() received an unsupported tensor rank: ${e.rank}`)}}))}function Td(e,t,n,s){return Cr((()=>{switch(e.rank){case 1:return No(e,t,n);case 2:switch(s){case 1:return Cd(e,t,n);case 2:return Sd(e,t,n);default:throw new Cp(`The axis is not within the rank of the tensor ${s}`)}case 3:switch(s){case 1:return Cd(e,t,n);case 2:return $o(e,[0,t,0],[e.shape[0],n,e.shape[2]]);case 3:return Sd(e,t,n);default:throw new Cp(`The axis is not within the rank of the tensor ${s}`)}case 4:switch(s){case 1:return Cd(e,t,n);case 2:return Co(e,[0,t,0,0],[e.shape[0],n,e.shape[2],e.shape[3]]);case 3:return Co(e,[0,0,t,0],[e.shape[0],e.shape[1],n,e.shape[3]]);case 4:return Sd(e,t,n);default:throw new Cp(`The axis is not within the rank of the tensor ${s}`)}default:throw new Cp(`sliceAlongLastAxis() received an unsupported tensor rank: ${e.rank}`)}}))}function Ed(e,t=-1){let n;return t<0&&(n=e[0].rank,t=0!==n?n:0),t===e[0].rank&&(t=-1),ii(e,t)}function Ad(e,t){switch(e.rank){case 1:return oi([e,t]);case 2:return li([e,t],0);case 3:return ui([e,t],0);case 4:return ci([e,t],0);default:throw new Cp(`concatAlongFirstAxis() received an unsupported tensor rank: ${e.rank}`)}}function Rd(e,t){if(Array.isArray(t)||(t=[t]),e.rank!==t.length)throw new Cp(`The length of input n (${t.length}) does not match the number of dimensions in input x (${e.rank})`);return $i(e,t)}function Fd(e,t=0,n=1,s,a){return mo(e,t,n,s,a)}function Dd(e,t,n,s){if(e.rank<2||t.rank<2)throw new Sp(`dot requires both inputs to be rank >= 2 but got x shape = ${e.shape} and y shape = ${t.shape}`);if(t.rank>=3&&e.shape.slice(-1)[0]!==t.shape.slice(-2)[0])throw new Sp(`If rank y >= 3, then the second last dim of y must equal the last dim of x but got x shape = ${e.shape} and y shape = 
${t.shape}`);if(2===e.rank&&2===t.rank)return ol({a:e,b:t,transposeA:!1,transposeB:!1,bias:s?Md(e.rank,s,"channelsLast"):null,activation:n});{const a=e.shape.slice(),r=a.pop();e=e.reshape([-1,r]);const i=t.shape.slice(),o=i.pop(),l=i.pop(),u=[...i,o],c=Array.from({length:t.rank},((e,n)=>0===n?t.rank-2:n<=t.rank-2?n-1:n));t=t.transpose(c).reshape([l,-1]);const h=[...a,...u];return ol({a:e,b:t,transposeA:!1,transposeB:!1,bias:s?Md(e.rank,s,"channelsLast"):null,activation:n}).reshape(h)}}function _d(e,t,n){return Cr((()=>(t=Array.isArray(t)?_o(t,"int32"):t.toInt(),Ei(e,t,n))))}function Od(e){return Mi(e,e)}function Md(e,t,n){const s=t.shape;if(1!==t.rank&&t.rank!==e)throw new Cp(`Unexpected bias dimensions: ${t.rank}; expected it to be 1 or ${e}`);if(5===e){if("channelsFirst"===n)return 1===s.length?t.reshape([1,s[0],1,1,1]):t.reshape([1,s[3],s[0],s[1],s[2]]);if("channelsLast"===n)return 1===s.length?t.reshape([1,1,1,1,s[0]]):t.reshape([1].concat(s))}else if(4===e){if("channelsFirst"===n)return 1===s.length?t.reshape([1,s[0],1,1]):t.reshape([1,s[2],s[0],s[1]]);if("channelsLast"===n)return 1===s.length?t.reshape([1,1,1,s[0]]):t.reshape([1].concat(s))}else if(3===e){if("channelsFirst"===n)return 1===s.length?t.reshape([1,s[0],1]):t.reshape([1,s[1],s[0]]);if("channelsLast"===n)return 1===s.length?t.reshape([1,1,s[0]]):t.reshape([1].concat(s))}else if(e<3)return t;throw new Cp(`Unsupported input rank by biasAdd: ${t.rank}`)}function Ld(e,t,n){return Cr((()=>(null==n&&(n="channelsLast"),cd(n),e.add(Md(e.rank,t,n)))))}function zd(e,t,n,s){return Cr((()=>Vo(e,t,n,s)))}function Pd(e,t,n=!1){return n?e():t()}const Bd=["fanIn","fanOut","fanAvg"],Wd=["normal","uniform","truncatedNormal"];class Vd extends kr{fromConfigUsesCustomObjects(){return!1}getConfig(){return{}}}class Ud extends Vd{apply(e,t){return io(e,t)}}Ud.className="Zeros",Nr(Ud);class Gd extends Vd{apply(e,t){return oo(e,t)}}Gd.className="Ones",Nr(Gd);class Hd extends Vd{constructor(e){if(super(),"object"!=typeof e)throw new Cp(`Expected argument of type ConstantConfig but got ${e}`);if(void 0===e.value)throw new Cp(`config must have value set but got ${e}`);this.value=e.value}apply(e,t){return Cr((()=>Mi(Fr(this.value),oo(e,t))))}getConfig(){return{value:this.value}}}Hd.className="Constant",Nr(Hd);class jd extends Vd{constructor(e){super(),this.DEFAULT_MINVAL=-.05,this.DEFAULT_MAXVAL=.05,this.minval=e.minval||this.DEFAULT_MINVAL,this.maxval=e.maxval||this.DEFAULT_MAXVAL,this.seed=e.seed}apply(e,t){return go(e,this.minval,this.maxval,t)}getConfig(){return{minval:this.minval,maxval:this.maxval,seed:this.seed}}}jd.className="RandomUniform",Nr(jd);class Kd extends Vd{constructor(e){super(),this.DEFAULT_MEAN=0,this.DEFAULT_STDDEV=.05,this.mean=e.mean||this.DEFAULT_MEAN,this.stddev=e.stddev||this.DEFAULT_STDDEV,this.seed=e.seed}apply(e,t){if("float32"!==(t=t||"float32")&&"int32"!==t)throw new Sp(`randomNormal does not support dType ${t}.`);return Fd(e,this.mean,this.stddev,t,this.seed)}getConfig(){return{mean:this.mean,stddev:this.stddev,seed:this.seed}}}Kd.className="RandomNormal",Nr(Kd);class qd extends Vd{constructor(e){super(),this.DEFAULT_MEAN=0,this.DEFAULT_STDDEV=.05,this.mean=e.mean||this.DEFAULT_MEAN,this.stddev=e.stddev||this.DEFAULT_STDDEV,this.seed=e.seed}apply(e,t){if("float32"!==(t=t||"float32")&&"int32"!==t)throw new Sp(`truncatedNormal does not support dType ${t}.`);return Mo(e,this.mean,this.stddev,t,this.seed)}getConfig(){return{mean:this.mean,stddev:this.stddev,seed:this.seed}}}qd.className="TruncatedNormal",Nr(qd);class Xd 
extends Vd{constructor(e){super(),this.gain=null!=e.gain?e.gain:1}apply(e,t){return Cr((()=>{if(2!==e.length||e[0]!==e[1])throw new Cp("Identity matrix initializer can only be used for 2D square matrices.");return Mi(this.gain,Ci(e[0]))}))}getConfig(){return{gain:this.gain}}}Xd.className="Identity",Nr(Xd);class Yd extends Vd{constructor(e){if(super(),e.scale<0)throw new Cp(`scale must be a positive float. Got: ${e.scale}`);var t;this.scale=null==e.scale?1:e.scale,this.mode=null==e.mode?"fanIn":e.mode,t=this.mode,Up(Bd,"FanMode",t),this.distribution=null==e.distribution?"normal":e.distribution,function(e){Up(Wd,"Distribution",e)}(this.distribution),this.seed=e.seed}apply(e,t){const n=function(e,t="channelsLast"){let n,s;if(cd(t),2===e.length)n=e[0],s=e[1];else if(-1!==[3,4,5].indexOf(e.length)){if("channelsFirst"===t){const t=xd(e,2);n=e[1]*t,s=e[0]*t}else if("channelsLast"===t){const t=xd(e,0,e.length-2);n=e[e.length-2]*t,s=e[e.length-1]*t}}else{const t=xd(e);n=Math.sqrt(t),s=Math.sqrt(t)}return[n,s]}(e),s=n[0],a=n[1];let r=this.scale;if("fanIn"===this.mode?r/=Math.max(1,s):"fanOut"===this.mode?r/=Math.max(1,a):r/=Math.max(1,(s+a)/2),"normal"===this.distribution){const n=Math.sqrt(r);if("float32"!==(t=t||"float32")&&"int32"!==t)throw new Sp(`${this.getClassName()} does not support dType ${t}.`);return Mo(e,0,n,t,this.seed)}{const n=Math.sqrt(3*r);return go(e,-n,n,t)}}getConfig(){return{scale:this.scale,mode:this.mode,distribution:this.distribution,seed:this.seed}}}Yd.className="VarianceScaling",Nr(Yd);class Jd extends Yd{constructor(e){super({scale:1,mode:"fanAvg",distribution:"uniform",seed:null==e?null:e.seed})}getClassName(){return Yd.className}}Jd.className="GlorotUniform",Nr(Jd);class Zd extends Yd{constructor(e){super({scale:1,mode:"fanAvg",distribution:"normal",seed:null==e?null:e.seed})}getClassName(){return Yd.className}}Zd.className="GlorotNormal",Nr(Zd);class Qd extends Yd{constructor(e){super({scale:2,mode:"fanIn",distribution:"normal",seed:null==e?null:e.seed})}getClassName(){return Yd.className}}Qd.className="HeNormal",Nr(Qd);class ef extends Yd{constructor(e){super({scale:2,mode:"fanIn",distribution:"uniform",seed:null==e?null:e.seed})}getClassName(){return Yd.className}}ef.className="HeUniform",Nr(ef);class tf extends Yd{constructor(e){super({scale:1,mode:"fanIn",distribution:"normal",seed:null==e?null:e.seed})}getClassName(){return Yd.className}}tf.className="LeCunNormal",Nr(tf);class nf extends Yd{constructor(e){super({scale:1,mode:"fanIn",distribution:"uniform",seed:null==e?null:e.seed})}getClassName(){return Yd.className}}nf.className="LeCunNormal",Nr(nf);class sf extends Vd{constructor(e){if(super(),this.DEFAULT_GAIN=1,this.gain=null==e.gain?this.DEFAULT_GAIN:e.gain,this.seed=e.seed,null!=this.seed)throw new Sp("Random seed is not implemented for Orthogonal Initializer yet.")}apply(e,t){return Cr((()=>{if(e.length<2)throw new Sp("Shape must be at least 2D.");e[0]*e[1]>2e3&&console.warn(`Orthogonal initializer is being called on a matrix with more than 2000 (${e[0]*e[1]}) elements: Slowness may result.`);const t=Fd(e[0]>e[1]?[e[1],e[0]]:e,0,1,"float32");let n=Hl.gramSchmidt(t);return e[0]>e[1]&&(n=n.transpose()),Mi(this.gain,n)}))}getConfig(){return{gain:this.gain,seed:this.seed}}}sf.className="Orthogonal",Nr(sf);const 
af={constant:"Constant",glorotNormal:"GlorotNormal",glorotUniform:"GlorotUniform",heNormal:"HeNormal",heUniform:"HeUniform",identity:"Identity",leCunNormal:"LeCunNormal",leCunUniform:"LeCunUniform",ones:"Ones",orthogonal:"Orthogonal",randomNormal:"RandomNormal",randomUniform:"RandomUniform",truncatedNormal:"TruncatedNormal",varianceScaling:"VarianceScaling",zeros:"Zeros"};function rf(e,t={}){return Pp(e,vr.getMap().classNameMap,t,"initializer")}function of(e){return Lp(e)}function lf(e){if("string"==typeof e){const t=e in af?af[e]:e;if("GlorotNormal"===t)return new Zd;if("GlorotUniform"===t)return new Jd;if("HeNormal"===t)return new Qd;if("HeUniform"===t)return new ef;if("LeCunNormal"===t)return new tf;if("LeCunUniform"===t)return new nf;{const e={};return e.className=t,e.config={},rf(e)}}return e instanceof Vd?e:rf(e)}let uf=0;function cf(){return uf++}const hf={};function pf(e=""){return e in hf||(hf[e]=0),hf[e]+=1,e+hf[e].toString()}function df(e){return Array.isArray(e)&&Array.isArray(e[0])}function ff(e){return 0===e.length?[]:Array.isArray(e[0])?e:[e]}function mf(e){let t;if(Array.isArray(e)){if(1!==e.length)throw new Cp(`Expected Tensor length to be 1; got ${e.length}`);t=e[0]}else t=e;return t}function gf(e){if(Array.isArray(e)&&Array.isArray(e[0])){if(1===e.length)return(e=e)[0];throw new Cp(`Expected exactly 1 Shape; got ${e.length}`)}return e}function yf(e){let t=0;for(const n of e)0===n.shape.length?t+=1:t+=n.shape.reduce(((e,t)=>e*t));return t}class bf{constructor(e,t="float32",n="Variable",s=!0,a=null){this.dtype=null==t?"float32":t,this.shape=e.shape,this.id=cf(),n=null==n?"Variable":n,this.originalName=md(n),this.name=gd(this.originalName),this.trainable_=s,this.constraint=a,this.val=function(e,t=!0,n,s){return Zs.makeVariable(e,t,n,s)}(e,this.trainable_,this.name,this.dtype)}read(){return this.assertNotDisposed(),this.val}write(e){return this.assertNotDisposed(),function(e,t){if(e.shape.toString()!==t.shape.toString())throw new Error("Shape mismatch: "+JSON.stringify(e.shape)+" vs. 
"+JSON.stringify(t.shape))}(this.val,e),this.val.id!==e.id&&(this.val.assign(e),null!=this.constraint&&this.val.assign(this.constraint.apply(this.val))),this}dispose(){this.assertNotDisposed(),this.val.dispose()}assertNotDisposed(){if(this.val.isDisposed)throw new Error(`LayersVariable ${this.name} is already disposed.`)}get trainable(){return this.trainable_}set trainable(e){this.trainable_=e,this.val.trainable=e}}function xf(e){return e.map((e=>e.read()))}function wf(e){e.forEach((e=>{e[0].write(e[1])}))}class kf{constructor(e){this.dtype=e.dtype,this.shape=e.shape,null!=e.shape?this.ndim=e.shape.length:this.ndim=e.ndim,this.maxNDim=e.maxNDim,this.minNDim=e.minNDim,this.axes=e.axes||{}}}class vf{constructor(e,t,n,s,a,r,i){this.dtype=e,this.shape=t,this.sourceLayer=n,this.inputs=s,this.callArgs=a,this.outputTensorIndex=i,this.id=cf(),null!=r&&(this.originalName=md(r),this.name=gd(this.originalName)),this.rank=t.length}}let Nf=0;class If{constructor(e,t){this.callArgs=t,this.id=Nf++,this.outboundLayer=e.outboundLayer,this.inboundLayers=e.inboundLayers,this.nodeIndices=e.nodeIndices,this.tensorIndices=e.tensorIndices,this.inputTensors=e.inputTensors,this.outputTensors=e.outputTensors,this.inputMasks=e.inputMasks,this.outputMasks=e.outputMasks,this.inputShapes=e.inputShapes,this.outputShapes=e.outputShapes;for(const t of e.inboundLayers)null!=t&&t.outboundNodes.push(this);e.outboundLayer.inboundNodes.push(this)}getConfig(){const e=[];for(const t of this.inboundLayers)null!=t?e.push(t.name):e.push(null);return{outboundLayer:this.outboundLayer?this.outboundLayer.name:null,inboundLayers:e,nodeIndices:this.nodeIndices,tensorIndices:this.tensorIndices}}}let $f=0;class Cf extends kr{constructor(e={}){super(),this._callHook=null,this._addedWeightNames=[],this._stateful=!1,this.id=$f++,this.activityRegularizer=null,this.inputSpec=null,this.supportsMasking=!1,this._trainableWeights=[],this._nonTrainableWeights=[],this._losses=[],this._updates=[],this._built=!1,this.inboundNodes=[],this.outboundNodes=[];let t=e.name;if(!t){const e=this.getClassName();t=_p(e)+"_"+pf(e)}if(this.name=t,this.trainable_=null==e.trainable||e.trainable,null!=e.inputShape||null!=e.batchInputShape){let t;if(null!=e.batchInputShape)t=e.batchInputShape;else if(null!=e.inputShape){let n=null;null!=e.batchSize&&(n=e.batchSize),t=[n].concat(e.inputShape)}this.batchInputShape=t;let n=e.dtype;null==n&&(n=e.inputDType),null==n&&(n="float32"),this.dtype=n}null!=e.weights?this.initialWeights=e.weights:this.initialWeights=null,this._refCount=null,this.fastWeightInitDuringBuild=!1}static nodeKey(e,t){return e.name+"_ib-"+t.toString()}getNodeAtIndex(e,t){if(0===this.inboundNodes.length)throw new $p(`The layer has never been called and thus has no defined ${t}.`);if(this.inboundNodes.length<=e)throw new Cp(`Asked to get ${t} at node ${e}, but the layer has only ${this.inboundNodes.length} inbound nodes.`);return this.inboundNodes[e]}getInputAt(e){return Fp(this.getNodeAtIndex(e,"input").inputTensors)}getOutputAt(e){return Fp(this.getNodeAtIndex(e,"output").outputTensors)}get input(){if(this.inboundNodes.length>1)throw new Ip(`Layer ${this.name} has multiple inbound nodes, hence the notion of "layer input" is ill-defined. 
Use \`getInputAt(nodeIndex)\` instead.`);if(0===this.inboundNodes.length)throw new Ip(`Layer ${this.name} is not connected, no input to return.`);return Fp(this.getNodeAtIndex(0,"input").inputTensors)}get output(){if(0===this.inboundNodes.length)throw new Ip(`Layer ${this.name} has no inbound nodes.`);if(this.inboundNodes.length>1)throw new Ip(`Layer ${this.name} has multiple inbound nodes, hence the notion of "layer output" is ill-defined. Use \`getOutputAt(nodeIndex)\` instead.`);return Fp(this.getNodeAtIndex(0,"output").outputTensors)}get losses(){return this._losses}calculateLosses(){return this.losses.map((e=>e()))}get updates(){return this._updates}get built(){return this._built}set built(e){this._built=e}get trainable(){return this.trainable_}set trainable(e){this._trainableWeights.forEach((t=>t.trainable=e)),this.trainable_=e}get trainableWeights(){return this.trainable_?this._trainableWeights.filter((e=>e.trainable)):[]}set trainableWeights(e){this._trainableWeights=e}get nonTrainableWeights(){return this.trainable?this._trainableWeights.filter((e=>!e.trainable)).concat(this._nonTrainableWeights):this._trainableWeights.concat(this._nonTrainableWeights)}set nonTrainableWeights(e){this._nonTrainableWeights=e}get weights(){return this.trainableWeights.concat(this.nonTrainableWeights)}get stateful(){return this._stateful}resetStates(){if(!this.stateful)throw new Error("Cannot call the resetStates() method of a non-stateful Layer object.")}assertInputCompatibility(e){if(e=Dp(e),null==this.inputSpec||0===this.inputSpec.length)return;const t=Dp(this.inputSpec);if(e.length!==t.length)throw new Cp(`Layer ${this.name} expects ${t.length} inputs, but it received ${e.length} input tensors. Input received: ${e}`);for(let n=0;n<e.length;n++){const s=e[n],a=t[n];if(null==a)continue;const r=s.rank;if(null!=a.ndim&&r!==a.ndim)throw new Cp(`Input ${n} is incompatible with layer ${this.name}: expected ndim=${a.ndim}, found ndim=${r}`);if(null!=a.maxNDim&&r>a.maxNDim)throw new Cp(`Input ${n} is incompatible with layer ${this.name}: expected max_ndim=${a.maxNDim}, found ndim=${r}`);if(null!=a.minNDim&&r<a.minNDim)throw new Cp(`Input ${n} is incompatible with layer ${this.name}: expected min_ndim=${a.minNDim}, found ndim=${r}.`);if(null!=a.dtype&&s.dtype!==a.dtype)throw new Cp(`Input ${n} is incompatible with layer ${this.name} : expected dtype=${a.dtype}, found dtype=${s.dtype}.`);if(a.axes){const e=s.shape;for(const t in a.axes){const s=Number(t),r=a.axes[t],i=s>=0?e[s]:e[e.length+s];if(null!=r&&-1===[r,null].indexOf(i))throw new Cp(`Input ${n} is incompatible with layer ${this.name}: expected axis ${s} of input shape to have value ${r} but got shape ${e}.`)}}if(null!=a.shape)for(let e=0;e<a.shape.length;++e){const t=a.shape[e],r=s.shape[e];if(null!=t&&null!=r&&t!==r)throw new Cp(`Input ${n} is incompatible with layer ${this.name}: expected shape=${a.shape}, found shape=${s.shape}.`)}}}call(e,t){return e}invokeCallHook(e,t){null!=this._callHook&&this._callHook(e,t)}setCallHook(e){this._callHook=e}clearCallHook(){this._callHook=null}apply(e,t){t=t||{},this.assertNotDisposed();const n=Dp(e);let s=!0;for(const e of n)if(!(e instanceof vf)){s=!1;break}let a=!0;for(const e of n)if(e instanceof vf){a=!1;break}if(s===a)throw new Cp("Arguments to apply() must be all SymbolicTensors or all Tensors");return fd(this.name,(()=>{if(!this.built){this.assertInputCompatibility(e);const t=[];for(const n of 
Dp(e))t.push(n.shape);this.build(Fp(t)),this.built=!0,this.initialWeights&&this.setWeights(this.initialWeights),null===this._refCount&&a&&(this._refCount=1)}if(this.assertInputCompatibility(e),a){let s=this.call(e,t);const a=Dp(s),r=[];for(let e of a)-1!==n.indexOf(e)&&(e=e.clone()),r.push(e);if(s=Fp(r),null!=this.activityRegularizer)throw new Sp("Layer invocation in the presence of activity regularizer(s) is not supported yet.");return s}{const n=function(e){e=Dp(e);const t=[];for(const n of e)t.push(n.shape);return Fp(t)}(e),s=this.computeOutputShape(n);let a;const r="float32";if(this.warnOnIncompatibleInputShape(Array.isArray(e)?n[0]:n),a=null!=s&&s.length>0&&Array.isArray(s[0])?s.map(((n,s)=>new vf(r,n,this,Dp(e),t,this.name,s))):new vf(r,s,this,Dp(e),t,this.name),this.addInboundNode(e,a,null,null,n,s,t),this._refCount++,null!=this.activityRegularizer)throw new Sp("Layer invocation in the presence of activity regularizer(s) is not supported yet.");return a}}))}warnOnIncompatibleInputShape(e){if(null!=this.batchInputShape)if(e.length!==this.batchInputShape.length)console.warn(`The rank of the input tensor provided (shape: ${JSON.stringify(e)}) does not match that of the batchInputShape (${JSON.stringify(this.batchInputShape)}) of the layer ${this.name}`);else{let t=!1;this.batchInputShape.forEach(((n,s)=>{null!=n&&null!=e[s]&&e[s]!==n&&(t=!0)})),t&&console.warn(`The shape of the input tensor (${JSON.stringify(e)}) does not match the expectation of layer ${this.name}: ${JSON.stringify(this.batchInputShape)}`)}}get outputShape(){if(null==this.inboundNodes||0===this.inboundNodes.length)throw new Ip(`The layer ${this.name} has never been called and thus has no defined output shape.`);const e=[];for(const t of this.inboundNodes){const n=JSON.stringify(t.outputShapes);-1===e.indexOf(n)&&e.push(n)}if(1===e.length){const e=this.inboundNodes[0].outputShapes;return Array.isArray(e)&&Array.isArray(e[0])&&1===e.length?e[0]:e}throw new Ip(`The layer ${this.name} has multiple inbound nodes with different output shapes. Hence the notion of "output shape" is ill-defined for the layer.`)}countParams(){if(!this.built)throw new $p(`You tried to call countParams() on ${this.name}, but the layer is not built yet. Build it first by calling build(batchInputShape).`);return yf(this.weights)}build(e){this.built=!0}getWeights(e=!1){return xf(e?this.trainableWeights:this.weights)}setWeights(e){Cr((()=>{const t=this.weights;if(t.length!==e.length)throw new Cp(`You called setWeights(weights) on layer "${this.name}" with a weight list of length ${e.length}, but the layer was expecting ${t.length} weights. 
Provided weights: ${e}...`);if(0===t.length)return;const n=[],s=xf(t);for(let a=0;a<s.length;++a){const r=s[a],i=t[a],o=e[a];if(!_(r.shape,o.shape))throw new Cp(`Layer weight shape ${r.shape} not compatible with provided weight shape ${o.shape}`);n.push([i,o])}wf(n)}))}addWeight(e,t,n,s,a,r,i){if(-1!==this._addedWeightNames.indexOf(e))throw new Cp(`Duplicate weight name ${e} for layer ${this.name}`);this._addedWeightNames.push(e),null==n&&(n="float32"),this.fastWeightInitDuringBuild&&(s=lf("zeros"));const o=s.apply(t,n),l=new bf(o,n,e,r,i);return o.dispose(),null!=a&&this.addLoss((()=>a.apply(l.read()))),null==r&&(r=!0),r?this._trainableWeights.push(l):this._nonTrainableWeights.push(l),l}setFastWeightInitDuringBuild(e){this.fastWeightInitDuringBuild=e}addLoss(e){null==e||Array.isArray(e)&&0===e.length||(e=Dp(e),void 0!==this._losses&&null!==this._losses&&this.losses.push(...e))}computeOutputShape(e){return e}computeMask(e,t){if(!this.supportsMasking){if(null!=t){if(!Array.isArray(t))throw new TypeError(`Layer ${this.name} does not support masking, but was passed an inputMask.`);t.forEach((e=>{if(null!=e)throw new TypeError(`Layer ${this.name} does not support masking, but was passed an inputMask.`)}))}return null}return t}addInboundNode(e,t,n,s,a,r,i=null){const o=Dp(e);t=Dp(t),n=Dp(n),s=Dp(s),a=ff(a),r=ff(r);const l=[],u=[],c=[];for(const e of o)l.push(e.sourceLayer),u.push(e.nodeIndex),c.push(e.tensorIndex);new If({outboundLayer:this,inboundLayers:l,nodeIndices:u,tensorIndices:c,inputTensors:o,outputTensors:t,inputMasks:n,outputMasks:s,inputShapes:a,outputShapes:r},i);for(let e=0;e<t.length;e++)t[e].sourceLayer=this,t[e].nodeIndex=this.inboundNodes.length-1,t[e].tensorIndex=e}getConfig(){const e={name:this.name,trainable:this.trainable};return null!=this.batchInputShape&&(e.batchInputShape=this.batchInputShape),null!=this.dtype&&(e.dtype=this.dtype),e}disposeWeights(){return this.weights.forEach((e=>e.dispose())),this.weights.length}assertNotDisposed(){if(0===this._refCount)throw new Error(`Layer '${this.name}' is already disposed.`)}dispose(){if(!this.built)throw new Error(`Cannot dispose Layer ${this.name} because it has not been built yet.`);if(null===this._refCount)throw new Error(`Cannot dispose Layer ${this.name} because it has not been used yet.`);this.assertNotDisposed();let e=0;return 0==--this._refCount&&(e=this.disposeWeights()),{refCountAfterDispose:this._refCount,numDisposedVariables:e}}}function Sf(e,t,n){if((null==t||null!=n&&n>0)&&(t=e.sourceLayer,n=e.nodeIndex),0===t.inboundNodes.length)return[e];{const e=t.inboundNodes[n];if(0===e.inboundLayers.length)return e.inputTensors;{const t=[];for(let n=0;n<e.inboundLayers.length;n++){const s=Sf(e.inputTensors[n],e.inboundLayers[n],e.nodeIndices[n]);for(const e of s)-1===t.indexOf(e)&&t.push(e)}return t}}}class Tf extends Cf{constructor(e){if(super({dtype:e.dtype,name:null!=e.name?e.name:pf("input").toString()}),null==e.batchSize&&(e.batchSize=null),null==e.sparse&&(e.sparse=!1),this.trainable=!1,this.built=!0,this.sparse=e.sparse,null!=e.inputShape&&null!=e.batchInputShape)throw new Cp("Only provide the inputShape OR batchInputShape argument to inputLayer, not both at the same time.");let t=e.batchInputShape;if(null==t){if(null==e.inputShape)throw new Cp("An InputLayer should be passed either a `batchInputShape` or an `inputShape`.");t=[e.batchSize].concat(e.inputShape)}else if(null!=e.batchSize)throw new Cp("Cannot specify batchSize if batchInputShape is specified when creating an InputLayer.");const 
n=e.dtype||"float32";this.batchInputShape=t,this.dtype=n,this.inputSpec=[{shape:t}];const s=new vf(this.dtype,this.batchInputShape,this,[],{},this.name);s.nodeIndex=0,s.tensorIndex=0,new If({outboundLayer:this,inboundLayers:[],nodeIndices:[],tensorIndices:[],inputTensors:[s],outputTensors:[s],inputMasks:[null],outputMasks:[null],inputShapes:[t],outputShapes:[t]})}apply(e,t){throw new Cp(`Cannot pass any input to an InputLayer's apply() method. InputLayer name: ${this.name}`)}dispose(){return{refCountAfterDispose:this._refCount,numDisposedVariables:0}}getConfig(){return{batchInputShape:this.batchInputShape,dtype:this.dtype,sparse:this.sparse,name:this.name}}}async function Ef(e){if(null==e)return;const t=[],n=[],s=[];for(const a in e){const r=e[a];if("number"!=typeof r){const e=r;t.push(e.data()),n.push(a),s.push(e)}}if(t.length>0){const a=await Promise.all(t);for(let t=0;t<a.length;++t)e[n[t]]=a[t][0];Sr(s)}}function Af(e){if(null!=e)for(const t in e){const n=e[t];"number"!=typeof n&&n.dispose()}}var Rf;Tf.className="InputLayer",Nr(Tf),function(e){e[e.SILENT=0]="SILENT",e[e.VERBOSE=1]="VERBOSE"}(Rf||(Rf={}));class Ff{constructor(){this.validationData=null}setParams(e){this.params=e}async onEpochBegin(e,t){}async onEpochEnd(e,t){}async onBatchBegin(e,t){}async onBatchEnd(e,t){}async onTrainBegin(e){}async onTrainEnd(e){}setModel(e){}}class Df{constructor(e,t=10){null==e&&(e=[]),this.callbacks=e,this.queueLength=t}append(e){this.callbacks.push(e)}setParams(e){for(const t of this.callbacks)t.setParams(e)}setModel(e){for(const t of this.callbacks)t.setModel(e)}async onEpochBegin(e,t){null==t&&(t={});for(const n of this.callbacks)await n.onEpochBegin(e,t)}async onEpochEnd(e,t){null==t&&(t={});for(const n of this.callbacks)await n.onEpochEnd(e,t)}async onBatchBegin(e,t){null==t&&(t={});for(const n of this.callbacks)await n.onBatchBegin(e,t)}async onBatchEnd(e,t){null==t&&(t={});for(const n of this.callbacks)await n.onBatchEnd(e,t)}async onTrainBegin(e){null==e&&(e={});for(const t of this.callbacks)await t.onTrainBegin(e)}async onTrainEnd(e){null==e&&(e={});for(const t of this.callbacks)await t.onTrainEnd(e)}}class _f extends Ff{constructor(){super()}async onEpochBegin(e){this.seen=0,this.totals={}}async onBatchEnd(e,t){null==t&&(t={});const n=null==t.size?0:t.size;this.seen+=n;for(const e in t){const s=t[e];if("number"==typeof s)this.totals.hasOwnProperty(e)||(this.totals[e]=0),this.totals[e]=this.totals[e]+s*n;else{let t;e in this.totals?t=this.totals[e]:this.totals[e]=0;const a=Cr((()=>Or(this.totals[e],Mi(s,n))));this.totals[e]=a,null!=t&&t.dispose()}}}async onEpochEnd(e,t){if(null!=t)for(const e of this.params.metrics)null!=this.totals[e]&&("number"==typeof this.totals[e]?t[e]=this.totals[e]/this.seen:Cr((()=>{const n=Mi(bi(1,this.seen),this.totals[e]);t[e]=n,this.totals[e].dispose(),Tr(t[e])})))}}class Of extends Ff{async onTrainBegin(e){this.epoch=[],this.history={}}async onEpochEnd(e,t){null==t&&(t={}),this.epoch.push(e);for(const e in t)null==this.history[e]&&(this.history[e]=[]),this.history[e].push(t[e])}async syncData(){const e=[],t=[],n=[];for(const s in this.history){const a=this.history[s];for(let r=0;r<a.length;++r)if("number"!=typeof a[r]){const i=a[r];e.push(i.data()),t.push(s),n.push(r)}}const s=await Promise.all(e);for(let e=0;e<s.length;++e)this.history[t[e]][n[e]].dispose(),this.history[t[e]][n[e]]=s[e][0]}}class Mf extends 
Ff{constructor(e,t){if(super(),this.currentEpoch=0,this.yieldEvery=t||"auto","auto"===this.yieldEvery&&(this.yieldEvery=125),"never"===this.yieldEvery&&null!=e.onYield)throw new Error("yieldEvery is `never` but you provided an `onYield` callback. Either change `yieldEvery` or remove the callback");q(this.yieldEvery)&&(this.maybeWait=function(e,t){let n,s=bs();return(...a)=>{const r=bs();return r-s<t||(s=r,n=e(...a)),n}}(this.maybeWait.bind(this),this.yieldEvery)),this.trainBegin=e.onTrainBegin,this.trainEnd=e.onTrainEnd,this.epochBegin=e.onEpochBegin,this.epochEnd=e.onEpochEnd,this.batchBegin=e.onBatchBegin,this.batchEnd=e.onBatchEnd,this.yield=e.onYield}async maybeWait(e,t,n){const s=[];null!=this.yield&&(await Ef(n),s.push(this.yield(e,t,n))),s.push(nu()),await Promise.all(s)}async onEpochBegin(e,t){this.currentEpoch=e,null!=this.epochBegin&&(await Ef(t),await this.epochBegin(e,t))}async onEpochEnd(e,t){const n=[];null!=this.epochEnd&&(await Ef(t),n.push(this.epochEnd(e,t))),"epoch"===this.yieldEvery&&n.push(nu()),await Promise.all(n)}async onBatchBegin(e,t){null!=this.batchBegin&&(await Ef(t),await this.batchBegin(e,t))}async onBatchEnd(e,t){const n=[];null!=this.batchEnd&&(await Ef(t),n.push(this.batchEnd(e,t))),"batch"===this.yieldEvery?n.push(nu()):q(this.yieldEvery)&&n.push(this.maybeWait(this.currentEpoch,e,t)),await Promise.all(n)}async onTrainBegin(e){null!=this.trainBegin&&(await Ef(e),await this.trainBegin(e))}async onTrainEnd(e){null!=this.trainEnd&&(await Ef(e),await this.trainEnd(e))}}function Lf(e,t){return null==e&&(e={}),e instanceof Ff?[e]:Array.isArray(e)&&e[0]instanceof Ff?e:Dp(e).map((e=>new Mf(e,t)))}class zf{constructor(){}static registerCallbackConstructor(e,t){E(e>=0&&Number.isInteger(e),(()=>`Verbosity level is expected to be an integer >= 0, but got ${e}`)),zf.checkForDuplicate(t),null==zf.constructors[e]&&(zf.constructors[e]=[]),zf.constructors[e].push(t)}static checkForDuplicate(e){for(const t in zf.constructors)zf.constructors[+t].forEach((t=>{if(t===e)throw new Cp("Duplicate callback constructor.")}))}static clear(){zf.constructors={}}static createCallbacks(e){const t=[];for(const n in zf.constructors){const s=+n;e>=s&&t.push(...zf.constructors[s])}return t.map((e=>new e))}}function Pf(e,t,n,s,a,r,i,o,l){const u=new Of,c=[new _f,...zf.createCallbacks(t)];null!=e&&c.push(...e),c.push(u);const h=new Df(c);return h.setParams({epochs:n,initialEpoch:s,samples:a,steps:r,batchSize:i,verbose:t,doValidation:o,metrics:l}),{callbackList:h,history:u}}function Bf(e,t={},n=!1){return Pp(e,vr.getMap().classNameMap,t,"layer",n)}function Wf(e,t){return Cr((()=>{"float32"!==e.dtype&&(e=e.asType("float32"));const n=zi(Od(e),t,!0),s=Si(n.shape,Np()),a=Ao(Ui(n,s));return bi(e,a)}))}function Vf(e,t){return Cr((()=>Gi(Od(Li(t,e)),-1)))}function Uf(e,t){return Cr((()=>Gi(_r(Li(t,e)),-1)))}function Gf(e,t){return Cr((()=>{const n=Li(e,t),s=ri(_r(e),Np(),Number.MAX_VALUE),a=_r(bi(n,s));return Mi(100,Gi(a,-1))}))}function Hf(e,t,n=!1){return Cr((()=>{if(n)t=So(t);else{const e=zi(t,t.shape.length-1,!0);t=bi(t,e)}return t=ri(t,Np(),1-Np()),so(zi(Mi(e.toFloat(),Di(t)),t.shape.length-1))}))}function jf(e,t,n=!1){return Cr((()=>{const s=Ti(function(e){const t=[xd(e.shape)];return e.reshape(t)}(e)).toInt(),a=(t=ri(t,Np(),1-Np())).shape;return Hf(ro(s,a[a.length-1]).reshape(a),t,n)}))}function Kf(e,t){return Cr((()=>{let n;return n=ri(t,Np(),1-Np()),n=Di(bi(n,Li(1,n))),Gi(function(e,t){if(!_(e.shape,t.shape))throw new Cp(`logits and labels must have the same shape, but got shapes 
${JSON.stringify(e.shape)} and ${JSON.stringify(t.shape)}`);return Cr((()=>{const n=t.relu(),s=t.abs().neg();return n.sub(t.mul(e)).add(s.exp().log1p())}))}(e,n),-1)}))}function qf(e,t){return Cr((()=>{const n=Wf(e,-1),s=Wf(t,-1),a=Mi(n,s);return so(zi(a,-1))}))}zf.constructors={};const Xf={meanSquaredError:Vf,meanAbsoluteError:Uf,meanAbsolutePercentageError:Gf,meanSquaredLogarithmicError:function(e,t){return Cr((()=>{const n=ri(t,Np(),Number.MAX_VALUE),s=Di(Or(1,n)),a=ri(e,Np(),Number.MAX_VALUE),r=Di(Or(1,a));return Gi(Od(Li(s,r)),-1)}))},squaredHinge:function(e,t){return Cr((()=>{const n=Ui(0,Li(1,Mi(e,t)));return Gi(Od(n),-1)}))},hinge:function(e,t){return Cr((()=>{const n=Ui(0,Li(1,Mi(e,t)));return Gi(n,-1)}))},categoricalHinge:function(e,t){return Cr((()=>{const n=zi(Mi(e,t),-1),s=Oi(Mi(Li(1,e),t),-1);return Ui(0,Or(1,Li(s,n)))}))},logcosh:function(e,t){return Cr((()=>{const n=Math.log(2),s=Li(t,e),a=Li(Or(s,To(Mi(-2,s))),n);return Gi(a,-1)}))},categoricalCrossentropy:Hf,sparseCategoricalCrossentropy:jf,binaryCrossentropy:Kf,kullbackLeiblerDivergence:function(e,t){return Cr((()=>{const n=ri(e,Np(),1),s=ri(t,Np(),1);return zi(Mi(e,Di(bi(n,s))),-1)}))},poisson:function(e,t){return Cr((()=>{const n=Di(Or(Np(),t));return Gi(Li(t,Mi(e,n)),-1)}))},cosineProximity:qf};function Yf(e){if("string"==typeof e){if(e in Xf)return Xf[e];let t=`Unknown loss ${e}`;throw e.toLowerCase().includes("softmaxcrossentropy")&&(t=`Unknown loss ${e}. Use "categoricalCrossentropy" as the string name for tf.losses.softmaxCrossEntropy`),new Cp(t)}return e}function Jf(e,t){return Cr((()=>{const n=Mi(.5,lo(t)),s=Id(Ai(t,n),e.dtype);return Gi(Ni(e,s),-1)}))}function Zf(e,t){return Cr((()=>Id(Ni(zr(e,-1),zr(t,-1)),"float32")))}function Qf(e,t){return Kf(e,t)}function em(e,t){return e.rank===t.rank&&(e=e.squeeze([e.rank-1])),(t=t.argMax(-1)).dtype!==e.dtype&&(t=t.asType(e.dtype)),Ni(e,t).asType("float32")}const tm=Hf,nm=jf,sm={binaryAccuracy:Jf,categoricalAccuracy:Zf,precision:function(e,t){return Cr((()=>{const n=function(e,t){return Cr((()=>Bi(e.equal(1),t.equal(1)).sum().cast("float32")))}(e,t),s=function(e,t){return Cr((()=>Bi(e.equal(0),t.equal(1)).sum().cast("float32")))}(e,t),a=n.add(s);return Po(Ai(a,0),n.div(a),0).cast("float32")}))},categoricalCrossentropy:tm,sparseCategoricalCrossentropy:nm,mse:Vf,MSE:Vf,mae:Uf,MAE:Uf,mape:Gf,MAPE:Gf,cosine:qf};function am(e){if("string"==typeof e&&e in sm)return sm[e];if("string"!=typeof e&&null!=e)return e;throw new Cp(`Unknown metric ${e}`)}function rm(e){if(Ap(null!==e,`Unknown LossOrMetricFn ${e}`),"string"==typeof e)return e;{let t;for(const n of Object.keys(Xf))if(Xf[n]===e){t=n;break}if(void 0!==t)return t;for(const n of Object.keys(sm))if(sm[n]===e){t=n;break}return void 0!==t?t:e.name}}function im(e,t,n=!1){if(null==e||"object"!=typeof e||Object.getPrototypeOf(e)!==Object.prototype||!om(e))throw new Error("User-defined metadata is expected to be a JSON object, but is not.");if(n){const n=JSON.stringify(e);n.length>1048576&&console.warn(`User-defined metadata of model "${t}" is too large in size (length=${n.length} when serialized). It is not recommended to store such large objects in user-defined metadata. 
Please make sure its serialized length is <= 1048576.`)}}function om(e){if(null===e)return!0;if("object"==typeof e){if(Object.getPrototypeOf(e)===Object.prototype){const t=Object.keys(e);for(const n of t){if("string"!=typeof n)return!1;if(!om(e[n]))return!1}return!0}if(Array.isArray(e)){for(const t of e)if(!om(t))return!1;return!0}return!1}{const t=typeof e;return"string"===t||"number"===t||"boolean"===t}}function lm(e,t,n=console.log){let s="";for(let n=0;n<e.length;++n)n>0&&(s=s.slice(0,s.length-1)+" "),s+=e[n],s=s.slice(0,t[n]),s+=" ".repeat(t[n]-s.length);n(s)}function um(e,t,n){let s;try{s=JSON.stringify(e.outputShape)}catch(e){s="multiple"}lm([`${e.name} (${e.getClassName()})`,s,e.countParams().toString()],t,n)}function cm(e,t,n,s){let a;try{a=JSON.stringify(e.outputShape)}catch(e){a="multiple"}const r=[];for(const t of e.inboundNodes)if(!(null!=n&&n.length>0&&-1===n.indexOf(t)))for(let e=0;e<t.inboundLayers.length;++e){const n=t.inboundLayers[e].name,s=t.nodeIndices[e],a=t.tensorIndices[e];r.push(`${n}[${s}][${a}]`)}const i=e.name,o=e.getClassName(),l=0===r.length?"":r[0];lm([`${i} (${o})`,a,e.countParams().toString(),l],t,s);for(let e=1;e<r.length;++e)lm(["","","",r[e]],t,s)}function hm(e,t,n){return("inboundNodes"===e||"outputLayers"===e||"inputLayers"===e)&&0===t&&"string"==typeof n}function pm(e,t){if(null===e)return null;if("string"==typeof e)return Op(e);if("number"==typeof e||"boolean"==typeof e)return e;if(e instanceof Array){const n=[],s=e.length;for(let a=0;a<s;++a){const s=e[a];hm(t,a,s)?n.push(s):n.push(pm(s,t))}return n}{const t={};for(const n of Object.keys(e)){const s=e[n];if("name"===n&&"string"==typeof s)t[n]=s;else{const e=Op(n);t[e]=pm(s,e)}}return t}}function dm(e,t){if(null==e)return null;if("string"==typeof e)return _p(e);if("number"==typeof e||"boolean"==typeof e)return e;if(e instanceof Array){const n=[],s=e.length;for(let a=0;a<s;++a){const s=e[a];hm(t,a,s)?n.push(s):n.push(dm(s,t))}return n}{const t={};for(const n of Object.keys(e)){const s=e[n];t[_p(n)]="name"!==n&&"className"!==n||"string"!=typeof s?dm(s,n):s}return t}}class fm{constructor(e){if(this.id2Value={},this.id2Mask={},this.name2Id={},e instanceof fm)for(const t in e.id2Value)this.id2Value[t]=e.id2Value[t],t in e.id2Mask&&(this.id2Mask[t]=e.id2Mask[t]);else{if(null==e)return;for(const t of e)this.add(t.key,t.value)}}add(e,t,n){if(null!=this.id2Value[e.id])throw new Cp(`Duplicate key: name=${e.name}, id=${e.id}`);return this.id2Value[e.id]=function(e,t){if(null==e.dtype||e.dtype===t.dtype)return t;try{return ja(t,e.dtype)}catch(n){throw new Cp(`The dtype of the feed (${t.dtype}) can not be cast to the dtype of the key '${e.name}' (${e.dtype}).`)}}(e,t),this.name2Id[e.name]=e.id,null!=n&&(this.id2Mask[e.id]=n),this}addFeed(e){this.add(e.key,e.value)}hasKey(e){return null!=this.id2Value[e.id]}names(){return Object.keys(this.name2Id)}getValue(e){if(e instanceof vf){if(null==this.id2Value[e.id])throw new Cp(`Nonexistent key: ${e.name}`);return this.id2Value[e.id]}{const t=this.name2Id[e];if(null==t)throw new Cp(`Feed dict has no SymbolicTensor name: ${e}`);return this.id2Value[t]}}getMask(e){if(e instanceof vf){if(null==this.id2Value[e.id])throw new Cp(`Nonexistent key: ${e.name}`);return this.id2Mask[e.id]}{const t=this.name2Id[e];if(null==t)throw new Cp(`Feed dict has no SymbolicTensor name: ${e}`);return this.id2Mask[t]}}disposeMasks(){null!=this.id2Mask&&Sr(this.id2Mask)}}const mm={},gm={};function ym(e,t,n,s){const 
a=null!=n&&n.training,r=Array.isArray(e),i=r?e:[e],o=i.map((e=>e.name)),l=[],u=t.names();for(const e of o)-1!==u.indexOf(e)?l.push(t.getValue(e)):l.push(null);null!=s&&(s.maxNumTensors=-1/0,s.minNumTensors=1/0);const c=o.join(",")+"|"+t.names().join(",");let h,p;if(null==mm[c]){const e=function(e,t){E(null!=e&&e.length>0,(()=>"Expected at least one fetch, got none"));let n=[],s={};if(1===e.length){const a=xm(e[0],t);n=a.sorted,s=a.recipientMap}else{const a=new Set;for(const r of e){const{sorted:e,recipientMap:i}=xm(r,t);for(const t of e)a.has(t.name)||(n.push(t),a.add(t.name));for(const e in i)null==s[e]&&(s[e]=new Set),i[e].forEach((t=>s[e].add(t)))}}return{sorted:n,recipientCounts:bm(s)}}(i,t);h=e.sorted,p=e.recipientCounts,mm[c]=h,gm[c]=p}h=mm[c],p={},a||Object.assign(p,gm[c]);const d=new fm(t);for(let e=0;e<h.length;++e){if(null!=s){const e=$r().numTensors;e>s.maxNumTensors&&(s.maxNumTensors=e),e<s.minNumTensors&&(s.minNumTensors=e)}const r=h[e],i=r.sourceLayer;if(i instanceof Tf)continue;const u=[],c=[],f=[];let m=!1;for(const e of r.inputs){const n=d.getValue(e),s=d.getMask(e);u.push(n),c.push(s),null!=s&&(m=!0),a||(p[e.name]--,0!==p[e.name]||t.hasKey(e)||-1!==o.indexOf(e.name)||n.isDisposed||!0===e.sourceLayer.stateful||f.push(n))}m&&((n=n||{}).mask=c[0]);const g=Dp(i.apply(u,n));let y=null;i.supportsMasking&&(y=i.computeMask(u,c));const b=wm(r),x=Array.isArray(b)?b:[b];for(let e=0;e<x.length;++e){d.hasKey(x[e])||d.add(x[e],g[e],Array.isArray(y)?y[0]:y);const t=o.indexOf(x[e].name);-1!==t&&(l[t]=g[e])}a||Sr(f)}return d.disposeMasks(),r?l:l[0]}function bm(e){const t={};for(const n in e)t[n]=e[n].size;return t}function xm(e,t){const n=new Set,s=[],a={};for(const e of t.names())n.add(e);const r=[],i=[];for(r.push(e);r.length>0;){const e=r[r.length-1];if(n.has(e.name)){r.pop();continue}const t=i[i.length-1]===r.length-1;if(0===e.inputs.length||t)r.pop(),s.push(e),n.add(e.name),t&&i.pop();else{i.push(r.length-1);for(const t of e.inputs)null==a[t.name]&&(a[t.name]=new Set),a[t.name].add(e.name),n.has(t.name)||r.push(t)}}return{sorted:s,recipientMap:a}}function wm(e){let t;if(1===e.sourceLayer.inboundNodes.length)t=e.sourceLayer.output;else{let n=null;for(let t=0;t<e.sourceLayer.inboundNodes.length;++t)for(const s of e.sourceLayer.inboundNodes[t].outputTensors)if(s.id===e.id){n=t;break}t=e.sourceLayer.getOutputAt(n)}return t}class km extends Cf{constructor(e){if(super({}),this.containerNodes=new Set,this.name=e.name,null==this.name){const e=this.getClassName().toLowerCase();this.name=pf(e)}if(this.supportsMasking=!1,this.trainable_=!0,Array.isArray(e.inputs)?this.inputs=e.inputs.slice():this.inputs=[e.inputs],Array.isArray(e.outputs)?this.outputs=e.outputs.slice():this.outputs=[e.outputs],Wp(this.inputs).length!==this.inputs.length)throw new Cp(`The list of inputs passed to the model is redundant. All inputs should only appear once. Found: ${this.inputs.map((e=>e.name))}`);Wp(this.outputs).length!==this.outputs.length&&console.warn(`The list of outputs passed to the model is redundant. All outputs should only appear once. 
Found: ${this.outputs.map((e=>e.name))}`),this.inputLayers=[],this.inputLayersNodeIndices=[],this.inputLayersTensorIndices=[],this.outputLayers=[],this.outputLayersNodeIndices=[],this.outputLayersTensorIndices=[],this.layers=[],this.internalContainerRefs=[];for(const e of this.outputs){const t=e.sourceLayer,n=e.nodeIndex,s=e.tensorIndex;this.outputLayers.push(t),this.outputLayersNodeIndices.push(n),this.outputLayersTensorIndices.push(s)}for(const e of this.inputs){const t=e.sourceLayer,n=e.nodeIndex,s=e.tensorIndex;Ap(0===n,"input layer has >1 nodes"),Ap(0===s,"input layer has >1 tensors"),this.inputLayers.push(t),this.inputLayersNodeIndices.push(n),this.inputLayersTensorIndices.push(s)}this.inputNames=[],this.outputNames=[],this.feedInputShapes=[],this.feedInputNames=[],this.feedOutputNames=[];for(let t=0;t<this.inputLayers.length;t++){const n=this.inputLayers[t];if(!(n instanceof Tf))throw new TypeError(`Input layers to a LayersModel must be InputLayer objects. Received inputs: ${e.inputs}. Input ${t} (0-based) originates from layer type ${n.getClassName()}.`);this.inputNames.push(n.name),this.feedInputShapes.push(n.batchInputShape),this.feedInputNames.push(n.name)}for(const e of this.outputLayers)this.outputNames.push(e.name);this.internalInputShapes=this.inputs.map((e=>e.shape)),this.internalOutputShapes=this.outputs.map((e=>e.shape));const t={},n={},s={},a={},r={},i=[],o=(e,t,n,s,a,l)=>{null!=s&&null!=a&&null!=l||(s=e.sourceLayer,a=e.nodeIndex,l=e.tensorIndex);const u=s.inboundNodes[a];if(-1!==n.indexOf(u))throw new $p(`The tensor ${e.name} at layer "${s.name}" is part of a cycle.`);if(-1!==t.indexOf(u))return;this.containerNodes.add(km.nodeKey(s,a)),s.id in r||(r[s.id]=Object.keys(r).length),-1===n.indexOf(u)&&n.push(u);const c=u.inboundLayers.length;for(let e=0;e<c;e++){const s=u.inputTensors[e],a=u.inboundLayers[e],r=u.nodeIndices[e],i=u.tensorIndices[e];o(s,t,n,a,r,i)}for(t.push(u);n.indexOf(u)>=0;)n.splice(n.indexOf(u),1);i.push(u)},l=[],u=[];for(const e of this.outputs)o(e,l,u);const c=i.slice().reverse();for(const e of c){n[e.id]=e,e.id in t||(t[e.id]=0);let r=t[e.id];const i=null==s[e.outboundLayer.id]?0:s[e.outboundLayer.id];r=Math.max(r,i),s[e.outboundLayer.id]=r,a[e.outboundLayer.id]=e.outboundLayer,t[e.id]=r;for(let s=0;s<e.inboundLayers.length;s++){const a=e.inboundLayers[s],i=e.nodeIndices[s],o=a.inboundNodes[i],l=null==t[o.id]?0:t[o.id];t[o.id]=Math.max(r+1,l),n[o.id]=o}}const h={};for(const e in t){const s=t[e];s in h||(h[s]=[]),h[s].push(n[e])}const p={};for(const e in s){const t=s[e];t in p||(p[t]=[]),p[t].push(a[e])}let d=Object.keys(p).map((e=>parseInt(e,10))).sort(Bp);this.layers=[];for(const e of d){const t=p[e];t.sort(((e,t)=>{const n=r[e.id],s=r[t.id];return n<s?-1:n>s?1:0}));for(const e of t)e instanceof km&&this.internalContainerRefs.push(e),this.layers.push(e)}this.layersByDepth=p,d=Object.keys(h).map((e=>parseInt(e,10))).sort(Bp);const f=this.inputs.slice(),m=[];for(const e of d)for(const t of h[e]){const e=t.outboundLayer;if(null!=e){for(const n of t.inputTensors)if(-1===f.indexOf(n))throw new $p(`Graph disconnected: cannot obtain value for tensor ${n} at layer "${e.name}". The following previous layers were accessed without issue: ${m}`);for(const e of t.outputTensors)f.push(e);m.push(e.name)}}this.nodesByDepth=h;const g=this.layers.map((e=>e.name));for(const e of g){const t=g.filter((t=>t===e)).length;if(1!==t)throw new $p(`The name "${e}" is used ${t} times in the model. All layer names should be unique. 
Layer names: `+JSON.stringify(g))}this.outboundNodes=[],this.inboundNodes=[],new If({outboundLayer:this,inboundLayers:[],nodeIndices:[],tensorIndices:[],inputTensors:this.inputs,outputTensors:this.outputs,inputMasks:this.inputs.map((e=>null)),outputMasks:this.outputs.map((e=>null)),inputShapes:this.inputs.map((e=>e.shape)),outputShapes:this.outputs.map((e=>e.shape))}),this.built=!0,this._refCount=1}assertNotDisposed(){if(0===this._refCount)throw new Error(`Container '${this.name}' is already disposed.`)}dispose(){this.assertNotDisposed();const e={refCountAfterDispose:null,numDisposedVariables:0};if(0==--this._refCount){for(const t of this.layers)e.numDisposedVariables+=t.dispose().numDisposedVariables;for(const t of this.internalContainerRefs)e.numDisposedVariables+=t.dispose().numDisposedVariables}return e.refCountAfterDispose=this._refCount,e}get trainable(){return this.trainable_}set trainable(e){this.layers.forEach((t=>{t._trainableWeights.forEach((t=>t.trainable=e))})),this.trainable_=e}get trainableWeights(){if(this._trainableWeights.length>0)throw new Cp("Container instance unexpectedly contains _trainableWeights.The trainable weights of a Container are a union of the trainable weights of its consituent Layers. Its own _trainableWeights must remain an empty Array.");if(!this.trainable)return[];let e=[];for(const t of this.layers)e=e.concat(t.trainableWeights);return e}get nonTrainableWeights(){const e=[];for(const t of this.layers)e.push(...t.nonTrainableWeights);if(!this.trainable){const t=[];for(const e of this.layers)t.push(...e.trainableWeights);return t.concat(e)}return e}get weights(){return this.trainableWeights.concat(this.nonTrainableWeights)}loadWeights(e,t=!0){const n={};let s=0;for(const e of this.layers)for(const t of e.weights){if(null!=n[t.originalName])throw new Cp(`Duplicate weight name: ${t.originalName}`);n[t.originalName]=t,s++}const a=[];for(const s in e){let r=s;if(null==n[s]){const e=s.split("/");r=e.slice(0,-2).concat([e[e.length-1]]).join("/")}if(null!=n[r])a.push([n[r],e[s]]);else if(t)throw new Cp(`Provided weight data has no target variable: ${s}`);delete n[r]}if(t){const e=[];for(const t in n)e.push(t);if(e.length>0)throw new Cp(`${e.length} of ${s} weights are not set: ${e}`)}wf(a)}updatedConfig(){const e=this.getConfig(),t={};return t.className=this.getClassName(),t.config=e,t.kerasVersion="tfjs-layers 3.3.0",t.backend="TensorFlow.js",t}toJSON(e,t=!0){const n=dm(this.updatedConfig());return t?JSON.stringify(n):n}call(e,t){return Cr((()=>{e=Dp(e);const n=new fm;for(let t=0;t<this.inputs.length;++t)n.add(this.inputs[t],e[t]);return ym(this.outputs,n,t)}))}computeMask(e,t){return Cr((()=>{let n;return e=Dp(e),n=null==t?Ep(null,e.length):Dp(t),this.runInternalGraph(e,n)[1]}))}computeOutputShape(e){const t=ff(e);if(t.length!==this.inputLayers.length)throw new Cp(`Invalid inputShape argument ${e}: model has ${this.inputLayers.length} tensor inputs.`);const n={};for(let e=0;e<t.length;e++){const s=this.inputLayers[e],a=t[e];n[s.name+"_0_0"]=a}const s=Object.keys(this.nodesByDepth).map((e=>parseInt(e,10))).sort(Bp);if(s.length>1)for(const e of s){const t=this.nodesByDepth[e];for(const e of t){const t=e.outboundLayer;if(-1!==this.inputLayers.map((e=>e.id)).indexOf(t.id))continue;const s=[];for(let t=0;t<e.inboundLayers.length;t++){const a=e.inboundLayers[t],r=e.nodeIndices[t],i=e.tensorIndices[t],o=n[`${a.name}_${r}_${i}`];s.push(o)}const a=ff(t.computeOutputShape(Fp(s))),r=t.inboundNodes.indexOf(e);for(let 
e=0;e<a.length;e++)n[`${t.name}_${r}_${e}`]=a[e]}}const a=[],r=[];for(let e=0;e<this.outputLayers.length;e++){const t=this.outputLayers[e],n=this.outputLayersNodeIndices[e],s=this.outputLayersTensorIndices[e],a=`${t.name}_${n}_${s}`;r.push(a)}for(let e=0;e<r.length;e++){const t=r[e];Ap(t in n),a.push(n[t])}return Fp(a)}runInternalGraph(e,t){null==t&&(t=Ep(null,e.length));const n={};for(let s=0;s<this.inputs.length;++s){const a=this.inputs[s],r=e[s],i=t[s];n[a.id]=[r,i]}const s=Object.keys(this.nodesByDepth).map((e=>parseInt(e,10))).sort(Bp);for(const e of s){const t=this.nodesByDepth[e];for(const e of t){const t=e.outboundLayer,s=e.inputTensors,a=e.outputTensors,r=new Array;for(const e of s)e.id in n&&r.push(n[e.id]);if(r.length===s.length){let s,i,o,l,u={};if(null!=e.callArgs&&(u=e.callArgs),1===r.length){const[e,n]=r[0];null==u.mask&&(u.mask=n),o=Dp(t.call(e,u)),l=Dp(t.computeMask(e,n)),s=[e],i=[n]}else s=r.map((e=>e[0])),i=r.map((e=>e[1])),null==u.mask&&(u.mask=i),o=Dp(t.call(s,u)),l=Dp(t.computeMask(s,i));if(t.activityRegularizer)throw new Sp("LayersModel invocation with concrete Tensor value(s) in the presence of activity regularizer(s) is not supported yet.");for(let e=0;e<a.length;++e){const t=a[e],s=o[e],r=l[e];n[t.id]=[s,r]}}}}const a=[],r=[],i=[];for(const e of this.outputs){Ap(e.id in n,`Could not compute output ${e.name} : ${e.id}`);const[t,s]=n[e.id];i.push(t.shape),a.push(t),r.push(s)}return[a,r,i]}buildNodeConversionMap(e){const t={};let n;for(const e of this.layers){n=e instanceof km?1:0;for(let s=0;s<e.inboundNodes.length;s++){const a=km.nodeKey(e,s);this.containerNodes.has(a)&&(t[a]=n,n+=1)}}return t}getLayer(e,t){if(null!=t){if(this.layers.length<=t)throw new Cp(`Was asked to retrieve layer at index ${t}, but model only has ${this.layers.length} layer(s).`);return this.layers[t]}if(null==e)throw new Cp("Provide either a layer name or layer index");for(const t of this.layers)if(t.name===e)return t;throw new Cp(`No such layer: ${e}`)}calculateLosses(){return Cr((()=>{const e=[];for(const t of this.layers)for(let n=0;n<t.inboundNodes.length;++n){const s=km.nodeKey(t,n);this.containerNodes.has(s)&&e.push(...t.calculateLosses())}return e}))}getConfig(){const e={name:this.name},t=this.buildNodeConversionMap(this.layers),n=[];for(const e of this.layers){const s=e.getClassName(),a=e.getConfig(),r=[];for(let n=0;n<e.inboundNodes.length;n++){const s=e.inboundNodes[n],a=km.nodeKey(e,n);let i={};if(this.containerNodes.has(a)){if(s.callArgs)try{JSON.stringify(s.callArgs),i=s.callArgs}catch(t){console.warn(`Layer ${e.name} was passed non-serializable keyword arguments: ${s.callArgs}. 
They will not be included in the serialized model (and thus will be missing at deserialization time).`),i={}}if(s.inboundLayers.length>0){const e=[];for(let n=0;n<s.inboundLayers.length;n++){const a=s.inboundLayers[n],r=s.nodeIndices[n],o=s.tensorIndices[n];let l=t[km.nodeKey(a,r)];null==l&&(l=0),e.push([a.name,l,o,i])}r.push(e)}}}const i={};i.name=e.name,i.className=s,i.config=a,i.inboundNodes=r,n.push(i)}e.layers=n;const s=[];for(let e=0;e<this.inputLayers.length;e++){const n=this.inputLayers[e],a=this.inputLayersNodeIndices[e],r=km.nodeKey(n,a);if(!this.containerNodes.has(r))continue;let i=t[r];null==i&&(i=0);const o=this.inputLayersTensorIndices[e];s.push([n.name,i,o])}e.inputLayers=s;const a=[];for(let e=0;e<this.outputLayers.length;e++){const n=this.outputLayers[e],s=this.outputLayersNodeIndices[e],r=km.nodeKey(n,s);if(!this.containerNodes.has(r))continue;let i=t[r];null==i&&(i=0);const o=this.outputLayersTensorIndices[e];a.push([n.name,i,o])}return e.outputLayers=a,e}static fromConfig(e,t,n={},s=!1){const a={},r={};function i(e,t){e.name in r?r[e.name].push(t):r[e.name]=[t]}function o(e,t){const n=[];let s;for(const r of t){const o=r[0],l=r[1],u=r[2];if(s=null==r[3]?{}:r[3],!(o in a))return void i(e,t);const c=a[o];if(c.inboundNodes.length<=l)return void i(e,t);const h=c.inboundNodes[l];n.push(h.outputTensors[u])}n.length>0&&e.apply(Fp(n),s)}function l(e){const n=e.name,r=Bf(e,null!=t.customObjects?t.customObjects:{});r.setFastWeightInitDuringBuild(s),a[n]=r,e.inboundNodes.forEach((e=>{if(!(e instanceof Array))throw new Cp(`Corrupted configuration, expected array for nodeData: ${e}`);i(r,e)}))}const u=t.name,c=t.layers;for(const e of c)l(e);for(;!Vp(r);)for(const e of c){const t=a[e.name];if(t.name in r){const e=r[t.name];delete r[t.name];for(const n of e)o(t,n)}}const h=[],p=[],d=t.inputLayers;for(const e of d){const t=e[0],n=e[1],s=e[2];Ap(t in a);const r=a[t].inboundNodes[n].outputTensors;h.push(r[s])}const f=t.outputLayers;for(const e of f){const t=e[0],n=e[1],s=e[2];Ap(t in a);const r=a[t].inboundNodes[n].outputTensors;p.push(r[s])}return new e({inputs:h,outputs:p,name:u})}get stateful(){if(this._stateful)throw new Cp("Container instance unexpectedly has _stateful = true. The statefulness of a Container is determined by the Layers it contains. Its _stateful property must remain the default false.");for(const e of this.layers)if(e.stateful)return!0;return!1}resetStates(){Cr((()=>{this.layers.forEach((e=>{e.stateful&&e.resetStates()}))}))}}function vm(e,t){return function(e,t,n){const s=t.length;if(null==e||Array.isArray(e)&&0===e.length)return t.map((e=>null));if(1===s)return Array.isArray(e)&&1===e.length?e:"object"==typeof e&&t[0]in e?[e[t[0]]]:[e];if(Array.isArray(e)){if(e.length!==s)throw new Error(`Provided ${n} is an array of ${e.length} element(s), but the model has ${s} outputs. Make sure a set of weights is provided for each model output.`);return e}if("object"==typeof e&&Object.keys(e).length>0&&"object"==typeof e[Object.keys(e)[0]]){const n=[];return t.forEach((t=>{t in e?n.push(e[t]):n.push(null)})),n}throw new Error(`The model has multiple (${s}) outputs, so ${n} must be either an array with ${s} elements or an object with ${t} keys. 
Provided ${n} not understood: ${JSON.stringify(e)}`)}(e,t,"classWeight")}async function Nm(e,t,n,s){if(null!=t||null!=s)throw new Error("Support sampleWeight is not implemented yet");if(null!=n){const t=Cr((()=>{if(1===e.shape.length)return e.clone();if(2===e.shape.length){if(e.shape[1]>1){const t=1;return e.argMax(t)}if(1===e.shape[1])return e.reshape([e.shape[0]]);throw new Error(`Encountered unexpected last-dimension size (${e.shape[1]}) during handling of class weights. The size is expected to be >= 1.`)}throw new Error(`Unexpected rank of target (y) tensor (${e.rank}) during handling of class weights. The rank is expected to be 1 or 2.`)})),s=Array.from(await t.data());Sr(t);const a=[];return s.forEach((e=>{if(null==n[e])throw new Error(`classWeight must contain all classes in the training data. The class ${e} exists in the data but not in classWeight`);a.push(n[e])})),_o(a,"float32")}return null}function Im(e,t){return Mi(e,t)}function $m(e,t){let n,s;const a=t;n=a.xs,s=a.ys,E(null!=n&&null!=s,(()=>`A Dataset iterator for fitDataset() is expected to generate objects of the form \`{xs: xVal, ys: yVal}\`, where the two values may be \`tf.Tensor\`, an array of Tensors, or a map of string to Tensor. The provided Dataset instead generates ${t}`));const r=Cm("input",e.inputNames,n),i=Cm("output",e.outputNames,s),o=r[0].shape[0];E(r.length===e.inputs.length,(()=>`LayersModel has ${e.inputs.length} inputs, but the dataset provides ${r.length} inputs. (Expected input keys: ${JSON.stringify(e.inputNames)})`)),E(i.length===e.outputs.length,(()=>`LayersModel has ${e.outputs.length} outputs, but the dataset provides ${i.length} outputs. (Expected output keys: ${JSON.stringify(e.outputNames)})`));for(let t=0;t<r.length;t++)E(r[t].shape[0]===o,(()=>`Batch size mismatch: input ${e.inputNames[t]} has ${r[t].shape[0]}; expected ${o} based on input ${e.inputNames[0]}.`));for(let t=0;t<i.length;t++)E(i[t].shape[0]===o,(()=>`Batch size mismatch: output ${e.outputNames[t]} has ${i[t].shape[0]}; expected ${o} based on input ${e.inputNames[0]}.`));return{xs:r,ys:i}}function Cm(e,t,n){if(n instanceof Ds)return[n];if(Array.isArray(n))return E(n.length===t.length,(()=>`Received an array of ${n.length} Tensors, but expected ${t.length} to match the ${e} keys ${t}.`)),n;{const s=[];for(const a of t){if(null==n[a])throw new Cp(`The feature data generated by the dataset lacks the required ${e} key '${a}'.`);s.push(n[a])}return s}}function Sm(e){return"function"==typeof e.iterator}function Tm(e){E(e>0&&Number.isInteger(e),(()=>`batchSize is required to be a positive integer, but got ${e}`))}function Em(e,t,n){return null==e?[null]:Array.isArray(e)?e.map((e=>Cd(e,t,n-t))):Cd(e,t,n-t)}function Am(e,t){return Cr((()=>null==e?null:Array.isArray(e)?e.map((e=>Am(e,t))):_d(e,"int32"===t.dtype?t:t.toInt())))}function Rm(e,t){const n=[];let s=0,a=null;for(;s<e;)a=s+t,a>=e&&(a=e),n.push([s,a]),s=a;return n}function Fm(e){const t=[];e instanceof Ds&&(e=[e]);for(let n=0;n<e.length;++n){const s=e[n];if(1===s.rank)t.push($d(s,1));else{if(0===s.rank)throw new Error("Expected tensor to be at least 1D, but received a 0D tensor (scalar).");t.push(s)}}return t}function Dm(e,t){if(null==e)return;const n=[];if(t instanceof Ds)n.push(t.id);else if(Array.isArray(t))t.forEach((e=>n.push(e.id)));else if(null!=t)for(const e in t){const s=t[e];n.push(s.id)}const s=[];if(e instanceof Ds)-1===n.indexOf(e.id)&&s.push(e);else if(Array.isArray(e))e.forEach((e=>{-1===n.indexOf(e.id)&&s.push(e)}));else if(null!=e)for(const t in e){const 
a=e[t];-1===n.indexOf(a.id)&&s.push(a)}s.forEach((e=>{e.isDisposed||e.dispose()}))}function _m(e){return Array.isArray(e)}function Om(e){return!function(e){return e instanceof Ds}(e)&&!_m(e)}function Mm(e,t,n,s=!0,a=""){if(null==t||0===t.length){if(null!=e){let t=!1;if(_m(e)&&e.length>0)t=!0;else if(Om(e)){for(const n in e)if(e.hasOwnProperty(n)){t=!0;break}}else t=!0;if(t)throw new Cp(`Error when checking model ${a} expected no data, but got ${e}`)}return[]}if(null==e)return t.map((e=>null));let r;if(Om(e)){e=e,r=[];for(const n of t){if(null==e[n])throw new Cp(`No data provided for "${n}". Need data for each key in: ${t}`);r.push(e[n])}}else if(_m(e)){if((e=e).length!==t.length)throw new Cp(`Error when checking model ${a}: the Array of Tensors that you are passing to your model is not the size the model expected. Expected to see ${t.length} Tensor(s), but instead got the following list of Tensor(s): ${e}`);r=e}else{if(e=e,t.length>1)throw new Cp(`The model ${a} expects ${t.length} Tensor(s), but only received one Tensor. Found: Tensor with shape ${e.shape}`);r=[e]}if(r=Fm(r),null!=n)for(let e=0;e<t.length;++e){if(null==n[e])continue;const i=r[e];if(i.shape.length!==n[e].length)throw new Cp(`Error when checking ${a}: expected ${t[e]} to have ${n[e].length} dimension(s). but got array with shape ${i.shape}`);for(let r=0;r<n[e].length;++r){if(0===r&&!s)continue;const o=i.shape[r],l=n[e][r];if(null!=l&&l>=0&&o!==l)throw new Cp(`Error when checking ${a}: expected ${t[e]} to have shape [${n[e]}], but got array with shape [${i.shape}].`)}}return r}function Lm(e,t,n,s=!0,a=""){let r;if(Array.isArray(e)){if(e.length!==t.length)throw new Cp(`Error when checking model ${a}: the Array of Tensors that you are passing to your model is not the size the the model expected. Expected to see ${t.length} Tensor(s), but instead got ${e.length} Tensors(s).`);r=e}else{if(t.length>1)throw new Cp(`The model expects ${t.length} ${a} Tensors, but only received one Tensor. Found: array with shape ${JSON.stringify(e.shape)}.`);r=[e]}if(null!=n)for(let e=0;e<t.length;++e){if(null==n[e])continue;const i=r[e];if(i.shape.length!==n[e].length)throw new Cp(`Error when checking ${a}: expected ${t[e]} to have ${n[e].length} dimension(s), but got array with shape ${JSON.stringify(i.shape)}`);for(let r=0;r<n[e].length;++r){if(0===r&&!s)continue;const o=i.shape[r],l=n[e][r];if(null!=l&&l!==o)throw new Cp(`Error when checking ${a}: expected ${t[e]} to have shape ${JSON.stringify(n[e])} but got array with shape ${JSON.stringify(i.shape)}.`)}}}class zm extends km{constructor(e){super(e),this.isTraining=!1}summary(e,t,n=console.log){if(!this.built)throw new Cp("This model has never been called, thus its weights have not been created yet. So no summary can be displayed. 
Build the model first (e.g., by calling it on some test data).");!function(e,t,n,s=console.log){const a=function(e){let t=!0;const n=[],s=[];for(const t in e.nodesByDepth)n.push(e.nodesByDepth[t]);for(const e of n){if(e.length>1||1===e.length&&e[0].inboundLayers.length>1){t=!1;break}s.push(...e)}if(t)for(const n of e.layers){let e=!1;for(const a of n.inboundNodes)if(-1!==s.indexOf(a)){if(e){t=!1;break}e=!0}if(!t)break}return t}(e),r=["Layer (type)","Output shape","Param #"];let i;if(a?(t=t||65,n=n||[.45,.85,1]):(t=t||98,n=n||[.33,.55,.67,1]),n[n.length-1]<=1&&(n=n.map((e=>Math.floor(t*e)))),!a){r.push("Receives inputs"),i=[];for(const t in e.nodesByDepth)i.push(...e.nodesByDepth[t])}s("_".repeat(t)),lm(r,n,s),s("=".repeat(t));const o=e.layers;for(let e=0;e<o.length;++e)a?um(o[e],n,s):cm(o[e],n,i,s),s((e===o.length-1?"=":"_").repeat(t));e.checkTrainableWeightsConsistency();const l=function(e){let t;return t=null!=e.collectedTrainableWeights?yf(e.collectedTrainableWeights):yf(e.trainableWeights),t}(e),u=yf(e.nonTrainableWeights);s(`Total params: ${l+u}`),s(`Trainable params: ${l}`),s(`Non-trainable params: ${u}`),s("_".repeat(t))}(this,e,t,n)}compile(e){if(null==e.loss&&(e.loss=[]),this.loss=e.loss,"string"==typeof e.optimizer)this.optimizer_=function(e){const t={Adagrad:()=>eu.adagrad(.01),Adadelta:()=>eu.adadelta(1,.95,Np()),Adam:()=>eu.adam(.001,.9,.999,Np()),Adamax:()=>eu.adamax(.002,.9,.999,Np(),0),RMSProp:()=>eu.rmsprop(.001,.9,0,Np()),SGD:()=>eu.sgd(.01)};if(t.adagrad=t.Adagrad,t.adadelta=t.Adadelta,t.adam=t.Adam,t.adamax=t.Adamax,t.rmsprop=t.RMSProp,t.sgd=t.SGD,e in t)return t[e]();throw new Cp(`Unknown Optimizer ${e}`)}(e.optimizer),this.isOptimizerOwned=!0;else{if(!(e.optimizer instanceof Dr))throw new Cp("User-defined optimizer must be an instance of tf.Optimizer.");this.optimizer_=e.optimizer,this.isOptimizerOwned=!1}let t=[];if(Array.isArray(e.loss)||"string"==typeof e.loss||"function"==typeof e.loss)if(Array.isArray(e.loss)){if(e.loss.length!==this.outputs.length)throw new Cp(`When passing an Array as loss, it should have one entry per model output. The model has ${this.outputs.length} output(s), but you passed loss=${e.loss}.`);const n=e.loss;t=n.map((e=>Yf(e)))}else{const n=Yf(e.loss);this.outputs.forEach((e=>{t.push(n)}))}else{e.loss=e.loss;for(const t in e.loss)if(-1===this.outputNames.indexOf(t))throw new Cp(`Unknown entry in loss dictionary: "${t}". Only expected the following keys: ${this.outputNames}`);for(const n of this.outputNames)null==e.loss[n]&&console.warn(`Output "${n}" is missing from loss dictionary. 
We assume this was done on purpose, and we will not be expecting data to be passed to ${n} during training`),t.push(Yf(e.loss[n]))}this.lossFunctions=t,this.feedOutputNames=[],this.feedOutputShapes=[],this.feedLossFns=[];for(let e=0;e<this.outputs.length;++e){const t=this.internalOutputShapes[e],n=this.outputNames[e];this.feedOutputNames.push(n),this.feedOutputShapes.push(t),this.feedLossFns.push(this.lossFunctions[e])}const n=[];this.metrics=e.metrics,this.metricsNames=["loss"],this.metricsTensors=[],fd("loss",(()=>{for(let e=0;e<this.outputs.length;++e){if(-1!==n.indexOf(e))continue;const t=this.lossFunctions[e];this.outputs.length>1&&(this.metricsTensors.push([t,e]),this.metricsNames.push(this.outputNames[e]+"_loss"))}}));const s=function(e,t){if(null==e||Array.isArray(e)&&0===e.length)return t.map((e=>[]));let n;if("string"==typeof e||"function"==typeof e)n=[e];else{if(!Array.isArray(e)&&"object"!=typeof e)throw new TypeError(`Type of metrics argument not understood. Expected an string,function, Array, or Object, found: ${e}`);n=e}if(Array.isArray(n))return t.map((e=>n));{const e=[];for(const s of t){let t=n.hasOwnProperty(s)?n[s]:[];Array.isArray(t)||(t=[t]),e.push(t)}return e}}(e.metrics,this.outputNames),a=(e,t,n)=>{this.outputNames.length>1&&(t=this.outputNames[e]+"_"+t),this.metricsNames.push(t),this.metricsTensors.push([n,e])};fd("metric",(()=>{for(let e=0;e<this.outputs.length;++e)-1===n.indexOf(e)&&(t=>{let n,s,r;for(const i of t){if("string"==typeof i&&-1!==["accuracy","acc","crossentropy","ce"].indexOf(i)){const t=this.internalOutputShapes[e];let a;1===t[t.length-1]||this.lossFunctions[e]===Kf?-1!==["accuracy","acc"].indexOf(i)?s=Jf:-1!==["crossentropy","ce"].indexOf(i)&&(s=Qf):this.lossFunctions[e]===jf?-1!==["accuracy","acc"].indexOf(i)?s=em:-1!==["crossentropy","ce"].indexOf(i)&&(s=nm):-1!==["accuracy","acc"].indexOf(i)?s=Zf:-1!==["crossentropy","ce"].indexOf(i)&&(s=tm),-1!==["accuracy","acc"].indexOf(i)?a="acc":-1!==["crossentropy","ce"].indexOf(i)&&(a="ce"),r=s,n=""+a}else{const e=am(i);r=e,n=""+rm(i)}let t;fd(n,(()=>{t=r})),a(e,n,t)}})(s[e])})),this.collectedTrainableWeights=this.trainableWeights}checkTrainableWeightsConsistency(){null!=this.collectedTrainableWeights&&this.trainableWeights.length!==this.collectedTrainableWeights.length&&console.warn("Discrepancy between trainableweights and collected trainable weights. 
Did you set `model.trainable` without calling `model.compile()` afterwards?")}evaluate(e,t,n={}){const s=null==n.batchSize?32:n.batchSize;Tm(s);const a=this.standardizeUserDataXY(e,t,!0,s);try{const r=a[0].concat(a[1]);this.makeTestFunction();const i=this.testFunction;return Fp(this.testLoop(i,r,s,n.verbose,n.steps))}finally{Dm(a[0],e),Dm(a[1],t)}}async evaluateDataset(e,t){return this.makeTestFunction(),async function(e,t,n){const s=null!=(n=n||{}).batches,a=e.testFunction;let r=[];if(n.verbose>0)throw new Sp("Verbose mode is not implemented yet.");E(!s||n.batches>0&&Number.isInteger(n.batches),(()=>`Test loop expects \`batches\` to be a positive integer, but received ${JSON.stringify(n.batches)}`));const i="function"==typeof t.next?t:await t.iterator();let o=0,l=0;for(;!s||l<n.batches;){const t=await i.next();if(r=Cr((()=>{if(t.value){const{xs:n,ys:s}=$m(e,t.value),i=n.concat(s),u=Cr((()=>a(i)));if(Sr(i),0===l)for(let e=0;e<u.length;++e)r.push(Fr(0));const c=i[0].shape[0];for(let e=0;e<u.length;++e){const t=u[e],n=r[e];r[e]=Cr((()=>Or(r[e],Mi(c,t)))),l>0&&Sr(n)}Sr(u),o+=c,++l}return r})),t.done){s&&console.warn(`Your dataset iterator ran out of data during evaluateDataset(). Interrupting evalution. Make sure that your dataset can generate at least \`batches\` batches (in this case, ${n.batches} batches). You may need to use the repeat() function when building your dataset.`);break}}for(let e=0;e<r.length;++e){const t=r[e];r[e]=bi(r[e],o),Sr(t)}return Fp(r)}(this,e,t)}checkNumSamples(e,t,n,s="steps"){let a;if(null!=n){if(a=null,null!=t)throw new Cp(`If ${s} is set, batchSize must be null or undefined.Got batchSize = ${t}`)}else{if(null==e)throw new Cp(`Either the input data should have a defined shape, or ${s} shoud be specified.`);a=Array.isArray(e)?e[0].shape[0]:e.shape[0]}return a}execute(e,t){if(Array.isArray(t)&&0===t.length)throw new Cp("`outputs` is an empty Array, which is not allowed.");const n=Array.isArray(t),s=n?t:[t],a=this.retrieveSymbolicTensors(s),r=new fm;if(e instanceof Ds&&(e=[e]),Array.isArray(e)){if(e.length!==this.inputs.length)throw new Cp(`The number of inputs provided (${e.length}) does not match the number of inputs of this model (${this.inputs.length}).`);for(let t=0;t<this.inputs.length;++t)r.add(this.inputs[t],e[t])}else for(const t of this.inputs){const n=e[t.name];if(null==n)throw new Cp(`No value is provided for the model's input ${t.name}`);r.add(t,n)}const i=ym(a,r);return n?i:i[0]}retrieveSymbolicTensors(e){const t=Ep(null,e.length);let n=e.length;for(const s of this.layers){const a=Array.isArray(s.output)?s.output:[s.output],r=a.map((e=>e.name));for(let s=0;s<e.length;++s){const i=r.indexOf(e[s]);if(-1!==i&&(t[s]=a[i],n--),0===n)break}if(0===n)break}if(n>0){const n=[];throw t.forEach(((t,s)=>{null==t&&n.push(e[s])})),new Cp(`Cannot find SymbolicTensors for output name(s): ${JSON.stringify(n)}`)}return t}predictLoop(e,t=32,n=!1){return Cr((()=>{const s=this.checkNumSamples(e);if(n)throw new Sp("Verbose predictLoop() is not implemented yet.");const a=Rm(s,t),r=this.outputs.map((e=>[]));for(let t=0;t<a.length;++t)Cr((()=>{const n=a[t][0],s=a[t][1],r=Em(e,n,s),i=[];if(Array.isArray(r))for(let e=0;e<r.length;++e)i.push({key:this.inputs[e],value:r[e]});else i.push({key:this.inputs[0],value:r});const o=new fm(i);return ym(this.outputs,o)})).forEach(((e,t)=>r[t].push(e)));return Fp(r.map((e=>ii(e,0))))}))}predict(e,t={}){const n=Fm(e);Lm(n,this.inputNames,this.feedInputShapes,!1);try{const s=null==t.batchSize?32:t.batchSize;return 
Tm(s),this.predictLoop(n,s)}finally{Dm(n,e)}}predictOnBatch(e){Lm(e,this.inputNames,this.feedInputShapes,!0);const t=(Array.isArray(e)?e[0]:e).shape[0];return this.predictLoop(e,t)}standardizeUserDataXY(e,t,n=!0,s){if(null==this.optimizer_)throw new $p("You must compile a model before training/testing. Use LayersModel.compile(modelCompileArgs).");const a=[];for(let e=0;e<this.feedOutputShapes.length;++e){const t=this.feedOutputShapes[e];this.feedLossFns[e]===jf?a.push(t.slice(0,t.length-1).concat([1])):a.push(t)}if(function(e,t,n){const s=Wp(e.map((e=>e.shape[0])));s.sort();const a=Wp(t.map((e=>e.shape[0])));if(a.sort(),s.length>1)throw new Cp(`All input Tensors (x) should have the same number of samples. Got array shapes: ${JSON.stringify(e.map((e=>e.shape)))}`);if(a.length>1)throw new Cp(`All target Tensors (y) should have the same number of samples. Got array shapes: ${JSON.stringify(t.map((e=>e.shape)))}`);if(s.length>0&&a.length>0&&!_(s,a))throw new Cp(`Input Tensors should have the same number of samples as target Tensors. Found ${s[0]} input sample(s) and ${a[0]} target sample(s).`)}(e=Mm(e,this.feedInputNames,this.feedInputShapes,!1,"input"),t=Mm(t,this.feedOutputNames,a,!1,"target")),function(e,t,n){const s=[Vf,Kf,Hf];for(let a=0;a<e.length;++a){const r=e[a],i=t[a],o=n[a];if(null!=i){if(i===Hf&&1===r.shape[r.shape.length-1])throw new Cp(`You are passing a target array of shape ${r.shape} while using a loss 'categorical_crossentropy'. 'categorical_crossentropy'expects targets to be binary matrices (1s and 0s) of shape [samples, classes].`);if(-1!==s.indexOf(i)){const e=r.shape.slice(1),t=o.slice(1);for(let n=0;n<e.length;++n){const s=e[n],a=t[n];if(null!=a&&s!==a)throw new Cp(`A target Tensor with shape ${r.shape} was passed for an output of shape ${o}, while using a loss function that expects targets to have the same shape as the output.`)}}}}}(t,this.feedLossFns,this.feedOutputShapes),this.stateful&&null!=s&&s>0&&e[0].shape[0]%s!=0)throw new Cp(`In a stateful network, you should only pass inputs with a number of samples that is divisible by the batch size ${s}. 
Found: ${e[0].shape[0]} sample(s).`);return[e,t]}async standardizeUserData(e,t,n,s,a=!0,r){const[i,o]=this.standardizeUserDataXY(e,t,a,r);if(null!=n)throw new Error("sample weight is not supported yet.");let l=null;if(null!=s){const e=vm(s,this.outputNames);l=[];for(let t=0;t<e.length;++t)l.push(await Nm(o[t],null,e[t]))}return[i,o,l]}testLoop(e,t,n,s=0,a){return Cr((()=>{const r=this.checkNumSamples(t,n,a,"steps"),i=[];if(s>0)throw new Sp("Verbose mode is not implemented yet.");if(null!=a)throw new Sp("steps mode in testLoop() is not implemented yet");{const s=Rm(r,n),a=_o(Nd(0,r));for(let n=0;n<s.length;++n){const r=s[n][0],o=s[n][1],l=Cd(a,r,o-r),u=Am(t,l),c=e(u);if(0===n)for(let e=0;e<c.length;++e)i.push(Fr(0));for(let e=0;e<c.length;++e){const t=c[e];i[e]=Or(i[e],Mi(o-r,t))}}for(let e=0;e<i.length;++e)i[e]=bi(i[e],r)}return i}))}getDedupedMetricsNames(){const e=this.metricsNames,t=[];for(let n=0;n<e.length;++n){const s=e[n];let a=s;Rp(e,s)>1&&(a+=`_${Rp(e.slice(0,n),s)}`),t.push(a)}return t}makeTrainFunction(){return e=>{const t=[],n=e.slice(0,this.inputs.length),s=e.slice(this.inputs.length,this.inputs.length+this.outputs.length),a=e.slice(this.inputs.length+this.outputs.length,this.inputs.length+2*this.outputs.length),r=[],i=this.collectedTrainableWeights.map((e=>e.read()));return[this.optimizer_.minimize((()=>{const e=[];for(let t=0;t<this.inputs.length;++t)e.push({key:this.inputs[t],value:n[t]});const i=new fm(e),o=ym(this.outputs,i,{training:!0});let l;for(let e=0;e<this.lossFunctions.length;++e){let n=(0,this.lossFunctions[e])(s[e],o[e]);null!=a[e]&&(n=Im(n,a[e]));const r=Gi(n);t.push(r),l=0===e?n:Or(l,n)}for(let e=0;e<this.metricsTensors.length;++e){let n;if(this.outputs.length>1&&e<this.outputs.length)n=t[e];else{const t=this.metricsTensors[e][0],a=this.metricsTensors[e][1];n=Gi(t(s[a],o[a]))}Tr(n),r.push(n)}return l=Gi(l),this.calculateLosses().forEach((e=>{l=Or(l,e)})),l}),!0,i)].concat(r)}}makeTestFunction(){this.testFunction=e=>Cr((()=>{const t=[];let n;const s=e.slice(0,this.inputs.length),a=e.slice(this.inputs.length,this.inputs.length+this.outputs.length),r=[];for(let e=0;e<this.inputs.length;++e)r.push({key:this.inputs[e],value:s[e]});const i=new fm(r),o=ym(this.outputs,i);for(let e=0;e<this.lossFunctions.length;++e){const s=this.lossFunctions[e],r=Gi(s(a[e],o[e]));n=0===e?r:Or(n,r),t.push(n)}for(let e=0;e<this.metricsTensors.length;++e){const n=this.metricsTensors[e][0],s=this.metricsTensors[e][1],r=Gi(n(a[s],o[s]));t.push(r)}return t}))}async fit(e,t,n={}){return async function(e,t,n,s={}){if(e.isTraining)throw new Error("Cannot start training because another fit() call is ongoing.");let a,r,i,o,l,u,c;e.isTraining=!0;try{const h=null==s.batchSize?32:s.batchSize;Tm(h);const p=!1,d=await e.standardizeUserData(t,n,s.sampleWeight,s.classWeight,p,h);a=d[0],r=d[1],c=d[2];let f,m=!1;if(null!=s.validationData&&s.validationData.length>0){if(m=!0,2!==s.validationData.length)throw 3===s.validationData.length?new Sp("validationData including sample weights is not supported yet."):new Cp(`When passing validation data, it must contain 2 (valX, valY) or 3 (valX, valY, valSampleWeight) items; ${s.validationData} is invalid.`);i=s.validationData[0],o=s.validationData[1];const t=!0,n=await e.standardizeUserData(i,o,null,null,t,h);l=n[0],u=n[1],f=l.concat(u)}else if(null!=s.validationSplit&&s.validationSplit>0&&s.validationSplit<1){m=!0;const e=Math.floor(a[0].shape[0]*(1-s.validationSplit)),t=a[0].shape[0];l=Em(a,e,t),a=Em(a,0,e),u=Em(r,e,t),r=Em(r,0,e),f=l.concat(u)}else 
null!=s.validationSteps&&(m=!0);const g=a.concat(r).concat(c);e.checkTrainableWeightsConsistency();const y=e.makeTrainFunction(),b=e.getDedupedMetricsNames();let x,w;m?(e.makeTestFunction(),x=e.testFunction,w=b.slice().concat(b.map((e=>"val_"+e)))):(x=null,f=[],w=b.slice());const k=Lf(s.callbacks,s.yieldEvery);return await async function(e,t,n,s,a,r,i,o,l,u,c,h,p,d,f){null==a&&(a=32),null==r&&(r=1),null==c&&(c=!0),null==p&&(p=0);let m=!1;null!=l&&null!=u&&(m=!0);const g=e.checkNumSamples(n,a,d,"steps_per_epoch");let y;null!=g&&(y=Nd(0,g)),null==i&&(i=1);const{callbackList:b,history:x}=Pf(o,i,r,p,g,d,a,m,h);b.setModel(e),e.history=x,await b.onTrainBegin(),e.stopTraining_=!1;for(let i=p;i<r;++i){await b.onEpochBegin(i);const r={};{if("batch"===c)throw new Sp("batch shuffling is not implemneted yet");c&&C(y);const i=_o(y),o=Rm(g,a);for(let c=0;c<o.length;++c){const h={};if(await b.onBatchBegin(c,h),Cr((()=>{const p=o[c][0],d=o[c][1],f=Cd(i,p,d-p);h.batch=c,h.size=d-p;const g=Am(n,f),y=t(g);for(let e=0;e<s.length;++e){const t=s[e],n=y[e];h[t]=n,Tr(n)}if(c===o.length-1&&m){const t=e.testLoop(l,u,a);for(let e=0;e<s.length;++e){const n=s[e],a=t[e];Tr(a),r["val_"+n]=a}}})),await b.onBatchEnd(c,h),Af(h),e.stopTraining_)break}i.dispose()}if(await b.onEpochEnd(i,r),e.stopTraining_)break}return await b.onTrainEnd(),await e.history.syncData(),e.history}(e,y,g,b,h,s.epochs,s.verbose,k,x,f,s.shuffle,w,s.initialEpoch,null)}finally{e.isTraining=!1,Dm(a,t),Dm(r,n),Dm(l,i),Dm(u,o),null!=c&&Sr(c)}}(this,e,t,n)}async fitDataset(e,t){return async function(e,t,n){const s=null!=n.batchesPerEpoch;if(E(null!=e.optimizer,(()=>"You must compile a model before training/testing. Use LayersModel.compile(modelCompileConfig).")),E(null!=n,(()=>"For fitDataset(), the 2nd argument (config) is required, but it is not provided in this call.")),E(null!=n.epochs&&n.epochs>0&&Number.isInteger(n.epochs),(()=>`For fitDataset(), config.epochs is expected to be a positive integer, but got ${n.epochs}`)),E(!s||n.batchesPerEpoch>0&&Number.isInteger(n.batchesPerEpoch),(()=>`For fitDataset(), config.batchesPerEpoch is expected to be a positive integer if specified, but got ${n.batchesPerEpoch}`)),E(null==n.validationSplit,(()=>"`validationSplit` is not supported by `fitDataset()`. 
Use validationData instead.")),e.isTraining)throw new Error("Cannot start training because another fit() call is ongoing.");e.isTraining=!0;try{const a=null!=n.validationData;let r,i;if(a)if(Sm(n.validationData))E(null==n.validationBatches||n.validationBatches>0&&Number.isInteger(n.validationBatches),(()=>`For fitDataset() with dataset-based validation, config.validationBatches is expected not to be provided, or to be a positive integer, but got ${n.validationBatches}`));else{const e=function(e){if(3===e.length)throw new Sp("Validation with sample weights is not implemented yet.");return{xs:e[0],ys:e[1]}}(n.validationData);r=e.xs,i=e.ys}const o=e.makeTrainFunction(),l=e.getDedupedMetricsNames();let u;u=a?l.slice().concat(l.map((e=>"val_"+e))):l.slice();const c=Lf(n.callbacks,n.yieldEvery),h=null==n.verbose?1:n.verbose,{callbackList:p,history:d}=Pf(c,h,n.epochs,null,null,function(e,t){let n=null;return null!=t.batchesPerEpoch?n=t.batchesPerEpoch:Number.isFinite(e.size)&&(n=e.size),n}(t,n),null,a,u);p.setModel(e),e.history=d,await p.onTrainBegin(),e.stopTraining_=!1;let f=null==n.initialEpoch?0:n.initialEpoch,m=await t.iterator();for(;f<n.epochs;){const u={};await p.onEpochBegin(f);let c=0,h=0;for(s||(m=await t.iterator());!s||c<n.batchesPerEpoch;){const t=await m.next();if(s&&t.done){console.warn(`You provided \`batchesPerEpoch\` as ${n.batchesPerEpoch}, but your dataset iterator ran out of data after ${c} batches; interrupting training. Make sure that your dataset can generate at least \`batchesPerEpoch * epochs\` batches (in this case, `+n.batchesPerEpoch*n.epochs+" batches). You may need to use the repeat() function when building your dataset.");break}if(null!=t.value){const{xs:s,ys:a}=$m(e,t.value),r={};r.batch=h,r.size=s[0].shape[0],await p.onBatchBegin(h,r);const i=[];if(null!=n.classWeight){const t=vm(n.classWeight,e.outputNames);for(let e=0;e<t.length;++e)i.push(await Nm(a[e],null,t[e]))}const u=s.concat(a).concat(i),d=o(u);Sr(u);for(let e=0;e<l.length;++e){const t=l[e],n=d[e];r[t]=n,Tr(n)}await p.onBatchEnd(h,r),Af(r),h++,c++}if(s?c>=n.batchesPerEpoch:t.done){if(a){let t;t=Sm(n.validationData)?Dp(await e.evaluateDataset(n.validationData,{batches:n.validationBatches})):Dp(e.evaluate(r,i,{batchSize:null==n.validationBatchSize?32:n.validationBatchSize,verbose:0}));for(let n=0;n<e.metricsNames.length;++n)u[`val_${e.metricsNames[n]}`]=t[n]}break}if(e.stopTraining_)break}if(await p.onEpochEnd(f,u),f++,e.stopTraining_)break}return await p.onTrainEnd(),await e.history.syncData(),e.history}finally{e.isTraining=!1}}(this,e,t)}async trainOnBatch(e,t){const n=await this.standardizeUserData(e,t),s=n[0],a=n[1],r=this.makeTrainFunction()(s.concat(a)),i=[];for(const e of r){const t=await e.data();i.push(t[0])}return Sr(r),Fp(i)}getNamedWeights(e){const t=[],n=null!=e&&e.trainableOnly,s=n?this.trainableWeights:this.weights,a=this.getWeights(n);for(let e=0;e<s.length;++e)n&&!s[e].trainable||t.push({name:s[e].originalName,tensor:a[e]});return t}set stopTraining(e){this.stopTraining_=e}get stopTraining(){return this.stopTraining_}get optimizer(){return this.optimizer_}set optimizer(e){this.optimizer_!==e&&(this.optimizer_=e,this.isOptimizerOwned=!1)}dispose(){const e=super.dispose();if(0===e.refCountAfterDispose&&null!=this.optimizer&&this.isOptimizerOwned){const t=$r().numTensors;this.optimizer_.dispose(),e.numDisposedVariables+=t-$r().numTensors}return e}getLossIdentifiers(){let e;if("string"==typeof this.loss)e=_p(this.loss);else if(Array.isArray(this.loss)){for(const e of 
this.loss)if("string"!=typeof e)throw new Error("Serialization of non-string loss is not supported.");e=this.loss.map((e=>_p(e)))}else{const t=Object.keys(this.loss);e={};const n=this.loss;for(const s of t){if("string"!=typeof n[s])throw new Error("Serialization of non-string loss is not supported.");e[s]=_p(n[s])}}return e}getMetricIdentifiers(){if("string"==typeof this.metrics||"function"==typeof this.metrics)return[_p(rm(this.metrics))];if(Array.isArray(this.metrics))return this.metrics.map((e=>_p(rm(e))));{const e={};for(const t in this.metrics)e[t]=_p(rm(this.metrics[t]));return e}}getTrainingConfig(){return{loss:this.getLossIdentifiers(),metrics:this.getMetricIdentifiers(),optimizer_config:{class_name:this.optimizer.getClassName(),config:this.optimizer.getConfig()}}}loadTrainingConfig(e){if(null!=e.weighted_metrics)throw new Error("Loading weight_metrics is not supported yet.");if(null!=e.loss_weights)throw new Error("Loading loss_weights is not supported yet.");if(null!=e.sample_weight_mode)throw new Error("Loading sample_weight_mode is not supported yet.");const t=Bf(pm(e.optimizer_config));let n,s;if("string"==typeof e.loss)n=Op(e.loss);else if(Array.isArray(e.loss))n=e.loss.map((e=>Op(e)));else if(null!=e.loss){n={};for(const t in e.loss)n[t]=Op(e.loss[t])}if(Array.isArray(e.metrics))s=e.metrics.map((e=>Op(e)));else if(null!=e.metrics){s={};for(const t in e.metrics)s[t]=Op(e.metrics[t])}this.compile({loss:n,metrics:s,optimizer:t})}async save(e,t){if("string"==typeof e){const t=ka(e);if(0===t.length)throw new Cp(`Cannot find any save handlers for URL '${e}'`);if(t.length>1)throw new Cp(`Found more than one (${t.length}) save handlers for URL '${e}'`);e=t[0]}if(null==e.save)throw new Cp("LayersModel.save() cannot proceed because the IOHandler provided does not have the `save` attribute defined.");const n=await da(this.getNamedWeights(t)),s={modelTopology:this.toJSON(null,!1),format:"layers-model",generatedBy:"TensorFlow.js tfjs-layers v3.3.0",convertedBy:null};if(null!=t&&t.includeOptimizer&&null!=this.optimizer){s.trainingConfig=this.getTrainingConfig();const e="optimizer",{data:t,specs:a}=await da(await this.optimizer.getWeights(),e);n.specs.push(...a),n.data=ya([n.data,t])}if(null!=this.userDefinedMetadata){const e=!0;im(this.userDefinedMetadata,this.name,e),s.userDefinedMetadata=this.userDefinedMetadata}return s.weightData=n.data,s.weightSpecs=n.specs,e.save(s)}setUserDefinedMetadata(e){im(e,this.name),this.userDefinedMetadata=e}getUserDefinedMetadata(){return this.userDefinedMetadata}}zm.className="Model",Nr(zm);class Pm extends zm{}Pm.className="Functional",Nr(Pm);class Bm extends zm{constructor(e){if(super({inputs:[],outputs:[]}),e=e||{},this.trainable=!0,this.built=!1,this.name=null!=e.name?e.name:pf("sequential_"),null!=e.layers)for(const t of e.layers)this.add(t)}checkShape(e){if(e.inboundNodes[0].outputTensors[0].shape.some((e=>e<0)))throw new Cp(`Negative dimension size caused by adding layer ${e.name} with input shape [${e.inboundNodes[0].inputTensors[0].shape}]`)}add(e){const t=e instanceof Bm||e instanceof zm;let n;if(t){if(n=e,1!==n.outputs.length)throw new Cp("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");if(1!==n.inputs.length)throw new Cp("All layers in a Sequential model should have a single input tensor. 
For multi-input layers, use the functional API.")}if(0===this.outputs.length){if(0===e.inboundNodes.length){if(null==e.batchInputShape)throw new Cp("The first layer in a Sequential model must get an `inputShape` or `batchInputShape` argument.");const t=function(e){if(null==e.batchShape&&null==e.shape)throw new Error("Please provide to Input either a `shape` or a `batchShape` argument. Note that `shape` does not include the batch dimension.");if(null!=e.batchShape&&null!=e.shape)throw new Cp("Please provide either a `shape` or `batchShape` argument to Input, but not both.");let t=e.batchShape;null!=e.shape&&null==t&&(t=[null].concat(e.shape));let n=e.dtype;return null==n&&(n="float32"),new Tf({batchInputShape:t,name:e.name,dtype:n,sparse:e.sparse}).inboundNodes[0].outputTensors[0]}({batchShape:e.batchInputShape,dtype:e.dtype,name:e.name+"_input"});e.apply(t)}if(t)this.outputs=n.outputs,this.inputs=n.inputs;else{if(1!==e.inboundNodes.length)throw new Cp(`A layer added to a Sequential model must not already be connected somewhere else. LayersModel received layer ${e.name} which has ${e.inboundNodes.length} pre-existing inbound connections.`);if(1!==e.inboundNodes[0].outputTensors.length)throw new Cp("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");this.checkShape(e),this.outputs=[e.inboundNodes[0].outputTensors[0]],this.inputs=Sf(this.outputs[0])}this.inboundNodes=[],new If({outboundLayer:this,inboundLayers:[],nodeIndices:[],tensorIndices:[],inputTensors:this.inputs,outputTensors:this.outputs,inputMasks:Ep(null,this.inputs.length),outputMasks:[null],inputShapes:this.inputs.map((e=>e.shape)),outputShapes:this.outputs[0].shape})}else{const t=e.apply(this.outputs[0]);if(Array.isArray(t))throw new TypeError("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");this.checkShape(e),this.outputs=[t],this.inboundNodes[0].outputTensors=this.outputs,this.inboundNodes[0].outputShapes=[this.outputs[0].shape]}this.layers.push(e),this.built=!1}pop(){if(0===this.layers.length)throw new TypeError("There are no layers in the model.");if(this.layers.pop(),0===this.layers.length)this.outputs=[],this.inboundNodes=[],this.outboundNodes=[];else{const e=this.layers.length-1;this.layers[e].outboundNodes=[],this.outputs=[this.layers[e].output],this.inboundNodes[0].outputTensors=this.outputs,this.inboundNodes[0].outputShapes=[this.outputs[0].shape]}}call(e,t){return null==this.model&&this.build(),this.model.call(e,t)}build(e){if(gf(e),0===this.inputs.length||0===this.outputs.length)throw new TypeError("Sequential model cannot be built: model is empty. 
Add some layers first.");this.model=new zm({inputs:this.inputs,outputs:this.outputs[0],name:this.name+"_model"}),this.model.trainable=this.trainable,this.supportsMasking=this.model.supportsMasking,this.inputLayers=this.model.inputLayers,this.inputLayersNodeIndices=this.model.inputLayersNodeIndices,this.inputLayersTensorIndices=this.model.inputLayersTensorIndices,this.outputLayers=this.model.outputLayers,this.outputLayersNodeIndices=this.model.outputLayersNodeIndices,this.outputLayersTensorIndices=this.model.outputLayersTensorIndices,this.nodesByDepth=this.model.nodesByDepth,this.containerNodes=this.model.containerNodes,this.outputNames=this.model.outputNames,this.inputNames=this.model.inputNames,this.built=!0}countParams(){return this.built||this.build(),super.countParams()}summary(e,t,n=console.log){this.built||this.build(),super.summary(e,t,n)}setWeights(e){null==this.model&&this.build(),this.model.setWeights(e)}evaluate(e,t,n={}){if(!this.built)throw new $p("The model needs to be compiled before being used.");return this.model.evaluate(e,t,n)}async evaluateDataset(e,t){if(!this.built)throw new $p("The model needs to be compiled before being used.");return this.model.evaluateDataset(e,t)}predict(e,t={}){return null==this.model&&this.build(),this.model.predict(e,t)}predictOnBatch(e){return null==this.model&&this.build(),this.model.predictOnBatch(e)}compile(e){this.build(),this.model.compile(e),this.optimizer_=this.model.optimizer,this.isOptimizerOwned=this.model.isOptimizerOwned,this.loss=this.model.loss,this.metrics=this.model.metrics,this.metricsTensors=this.model.metricsTensors,this.metricsNames=this.model.metricsNames}get optimizer(){return null==this.model?void 0:this.model.optimizer}set optimizer(e){this.model.optimizer=e}async fit(e,t,n={}){if(!this.built)throw new $p("The model needs to be compiled before being used.");return this.model.fit(e,t,n)}async fitDataset(e,t){if(!this.built)throw new $p("The model needs to be compiled before being used.");return this.model.fitDataset(e,t)}async trainOnBatch(e,t){return this.model.trainOnBatch(e,t)}static fromConfig(e,t,n={},s=!1){let a,r={};if(t instanceof Array){if(null==t[0].className||"Merge"===t[0].className)throw new Cp("Legacy serialization format not supported yet.");a=t}else E(null!=t.layers,(()=>"When the config data for a Sequential model is not an Array, it must be an Object that contains the 'layers' field.")),a=t.layers,delete t.layers,r=t;const i=new e(r);if(!(i instanceof Bm))throw new Sp(`Sequential.fromConfig called on non-Sequential input: ${i}`);for(const e of a){const t=Bf(e,void 0,s);s&&t.setFastWeightInitDuringBuild(!0),i.add(t)}return i}set stopTraining(e){if(null==this.model)throw new Cp("Cannot set the stopTraining property of a sequential model before it is compiled.");this.model.stopTraining=e}get stopTraining(){if(null==this.model)throw new Cp("Cannot get the stopTraining property of a sequential model before it is compiled.");return this.model.stopTraining}getConfig(){const e=[];for(const t of this.layers){const n={};n.className=t.getClassName(),n.config=t.getConfig(),e.push(n)}return{name:this.name,layers:e}}}Bm.className="Sequential",Nr(Bm);class Wm extends kr{getConfig(){return{}}}class Vm extends Wm{apply(e,t=1){return function(e,t=1){if(1!==t)throw new Sp(`Support for alpha values other than 1 (${t}) is not implemented yet.`);return xi(e)}(e,t)}}Vm.className="elu",Nr(Vm);class Um extends Wm{apply(e){return xo(e)}}Um.className="selu",Nr(Um);class Gm extends Wm{apply(e){return 
yo(e)}}Gm.className="relu",Nr(Gm);class Hm extends Wm{apply(e){return Cr((()=>ji(6,yo(e))))}}Hm.className="relu6",Nr(Hm);class jm extends Wm{apply(e){return e}}jm.className="linear",Nr(jm);class Km extends Wm{apply(e){return ko(e)}}Km.className="sigmoid",Nr(Km);class qm extends Wm{apply(e){return function(e){return Cr((()=>{const t=Or(.5,Mi(.2,e));return ri(t,0,1)}))}(e)}}qm.className="hardSigmoid",Nr(qm);class Xm extends Wm{apply(e){return To(e)}}Xm.className="softplus",Nr(Xm);class Ym extends Wm{apply(e){return function(e){return Cr((()=>bi(e,_r(e).add(1))))}(e)}}Ym.className="softsign",Nr(Ym);class Jm extends Wm{apply(e){return Do(e)}}Jm.className="tanh",Nr(Jm);class Zm extends Wm{apply(e,t=-1){return So(e,t)}}Zm.className="softmax",Nr(Zm);class Qm extends Wm{apply(e,t=-1){return Pi(e,t)}}Qm.className="logSoftmax",Nr(Qm);class eg extends Wm{apply(e,t=1){return Cr((()=>ko(e.mul(t)).mul(e)))}}function tg(e){return e.getClassName()}function ng(e,t={}){return Pp(e,vr.getMap().classNameMap,t,"activation")}function sg(e){if(null==e){return ng({className:"linear",config:{}})}if("string"==typeof e){const t={};return t.className=e,t.config={},ng(t)}return e instanceof Wm?e:ng(e)}eg.className="swish",Nr(eg);class ag extends kr{}class rg extends ag{constructor(e){super(),function(e){if(null!=e&&"object"!=typeof e)throw new Error(`Argument to L1L2 regularizer's constructor is expected to be an object, but received: ${e}`)}(e),this.l1=null==e||null==e.l1?.01:e.l1,this.l2=null==e||null==e.l2?.01:e.l2,this.hasL1=0!==this.l1,this.hasL2=0!==this.l2}apply(e){return Cr((()=>{let t=io([1]);return this.hasL1&&(t=Or(t,zi(Mi(this.l1,_r(e))))),this.hasL2&&(t=Or(t,zi(Mi(this.l2,Od(e))))),t.asScalar()}))}getConfig(){return{l1:this.l1,l2:this.l2}}static fromConfig(e,t){return new e({l1:t.l1,l2:t.l2})}}rg.className="L1L2",Nr(rg);const ig={l1l2:"L1L2"};function og(e){return Lp(e)}function lg(e,t={}){return Pp(e,vr.getMap().classNameMap,t,"regularizer")}function ug(e){return null==e?null:"string"==typeof e?lg({className:e in ig?ig[e]:e,config:{}}):e instanceof ag?e:lg(e)}class cg extends Cf{constructor(e){super(null==e?{}:e),this.supportsMasking=!0,null!=e&&(this.maxValue=e.maxValue)}call(e,t){e=mf(e);let n=yo(e);return null!=this.maxValue&&(n=ri(n,0,this.maxValue)),n}computeOutputShape(e){return e}getConfig(){const e={maxValue:this.maxValue},t=super.getConfig();return Object.assign(e,t),e}}cg.className="ReLU",Nr(cg);class hg extends Cf{constructor(e){super(null==e?{}:e),this.DEFAULT_ALPHA=.3,null==e&&(e={}),this.alpha=null==e.alpha?this.DEFAULT_ALPHA:e.alpha}call(e,t){const n=mf(e);return Fi(n,this.alpha)}computeOutputShape(e){return e}getConfig(){const e={alpha:this.alpha},t=super.getConfig();return Object.assign(e,t),e}}hg.className="LeakyReLU",Nr(hg);class pg extends Cf{constructor(e){if(super(null==e?{}:e),this.DEFAULT_ALPHA_INITIALIZER="zeros",null==e&&(e={}),this.supportsMasking=!0,this.alphaInitializer=lf(e.alphaInitializer||this.DEFAULT_ALPHA_INITIALIZER),this.alphaRegularizer=ug(e.alphaRegularizer),this.alphaConstraint=sd(e.alphaConstraint),null==e.sharedAxes)this.sharedAxes=null;else if(Array.isArray(e.sharedAxes))this.sharedAxes=e.sharedAxes;else{if("number"!=typeof e.sharedAxes)throw new Cp(`Expected sharedAxes to be a number or an array of numbers, but got ${e.sharedAxes}`);this.sharedAxes=[e.sharedAxes]}}build(e){const t=(e=gf(e)).slice(1);if(null!=this.sharedAxes)for(const e of 
this.sharedAxes)t[e-1]=1;this.alpha=this.addWeight("alpha",t,"float32",this.alphaInitializer,this.alphaRegularizer,!0,this.alphaConstraint);const n={};if(null!=this.sharedAxes)for(let t=1;t<e.length;++t)n[t]=e[t];this.inputSpec=[new kf({ndim:e.length,axes:n})],this.built=!0}call(e,t){return e=mf(e),co(e,this.alpha.read())}getConfig(){const e={alphaInitializer:of(this.alphaInitializer),alphaRegularizer:og(this.alphaRegularizer),alphaConstraint:td(this.alphaConstraint),sharedAxes:this.sharedAxes},t=super.getConfig();return Object.assign(e,t),e}}pg.className="PReLU",Nr(pg);class dg extends Cf{constructor(e){if(super(null==e?{}:e),this.DEFAULT_ALPHA=1,null==e&&(e={}),null!=e.alpha&&e.alpha!==this.DEFAULT_ALPHA)throw new Sp(`Non-default alpha value (${e.alpha}) is not supported by the ELU layer yet.`);this.alpha=null==e.alpha?this.DEFAULT_ALPHA:e.alpha}call(e,t){const n=mf(e);return xi(n)}computeOutputShape(e){return e}getConfig(){const e={alpha:this.alpha},t=super.getConfig();return Object.assign(e,t),e}}dg.className="ELU",Nr(dg);class fg extends Cf{constructor(e){super(null==e?{}:e),this.DEFAULT_THETA=1,null==e&&(e={}),this.theta=null==e.theta?this.DEFAULT_THETA:e.theta}call(e,t){const n=mf(e);return n.mul(Id(n.greater(this.theta),"float32"))}computeOutputShape(e){return e}getConfig(){const e={theta:this.theta},t=super.getConfig();return Object.assign(e,t),e}}fg.className="ThresholdedReLU",Nr(fg);class mg extends Cf{constructor(e){super(null==e?{}:e),this.DEFAULT_AXIS=1,null==e&&(e={}),this.softmax=(new Zm).apply,this.axis=null==e.axis?this.DEFAULT_AXIS:e.axis}call(e,t){const n=mf(e);return this.softmax(n,this.axis)}computeOutputShape(e){return e}getConfig(){const e={axis:this.axis},t=super.getConfig();return Object.assign(e,t),e}}function gg(e,t,n){if("number"==typeof e)return Ep(e,t);if(e.length!==t)throw new Cp(`The ${n} argument must be an integer or tuple of ${t} integers. Received: ${e.length} elements.`);for(let a=0;a<t;++a){const r=e[a];if((s=r)!==parseInt(s.toString(),10))throw new Cp(`The ${n} argument must be an integer or tuple of ${t} integers. 
Received: ${JSON.stringify(e)} including a non-integer number ${r}`)}return e;var s}function yg(e,t,n,s,a=1){if(null==e)return e;let r;return r="same"===n?e:e-(t+(t-1)*(a-1))+1,Math.floor((r+s-1)/s)}function bg(e,t,n,s){if(null==e)return null;if("valid"===s)e=e*t+vd([n-t,0]);else{if("same"!==s)throw new Cp(`Unsupport padding mode: ${s}.`);e*=t}return e}function xg(e,t){return Cr((()=>(cd(t),"channelsFirst"===t?Wo(e,[0,2,3,1]):e)))}function wg(e,t){return Cr((()=>(cd(t),"channelsFirst"===t?Wo(e,[0,2,3,4,1]):e)))}function kg(e,t,n,s=[1,1],a="valid",r,i,o=null){return Cr((()=>{if(null==r&&(r="channelsLast"),cd(r),3!==e.rank&&4!==e.rank)throw new Cp(`conv2dWithBiasActivation expects input to be of rank 3 or 4, but received ${e.rank}.`);if(3!==t.rank&&4!==t.rank)throw new Cp(`conv2dWithBiasActivation expects kernel to be of rank 3 or 4, but received ${e.rank}.`);let l=xg(e,r);if("causal"===a)throw new Sp("The support for CAUSAL padding mode in conv1dWithBias is not implemented yet.");return l=nl({x:l,filter:t,strides:s,pad:"same"===a?"same":"valid",dilations:i,dataFormat:"NHWC",bias:n,activation:o}),"channelsFirst"===r&&(l=Wo(l,[0,3,1,2])),l}))}mg.className="Softmax",Nr(mg);class vg extends Cf{constructor(e,t){if(super(t),this.bias=null,this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_BIAS_INITIALIZER="zeros",vg.verifyArgs(t),this.rank=e,Hp(this.rank,"rank"),1!==this.rank&&2!==this.rank&&3!==this.rank)throw new Sp(`Convolution layer for rank other than 1, 2, or 3 (${this.rank}) is not implemented yet.`);if(this.kernelSize=gg(t.kernelSize,e,"kernelSize"),this.strides=gg(null==t.strides?1:t.strides,e,"strides"),this.padding=null==t.padding?"valid":t.padding,hd(this.padding),this.dataFormat=null==t.dataFormat?"channelsLast":t.dataFormat,cd(this.dataFormat),this.activation=sg(t.activation),this.useBias=null==t.useBias||t.useBias,this.biasInitializer=lf(t.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.biasConstraint=sd(t.biasConstraint),this.biasRegularizer=ug(t.biasRegularizer),this.activityRegularizer=ug(t.activityRegularizer),this.dilationRate=gg(null==t.dilationRate?1:t.dilationRate,e,"dilationRate"),1===this.rank&&Array.isArray(this.dilationRate)&&1!==this.dilationRate.length)throw new Cp(`dilationRate must be a number or an array of a single number for 1D convolution, but received ${JSON.stringify(this.dilationRate)}`);if(2===this.rank){if("number"==typeof this.dilationRate)this.dilationRate=[this.dilationRate,this.dilationRate];else if(2!==this.dilationRate.length)throw new Cp(`dilationRate must be a number or array of two numbers for 2D convolution, but received ${JSON.stringify(this.dilationRate)}`)}else if(3===this.rank)if("number"==typeof this.dilationRate)this.dilationRate=[this.dilationRate,this.dilationRate,this.dilationRate];else if(3!==this.dilationRate.length)throw new Cp(`dilationRate must be a number or array of three numbers for 3D convolution, but received ${JSON.stringify(this.dilationRate)}`)}static verifyArgs(e){if(Ap("kernelSize"in e,"required key 'kernelSize' not in config"),"number"!=typeof e.kernelSize&&!Gp(e.kernelSize,"number",1,3))throw new Cp(`BaseConv expects config.kernelSize to be number or number[] with length 1, 2, or 3, but received ${JSON.stringify(e.kernelSize)}.`)}getConfig(){const 
e={kernelSize:this.kernelSize,strides:this.strides,padding:this.padding,dataFormat:this.dataFormat,dilationRate:this.dilationRate,activation:tg(this.activation),useBias:this.useBias,biasInitializer:of(this.biasInitializer),biasRegularizer:og(this.biasRegularizer),activityRegularizer:og(this.activityRegularizer),biasConstraint:td(this.biasConstraint)},t=super.getConfig();return Object.assign(e,t),e}}class Ng extends vg{constructor(e,t){super(e,t),this.kernel=null,Ng.verifyArgs(t),this.filters=t.filters,Hp(this.filters,"filters"),this.kernelInitializer=lf(t.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.kernelConstraint=sd(t.kernelConstraint),this.kernelRegularizer=ug(t.kernelRegularizer)}build(e){e=gf(e);const t="channelsFirst"===this.dataFormat?1:e.length-1;if(null==e[t])throw new Cp(`The channel dimension of the input should be defined. Found ${e[t]}`);const n=e[t],s=this.kernelSize.concat([n,this.filters]);this.kernel=this.addWeight("kernel",s,null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.filters],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint)),this.inputSpec=[{ndim:this.rank+2,axes:{[t]:n}}],this.built=!0}call(e,t){return Cr((()=>{let t;e=mf(e);const n=null==this.bias?null:this.bias.read(),s=Kp(this.activation.getClassName());if(null!=s&&2===this.rank)t=kg(e,this.kernel.read(),n,this.strides,this.padding,this.dataFormat,this.dilationRate,s);else{if(1===this.rank)t=function(e,t,n,s=1,a="valid",r,i=1){return Cr((()=>{if(null==r&&(r="channelsLast"),cd(r),3!==e.shape.length)throw new Cp(`The input of a conv1dWithBias operation should be 3, but is ${e.shape.length} instead.`);if(3!==t.shape.length)throw new Cp(`The kernel for a conv1dWithBias operation should be 3, but is ${t.shape.length} instead`);if(null!=n&&1!==n.shape.length)throw new Cp(`The bias for a conv1dWithBias operation should be 1, but is ${t.shape.length} instead`);if("channelsFirst"===r&&(e=Wo(e,[0,2,1])),"causal"===a)throw new Sp("The support for CAUSAL padding mode in conv1dWithBias is not implemented yet.");let o=pi(e,t,s,"same"===a?"same":"valid","NWC",i);return null!=n&&(o=Ld(o,n)),o}))}(e,this.kernel.read(),n,this.strides[0],this.padding,this.dataFormat,this.dilationRate[0]);else if(2===this.rank)t=kg(e,this.kernel.read(),n,this.strides,this.padding,this.dataFormat,this.dilationRate);else{if(3!==this.rank)throw new Sp("convolutions greater than 3D are not implemented yet.");t=function(e,t,n,s=[1,1,1],a="valid",r,i){return Cr((()=>{if(null==r&&(r="channelsLast"),cd(r),4!==e.rank&&5!==e.rank)throw new Cp(`conv3dWithBias expects input to be of rank 4 or 5, but received ${e.rank}.`);if(4!==t.rank&&5!==t.rank)throw new Cp(`conv3dWithBias expects kernel to be of rank 4 or 5, but received ${e.rank}.`);let o=wg(e,r);if("causal"===a)throw new Sp("The support for CAUSAL padding mode in conv3dWithBias is not implemented yet.");return o=mi(o,t,s,"same"===a?"same":"valid","NDHWC",i),null!=n&&(o=Ld(o,n)),"channelsFirst"===r&&(o=Wo(o,[0,4,1,2,3])),o}))}(e,this.kernel.read(),n,this.strides,this.padding,this.dataFormat,this.dilationRate)}null!=this.activation&&(t=this.activation.apply(t))}return t}))}computeOutputShape(e){e=gf(e);const t=[],n="channelsLast"===this.dataFormat?e.slice(1,e.length-1):e.slice(2);for(let e=0;e<n.length;++e){const s=yg(n[e],this.kernelSize[e],this.padding,this.strides[e],"number"==typeof this.dilationRate?this.dilationRate:this.dilationRate[e]);t.push(s)}let 
s=[e[0]];return"channelsLast"===this.dataFormat?(s=s.concat(t),s.push(this.filters)):(s.push(this.filters),s=s.concat(t)),s}getConfig(){const e={filters:this.filters,kernelInitializer:of(this.kernelInitializer),kernelRegularizer:og(this.kernelRegularizer),kernelConstraint:td(this.kernelConstraint)},t=super.getConfig();return Object.assign(e,t),e}static verifyArgs(e){if(!("filters"in e)||"number"!=typeof e.filters||e.filters<1)throw new Cp(`Convolution layer expected config.filters to be a 'number' > 0 but got ${JSON.stringify(e.filters)}`)}}class Ig extends Ng{constructor(e){super(2,e),Ig.verifyArgs(e)}getConfig(){const e=super.getConfig();return delete e.rank,e}static verifyArgs(e){if("number"!=typeof e.kernelSize&&!Gp(e.kernelSize,"number",1,2))throw new Cp(`Conv2D expects config.kernelSize to be number or number[] with length 1 or 2, but received ${JSON.stringify(e.kernelSize)}.`)}}Ig.className="Conv2D",Nr(Ig);class $g extends Ng{constructor(e){super(3,e),$g.verifyArgs(e)}getConfig(){const e=super.getConfig();return delete e.rank,e}static verifyArgs(e){if("number"!=typeof e.kernelSize&&(!Array.isArray(e.kernelSize)||1!==e.kernelSize.length&&3!==e.kernelSize.length))throw new Cp(`Conv3D expects config.kernelSize to be number or [number, number, number], but received ${JSON.stringify(e.kernelSize)}.`)}}$g.className="Conv3D",Nr($g);class Cg extends Ig{constructor(e){if(super(e),this.inputSpec=[new kf({ndim:4})],"same"!==this.padding&&"valid"!==this.padding)throw new Cp(`Conv2DTranspose currently supports only padding modes 'same' and 'valid', but received padding mode ${this.padding}`)}build(e){if(4!==(e=gf(e)).length)throw new Cp("Input should have rank 4; Received input shape: "+JSON.stringify(e));const t="channelsFirst"===this.dataFormat?1:e.length-1;if(null==e[t])throw new Cp("The channel dimension of the inputs should be defined. 
Found `None`.");const n=e[t],s=this.kernelSize.concat([this.filters,n]);this.kernel=this.addWeight("kernel",s,"float32",this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.filters],"float32",this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint)),this.inputSpec=[new kf({ndim:4,axes:{[t]:n}})],this.built=!0}call(e,t){return Cr((()=>{let t=mf(e);if(4!==t.shape.length)throw new Cp(`Conv2DTranspose.call() expects input tensor to be rank-4, but received a tensor of rank-${t.shape.length}`);const n=t.shape,s=n[0];let a,r;"channelsFirst"===this.dataFormat?(a=2,r=3):(a=1,r=2);const i=n[a],o=n[r],l=this.kernelSize[0],u=this.kernelSize[1],c=this.strides[0],h=this.strides[1],p=[s,bg(i,c,l,this.padding),bg(o,h,u,this.padding),this.filters];"channelsLast"!==this.dataFormat&&(t=Wo(t,[0,2,3,1]));let d=fi(t,this.kernel.read(),p,this.strides,this.padding);return"channelsLast"!==this.dataFormat&&(d=Wo(d,[0,3,1,2])),null!=this.bias&&(d=Ld(d,this.bias.read(),this.dataFormat)),null!=this.activation&&(d=this.activation.apply(d)),d}))}computeOutputShape(e){const t=(e=gf(e)).slice();let n,s,a;"channelsFirst"===this.dataFormat?(n=1,s=2,a=3):(n=3,s=1,a=2);const r=this.kernelSize[0],i=this.kernelSize[1],o=this.strides[0],l=this.strides[1];return t[n]=this.filters,t[s]=bg(t[s],o,r,this.padding),t[a]=bg(t[a],l,i,this.padding),t}getConfig(){const e=super.getConfig();return delete e.dilationRate,e}}Cg.className="Conv2DTranspose",Nr(Cg);class Sg extends Ng{constructor(e,t){if(super(e,t),this.DEFAULT_DEPTHWISE_INITIALIZER="glorotUniform",this.DEFAULT_POINTWISE_INITIALIZER="glorotUniform",this.depthwiseKernel=null,this.pointwiseKernel=null,null==t.filters)throw new Cp("The `filters` configuration field is required by SeparableConv, but is unspecified.");if(null!=t.kernelInitializer||null!=t.kernelRegularizer||null!=t.kernelConstraint)throw new Cp("Fields kernelInitializer, kernelRegularizer and kernelConstraint are invalid for SeparableConv2D. 
Use depthwiseInitializer, depthwiseRegularizer, depthwiseConstraint, pointwiseInitializer, pointwiseRegularizer and pointwiseConstraint instead.");if(null!=t.padding&&"same"!==t.padding&&"valid"!==t.padding)throw new Cp(`SeparableConv${this.rank}D supports only padding modes: 'same' and 'valid', but received ${JSON.stringify(t.padding)}`);this.depthMultiplier=null==t.depthMultiplier?1:t.depthMultiplier,this.depthwiseInitializer=lf(t.depthwiseInitializer||this.DEFAULT_DEPTHWISE_INITIALIZER),this.depthwiseRegularizer=ug(t.depthwiseRegularizer),this.depthwiseConstraint=sd(t.depthwiseConstraint),this.pointwiseInitializer=lf(t.depthwiseInitializer||this.DEFAULT_POINTWISE_INITIALIZER),this.pointwiseRegularizer=ug(t.pointwiseRegularizer),this.pointwiseConstraint=sd(t.pointwiseConstraint)}build(e){if((e=gf(e)).length<this.rank+2)throw new Cp(`Inputs to SeparableConv${this.rank}D should have rank ${this.rank+2}, but received input shape: ${JSON.stringify(e)}`);const t="channelsFirst"===this.dataFormat?1:e.length-1;if(null==e[t]||e[t]<0)throw new Cp(`The channel dimension of the inputs should be defined, but found ${JSON.stringify(e[t])}`);const n=e[t],s=this.kernelSize.concat([n,this.depthMultiplier]),a=[];for(let e=0;e<this.rank;++e)a.push(1);a.push(n*this.depthMultiplier,this.filters);const r=!0;this.depthwiseKernel=this.addWeight("depthwise_kernel",s,"float32",this.depthwiseInitializer,this.depthwiseRegularizer,r,this.depthwiseConstraint),this.pointwiseKernel=this.addWeight("pointwise_kernel",a,"float32",this.pointwiseInitializer,this.pointwiseRegularizer,r,this.pointwiseConstraint),this.useBias?this.bias=this.addWeight("bias",[this.filters],"float32",this.biasInitializer,this.biasRegularizer,r,this.biasConstraint):this.bias=null,this.inputSpec=[new kf({ndim:this.rank+2,axes:{[t]:n}})],this.built=!0}call(e,t){return Cr((()=>{let t;if(e=mf(e),1===this.rank)throw new Sp("1D separable convolution is not implemented yet.");return 2===this.rank&&("channelsFirst"===this.dataFormat&&(e=Wo(e,[0,2,3,1])),t=wo(e,this.depthwiseKernel.read(),this.pointwiseKernel.read(),this.strides,this.padding,this.dilationRate,"NHWC")),this.useBias&&(t=Ld(t,this.bias.read(),this.dataFormat)),null!=this.activation&&(t=this.activation.apply(t)),"channelsFirst"===this.dataFormat&&(t=Wo(t,[0,3,1,2])),t}))}getConfig(){const e=super.getConfig();return delete e.rank,delete e.kernelInitializer,delete e.kernelRegularizer,delete e.kernelConstraint,e.depthwiseInitializer=of(this.depthwiseInitializer),e.pointwiseInitializer=of(this.pointwiseInitializer),e.depthwiseRegularizer=og(this.depthwiseRegularizer),e.pointwiseRegularizer=og(this.pointwiseRegularizer),e.depthwiseConstraint=td(this.depthwiseConstraint),e.pointwiseConstraint=td(this.pointwiseConstraint),e}}Sg.className="SeparableConv";class Tg extends Sg{constructor(e){super(2,e)}}Tg.className="SeparableConv2D",Nr(Tg);class Eg extends Ng{constructor(e){super(1,e),Eg.verifyArgs(e),this.inputSpec=[{ndim:3}]}getConfig(){const e=super.getConfig();return delete e.rank,delete e.dataFormat,e}static verifyArgs(e){if("number"!=typeof e.kernelSize&&!Gp(e.kernelSize,"number",1,1))throw new Cp(`Conv1D expects config.kernelSize to be number or number[] with length 1, but received ${JSON.stringify(e.kernelSize)}.`)}}Eg.className="Conv1D",Nr(Eg);class Ag extends Cf{constructor(e){super(e),"number"==typeof e.cropping?this.cropping=[[e.cropping,e.cropping],[e.cropping,e.cropping]]:"number"==typeof 
e.cropping[0]?this.cropping=[[e.cropping[0],e.cropping[0]],[e.cropping[1],e.cropping[1]]]:this.cropping=e.cropping,this.dataFormat=void 0===e.dataFormat?"channelsLast":e.dataFormat,this.inputSpec=[{ndim:4}]}computeOutputShape(e){return"channelsFirst"===this.dataFormat?[e[0],e[1],e[2]-this.cropping[0][0]-this.cropping[0][1],e[3]-this.cropping[1][0]-this.cropping[1][1]]:[e[0],e[1]-this.cropping[0][0]-this.cropping[0][1],e[2]-this.cropping[1][0]-this.cropping[1][1],e[3]]}call(e,t){return Cr((()=>{if(e=mf(e),"channelsLast"===this.dataFormat){const t=Td(e,this.cropping[0][0],e.shape[1]-this.cropping[0][0]-this.cropping[0][1],2);return Td(t,this.cropping[1][0],e.shape[2]-this.cropping[1][1]-this.cropping[1][0],3)}{const t=Td(e,this.cropping[0][0],e.shape[2]-this.cropping[0][0]-this.cropping[0][1],3);return Td(t,this.cropping[1][0],e.shape[3]-this.cropping[1][1]-this.cropping[1][0],4)}}))}getConfig(){const e={cropping:this.cropping,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}Ag.className="Cropping2D",Nr(Ag);class Rg extends Cf{constructor(e){var t;super(e),this.DEFAULT_SIZE=[2,2],this.inputSpec=[{ndim:4}],this.size=null==e.size?this.DEFAULT_SIZE:e.size,this.dataFormat=null==e.dataFormat?"channelsLast":e.dataFormat,cd(this.dataFormat),this.interpolation=null==e.interpolation?"nearest":e.interpolation,t=this.interpolation,Up(rd,"InterpolationFormat",t)}computeOutputShape(e){if("channelsFirst"===this.dataFormat){const t=null==e[2]?null:this.size[0]*e[2],n=null==e[3]?null:this.size[1]*e[3];return[e[0],e[1],t,n]}{const t=null==e[1]?null:this.size[0]*e[1],n=null==e[2]?null:this.size[1]*e[2];return[e[0],t,n,e[3]]}}call(e,t){return Cr((()=>{let t=mf(e);const n=t.shape;if("channelsFirst"===this.dataFormat){t=Wo(t,[0,2,3,1]);const e=this.size[0]*n[2],s=this.size[1]*n[3],a="nearest"===this.interpolation?t.resizeNearestNeighbor([e,s]):t.resizeBilinear([e,s]);return Wo(a,[0,3,1,2])}{const e=this.size[0]*n[1],s=this.size[1]*n[2];return"nearest"===this.interpolation?t.resizeNearestNeighbor([e,s]):t.resizeBilinear([e,s])}}))}getConfig(){const e={size:this.size,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}Rg.className="UpSampling2D",Nr(Rg);class Fg extends vg{constructor(e){super(2,e),this.depthwiseKernel=null,this.depthMultiplier=null==e.depthMultiplier?1:e.depthMultiplier,this.depthwiseInitializer=lf(e.depthwiseInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.depthwiseConstraint=sd(e.depthwiseConstraint),this.depthwiseRegularizer=ug(e.depthwiseRegularizer)}build(e){if((e=gf(e)).length<4)throw new Cp(`Inputs to DepthwiseConv2D should have rank 4. 
Received input shape: ${JSON.stringify(e)}.`);const t="channelsFirst"===this.dataFormat?1:3;if(null==e[t]||e[t]<0)throw new Cp(`The channel dimension of the inputs to DepthwiseConv2D should be defined, but is not (${e[t]}).`);const n=e[t],s=[this.kernelSize[0],this.kernelSize[1],n,this.depthMultiplier];this.depthwiseKernel=this.addWeight("depthwise_kernel",s,null,this.depthwiseInitializer,this.depthwiseRegularizer,!0,this.depthwiseConstraint),this.useBias?this.bias=this.addWeight("bias",[n*this.depthMultiplier],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(e,t){return Cr((()=>{let t=function(e,t,n=[1,1],s="valid",a,r){return Cr((()=>{null==a&&(a="channelsLast"),cd(a);let i=xg(e,a);if(4!==e.rank)throw new Cp(`Input for depthwiseConv2d is required to be 4-D, but is instead ${e.rank}-D`);if(4!==t.rank)throw new Cp(`depthwiseKernel is required to be 4-D, but is instead ${t.rank}-D`);return i=gi(i,t,n,"same"===s?"same":"valid","NHWC",r),"channelsFirst"===a&&(i=Wo(i,[0,3,1,2])),i}))}(e=mf(e),this.depthwiseKernel.read(),this.strides,this.padding,this.dataFormat,null);return this.useBias&&(t=Ld(t,this.bias.read(),this.dataFormat)),null!=this.activation&&(t=this.activation.apply(t)),t}))}computeOutputShape(e){e=gf(e);const t="channelsFirst"===this.dataFormat?e[2]:e[1],n="channelsFirst"===this.dataFormat?e[3]:e[2],s="channelsFirst"===this.dataFormat?e[1]*this.depthMultiplier:e[3]*this.depthMultiplier,a=yg(t,this.kernelSize[0],this.padding,this.strides[0]),r=yg(n,this.kernelSize[1],this.padding,this.strides[1]);return"channelsFirst"===this.dataFormat?[e[0],s,a,r]:[e[0],a,r,s]}getConfig(){const e=super.getConfig();return e.depthMultiplier=this.depthMultiplier,e.depthwiseInitializer=of(this.depthwiseInitializer),e.depthwiseRegularizer=og(this.depthwiseRegularizer),e.depthwiseConstraint=td(this.depthwiseRegularizer),e}}function Dg(e,t,n,s){if(Array.isArray(e)){if(null!=t||null!=n)throw new Cp("When inputs is an array, neither initialState or constants should be provided");null!=s&&(n=e.slice(e.length-s,e.length),e=e.slice(0,e.length-s)),e.length>1&&(t=e.slice(1,e.length)),e=e[0]}function a(e){return null==e||Array.isArray(e)?e:[e]}return{inputs:e,initialState:t=a(t),constants:n=a(n)}}function _g(e,t,n,s=!1,a,r,i=!1,o=!1){return Cr((()=>{const l=t.shape.length;if(l<3)throw new Cp(`Input should be at least 3D, but is ${l}D.`);const u=[1,0].concat(Nd(2,l));if(t=Wo(t,u),null!=r)throw new Sp("The rnn() functoin of the deeplearn.js backend does not support constants yet.");i&&console.warn("Backend rnn(): the unroll = true option is not applicable to the imperative deeplearn.js backend."),null!=a&&((a=a.asType("bool").asType("float32")).rank===l-1&&(a=Ii(a,-1)),a=Wo(a,u)),s&&(t=bo(t,0),null!=a&&(a=bo(a,0)));const c=[];let h,p=n;const d=t.shape[0],f=Lo(t);let m,g;null!=a&&(m=Lo(a));for(let t=0;t<d;++t){const n=f[t],s=Cr((()=>e(n,p)));if(null==a)h=s[0],p=s[1];else{const e=Cr((()=>{const e=m[t],n=lo(e).sub(e);return{output:s[0].mul(e).add(p[0].mul(n)),newStates:p.map(((t,a)=>s[1][a].mul(e).add(t.mul(n))))}}));h=e.output,p=e.newStates}o&&c.push(h)}return o&&(g=Fo(c,1)),[h,g,p]}))}Fg.className="DepthwiseConv2D",Nr(Fg);class Og extends Cf{constructor(e){let t;if(super(e),null==e.cell)throw new Cp("cell property is missing for the constructor of RNN.");if(t=Array.isArray(e.cell)?new Ug({cells:e.cell}):e.cell,null==t.stateSize)throw new Cp("The RNN cell should have an attribute `stateSize` (tuple of integers, one integer per RNN 
state).");this.cell=t,this.returnSequences=null!=e.returnSequences&&e.returnSequences,this.returnState=null!=e.returnState&&e.returnState,this.goBackwards=null!=e.goBackwards&&e.goBackwards,this._stateful=null!=e.stateful&&e.stateful,this.unroll=null!=e.unroll&&e.unroll,this.supportsMasking=!0,this.inputSpec=[new kf({ndim:3})],this.stateSpec=null,this.states_=null,this.numConstants=null,this.keptStates=[]}getStates(){return null==this.states_?Nd(0,Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1).map((e=>null)):this.states_}setStates(e){this.states_=e}computeOutputShape(e){df(e)&&(e=e[0]),e=e;let t=this.cell.stateSize;Array.isArray(t)||(t=[t]);const n=t[0];let s;if(s=this.returnSequences?[e[0],e[1],n]:[e[0],n],this.returnState){const n=[];for(const s of t)n.push([e[0],s]);return[s].concat(n)}return s}computeMask(e,t){return Cr((()=>{Array.isArray(t)&&(t=t[0]);const e=this.returnSequences?t:null;if(this.returnState){const t=this.states.map((e=>null));return[e].concat(t)}return e}))}get states(){if(null==this.states_){const e=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1,t=[];for(let n=0;n<e;++n)t.push(null);return t}return this.states_}set states(e){this.states_=e}build(e){if(null!=this.numConstants)throw new Sp("Constants support is not implemented in RNN yet.");df(e)&&(e=e[0]),e=e;const t=this.stateful?e[0]:null,n=e.slice(2);this.inputSpec[0]=new kf({shape:[t,null,...n]});const s=[e[0]].concat(e.slice(2));let a;if(this.cell.build(s),a=Array.isArray(this.cell.stateSize)?this.cell.stateSize:[this.cell.stateSize],null!=this.stateSpec){if(!_(this.stateSpec.map((e=>e.shape[e.shape.length-1])),a))throw new Cp(`An initialState was passed that is not compatible with cell.stateSize. Received stateSpec=${this.stateSpec}; However cell.stateSize is ${this.cell.stateSize}`)}else this.stateSpec=a.map((e=>new kf({shape:[null,e]})));this.stateful&&this.resetStates()}resetStates(e,t=!1){Cr((()=>{if(!this.stateful)throw new Ip("Cannot call resetStates() on an RNN Layer that is not stateful.");const n=this.inputSpec[0].shape[0];if(null==n)throw new Cp("If an RNN is stateful, it needs to know its batch size. Specify the batch size of your input tensors: \n- If using a Sequential model, specify the batch size by passing a `batchInputShape` option to your first layer.\n- If using the functional API, specify the batch size by passing a `batchShape` option to your Input layer.");if(null==this.states_)Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map((e=>io([n,e]))):this.states_=[io([n,this.cell.stateSize])];else if(null==e)Sr(this.states_),null!=this.keptStates&&(Sr(this.keptStates),this.keptStates=[]),Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map((e=>io([n,e]))):this.states_[0]=io([n,this.cell.stateSize]);else{if(Array.isArray(e)||(e=[e]),e.length!==this.states_.length)throw new Cp(`Layer ${this.name} expects ${this.states_.length} state(s), but it received ${e.length} state value(s). 
Input received: ${e}`);!0===t?this.keptStates.push(this.states_.slice()):Sr(this.states_);for(let t=0;t<this.states_.length;++t){const s=e[t],a=Array.isArray(this.cell.stateSize)?this.cell.stateSize[t]:this.cell.stateSize,r=[n,a];if(!_(s.shape,r))throw new Cp(`State ${t} is incompatible with layer ${this.name}: expected shape=${r}, received shape=${s.shape}`);this.states_[t]=s}}this.states_=this.states_.map((e=>Tr(e.clone())))}))}apply(e,t){let n=null==t?null:t.initialState,s=null==t?null:t.constants;null==t&&(t={});const a=Dg(e,n,s,this.numConstants);e=a.inputs,n=a.initialState,s=a.constants;let r=[],i=[];if(null!=n){t.initialState=n,r=r.concat(n),this.stateSpec=[];for(const e of n)this.stateSpec.push(new kf({shape:e.shape}));i=i.concat(this.stateSpec)}if(null!=s&&(t.constants=s,r=r.concat(s),this.numConstants=s.length),r[0]instanceof vf){const n=[e].concat(r),s=this.inputSpec.concat(i),a=this.inputSpec;this.inputSpec=s;const o=super.apply(n,t);return this.inputSpec=a,o}return super.apply(e,t)}call(e,t){return Cr((()=>{const n=null==t?null:t.mask,s=null==t?null:t.training;let a=null==t?null:t.initialState;e=mf(e),null==a&&(a=this.stateful?this.states_:this.getInitialState(e));const r=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1;if(a.length!==r)throw new Cp(`RNN Layer has ${r} state(s) but was passed ${a.length} initial state(s).`);this.unroll&&console.warn("Ignoring unroll = true for RNN layer, due to imperative backend.");const i={training:s},o=_g(((e,t)=>{const n=this.cell.call([e].concat(t),i);return[n[0],n.slice(1)]}),e,a,this.goBackwards,n,null,this.unroll,this.returnSequences),l=o[0],u=o[1],c=o[2];this.stateful&&this.resetStates(c,s);const h=this.returnSequences?u:l;return this.returnState?[h].concat(c):h}))}getInitialState(e){return Cr((()=>{let t=io(e.shape);return t=zi(t,[1,2]),t=$d(t),Array.isArray(this.cell.stateSize)?this.cell.stateSize.map((e=>e>1?Rd(t,[1,e]):t)):this.cell.stateSize>1?[Rd(t,[1,this.cell.stateSize])]:[t]}))}get trainableWeights(){return this.trainable?this.cell.trainableWeights:[]}get nonTrainableWeights(){return this.trainable?this.cell.nonTrainableWeights:this.cell.weights}setFastWeightInitDuringBuild(e){super.setFastWeightInitDuringBuild(e),null!=this.cell&&this.cell.setFastWeightInitDuringBuild(e)}getConfig(){const e=super.getConfig(),t={returnSequences:this.returnSequences,returnState:this.returnState,goBackwards:this.goBackwards,stateful:this.stateful,unroll:this.unroll};null!=this.numConstants&&(t.numConstants=this.numConstants);const n=this.cell.getConfig();return this.getClassName()===Og.className&&(t.cell={className:this.cell.getClassName(),config:n}),Object.assign({},n,e,t)}static fromConfig(e,t,n={}){const s=Bf(t.cell,n);return new e(Object.assign(t,{cell:s}))}}Og.className="RNN",Nr(Og);class Mg extends Cf{}class Lg extends 
Mg{constructor(e){super(e),this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",this.units=e.units,Hp(this.units,"units"),this.activation=sg(null==e.activation?this.DEFAULT_ACTIVATION:e.activation),this.useBias=null==e.useBias||e.useBias,this.kernelInitializer=lf(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=lf(e.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=lf(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelRegularizer=ug(e.kernelRegularizer),this.recurrentRegularizer=ug(e.recurrentRegularizer),this.biasRegularizer=ug(e.biasRegularizer),this.kernelConstraint=sd(e.kernelConstraint),this.recurrentConstraint=sd(e.recurrentConstraint),this.biasConstraint=sd(e.biasConstraint),this.dropout=kd([1,vd([0,null==e.dropout?0:e.dropout])]),this.recurrentDropout=kd([1,vd([0,null==e.recurrentDropout?0:e.recurrentDropout])]),this.stateSize=this.units,this.dropoutMask=null,this.recurrentDropoutMask=null}build(e){e=gf(e),this.kernel=this.addWeight("kernel",[e[e.length-1],this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,this.units],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias?this.bias=this.addWeight("bias",[this.units],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(e,t){return Cr((()=>{if(2!==(e=e).length)throw new Cp(`SimpleRNNCell expects 2 input Tensors, got ${e.length}.`);let n=e[1];e=e[0];const s=null!=t.training&&t.training;let a;0<this.dropout&&this.dropout<1&&null==this.dropoutMask&&(this.dropoutMask=Gg({ones:()=>lo(e),rate:this.dropout,training:s})),0<this.recurrentDropout&&this.recurrentDropout<1&&null==this.recurrentDropoutMask&&(this.recurrentDropoutMask=Gg({ones:()=>lo(n),rate:this.recurrentDropout,training:s}));const r=this.dropoutMask,i=this.recurrentDropoutMask;a=Dd(null!=r?Mi(e,r):e,this.kernel.read()),null!=this.bias&&(a=Ld(a,this.bias.read())),null!=i&&(n=Mi(n,i));let o=Or(a,Dd(n,this.recurrentKernel.read()));return null!=this.activation&&(o=this.activation.apply(o)),[o,o]}))}getConfig(){const e=super.getConfig(),t={units:this.units,activation:tg(this.activation),useBias:this.useBias,kernelInitializer:of(this.kernelInitializer),recurrentInitializer:of(this.recurrentInitializer),biasInitializer:of(this.biasInitializer),kernelRegularizer:og(this.kernelRegularizer),recurrentRegularizer:og(this.recurrentRegularizer),biasRegularizer:og(this.biasRegularizer),activityRegularizer:og(this.activityRegularizer),kernelConstraint:td(this.kernelConstraint),recurrentConstraint:td(this.recurrentConstraint),biasConstraint:td(this.biasConstraint),dropout:this.dropout,recurrentDropout:this.recurrentDropout};return Object.assign({},e,t)}}Lg.className="SimpleRNNCell",Nr(Lg);class zg extends Og{constructor(e){e.cell=new Lg(e),super(e)}call(e,t){return Cr((()=>{null!=this.cell.dropoutMask&&(Sr(this.cell.dropoutMask),this.cell.dropoutMask=null),null!=this.cell.recurrentDropoutMask&&(Sr(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);const n=null==t?null:t.mask,s=null==t?null:t.training,a=null==t?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:a})}))}static fromConfig(e,t){return new e(t)}}zg.className="SimpleRNN",Nr(zg);class Pg extends 
Mg{constructor(e){if(super(e),this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_RECURRENT_ACTIVATION="hardSigmoid",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",e.resetAfter)throw new Cp("GRUCell does not support reset_after parameter set to true.");this.units=e.units,Hp(this.units,"units"),this.activation=sg(void 0===e.activation?this.DEFAULT_ACTIVATION:e.activation),this.recurrentActivation=sg(void 0===e.recurrentActivation?this.DEFAULT_RECURRENT_ACTIVATION:e.recurrentActivation),this.useBias=null==e.useBias||e.useBias,this.kernelInitializer=lf(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=lf(e.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=lf(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelRegularizer=ug(e.kernelRegularizer),this.recurrentRegularizer=ug(e.recurrentRegularizer),this.biasRegularizer=ug(e.biasRegularizer),this.kernelConstraint=sd(e.kernelConstraint),this.recurrentConstraint=sd(e.recurrentConstraint),this.biasConstraint=sd(e.biasConstraint),this.dropout=kd([1,vd([0,null==e.dropout?0:e.dropout])]),this.recurrentDropout=kd([1,vd([0,null==e.recurrentDropout?0:e.recurrentDropout])]),this.implementation=e.implementation,this.stateSize=this.units,this.dropoutMask=null,this.recurrentDropoutMask=null}build(e){const t=(e=gf(e))[e.length-1];this.kernel=this.addWeight("kernel",[t,3*this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,3*this.units],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias?this.bias=this.addWeight("bias",[3*this.units],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(e,t){return Cr((()=>{if(2!==(e=e).length)throw new Cp(`GRUCell expects 2 input Tensors (inputs, h, c), got ${e.length}.`);const n=null!=t.training&&t.training;let s=e[1];e=e[0],0<this.dropout&&this.dropout<1&&null==this.dropoutMask&&(this.dropoutMask=Gg({ones:()=>lo(e),rate:this.dropout,training:n,count:3})),0<this.recurrentDropout&&this.recurrentDropout<1&&null==this.recurrentDropoutMask&&(this.recurrentDropoutMask=Gg({ones:()=>lo(s),rate:this.recurrentDropout,training:n,count:3}));const a=this.dropoutMask,r=this.recurrentDropoutMask;let i,o,l;0<this.dropout&&this.dropout<1&&(e=Mi(e,a[0]));let u=Dd(e,this.kernel.read());this.useBias&&(u=Ld(u,this.bias.read())),0<this.recurrentDropout&&this.recurrentDropout<1&&(s=Mi(s,r[0]));const c=this.recurrentKernel.read(),[h,p]=Eo(c,[2*this.units,this.units],c.rank-1),d=Dd(s,h),[f,m,g]=Eo(u,3,u.rank-1),[y,b]=Eo(d,2,d.rank-1);i=this.recurrentActivation.apply(Or(f,y)),o=this.recurrentActivation.apply(Or(m,b));const x=Dd(Mi(o,s),p);l=this.activation.apply(Or(g,x));const w=Or(Mi(i,s),Mi(Or(1,so(i)),l));return[w,w]}))}getConfig(){const 
e=super.getConfig(),t={units:this.units,activation:tg(this.activation),recurrentActivation:tg(this.recurrentActivation),useBias:this.useBias,kernelInitializer:of(this.kernelInitializer),recurrentInitializer:of(this.recurrentInitializer),biasInitializer:of(this.biasInitializer),kernelRegularizer:og(this.kernelRegularizer),recurrentRegularizer:og(this.recurrentRegularizer),biasRegularizer:og(this.biasRegularizer),activityRegularizer:og(this.activityRegularizer),kernelConstraint:td(this.kernelConstraint),recurrentConstraint:td(this.recurrentConstraint),biasConstraint:td(this.biasConstraint),dropout:this.dropout,recurrentDropout:this.recurrentDropout,implementation:this.implementation,resetAfter:!1};return Object.assign({},e,t)}}Pg.className="GRUCell",Nr(Pg);class Bg extends Og{constructor(e){0===e.implementation&&console.warn("`implementation=0` has been deprecated, and now defaults to `implementation=1`. Please update your layer call."),e.cell=new Pg(e),super(e)}call(e,t){return Cr((()=>{null!=this.cell.dropoutMask&&(Sr(this.cell.dropoutMask),this.cell.dropoutMask=null),null!=this.cell.recurrentDropoutMask&&(Sr(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);const n=null==t?null:t.mask,s=null==t?null:t.training,a=null==t?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:a})}))}static fromConfig(e,t){return 0===t.implmentation&&(t.implementation=1),new e(t)}}Bg.className="GRU",Nr(Bg);class Wg extends Mg{constructor(e){super(e),this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_RECURRENT_ACTIVATION="hardSigmoid",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",this.units=e.units,Hp(this.units,"units"),this.activation=sg(void 0===e.activation?this.DEFAULT_ACTIVATION:e.activation),this.recurrentActivation=sg(void 0===e.recurrentActivation?this.DEFAULT_RECURRENT_ACTIVATION:e.recurrentActivation),this.useBias=null==e.useBias||e.useBias,this.kernelInitializer=lf(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=lf(e.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=lf(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.unitForgetBias=e.unitForgetBias,this.kernelRegularizer=ug(e.kernelRegularizer),this.recurrentRegularizer=ug(e.recurrentRegularizer),this.biasRegularizer=ug(e.biasRegularizer),this.kernelConstraint=sd(e.kernelConstraint),this.recurrentConstraint=sd(e.recurrentConstraint),this.biasConstraint=sd(e.biasConstraint),this.dropout=kd([1,vd([0,null==e.dropout?0:e.dropout])]),this.recurrentDropout=kd([1,vd([0,null==e.recurrentDropout?0:e.recurrentDropout])]),this.implementation=e.implementation,this.stateSize=[this.units,this.units],this.dropoutMask=null,this.recurrentDropoutMask=null}build(e){var t;const n=(e=gf(e))[e.length-1];let s;if(this.kernel=this.addWeight("kernel",[n,4*this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,4*this.units],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias){if(this.unitForgetBias){const e=this.biasInitializer,n=this.units;s=new((t=class extends Vd{apply(t,s){const a=e.apply([n]),r=(new Gd).apply([n]),i=e.apply([2*n]);return Ad(Ad(a,r),i)}}).className="CustomInit",t)}else s=this.biasInitializer;this.bias=this.addWeight("bias",[4*this.units],null,s,this.biasRegularizer,!0,this.biasConstraint)}else 
this.bias=null;this.built=!0}call(e,t){return Cr((()=>{const n=null!=t.training&&t.training;if(3!==(e=e).length)throw new Cp(`LSTMCell expects 3 input Tensors (inputs, h, c), got ${e.length}.`);let s=e[1];const a=e[2];e=e[0],0<this.dropout&&this.dropout<1&&null==this.dropoutMask&&(this.dropoutMask=Gg({ones:()=>lo(e),rate:this.dropout,training:n,count:4})),0<this.recurrentDropout&&this.recurrentDropout<1&&null==this.recurrentDropoutMask&&(this.recurrentDropoutMask=Gg({ones:()=>lo(s),rate:this.recurrentDropout,training:n,count:4}));const r=this.dropoutMask,i=this.recurrentDropoutMask;let o,l,u,c;0<this.dropout&&this.dropout<1&&(e=Mi(e,r[0]));let h=Dd(e,this.kernel.read());0<this.recurrentDropout&&this.recurrentDropout<1&&(s=Mi(s,i[0])),h=Or(h,Dd(s,this.recurrentKernel.read())),this.useBias&&(h=Ld(h,this.bias.read()));const[p,d,f,m]=Eo(h,4,h.rank-1);o=this.recurrentActivation.apply(p),l=this.recurrentActivation.apply(d),u=Or(Mi(l,a),Mi(o,this.activation.apply(f))),c=this.recurrentActivation.apply(m);const g=Mi(c,this.activation.apply(u));return[g,g,u]}))}getConfig(){const e=super.getConfig(),t={units:this.units,activation:tg(this.activation),recurrentActivation:tg(this.recurrentActivation),useBias:this.useBias,kernelInitializer:of(this.kernelInitializer),recurrentInitializer:of(this.recurrentInitializer),biasInitializer:of(this.biasInitializer),unitForgetBias:this.unitForgetBias,kernelRegularizer:og(this.kernelRegularizer),recurrentRegularizer:og(this.recurrentRegularizer),biasRegularizer:og(this.biasRegularizer),activityRegularizer:og(this.activityRegularizer),kernelConstraint:td(this.kernelConstraint),recurrentConstraint:td(this.recurrentConstraint),biasConstraint:td(this.biasConstraint),dropout:this.dropout,recurrentDropout:this.recurrentDropout,implementation:this.implementation};return Object.assign({},e,t)}}Wg.className="LSTMCell",Nr(Wg);class Vg extends Og{constructor(e){0===e.implementation&&console.warn("`implementation=0` has been deprecated, and now defaults to `implementation=1`. 
Please update your layer call."),e.cell=new Wg(e),super(e)}call(e,t){return Cr((()=>{null!=this.cell.dropoutMask&&(Sr(this.cell.dropoutMask),this.cell.dropoutMask=null),null!=this.cell.recurrentDropoutMask&&(Sr(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);const n=null==t?null:t.mask,s=null==t?null:t.training,a=null==t?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:a})}))}static fromConfig(e,t){return 0===t.implmentation&&(t.implementation=1),new e(t)}}Vg.className="LSTM",Nr(Vg);class Ug extends Mg{constructor(e){super(e),this.cells=e.cells}get stateSize(){const e=[];for(const t of this.cells.slice().reverse())Array.isArray(t.stateSize)?e.push(...t.stateSize):e.push(t.stateSize);return e}call(e,t){return Cr((()=>{let n=(e=e).slice(1);const s=[];for(const e of this.cells.slice().reverse())Array.isArray(e.stateSize)?s.push(n.splice(0,e.stateSize.length)):s.push(n.splice(0,1));s.reverse();const a=[];let r;for(let i=0;i<this.cells.length;++i){const o=this.cells[i];n=s[i],r=0===i?[e[0]].concat(n):[r[0]].concat(n),r=o.call(r,t),a.push(r.slice(1))}n=[];for(const e of a.slice().reverse())n.push(...e);return[r[0]].concat(n)}))}build(e){let t;df(e)&&(e=e[0]),e=e,this.cells.forEach(((n,s)=>{fd(`RNNCell_${s}`,(()=>{n.build(e),t=Array.isArray(n.stateSize)?n.stateSize[0]:n.stateSize,e=[e[0],t]}))})),this.built=!0}getConfig(){const e=super.getConfig(),t={cells:this.cells.map((e=>({className:e.getClassName(),config:e.getConfig()})))};return Object.assign({},e,t)}static fromConfig(e,t,n={}){const s=[];for(const e of t.cells)s.push(Bf(e,n));return new e({cells:s})}get trainableWeights(){if(!this.trainable)return[];const e=[];for(const t of this.cells)e.push(...t.trainableWeights);return e}get nonTrainableWeights(){const e=[];for(const t of this.cells)e.push(...t.nonTrainableWeights);if(!this.trainable){const t=[];for(const e of this.cells)t.push(...e.trainableWeights);return t.concat(e)}return e}getWeights(){const e=[];for(const t of this.cells)e.push(...t.weights);return xf(e)}setWeights(e){const t=[];for(const n of this.cells){const s=n.weights.length,a=e.splice(s);for(let e=0;e<n.weights.length;++e)t.push([n.weights[e],a[e]])}wf(t)}}function Gg(e){const{ones:t,rate:n,training:s=!1,count:a=1}=e,r=()=>zd(t(),n),i=()=>Pd(r,t,s);return!a||a<=1?Tr(i().clone()):Array(a).fill(void 0).map(i).map((e=>Tr(e.clone())))}Ug.className="StackedRNNCells",Nr(Ug);var Hg,jg;class Kg extends Og{constructor(e){if(e.unroll)throw new Sp("Unrolling is not possible with convolutional RNNs.");if(Array.isArray(e.cell))throw new Sp("It is not possible at the moment to stack convolutional cells.");super(e),this.inputSpec=[new kf({ndim:5})]}call(e,t){return Cr((()=>{if(null!=this.cell.dropoutMask&&(Sr(this.cell.dropoutMask),this.cell.dropoutMask=null),null!=this.cell.recurrentDropoutMask&&(Sr(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null),t&&t.constants)throw new Cp("ConvRNN2D cell does not support constants");const n=null==t?null:t.mask,s=null==t?null:t.training,a=null==t?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:a})}))}computeOutputShape(e){let t=this.computeSingleOutputShape(e);return this.returnSequences||(t=[t[0],...t.slice(2)]),this.returnState&&(t=[t,...Array(2).fill([e[0],...t.slice(-3)])]),t}getInitialState(e){return Cr((()=>{const{stateSize:t}=this.cell,n=e.shape,s=this.computeSingleOutputShape(n),a=io([s[0],...s.slice(2)]);return 
Array.isArray(t)?Array(t.length).fill(a):[a]}))}resetStates(e,t=!1){Cr((()=>{if(!this.stateful)throw new Ip("Cannot call resetStates() on an RNN Layer that is not stateful.");const n=this.inputSpec[0].shape,s=this.computeSingleOutputShape(n),a=[s[0],...s.slice(2)];if(null==n[0])throw new Cp("If an RNN is stateful, it needs to know its batch size. Specify the batch size of your input tensors: \n- If using a Sequential model, specify the batch size by passing a `batchInputShape` option to your first layer.\n- If using the functional API, specify the batch size by passing a `batchShape` option to your Input layer.");if(null==this.getStates())Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map((()=>io(a))):this.states_=[io(a)];else if(null==e)Sr(this.states_),null!=this.keptStates&&(Sr(this.keptStates),this.keptStates=[]),Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map((()=>io(a))):this.states_[0]=io(a);else{if(Array.isArray(e)||(e=[e]),e.length!==this.states_.length)throw new Cp(`Layer ${this.name} expects ${this.states_.length} state(s), but it received ${e.length} state value(s). Input received: ${e}`);t?this.keptStates.push(this.states_.slice()):Sr(this.states_);for(let t=0;t<this.states_.length;++t){const n=e[t],s=a;if(!_(n.shape,s))throw new Cp(`State ${t} is incompatible with layer ${this.name}: expected shape=${s}, received shape=${n.shape}`);this.states_[t]=n}}this.states_=this.states_.map((e=>Tr(e.clone())))}))}computeSingleOutputShape(e){const{dataFormat:t,filters:n,kernelSize:s,padding:a,strides:r,dilationRate:i}=this.cell,o="channelsFirst"===t,l=e[o?3:2],u=e[o?4:3],c=yg(l,s[0],a,r[0],i[0]),h=yg(u,s[1],a,r[1],i[1]);return[...e.slice(0,2),...o?[n,c,h]:[c,h,n]]}}Kg.className="ConvRNN2D";class qg extends Wg{constructor(e){const{filters:t,kernelSize:n,strides:s,padding:a,dataFormat:r,dilationRate:i}=e;super(Object.assign({},e,{units:t})),this.filters=t,Hp(this.filters,"filters"),this.kernelSize=gg(n,2,"kernelSize"),this.kernelSize.forEach((e=>Hp(e,"kernelSize"))),this.strides=gg(s||1,2,"strides"),this.strides.forEach((e=>Hp(e,"strides"))),this.padding=a||"valid",hd(this.padding),this.dataFormat=r||"channelsLast",cd(this.dataFormat),this.dilationRate=gg(i||1,2,"dilationRate"),this.dilationRate.forEach((e=>Hp(e,"dilationRate")))}build(e){var t;e=gf(e);const n="channelsFirst"===this.dataFormat?1:e.length-1;if(null==e[n])throw new Cp(`The channel dimension of the input should be defined. 
Found ${e[n]}`);const s=e[n],a=this.kernelSize.concat([s,4*this.filters]);this.kernel=this.addWeight("kernel",a,null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint);const r=this.kernelSize.concat([this.filters,4*this.filters]);if(this.recurrentKernel=this.addWeight("recurrent_kernel",r,null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias){let e;if(this.unitForgetBias){const n=this.biasInitializer,s=this.filters;e=new((t=class extends Vd{apply(e,t){return Ed([n.apply([s]),oo([s]),n.apply([2*s])])}}).className="CustomInit",t)}else e=this.biasInitializer;this.bias=this.addWeight("bias",[4*this.filters],null,e,this.biasRegularizer,!0,this.biasConstraint)}this.built=!0}call(e,t){return Cr((()=>{if(3!==e.length)throw new Cp(`ConvLSTM2DCell expects 3 input Tensors (inputs, h, c), got ${e.length}.`);const n=t.training||!1,s=e[0],a=e[1],r=e[2];0<this.dropout&&this.dropout<1&&null==this.dropoutMask&&(this.dropoutMask=Gg({ones:()=>lo(s),rate:this.dropout,training:n,count:4}));const i=this.dropoutMask,o=(e,t,n)=>t&&t[n]?Mi(t[n],e):e;let l=o(s,i,0),u=o(s,i,1),c=o(s,i,2),h=o(s,i,3);0<this.recurrentDropout&&this.recurrentDropout<1&&null==this.recurrentDropoutMask&&(this.recurrentDropoutMask=Gg({ones:()=>lo(a),rate:this.recurrentDropout,training:n,count:4}));const p=this.recurrentDropoutMask;let d=o(a,p,0),f=o(a,p,1),m=o(a,p,2),g=o(a,p,3);const[y,b,x,w]=Eo(this.kernel.read(),4,3),[k,v,N,I]=this.useBias?Eo(this.bias.read(),4):[null,null,null,null];l=this.inputConv(l,y,k,this.padding),u=this.inputConv(u,b,v,this.padding),c=this.inputConv(c,x,N,this.padding),h=this.inputConv(h,w,I,this.padding);const[$,C,S,T]=Eo(this.recurrentKernel.read(),4,3);d=this.recurrentConv(d,$),f=this.recurrentConv(f,C),m=this.recurrentConv(m,S),g=this.recurrentConv(g,T);const E=this.recurrentActivation.apply(Or(l,d)),A=this.recurrentActivation.apply(Or(u,f)),R=Or(Mi(A,r),Mi(E,this.activation.apply(Or(c,m)))),F=Mi(this.recurrentActivation.apply(Or(h,g)),this.activation.apply(R));return[F,F,R]}))}getConfig(){const e=super.getConfig(),{units:t}=e,n=function(e,t){var n={};for(var s in e)Object.prototype.hasOwnProperty.call(e,s)&&t.indexOf(s)<0&&(n[s]=e[s]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols){var a=0;for(s=Object.getOwnPropertySymbols(e);a<s.length;a++)t.indexOf(s[a])<0&&Object.prototype.propertyIsEnumerable.call(e,s[a])&&(n[s[a]]=e[s[a]])}return n}(e,["units"]),s={filters:this.filters,kernelSize:this.kernelSize,padding:this.padding,dataFormat:this.dataFormat,dilationRate:this.dilationRate,strides:this.strides};return Object.assign({},n,s)}inputConv(e,t,n,s){const a=hi(e,t,this.strides,s||"valid","channelsFirst"===this.dataFormat?"NCHW":"NHWC",this.dilationRate);return n?Ld(a,n,this.dataFormat):a}recurrentConv(e,t){return hi(e,t,1,"same","channelsFirst"===this.dataFormat?"NCHW":"NHWC")}}qg.className="ConvLSTM2DCell",Nr(qg);class Xg extends Kg{constructor(e){const t=new qg(e);super(Object.assign({},e,{cell:t}))}static fromConfig(e,t){return new e(t)}}Xg.className="ConvLSTM2D",Nr(Xg);class Yg extends Cf{constructor(e){super(e),this.rate=Math.max(Math.min(e.rate,1),0),this.noiseShape=e.noiseShape,this.seed=e.seed,this.supportsMasking=!0}getNoiseShape(e){if(null==this.noiseShape)return this.noiseShape;const t=e.shape,n=[];for(let e=0;e<this.noiseShape.length;++e)n.push(null==this.noiseShape[e]?t[e]:this.noiseShape[e]);return n}call(e,t){return Cr((()=>{this.invokeCallHook(e,t);const n=mf(e);if(0<this.rate&&this.rate<1){const 
e=null!=t.training&&t.training,s=this.getNoiseShape(n);return Pd((()=>zd(n,this.rate,s,this.seed)),(()=>n),e)}return e}))}getConfig(){const e={rate:this.rate,noiseShape:this.noiseShape,seed:this.seed},t=super.getConfig();return Object.assign(e,t),e}dispose(){return super.dispose()}}Yg.className="Dropout",Nr(Yg);class Jg extends Yg{constructor(e){super(e),this.inputSpec=[{ndim:3}]}getNoiseShape(e){const t=e.shape;return[t[0],1,t[2]]}}Jg.className="SpatialDropout1D",Nr(Jg);class Zg extends Cf{constructor(e){if(super(e),this.activation=null,this.useBias=!0,this.kernel=null,this.bias=null,this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_BIAS_INITIALIZER="zeros",null==e.batchInputShape&&null==e.inputShape&&null!=e.inputDim){let t=null;null!=e.batchSize&&(t=e.batchSize),this.batchInputShape=[t,e.inputDim]}this.units=e.units,Hp(this.units,"units"),this.activation=sg(e.activation),null!=e.useBias&&(this.useBias=e.useBias),this.kernelInitializer=lf(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.biasInitializer=lf(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelConstraint=sd(e.kernelConstraint),this.biasConstraint=sd(e.biasConstraint),this.kernelRegularizer=ug(e.kernelRegularizer),this.biasRegularizer=ug(e.biasRegularizer),this.activityRegularizer=ug(e.activityRegularizer),this.supportsMasking=!0,this.inputSpec=[{minNDim:2}]}build(e){const t=(e=gf(e))[e.length-1];null==this.kernel&&(this.kernel=this.addWeight("kernel",[t,this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.units],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint))),this.inputSpec=[{minNDim:2,axes:{[-1]:t}}],this.built=!0}computeOutputShape(e){const t=(e=gf(e)).slice();return t[t.length-1]=this.units,t}call(e,t){return Cr((()=>{this.invokeCallHook(e,t);const n=mf(e),s=Kp(this.activation.getClassName());let a;return null!=s?a=Dd(n,this.kernel.read(),s,this.bias?this.bias.read():null):(a=Dd(n,this.kernel.read()),null!=this.bias&&(a=Ld(a,this.bias.read())),null!=this.activation&&(a=this.activation.apply(a))),a}))}getConfig(){const e={units:this.units,activation:tg(this.activation),useBias:this.useBias,kernelInitializer:of(this.kernelInitializer),biasInitializer:of(this.biasInitializer),kernelRegularizer:og(this.kernelRegularizer),biasRegularizer:og(this.biasRegularizer),activityRegularizer:og(this.activityRegularizer),kernelConstraint:td(this.kernelConstraint),biasConstraint:td(this.biasConstraint)},t=super.getConfig();return Object.assign(e,t),e}}Zg.className="Dense",Nr(Zg);class Qg extends Cf{constructor(e){super(e=e||{}),this.inputSpec=[{minNDim:3}],this.dataFormat=e.dataFormat}computeOutputShape(e){e=gf(e);for(const t of e.slice(1))if(null==t)throw new Cp(`The shape of the input to "Flatten" is not fully defined (got ${e.slice(1)}). Make sure to pass a complete "input_shape" or "batch_input_shape" argument to the first layer in your model.`);return[e[0],xd(e,1)]}call(e,t){return Cr((()=>{this.invokeCallHook(e,t);let n=mf(e);if("channelsFirst"===this.dataFormat&&n.rank>1){const e=[0];for(let t=2;t<n.rank;++t)e.push(t);e.push(1),n=n.transpose(e)}return function(e){if(e.rank<=1)throw new Cp(`batchFlatten requires a minimum rank of 2. 
Got rank: ${e.rank}.`);const t=[e.shape[0],xd(e.shape,1)];return e.reshape(t)}(n)}))}getConfig(){const e={};null!=this.dataFormat&&(e.dataFormat=this.dataFormat);const t=super.getConfig();return Object.assign(e,t),e}}Qg.className="Flatten",Nr(Qg);class ey extends Cf{constructor(e){super(e),this.supportsMasking=!0,this.activation=sg(e.activation)}call(e,t){return Cr((()=>{this.invokeCallHook(e,t);const n=mf(e);return this.activation.apply(n)}))}getConfig(){const e={activation:tg(this.activation)},t=super.getConfig();return Object.assign(e,t),e}}ey.className="Activation",Nr(ey);class ty extends Cf{constructor(e){super(e),this.n=e.n,this.inputSpec=[{ndim:2}]}computeOutputShape(e){return[e[0],this.n,e[1]]}call(e,t){return Cr((()=>{return e=mf(e),t=e,n=this.n,Cr((()=>{if(2!==t.shape.length)throw new Cp(`repeat() expects a rank-2 tensor, but received a rank-${t.shape.length} tensor.`);return Rd($d(t,1),[1,n,1])}));var t,n}))}getConfig(){const e={n:this.n},t=super.getConfig();return Object.assign(e,t),e}}ty.className="RepeatVector",Nr(ty);class ny extends Cf{constructor(e){super(e),this.targetShape=e.targetShape;for(let e=0;e<this.targetShape.length;++e)this.isUnknown(this.targetShape[e])&&(this.targetShape[e]=null)}isUnknown(e){return e<0||null==e}fixUnknownDimension(e,t){const n="Total size of new array must be unchanged.",s=t.slice();let a=1,r=null;for(let e=0;e<s.length;++e){const t=s[e];if(this.isUnknown(t)){if(null!==r)throw new Cp("Can only specifiy one unknown dimension.");r=e}else a*=t}const i=xd(e);if(null!==r){if(0===a||i%a!=0)throw new Cp(n);s[r]=i/a}else if(i!==a)throw new Cp(n);return s}computeOutputShape(e){let t=!1;for(let n=0;n<e.length;++n)if(this.isUnknown(e[n])){t=!0;break}return t?e.slice(0,1).concat(this.targetShape):e.slice(0,1).concat(this.fixUnknownDimension(e.slice(1),this.targetShape))}call(e,t){return Cr((()=>{this.invokeCallHook(e,t);const n=mf(e),s=n.shape,a=s.slice(0,1).concat(this.fixUnknownDimension(s.slice(1),this.targetShape));return n.reshape(a)}))}getConfig(){const e={targetShape:this.targetShape},t=super.getConfig();return Object.assign(e,t),e}}ny.className="Reshape",Nr(ny);class sy extends Cf{constructor(e){if(super(e),null==e.dims)throw new Error("Required configuration field `dims` is missing during Permute constructor call.");if(!Array.isArray(e.dims))throw new Error(`Permute constructor requires \`dims\` to be an Array, but received ${e.dims} instead.`);const t=Nd(1,e.dims.length+1);if(!_(e.dims.slice().sort(),t))throw new Error("Invalid permutation `dims`: "+JSON.stringify(e.dims)+" `dims` must contain consecutive integers starting from 1.");this.dims=e.dims,this.dimsIncludingBatch=[0].concat(this.dims),this.inputSpec=[new kf({ndim:this.dims.length+1})]}computeOutputShape(e){const t=(e=gf(e)).slice();return this.dims.forEach(((n,s)=>{t[s+1]=e[n]})),t}call(e,t){return Wo(mf(e),this.dimsIncludingBatch)}getConfig(){const e={dims:this.dims},t=super.getConfig();return Object.assign(e,t),e}}sy.className="Permute",Nr(sy);class ay extends Cf{constructor(e){super(null==e?{}:e),this.supportsMasking=!0,this.maskValue=null!=e?null==e.maskValue?0:e.maskValue:0}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={maskValue:this.maskValue};return Object.assign(t,e),t}computeMask(e,t){const n=mf(e);return Lr(ao(n,this.maskValue),-1)}call(e,t){return Cr((()=>{this.invokeCallHook(e,t);const n=mf(e),s=Lr(ao(n,this.maskValue),-1,!0);return n.mul(s.asType(n.dtype))}))}}ay.className="Masking",Nr(ay);class ry extends 
Cf{constructor(e){if(super(e),this.embeddings=null,this.DEFAULT_EMBEDDINGS_INITIALIZER="randomUniform",null==e.batchInputShape&&null==e.inputShape){let t=null;null!=e.batchSize&&(t=e.batchSize),null==e.inputLength?this.batchInputShape=[t,null]:this.batchInputShape=[t].concat(Dp(e.inputLength))}this.inputDim=e.inputDim,Hp(this.inputDim,"inputDim"),this.outputDim=e.outputDim,Hp(this.outputDim,"outputDim"),this.embeddingsInitializer=lf(e.embeddingsInitializer||this.DEFAULT_EMBEDDINGS_INITIALIZER),this.embeddingsRegularizer=ug(e.embeddingsRegularizer),this.activityRegularizer=ug(e.activityRegularizer),this.embeddingsConstraint=sd(e.embeddingsConstraint),this.maskZero=e.maskZero,this.supportsMasking=e.maskZero,this.inputLength=e.inputLength}build(e){this.embeddings=this.addWeight("embeddings",[this.inputDim,this.outputDim],this.dtype,this.embeddingsInitializer,this.embeddingsRegularizer,!0,this.embeddingsConstraint),this.built=!0}warnOnIncompatibleInputShape(e){}computeMask(e,t){return Cr((()=>this.maskZero?(e=mf(e),ao(e,Bo(e))):null))}computeOutputShape(e){if(e=gf(e),null==this.inputLength)return[...e,this.outputDim];const t=Dp(this.inputLength);if(t.length!==e.length-1)throw new Cp(`"inputLength" is ${this.inputLength}, but received input shape has shape ${e}`);{let n=0;for(let s=0;s<t.length;++s){const a=t[s],r=e[s+1];if(null!=a&&null!=r&&a!==r)throw new Cp(`"inputLength" is ${this.inputLength}, but received input shape has shape ${e}`);null==a&&(t[n]=r),n++}}return[e[0],...t,this.outputDim]}call(e,t){return Cr((()=>{this.invokeCallHook(e,t);let n=mf(e);return"int32"!==n.dtype&&(n=Id(n,"int32")),_d(this.embeddings.read(),n.as1D()).reshape(gf(this.computeOutputShape(n.shape)))}))}getConfig(){const e={inputDim:this.inputDim,outputDim:this.outputDim,embeddingsInitializer:of(this.embeddingsInitializer),embeddingsRegularizer:og(this.embeddingsRegularizer),activityRegularizer:og(this.activityRegularizer),embeddingsConstraint:td(this.embeddingsConstraint),maskZero:this.maskZero,inputLength:this.inputLength},t=super.getConfig();return Object.assign(e,t),e}}ry.className="Embedding",Nr(ry);class iy extends Cf{constructor(e){super(e||{}),this.supportsMasking=!0}mergeFunction(e){throw new Sp}computeElementwiseOpOutputShape(e,t){if(null==e||null==t)return null;if(e.length<t.length)return this.computeElementwiseOpOutputShape(t,e);if(0===t.length)return e;const n=e.slice(0,e.length-t.length);for(let s=0;s<t.length;++s){const a=e[e.length-t.length+s],r=t[s];if(null==a||null==r||a<0||r<0)n.push(null);else if(1===a)n.push(r);else if(1===r)n.push(a);else{if(a!==r)throw new Cp("Operands could not be broadcast together with shapes "+JSON.stringify(e)+" "+JSON.stringify(t));n.push(a)}}return n}build(e){if(Array.isArray(e)&&!Array.isArray(e[0])&&(e=[gf(e)]),(e=e).length<2)throw new Cp(`A merge layer should be called on an Array of at least 2 inputs. Got ${e.length} input(s).`);let t=[];for(const n of e)null!=n&&null!==n[0]&&t.push(n[0]);if(t=Wp(t),t.length>1)throw new Cp(`Can not merge tensors with different batch sizes. 
Got tensors with shapes: ${JSON.stringify(e)}.`);let n=null==e[0]?null:e[0].slice(1);for(let t=1;t<e.length;++t){const s=null==e[t]?null:e[t].slice(1);n=this.computeElementwiseOpOutputShape(n,s)}const s=e.map((e=>e.length));-1===e.indexOf(null)&&1===Wp(s).length?this.reshapeRequired=!1:this.reshapeRequired=!0}call(e,t){return Cr((()=>{if(e=e,this.reshapeRequired){const t=[],n=e.map((e=>e.rank));if(-1===n.indexOf(null)){const s=vd(n);for(let n of e){const e=n.rank;for(let t=0;t<s-e;++t)n=$d(n,1);t.push(n)}return this.mergeFunction(t)}{let n=!1;for(const s of e){const e=s.rank;if(null==e){const e=s.shape,a=e[0],r=e.slice(1).concat([a]);let i=s.reshape([a].concat(xd(e.slice(1))));i=Wo(i,[1,0]),i=i.reshape(r),t.push(i),n=!0}else if(e>1){const a=Nd(1,e).concat([0]);t.push(Wo(s,a)),n=!0}else t.push(s)}let s=this.mergeFunction(t);const a=s.rank;if(n)if(null==a){const e=s.shape,t=e[e.length-1],n=[t].concat(e.slice(0,e.length-1));s=Wo(s.reshape([-1,t]),[1,0]).reshape(n)}else if(a>1){const e=[a-1].concat(Nd(0,a-1));s=Wo(s,e)}return s}}return this.mergeFunction(e)}))}computeOutputShape(e){let t;t=null==(e=e)[0]?null:e[0].slice(1);for(let n=1;n<e.length;++n){const s=null==e[n]?null:e[n].slice(1);t=this.computeElementwiseOpOutputShape(t,s)}let n=[];for(const t of e)null!=t&&null!==t[0]&&n.push(t[0]);return n=Wp(n),t=1===n.length?n.concat(t):[null].concat(t),t}computeMask(e,t){return Cr((()=>{if(null==t)return null;if(!Array.isArray(t))throw new Cp("`mask` should be an Array");if(!Array.isArray(e))throw new Cp("`inputs` should be an Array");if(t.length!==e.length)throw new Cp(`The Array 'inputs' and 'mask' are expected to have the same length, but have different lengths (${e.length} vs ${t.length})`);if(t.every((e=>null==e)))return null;let n=(t=t.map((e=>null==e?e:Ii(e,0))))[0];for(let e=1;e<t.length-1;++e)n=Bi(n,t[e]);return n}))}}class oy extends iy{constructor(e){super(e)}mergeFunction(e){return Cr((()=>{let t=e[0].clone();for(let n=1;n<e.length;++n)t=Or(t,e[n]);return t}))}}oy.className="Add",Nr(oy);class ly extends iy{constructor(e){super(e)}mergeFunction(e){return Cr((()=>{let t=e[0].clone();for(let n=1;n<e.length;++n)t=Mi(t,e[n]);return t}))}}ly.className="Multiply",Nr(ly);class uy extends iy{constructor(e){super(e)}mergeFunction(e){return Cr((()=>{let t=e[0].clone();for(let n=1;n<e.length;++n)t=Or(t,e[n]);return Mi(1/e.length,t)}))}}uy.className="Average",Nr(uy);class cy extends iy{constructor(e){super(e)}mergeFunction(e){return Cr((()=>{let t=e[0];for(let n=1;n<e.length;++n)t=Ui(t,e[n]);return t}))}}cy.className="Maximum",Nr(cy);class hy extends iy{constructor(e){super(e)}mergeFunction(e){return Cr((()=>{let t=e[0];for(let n=1;n<e.length;++n)t=ji(t,e[n]);return t}))}}hy.className="Minimum",Nr(hy);class py extends iy{constructor(e){super(e),this.DEFAULT_AXIS=-1,null==e&&(e={}),this.axis=null==e.axis?this.DEFAULT_AXIS:e.axis,this.supportsMasking=!0,this.reshapeRequired=!1}build(e){if(!Array.isArray(e)||!Array.isArray(e[0])||1===e.length)throw new Cp("A `Concatenate` layer should be called on a list of at least 2 inputs");e=e;let t=!0;for(const n of e)if(null!=n){t=!1;break}if(t)return;const n=[];for(let t=0;t<e.length;++t){const s=e[t].slice();s.splice(this.axis,1);let a=!1;for(const e of n)if(_(e,s)){a=!0;break}a||n.push(s)}if(n.length>1)throw new Cp("A `Concatenate` layer requires inputs with matching shapes except for the concat axis. 
Got input shapes: "+JSON.stringify(e))}mergeFunction(e){return Cr((()=>Ed(e,this.axis)))}computeOutputShape(e){if(!Array.isArray(e)||!Array.isArray(e[0]))throw new Cp("A `Concatenate` layer should be called on a list of inputs.");const t=e,n=t[0].slice(),s=this.axis<0?n.length+this.axis:this.axis;for(const e of t.slice(1)){if(null==n[s]||null==e[s]){n[s]=null;break}n[s]+=e[s]}return n}computeMask(e,t){if(null==t)return null;if(!Array.isArray(t))throw new Cp("`mask` should be an array for Concatenate");if(!Array.isArray(e))throw new Cp("`inputs` should be an array for Concatenate");if(t.length!==e.length)throw new Cp(`Mismatch in the length of mask (${t.length}) and the legnth of inputs (${e.length})`);return Cr((()=>{let n=!0;if(t.forEach((e=>{null==e||(n=!1)})),n)return null;const s=[];for(let n=0;n<e.length;++n)null==t[n]?s.push(lo(e[n]).asType("bool")):t[n].rank<e[n].rank?s.push(Ii(t[n],-1)):s.push(t[n]);const a=ii(s,this.axis);return Mr(a,-1,!1)}))}getConfig(){const e={axis:this.axis},t=super.getConfig();return Object.assign(e,t),e}}function dy(e,t){for(;e<0;)e+=t;return e}py.className="Concatenate",Nr(py);class fy extends iy{constructor(e){super(e),this.axes=e.axes,this.normalize=null!=e.normalize&&e.normalize,this.supportsMasking=!0,this.reshapeRequired=!1}build(e){E(Array.isArray(e)&&2===e.length&&Array.isArray(e[0])&&Array.isArray(e[1]),(()=>"A `Dot` layer should be called on a list of exactly 2 inputs."));const t=e[0],n=e[1];if(t.length>3||n.length>3)throw new Sp("Dot layer does not support tensors of 4D or higher rank yet.");const s=this.interpretAxes(t,n);if(t[s[0]]!==n[s[1]])throw new Cp(`Dimension incompatibility: ${t[s[0]]} !== ${n[s[1]]}`)}mergeFunction(e){if(2!==e.length)throw new Cp(`A \`Dot\` layer must be called on exactly 2 inputs, but received ${e.length} input(s).`);let t,n=e[0],s=e[1];return t=Array.isArray(this.axes)?this.axes.map(((t,n)=>dy(t,e[n].shape.length))):[dy(this.axes,n.shape.length),dy(this.axes,s.shape.length)],this.normalize&&(n=Wf(n,t[0]),s=Wf(s,t[1])),function(e,t,n){if(e.shape.length>3||t.shape.length>3)throw new Sp("batchDot is not implemented for tensors of 4D or higher rank yet");if(E(e.shape.length>=2,(()=>`batchDot requires the rank of x to be >= 2, but got ${e.shape.length}`)),E(e.shape.length>=2,(()=>`batchDot requires the rank of y to be >= 2, but got ${t.shape.length}`)),"number"==typeof n&&(n=[n,n]),"complex64"===e.dtype||"complex64"===t.dtype)throw new Sp("batchDot is not implemented for complex64-type Tensors yet.");const s=e.shape.length,a=t.shape.length;null==n&&(n=[s-1,a-2]);const r=n;return Cr((()=>{let n,i;if(s>a){n=s-a;const e=[];for(let t=0;t<n;++t)e.push(1);t=t.reshape(t.shape.concat(e))}else if(a>s){n=a-s;const t=[];for(let e=0;e<n;++e)t.push(1);e=e.reshape(e.shape.concat(t))}else n=0;if(2===e.shape.length&&2===t.shape.length)i=r[0]===r[1]?e.mul(t).sum(r[0]):e.transpose([1,0]).mul(t).sum(r[1]);else{const n=r[0]!==e.shape.length-1,s=r[1]===t.shape.length-1;i=e.matMul(t,n,s)}if(n>0){let e;e=s>a?s+a-3:s-1;const t=[];for(let s=e;s<e+n;++s)t.push(s);i=i.squeeze(t)}return 1===i.shape.length&&(i=i.expandDims(1)),i}))}(n,s,t)}interpretAxes(e,t){let n;return n=Array.isArray(this.axes)?this.axes:[dy(this.axes,e.length),dy(this.axes,t.length)],n}computeOutputShape(e){E(Array.isArray(e)&&2===e.length&&Array.isArray(e[0])&&Array.isArray(e[1]),(()=>"A `Dot` layer should be called on a list of exactly 2 inputs."));const t=e[0].slice(),n=e[1].slice();if(t.length>3||n.length>3)throw new Sp("Dot layer does not support tensors of 4D or higher rank 
yet.");const s=this.interpretAxes(t,n);t.splice(s[0],1),n.splice(s[1],1),n.splice(0,1);const a=t.concat(n);return 1===a.length&&a.push(1),a}computeMask(e,t){return null}getConfig(){const e={axes:this.axes,normalize:this.normalize},t=super.getConfig();return Object.assign(e,t),e}}fy.className="Dot",Nr(fy);class my extends Cf{constructor(e){super(e),this.supportsMasking=!0,this.stddev=e.stddev}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={stddev:this.stddev};return Object.assign(t,e),t}call(e,t){return Cr((()=>{this.invokeCallHook(e,t);const n=mf(e);return Pd((()=>Fd(n.shape,0,this.stddev).add(n)),(()=>n),t.training||!1)}))}}my.className="GaussianNoise",Nr(my);class gy extends Cf{constructor(e){super(e),this.supportsMasking=!0,this.rate=e.rate}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={rate:this.rate};return Object.assign(t,e),t}call(e,t){return Cr((()=>{this.invokeCallHook(e,t);const n=mf(e);return this.rate>0&&this.rate<1?Pd((()=>{const e=Math.sqrt(this.rate/(1-this.rate));return n.mul(Fd(n.shape,1,e))}),(()=>n),t.training||!1):n}))}}gy.className="GaussianDropout",Nr(gy);class yy extends Cf{constructor(e){super(e),this.supportsMasking=!0,this.rate=e.rate,this.noiseShape=e.noiseShape}_getNoiseShape(e){return this.noiseShape||mf(e).shape}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={rate:this.rate};return Object.assign(t,e),t}call(e,t){return Cr((()=>{if(this.rate<1&&this.rate>0){const n=this._getNoiseShape(e);return Pd((()=>{const t=mf(e),s=-1.7580993408473766;let a=Ri(go(n),this.rate);a=Id(a,"float32");const r=((1-this.rate)*(1+this.rate*s**2))**-.5,i=-r*s*this.rate;return t.mul(a).add(a.add(-1).mul(s)).mul(r).add(i)}),(()=>mf(e)),t.training||!1)}return e}))}}function by(e,t,n,s,a,r=.001){let i;if(2===e.rank)i=ni(e,t,n,s,a,r);else if(3===e.rank)i=si(e,t,n,s,a,r);else{if(4!==e.rank)throw new Sp(`batchNormalization is not implemented for array of rank ${e.rank} yet`);i=ai(e,t,n,s,a,r)}return i}yy.className="AlphaDropout",Nr(yy);class xy extends Cf{constructor(e){null==e&&(e={}),super(e),this.supportsMasking=!0,this.axis=null==e.axis?-1:e.axis,this.momentum=null==e.momentum?.99:e.momentum,this.epsilon=null==e.epsilon?.001:e.epsilon,this.center=null==e.center||e.center,this.scale=null==e.scale||e.scale,this.betaInitializer=lf(e.betaInitializer||"zeros"),this.gammaInitializer=lf(e.gammaInitializer||"ones"),this.movingMeanInitializer=lf(e.movingMeanInitializer||"zeros"),this.movingVarianceInitializer=lf(e.movingVarianceInitializer||"ones"),this.betaConstraint=sd(e.betaConstraint),this.gammaConstraint=sd(e.gammaConstraint),this.betaRegularizer=ug(e.betaRegularizer),this.gammaRegularizer=ug(e.gammaRegularizer)}build(e){e=gf(e);const t=this.axis>=0?this.axis:this.axis+e.length,n=e[t];if(null==n)throw new Cp(`Axis ${t} of input tensor should have a defined dimension but the layer received an input with shape ${JSON.stringify(e)}.`);this.inputSpec=[new kf({ndim:e.length,axes:{[t]:n}})];const s=[n];this.scale&&(this.gamma=this.addWeight("gamma",s,null,this.gammaInitializer,this.gammaRegularizer,!0,this.gammaConstraint)),this.center&&(this.beta=this.addWeight("beta",s,null,this.betaInitializer,this.betaRegularizer,!0,this.betaConstraint)),this.movingMean=this.addWeight("moving_mean",s,null,this.movingMeanInitializer,null,!1),this.movingVariance=this.addWeight("moving_variance",s,null,this.movingVarianceInitializer,null,!1),this.built=!0}call(e,t){return Cr((()=>{const 
n=null!=t.training&&t.training,s=mf(e),a=s.shape,r=a.length,i=Nd(0,r),o=this.axis>=0?this.axis:this.axis+r;i.splice(o,1);const l=Ep(1,r);l[o]=a[o];const u=i.slice();u.sort();const c=!_(u,Nd(0,r).slice(0,r-1));if(!n)return(()=>{if(c){const e=this.movingMean.read().reshape(l),t=this.movingVariance.read().reshape(l),n=this.center?this.beta.read().reshape(l):null,a=this.scale?this.gamma.read().reshape(l):null;return by(s,e,t,n,a,this.epsilon)}return by(s,this.movingMean.read(),this.movingVariance.read(),null==this.beta?null:this.beta.read(),null==this.gamma?null:this.gamma.read(),this.epsilon)})();const[h,p,d]=function(e,t,n,s,a=.001){return _(s.slice().sort(),Nd(0,e.rank-1))?function(e,t,n,s,a=.001){return Cr((()=>{const r=no(e,s),i=r.mean,o=r.variance;return[by(e,i,o,n,t,a),i,o]}))}(e,t,n,s,a):function(e,t,n,s,a=.001){return Cr((()=>{const r=no(e,s),i=r.mean,o=r.variance,l=[];for(const t of Nd(0,e.rank))-1!==s.indexOf(t)?l.push(1):l.push(e.shape[t]);const u=i.reshape(l),c=o.reshape(l),h=null==t?null:t.reshape(l),p=null==n?null:n.reshape(l);return[by(e,u,c,p,h,a),i,o]}))}(e,t,n,s,a)}(s,this.gamma.read(),this.beta.read(),i,this.epsilon),f=(e,t,n)=>{Cr((()=>{const s=1-n,a=e.read(),r=a.sub(t).mul(s);e.write(a.sub(r))}))};return(()=>{f(this.movingMean,p,this.momentum),f(this.movingVariance,d,this.momentum)})(),h}))}getConfig(){const e={axis:this.axis,momentum:this.momentum,epsilon:this.epsilon,center:this.center,scale:this.scale,betaInitializer:of(this.betaInitializer),gammaInitializer:of(this.gammaInitializer),movingMeanInitializer:of(this.movingMeanInitializer),movingVarianceInitializer:of(this.movingVarianceInitializer),betaRegularizer:og(this.betaRegularizer),gammaRegularizer:og(this.gammaRegularizer),betaConstraint:td(this.betaConstraint),gammaConstraint:td(this.gammaConstraint)},t=super.getConfig();return Object.assign(e,t),e}}xy.className="BatchNormalization",Nr(xy);class wy extends Cf{constructor(e){if(null==e&&(e={}),super(e),this.axis=null==e.axis?-1:e.axis,"number"==typeof this.axis){if(!Number.isInteger(this.axis))throw new Error(`Expected axis to be an integer, but received ${this.axis}`)}else{if(!Array.isArray(this.axis))throw new Error(`Expected axis to be an integer or an array of integers, but received ${JSON.stringify(this.axis)}`);for(const e of this.axis)if(!Number.isInteger(e))throw new Error(`Expected axis to be an array of integers, but received ${JSON.stringify(this.axis)}`)}this.epsilon=null==e.epsilon?.001:e.epsilon,this.center=null==e.center||e.center,this.scale=null==e.scale||e.scale,this.betaInitializer=lf(e.betaInitializer||"zeros"),this.gammaInitializer=lf(e.gammaInitializer||"ones"),this.betaRegularizer=ug(e.betaRegularizer),this.gammaRegularizer=ug(e.gammaRegularizer),this.supportsMasking=!0}build(e){const t=(e=gf(e)).length;"number"==typeof this.axis&&(this.axis=[this.axis]);for(let e=0;e<this.axis.length;++e)this.axis[e]<0&&(this.axis[e]+=t);for(const e of this.axis)if(e<0||e>=t)throw new Error(`Invalid axis: ${e}`);if(this.axis.length!==Wp(this.axis).length)throw new Error(`Found duplicate axes in: ${this.axis}`);const n=this.axis.map((t=>e[t]));this.scale?this.gamma=this.addWeight("gamma",n,"float32",this.gammaInitializer,this.gammaRegularizer,!0):this.gamma=null,this.center?this.beta=this.addWeight("beta",n,"float32",this.betaInitializer,this.betaRegularizer,!0):this.beta=null,this.built=!0}call(e,t){const n=mf(e),s=n.shape,a=s.length;return Cr((()=>{let{mean:e,variance:t}=no(n,this.axis,!0);const r=Ep(1,a);for(const e of this.axis)r[e]=s[e];const 
i=e=>null!=e&&e.shape.length!==a&&this.axis!==[a-1]?e.reshape(r):e;let o=i(this.gamma.read()),l=i(this.beta.read());const u=[],c=[];for(let e=0;e<a;++e)-1!==this.axis.indexOf(e)?(u.push(s[e]),c.push(1)):(u.push(1),c.push(s[e]));return e=e.tile(u),t=t.tile(u),o=o.tile(c),l=l.tile(c),by(n,e,t,l,o,this.epsilon)}))}getConfig(){const e={axis:this.axis,epsilon:this.epsilon,center:this.center,scale:this.scale,betaInitializer:of(this.betaInitializer),gammaInitializer:of(this.gammaInitializer),betaRegularizer:og(this.betaRegularizer),gammaRegularizer:og(this.gammaRegularizer)},t=super.getConfig();return Object.assign(e,t),e}}wy.className="LayerNormalization",Nr(wy);class ky extends Cf{constructor(e){if(null==e&&(e={}),super(e),this.dataFormat=null==e.dataFormat?"channelsLast":e.dataFormat,null==e.padding)this.padding=[[1,1],[1,1]];else if("number"==typeof e.padding)this.padding=[[e.padding,e.padding],[e.padding,e.padding]];else{if(e.padding=e.padding,2!==e.padding.length)throw new Cp(`ZeroPadding2D expects padding to be a length-2 array, but received a length-${e.padding.length} array.`);let t,n;if("number"==typeof e.padding[0])t=[e.padding[0],e.padding[0]],n=[e.padding[1],e.padding[1]];else{if(e.padding=e.padding,2!==e.padding[0].length)throw new Cp(`ZeroPadding2D expects height padding to be a length-2 array, but received a length-${e.padding[0].length} array.`);if(t=e.padding[0],2!==e.padding[1].length)throw new Cp(`ZeroPadding2D expects width padding to be a length-2 array, but received a length-${e.padding[1].length} array.`);n=e.padding[1]}this.padding=[t,n]}this.inputSpec=[new kf({ndim:4})]}computeOutputShape(e){let t,n;return e=gf(e),"channelsFirst"===this.dataFormat?(t=null!=e[2]&&e[2]>=0?e[2]+this.padding[0][0]+this.padding[0][1]:null,n=null!=e[3]&&e[3]>=0?e[3]+this.padding[1][0]+this.padding[1][1]:null,[e[0],e[1],t,n]):(t=null!=e[1]&&e[1]>=0?e[1]+this.padding[0][0]+this.padding[0][1]:null,n=null!=e[2]&&e[2]>=0?e[2]+this.padding[1][0]+this.padding[1][1]:null,[e[0],t,n,e[3]])}call(e,t){return Cr((()=>{return t=mf(e),n=this.padding,s=this.dataFormat,Cr((()=>{if(4!==t.rank)throw new Cp(`temporalPadding expects input tensor to be 4-D, but received a ${t.rank}-D tensor.`);if(null==n&&(n=[[1,1],[1,1]]),2!==n.length||2!==n[0].length||2!==n[1].length)throw new Cp("spatial2dPadding expects `padding` to be an Array of two Arrays, each of which is an Array of two integers.");if(null==s&&(s="channelsLast"),"channelsLast"!==s&&"channelsFirst"!==s)throw new Cp(`Unknown data format: ${s}. 
Supported data formats are 'channelsLast' and 'channelsFirst.`);let e;return e="channelsFirst"===s?[[0,0],[0,0],n[0],n[1]]:[[0,0],n[0],n[1],[0,0]],uo(t,e)}));var t,n,s}))}getConfig(){const e={padding:this.padding,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}function vy(e,t,n,s,a,r){return Cr((()=>{let i;cd(a),pd(r),hd(s),null==n&&(n=[1,1]),null==s&&(s="valid"),null==a&&(a="channelsLast"),null==r&&(r="max"),e=xg(e,a);const o="same"===s?"same":"valid";return i="max"===r?Wi(e,t,n,o):Qr(e,t,n,o),"channelsFirst"===a&&(i=Wo(i,[0,3,1,2])),i}))}function Ny(e,t,n,s,a,r){return Cr((()=>{let i;cd(a),pd(r),hd(s),null==n&&(n=[1,1,1]),null==s&&(s="valid"),null==a&&(a="channelsLast"),null==r&&(r="max"),e=wg(e,a);const o="same"===s?"same":"valid";return i="max"===r?Vi(e,t,n,o):ei(e,t,n,o),"channelsFirst"===a&&(i=Wo(i,[0,4,1,2,3])),i}))}ky.className="ZeroPadding2D",Nr(ky);class Iy extends Cf{constructor(e){if(null==e.poolSize&&(e.poolSize=2),super(e),"number"==typeof e.poolSize)this.poolSize=[e.poolSize];else{if(!Array.isArray(e.poolSize)||1!==e.poolSize.length||"number"!=typeof e.poolSize[0])throw new Cp(`poolSize for 1D convolutional layer must be a number or an Array of a single number, but received ${JSON.stringify(e.poolSize)}`);this.poolSize=e.poolSize}if(Hp(this.poolSize,"poolSize"),null==e.strides)this.strides=this.poolSize;else if("number"==typeof e.strides)this.strides=[e.strides];else{if(!Array.isArray(e.strides)||1!==e.strides.length||"number"!=typeof e.strides[0])throw new Cp(`strides for 1D convolutional layer must be a number or an Array of a single number, but received ${JSON.stringify(e.strides)}`);this.strides=e.strides}Hp(this.strides,"strides"),this.padding=null==e.padding?"valid":e.padding,hd(this.padding),this.inputSpec=[new kf({ndim:3})]}computeOutputShape(e){const t=yg((e=gf(e))[1],this.poolSize[0],this.padding,this.strides[0]);return[e[0],t,e[2]]}call(e,t){return Cr((()=>{this.invokeCallHook(e,t),e=$d(mf(e),2);const n=this.poolingFunction(mf(e),[this.poolSize[0],1],[this.strides[0],1],this.padding,"channelsLast");return Ro(n,[2])}))}getConfig(){const e={poolSize:this.poolSize,padding:this.padding,strides:this.strides},t=super.getConfig();return Object.assign(e,t),e}}class $y extends Iy{constructor(e){super(e)}poolingFunction(e,t,n,s,a){return cd(a),hd(s),vy(e,t,n,s,a,"max")}}$y.className="MaxPooling1D",Nr($y);class Cy extends Iy{constructor(e){super(e)}poolingFunction(e,t,n,s,a){return cd(a),hd(s),vy(e,t,n,s,a,"avg")}}Cy.className="AveragePooling1D",Nr(Cy);class Sy extends Cf{constructor(e){if(null==e.poolSize&&(e.poolSize=[2,2]),super(e),this.poolSize=Array.isArray(e.poolSize)?e.poolSize:[e.poolSize,e.poolSize],null==e.strides)this.strides=this.poolSize;else if(Array.isArray(e.strides)){if(2!==e.strides.length)throw new Cp(`If the strides property of a 2D pooling layer is an Array, it is expected to have a length of 2, but received length ${e.strides.length}.`);this.strides=e.strides}else this.strides=[e.strides,e.strides];Hp(this.poolSize,"poolSize"),Hp(this.strides,"strides"),this.padding=null==e.padding?"valid":e.padding,this.dataFormat=null==e.dataFormat?"channelsLast":e.dataFormat,cd(this.dataFormat),hd(this.padding),this.inputSpec=[new kf({ndim:4})]}computeOutputShape(e){e=gf(e);let t="channelsFirst"===this.dataFormat?e[2]:e[1],n="channelsFirst"===this.dataFormat?e[3]:e[2];return 
t=yg(t,this.poolSize[0],this.padding,this.strides[0]),n=yg(n,this.poolSize[1],this.padding,this.strides[1]),"channelsFirst"===this.dataFormat?[e[0],e[1],t,n]:[e[0],t,n,e[3]]}call(e,t){return Cr((()=>(this.invokeCallHook(e,t),this.poolingFunction(mf(e),this.poolSize,this.strides,this.padding,this.dataFormat))))}getConfig(){const e={poolSize:this.poolSize,padding:this.padding,strides:this.strides,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}class Ty extends Sy{constructor(e){super(e)}poolingFunction(e,t,n,s,a){return cd(a),hd(s),vy(e,t,n,s,a,"max")}}Ty.className="MaxPooling2D",Nr(Ty);class Ey extends Sy{constructor(e){super(e)}poolingFunction(e,t,n,s,a){return cd(a),hd(s),vy(e,t,n,s,a,"avg")}}Ey.className="AveragePooling2D",Nr(Ey);class Ay extends Cf{constructor(e){if(null==e.poolSize&&(e.poolSize=[2,2,2]),super(e),this.poolSize=Array.isArray(e.poolSize)?e.poolSize:[e.poolSize,e.poolSize,e.poolSize],null==e.strides)this.strides=this.poolSize;else if(Array.isArray(e.strides)){if(3!==e.strides.length)throw new Cp(`If the strides property of a 3D pooling layer is an Array, it is expected to have a length of 3, but received length ${e.strides.length}.`);this.strides=e.strides}else this.strides=[e.strides,e.strides,e.strides];Hp(this.poolSize,"poolSize"),Hp(this.strides,"strides"),this.padding=null==e.padding?"valid":e.padding,this.dataFormat=null==e.dataFormat?"channelsLast":e.dataFormat,cd(this.dataFormat),hd(this.padding),this.inputSpec=[new kf({ndim:5})]}computeOutputShape(e){e=gf(e);let t="channelsFirst"===this.dataFormat?e[2]:e[1],n="channelsFirst"===this.dataFormat?e[3]:e[2],s="channelsFirst"===this.dataFormat?e[4]:e[3];return t=yg(t,this.poolSize[0],this.padding,this.strides[0]),n=yg(n,this.poolSize[1],this.padding,this.strides[1]),s=yg(s,this.poolSize[2],this.padding,this.strides[2]),"channelsFirst"===this.dataFormat?[e[0],e[1],t,n,s]:[e[0],t,n,s,e[4]]}call(e,t){return Cr((()=>(this.invokeCallHook(e,t),this.poolingFunction(mf(e),this.poolSize,this.strides,this.padding,this.dataFormat))))}getConfig(){const e={poolSize:this.poolSize,padding:this.padding,strides:this.strides,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}class Ry extends Ay{constructor(e){super(e)}poolingFunction(e,t,n,s,a){return cd(a),hd(s),Ny(e,t,n,s,a,"max")}}Ry.className="MaxPooling3D",Nr(Ry);class Fy extends Ay{constructor(e){super(e)}poolingFunction(e,t,n,s,a){return cd(a),hd(s),Ny(e,t,n,s,a,"avg")}}Fy.className="AveragePooling3D",Nr(Fy);class Dy extends Cf{constructor(e){super(e),this.inputSpec=[new kf({ndim:3})]}computeOutputShape(e){return[e[0],e[2]]}call(e,t){throw new Sp}}class _y extends Dy{constructor(e){super(e||{})}call(e,t){return Cr((()=>{const t=mf(e);return Gi(t,1)}))}}_y.className="GlobalAveragePooling1D",Nr(_y);class Oy extends Dy{constructor(e){super(e||{})}call(e,t){return Cr((()=>{const t=mf(e);return Oi(t,1)}))}}Oy.className="GlobalMaxPooling1D",Nr(Oy);class My extends Cf{constructor(e){super(e),this.dataFormat=null==e.dataFormat?"channelsLast":e.dataFormat,cd(this.dataFormat),this.inputSpec=[new kf({ndim:4})]}computeOutputShape(e){return e=e,"channelsLast"===this.dataFormat?[e[0],e[3]]:[e[0],e[1]]}call(e,t){throw new Sp}getConfig(){const e={dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}class Ly extends My{call(e,t){return Cr((()=>{const t=mf(e);return"channelsLast"===this.dataFormat?Gi(t,[1,2]):Gi(t,[2,3])}))}}Ly.className="GlobalAveragePooling2D",Nr(Ly);class zy extends My{call(e,t){return Cr((()=>{const 
t=mf(e);return"channelsLast"===this.dataFormat?Oi(t,[1,2]):Oi(t,[2,3])}))}}zy.className="GlobalMaxPooling2D",Nr(zy);class Py extends Cf{constructor(e){super(e),this.layer=e.layer}build(e){this.built=!0}get trainable(){return null!=this.layer&&this.layer.trainable}set trainable(e){null!=this.layer&&(this.layer.trainable=e)}get trainableWeights(){return this.layer.trainableWeights}get nonTrainableWeights(){return this.layer.nonTrainableWeights}get updates(){return this.layer._updates}get losses(){return this.layer.losses}getWeights(){return this.layer.getWeights()}setWeights(e){this.layer.setWeights(e)}getConfig(){const e={layer:{className:this.layer.getClassName(),config:this.layer.getConfig()}},t=super.getConfig();return Object.assign(e,t),e}setFastWeightInitDuringBuild(e){super.setFastWeightInitDuringBuild(e),null!=this.layer&&this.layer.setFastWeightInitDuringBuild(e)}static fromConfig(e,t,n={}){const s=Bf(t.layer,n);delete t.layer;const a={layer:s};return Object.assign(a,t),new e(a)}}class By extends Py{constructor(e){super(e),this.supportsMasking=!0}build(e){if((e=gf(e)).length<3)throw new Cp(`TimeDistributed layer expects an input shape >= 3D, but received input shape ${JSON.stringify(e)}`);this.inputSpec=[{shape:e}];const t=[e[0]].concat(e.slice(2));this.layer.built||(this.layer.build(t),this.layer.built=!0),super.build(e)}computeOutputShape(e){const t=[(e=gf(e))[0]].concat(e.slice(2)),n=this.layer.computeOutputShape(t),s=e[1];return[n[0],s].concat(n.slice(1))}call(e,t){return Cr((()=>_g(((e,n)=>[mf(this.layer.call(e,t)),[]]),e=mf(e),[],!1,null,null,!1,!0)[1]))}}By.className="TimeDistributed",Nr(By);class Wy extends Py{constructor(e){super(e);const t=e.layer.getConfig(),n={};n.className=e.layer.getClassName(),n.config=t,this.forwardLayer=Bf(n),t.goBackwards=!0!==t.goBackwards;const s={};var a;if(s.className=e.layer.getClassName(),s.config=t,this.backwardLayer=Bf(s),this.forwardLayer.name="forward_"+this.forwardLayer.name,this.backwardLayer.name="backward_"+this.backwardLayer.name,this.mergeMode=void 0===e.mergeMode?"concat":e.mergeMode,a=this.mergeMode,Up(ld,"BidirectionalMergeMode",a),e.weights)throw new Sp("weights support is not implemented for Bidirectional layer yet.");this._stateful=e.layer.stateful,this.returnSequences=e.layer.returnSequences,this.returnState=e.layer.returnState,this.supportsMasking=!0,this._trainable=!0,this.inputSpec=e.layer.inputSpec,this.numConstants=null}get trainable(){return this._trainable}set trainable(e){this._trainable=e,null!=this.forwardLayer&&(this.forwardLayer.trainable=e),null!=this.backwardLayer&&(this.backwardLayer.trainable=e)}getWeights(){return this.forwardLayer.getWeights().concat(this.backwardLayer.getWeights())}setWeights(e){const t=e.length,n=Math.floor(t/2);this.forwardLayer.setWeights(e.slice(0,n)),this.backwardLayer.setWeights(e.slice(n))}computeOutputShape(e){let t,n,s,a=this.forwardLayer.computeOutputShape(e);return Array.isArray(a)&&Array.isArray(a[0])||(a=[a]),a=a,this.returnState?(s=a.slice(1),t=a[0]):t=a[0],t=t,"concat"===this.mergeMode?(t[t.length-1]*=2,n=[t]):n=null==this.mergeMode?[t,t.slice()]:[t],this.returnState?null==this.mergeMode?n.concat(s).concat(s.slice()):[t].concat(s).concat(s.slice()):Fp(n)}apply(e,t){let n=null==t?null:t.initialState,s=null==t?null:t.constants;null==t&&(t={});const a=Dg(e,n,s,this.numConstants);if(e=a.inputs,n=a.initialState,s=a.constants,Array.isArray(e)&&(n=e.slice(1),e=e[0]),(null==n||0===n.length)&&null==s)return super.apply(e,t);const r=[],i=[];if(null!=n){const e=n.length;if(e%2>0)throw 
new Cp("When passing `initialState` to a Bidrectional RNN, the state should be an Array containing the states of the underlying RNNs.");t.initialState=n,r.push(...n);const s=n.map((e=>new kf({shape:e.shape})));this.forwardLayer.stateSpec=s.slice(0,e/2),this.backwardLayer.stateSpec=s.slice(e/2),i.push(...s)}if(null!=s)throw new Sp("Support for constants in Bidirectional layers is not implemented yet.");const o=r[0]instanceof vf;for(const e of r)if(e instanceof vf!==o)throw new Cp("The initial state of a Bidirectional layer cannot be specified as a mix of symbolic and non-symbolic tensors");if(o){const n=[e].concat(r),s=this.inputSpec.concat(i),a=this.inputSpec;this.inputSpec=s;const o=super.apply(n,t);return this.inputSpec=a,o}return super.apply(e,t)}call(e,t){return Cr((()=>{const n=t.initialState;let s,a,r,i;if(null==n)s=this.forwardLayer.call(e,t),a=this.backwardLayer.call(e,t);else{const r=n.slice(0,n.length/2),i=n.slice(n.length/2);s=this.forwardLayer.call(e,Object.assign(t,{initialState:r})),a=this.backwardLayer.call(e,Object.assign(t,{initialState:i}))}return this.returnState&&(Array.isArray(s)&&(r=s.slice(1).concat(a.slice(1))),s=s[0],a=a[0]),this.returnSequences&&(a=bo(a,1)),"concat"===this.mergeMode?i=Ed([s,a]):"sum"===this.mergeMode?i=Or(s,a):"ave"===this.mergeMode?i=Mi(.5,Or(s,a)):"mul"===this.mergeMode?i=Mi(s,a):null==this.mergeMode&&(i=[s,a]),this.returnState?null==this.mergeMode?i.concat(r):[i].concat(r):i}))}resetStates(e){this.forwardLayer.resetStates(),this.backwardLayer.resetStates()}build(e){fd(this.forwardLayer.name,(()=>{this.forwardLayer.build(e)})),fd(this.backwardLayer.name,(()=>{this.backwardLayer.build(e)})),this.built=!0}computeMask(e,t){let n;if(Array.isArray(t)&&(t=t[0]),n=this.returnSequences?null==this.mergeMode?[t,t]:t:null==this.mergeMode?[null,null]:null,this.returnState){const e=this.forwardLayer.states.map((e=>null));return Array.isArray(n)?n.concat(e).concat(e):[n].concat(e).concat(e)}return n}get trainableWeights(){return this.forwardLayer.trainableWeights.concat(this.backwardLayer.trainableWeights)}get nonTrainableWeights(){return this.forwardLayer.nonTrainableWeights.concat(this.backwardLayer.nonTrainableWeights)}setFastWeightInitDuringBuild(e){super.setFastWeightInitDuringBuild(e),null!=this.forwardLayer&&this.forwardLayer.setFastWeightInitDuringBuild(e),null!=this.backwardLayer&&this.backwardLayer.setFastWeightInitDuringBuild(e)}getConfig(){const e={mergeMode:this.mergeMode},t=super.getConfig();return Object.assign(e,t),e}static fromConfig(e,t){const n=Bf(t.layer);if(delete t.layer,null!=t.numConstants)throw new Sp("Deserialization of a Bidirectional layer with numConstants present is not supported yet.");const s=t;return s.layer=n,new 
e(s)}}Wy.className="Bidirectional",Nr(Wy),function(e){e[e.DT_INVALID=0]="DT_INVALID",e[e.DT_FLOAT=1]="DT_FLOAT",e[e.DT_DOUBLE=2]="DT_DOUBLE",e[e.DT_INT32=3]="DT_INT32",e[e.DT_UINT8=4]="DT_UINT8",e[e.DT_INT16=5]="DT_INT16",e[e.DT_INT8=6]="DT_INT8",e[e.DT_STRING=7]="DT_STRING",e[e.DT_COMPLEX64=8]="DT_COMPLEX64",e[e.DT_INT64=9]="DT_INT64",e[e.DT_BOOL=10]="DT_BOOL",e[e.DT_QINT8=11]="DT_QINT8",e[e.DT_QUINT8=12]="DT_QUINT8",e[e.DT_QINT32=13]="DT_QINT32",e[e.DT_BFLOAT16=14]="DT_BFLOAT16",e[e.DT_FLOAT_REF=101]="DT_FLOAT_REF",e[e.DT_DOUBLE_REF=102]="DT_DOUBLE_REF",e[e.DT_INT32_REF=103]="DT_INT32_REF",e[e.DT_UINT8_REF=104]="DT_UINT8_REF",e[e.DT_INT16_REF=105]="DT_INT16_REF",e[e.DT_INT8_REF=106]="DT_INT8_REF",e[e.DT_STRING_REF=107]="DT_STRING_REF",e[e.DT_COMPLEX64_REF=108]="DT_COMPLEX64_REF",e[e.DT_INT64_REF=109]="DT_INT64_REF",e[e.DT_BOOL_REF=110]="DT_BOOL_REF",e[e.DT_QINT8_REF=111]="DT_QINT8_REF",e[e.DT_QUINT8_REF=112]="DT_QUINT8_REF",e[e.DT_QINT32_REF=113]="DT_QINT32_REF",e[e.DT_BFLOAT16_REF=114]="DT_BFLOAT16_REF"}(Hg||(Hg={})),function(e){let t;!function(e){e[e.LEGACY=0]="LEGACY",e[e.V1=1]="V1",e[e.V2=2]="V2"}(t=e.CheckpointFormatVersion||(e.CheckpointFormatVersion={}))}(jg||(jg={}));const Vy={};function Uy(e){return Vy[e]}function Gy(e,t,n,s,a){const r=t.inputParams[e];if(r&&void 0!==r.inputIndexStart){const e=r.inputIndexStart,i=0===r.inputIndexEnd?void 0:void 0===r.inputIndexEnd?e+1:r.inputIndexEnd;if("tensor"===r.type)return Hy(t.inputNames[r.inputIndexStart],n,s,a);if("tensors"===r.type)return t.inputNames.slice(e,i).map((e=>Hy(e,n,s,a)));const o=Hy(t.inputNames.slice(e)[0],n,s,a),l=o.dataSync();return"number"===r.type?l[0]:ee(o.shape,l)}const i=t.attrParams[e];return i&&i.value}function Hy(e,t,n,s){const[a,r]=qy(e);if(null!=s){const e=s.getHashTableHandleByName(a);if(null!=e)return e}const i=n.currentContextIds.find((e=>!!t[Ky(a,e)]));return void 0!==i?t[Ky(a,i)][r]:void 0}function jy(e,t){const[n,s]=qy(e);return[Ky(n,t&&t.currentContextId),s]}function Ky(e,t){return t?`${e}-${t}`:e}function qy(e){const t=e.split(":");return 1===t.length?[e,0]:[t[0],Number(t[t.length-1])]}function Xy(e,t,n){let s=Gy("pad",e,t,n);if("explicit"===s){s=Gy("explicitPaddings",e,t,n);const a=[[0,0],[0,0],[0,0],[0,0]];for(let e=0;e<4;e++)a[e][0]=s[2*e],a[e][1]=s[2*e+1];return a}return s}function Yy(e){return e.kept?e:Ka(e)}const 
Jy=[{tfOpName:"Add",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"AddV2",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"AddN",category:"arithmetic",inputs:[{start:0,end:0,name:"tensors",type:"tensors"}]},{tfOpName:"BiasAdd",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0},{tfName:"data_format",name:"dataFormat",type:"string",notSupported:!0}]},{tfOpName:"Sub",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"RealDiv",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Div",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"DivNoNan",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"FloorDiv",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Mul",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Maximum",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Minimum",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Pow",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"SquaredDifference",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Mod",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"FloorMod",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]}],Zy=[{tfOpName:"Abs",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Acos",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Asin",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Atan",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Atan2",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"y",
type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Ceil",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"ClipByValue",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"clipValueMin",type:"number"},{start:2,name:"clipValueMax",type:"number"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Complex",category:"basic_math",inputs:[{start:0,name:"real",type:"tensor"},{start:1,name:"imag",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"ComplexAbs",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Cos",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Cosh",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Elu",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Exp",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Floor",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Log",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Imag",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0},{tfName:"Tout",name:"outputType",type:"dtype",notSupported:!0}]},{tfOpName:"Neg",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Real",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0},{tfName:"Tout",name:"outputType",type:"dtype",notSupported:!0}]},{tfOpName:"Prelu",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"alpha",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Relu",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Relu6",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Selu",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Sigmoid",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Sin",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Sinh",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Sqrt",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Rsqrt",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtyp
e",notSupported:!0}]},{tfOpName:"Square",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Tan",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Tanh",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Sign",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Round",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Expm1",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Log1p",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Reciprocal",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Softplus",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Asinh",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Acosh",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Atanh",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Erf",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Prod",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axes",type:"number[]"}],attrs:[{tfName:"keep_dims",name:"keepDims",type:"bool",notSupported:!0},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"LeakyRelu",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"alpha",name:"alpha",type:"number",defaultValue:.2},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]}],Qy=[{tfOpName:"EmptyTensorList",category:"control",inputs:[{start:0,name:"elementShape",type:"shape"},{start:1,name:"maxNumElements",type:"number"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"LoopCond",category:"control",inputs:[{start:0,name:"pred",type:"tensor"}]},{tfOpName:"Switch",category:"control",inputs:[{start:0,name:"data",type:"tensor"},{start:1,name:"pred",type:"tensor"}]},{tfOpName:"Merge",category:"control",inputs:[{start:0,end:0,name:"tensors",type:"tensors"}]},{tfOpName:"Enter",category:"control",inputs:[{start:0,name:"tensor",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0},{tfName:"frame_name",name:"frameName",type:"string"},{tfName:"is_constant",name:"isConstant",type:"bool"}]},{tfOpName:"Exit",category:"control",inputs:[{start:0,name:"tensor",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"NextIteration",category:"control",inputs:[{start:0,name:"tensor",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"TensorArrayV3",category:"control",inputs:[{start:0,name:"size",type:"number"}],attrs:[{tfN
ame:"dtype",name:"dtype",type:"dtype"},{tfName:"element_shape",name:"elementShape",type:"shape"},{tfName:"dynamic_size",name:"dynamicSize",type:"bool"},{tfName:"clear_after_read",name:"clearAfterRead",type:"bool"},{tfName:"identical_element_shapes",name:"identicalElementShapes",type:"bool"},{tfName:"tensor_array_name",name:"name",type:"string"}]},{tfOpName:"TensorArrayWriteV3",category:"control",inputs:[{start:0,name:"tensorArrayId",type:"tensor"},{start:1,name:"index",type:"number"},{start:2,name:"tensor",type:"tensor"},{start:3,name:"flowIn",type:"number"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"TensorArrayReadV3",category:"control",inputs:[{start:0,name:"tensorArrayId",type:"tensor"},{start:1,name:"index",type:"number"},{start:2,name:"flowIn",type:"number"}],attrs:[{tfName:"dtype",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"TensorArrayGatherV3",category:"control",inputs:[{start:0,name:"tensorArrayId",type:"tensor"},{start:1,name:"indices",type:"number[]"},{start:2,name:"flowIn",type:"number"}],attrs:[{tfName:"dtype",name:"dtype",type:"dtype"},{tfName:"element_shape",name:"elementShape",type:"shape"}]},{tfOpName:"TensorArrayScatterV3",category:"control",inputs:[{start:0,name:"tensorArrayId",type:"tensor"},{start:1,name:"indices",type:"number[]"},{start:2,name:"tensor",type:"tensor"},{start:3,name:"flowIn",type:"number"}],attrs:[{tfName:"T",name:"dtype",type:"dtype"}]},{tfOpName:"TensorArrayConcatV3",category:"control",inputs:[{start:0,name:"tensorArrayId",type:"tensor"},{start:1,name:"flowIn",type:"number"}],attrs:[{tfName:"dtype",name:"dtype",type:"dtype"},{tfName:"element_shape_except0",name:"elementShapeExcept0",type:"shape",notSupported:!0}]},{tfOpName:"TensorArraySplitV3",category:"control",inputs:[{start:0,name:"tensorArrayId",type:"tensor"},{start:1,name:"tensor",type:"tensor"},{start:2,name:"lengths",type:"number[]"},{start:3,name:"flowIn",type:"number"}],attrs:[{tfName:"T",name:"dtype",type:"dtype"}]},{tfOpName:"TensorArraySizeV3",category:"control",inputs:[{start:0,name:"tensorArrayId",type:"tensor"},{start:1,name:"flowIn",type:"number"}]},{tfOpName:"TensorArrayCloseV3",category:"control",inputs:[{start:0,name:"tensorArrayId",type:"tensor"}]},{tfOpName:"StatelessIf",category:"control",inputs:[{start:0,name:"cond",type:"tensor"},{start:1,end:0,name:"args",type:"tensors"}],attrs:[{tfName:"then_branch",name:"thenBranch",type:"func"},{tfName:"else_branch",name:"elseBranch",type:"func"}]},{tfOpName:"If",category:"control",inputs:[{start:0,name:"cond",type:"tensor"},{start:1,end:0,name:"args",type:"tensors"}],attrs:[{tfName:"then_branch",name:"thenBranch",type:"func"},{tfName:"else_branch",name:"elseBranch",type:"func"}]},{tfOpName:"StatelessWhile",category:"control",inputs:[{start:0,end:0,name:"args",type:"tensors"}],attrs:[{tfName:"cond",name:"cond",type:"func"},{tfName:"body",name:"body",type:"func"}]},{tfOpName:"While",category:"control",inputs:[{start:0,end:0,name:"args",type:"tensors"}],attrs:[{tfName:"cond",name:"cond",type:"func"},{tfName:"body",name:"body",type:"func"}]},{tfOpName:"TensorListScatter",category:"control",inputs:[{start:0,name:"tensor",type:"tensor"},{start:1,name:"indices",type:"number[]"},{start:2,name:"elementShape",type:"shape"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListScatterV2",category:"control",inputs:[{start:0,name:"tensor",type:"tensor"},{start:1,name:"indices",type:"number[]"},{start:2,name:"elementShape",type:"shape"},{start:3,name:"numElements",type:"numb
er"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListGather",category:"control",inputs:[{start:0,name:"tensorListId",type:"tensor"},{start:1,name:"indices",type:"number[]"},{start:2,name:"elementShape",type:"shape"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListGetItem",category:"control",inputs:[{start:0,name:"tensorListId",type:"tensor"},{start:1,name:"index",type:"number"},{start:2,name:"elementShape",type:"shape"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListSetItem",category:"control",inputs:[{start:0,name:"tensorListId",type:"tensor"},{start:1,name:"index",type:"number"},{start:2,name:"tensor",type:"tensor"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListReserve",category:"control",inputs:[{start:0,name:"elementShape",type:"shape"},{start:1,name:"numElements",type:"number"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListFromTensor",category:"control",inputs:[{start:0,name:"tensor",type:"tensor"},{start:1,name:"elementShape",type:"shape"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListStack",category:"control",inputs:[{start:0,name:"tensorListId",type:"tensor"},{start:1,name:"elementShape",type:"shape"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"},{tfName:"num_elements",name:"numElements",type:"dtype"}]},{tfOpName:"TensorListSplit",category:"control",inputs:[{start:0,name:"tensor",type:"tensor"},{start:1,name:"elementShape",type:"shape"},{start:2,name:"lengths",type:"number[]"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListConcat",category:"control",inputs:[{start:0,name:"tensorListId",type:"tensor"}],attrs:[{tfName:"element_shape",name:"elementShape",type:"shape"},{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListPopBack",category:"control",inputs:[{start:0,name:"tensorListId",type:"tensor"},{start:1,name:"elementShape",type:"shape"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListPushBack",category:"control",inputs:[{start:0,name:"tensorListId",type:"tensor"},{start:1,name:"tensor",type:"tensor"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]}],eb=[{tfOpName:"AvgPool",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"data_format",name:"dataFormat",type:"string",notSupported:!0},{tfName:"ksize",name:"kernelSize",type:"number[]"},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"MaxPool",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"data_format",name:"dataFormat",type:"string",notSupported:!0},{tfName:"ksize",name:"kernelSize",type:"number[]"},{tfName:"explicit_paddings",name:"explicitPaddings",type:"number[]",defaultValue:[],notSupported:!0},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"MaxPoolWithArgmax",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"ksize",name:"kernelSize",type:"number[]"},{tfName:"include_batch_in_index",name:"includeBatchInIndex",type:"bool"},{tfN
ame:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"AvgPool3D",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"data_format",name:"dataFormat",type:"string",notSupported:!0},{tfName:"ksize",name:"kernelSize",type:"number[]"},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"MaxPool3D",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"data_format",name:"dataFormat",type:"string",notSupported:!0},{tfName:"ksize",name:"kernelSize",type:"number[]"},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Conv1D",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"filter",type:"tensor"}],attrs:[{tfName:"stride",name:"stride",type:"number"},{tfName:"padding",name:"pad",type:"string"},{tfName:"data_format",name:"dataFormat",type:"string",defaultValue:"NWC"},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0},{tfName:"dilation",name:"dilation",type:"number",defaultValue:1}]},{tfOpName:"Conv2D",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"filter",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0},{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"useCudnnOnGpu",name:"useCudnnOnGpu",type:"bool"},{tfName:"data_format",name:"dataFormat",type:"string",defaultValue:"NHWC"},{tfName:"explicit_paddings",name:"explicitPaddings",type:"number[]",defaultValue:[]},{tfName:"dilations",name:"dilations",type:"number[]"}]},{tfOpName:"_FusedConv2D",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"filter",type:"tensor"},{start:2,end:0,name:"args",type:"tensors"}],attrs:[{tfName:"num_args",name:"numArgs",type:"number"},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0},{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"explicit_paddings",name:"explicitPaddings",type:"number[]",defaultValue:[]},{tfName:"use_cudnn_on_gpu",name:"useCudnnOnGpu",type:"bool",defaultValue:!0},{tfName:"data_format",name:"dataFormat",type:"string",defaultValue:"NHWC"},{tfName:"dilations",name:"dilations",type:"number[]",defaultValue:[1,1,1,1]},{tfName:"fused_ops",name:"fusedOps",type:"string[]",defaultValue:[]},{tfName:"epsilon",name:"epsilon",type:"number",defaultValue:1e-4},{tfName:"leakyrelu_alpha",name:"leakyreluAlpha",type:"number"}]},{tfOpName:"Conv2DBackpropInput",category:"convolution",inputs:[{start:2,name:"x",type:"tensor"},{start:1,name:"filter",type:"tensor"},{start:0,name:"outputShape",type:"number[]"}],attrs:[{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"data_format",name:"dataFormat",type:"string",notSupported:!0},{tfName:"explicit_paddings",name:"explicitPaddings",type:"number[]",defaultValue:[]},{tfName:"dilations",name:"dilations",type:"number[]",notSupported:!0}]},{tfOpName:"DepthwiseConv2d",category:"convolution",inputs:[{start:0,name:"input",type:"tensor"},{start:1,name:"filter",type:"tensor"}],attrs:[{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"data_format",name:"dataFormat",type:"string",defaultValue:"NHWC"},{tfName:"explicit_paddings",name:"explicitPaddings",type:"number[]",defaultValue:[]},{tfNam
e:"dilations",name:"dilations",type:"number[]"}]},{tfOpName:"DepthwiseConv2dNative",category:"convolution",inputs:[{start:0,name:"input",type:"tensor"},{start:1,name:"filter",type:"tensor"}],attrs:[{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"data_format",name:"dataFormat",type:"string",defaultValue:"NHWC"},{tfName:"explicit_paddings",name:"explicitPaddings",type:"number[]",defaultValue:[]},{tfName:"dilations",name:"dilations",type:"number[]"}]},{tfOpName:"FusedDepthwiseConv2dNative",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"filter",type:"tensor"},{start:2,end:0,name:"args",type:"tensors"}],attrs:[{tfName:"num_args",name:"numArgs",type:"number"},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0},{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"data_format",name:"dataFormat",type:"string",defaultValue:"NHWC"},{tfName:"dilations",name:"dilations",type:"number[]",defaultValue:[1,1,1,1]},{tfName:"fused_ops",name:"fusedOps",type:"string[]",defaultValue:[]},{tfName:"explicit_paddings",name:"explicitPaddings",type:"number[]",defaultValue:[]}]},{tfOpName:"Conv3D",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"filter",type:"tensor"}],attrs:[{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"data_format",name:"dataFormat",type:"string",defaultValue:"NHWC"},{tfName:"dilations",name:"dilations",type:"number[]"}]},{tfOpName:"Dilation2D",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"filter",type:"tensor"}],attrs:[{tfName:"strides",name:"strides",type:"number[]"},{tfName:"rates",name:"dilations",type:"number[]"},{tfName:"padding",name:"pad",type:"string"}]}],tb=[{tfOpName:"Fill",category:"creation",inputs:[{start:0,name:"shape",type:"number[]"},{start:1,name:"value",type:"number"}],attrs:[{tfName:"T",name:"dtype",type:"dtype"}]},{tfOpName:"LinSpace",category:"creation",inputs:[{start:0,name:"start",type:"number"},{start:1,name:"stop",type:"number"},{start:2,name:"num",type:"number"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"OneHot",category:"creation",inputs:[{start:0,name:"indices",type:"tensor"},{start:1,name:"depth",type:"number"},{start:2,name:"onValue",type:"number",defaultValue:1},{start:3,name:"offValue",type:"number",defaultValue:0}],attrs:[{tfName:"axis",name:"axis",type:"number",notSupported:!0},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Ones",category:"creation",inputs:[{start:0,name:"shape",type:"number[]"}],attrs:[{tfName:"T",name:"dtype",type:"dtype"}]},{tfOpName:"OnesLike",category:"creation",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"dtype",name:"dtype",type:"dtype"}]},{tfOpName:"RandomUniform",category:"creation",inputs:[{start:0,name:"shape",type:"number[]"}],attrs:[{tfName:"minval",name:"minval",type:"number",defaultValue:0},{tfName:"maxval",name:"maxval",type:"number",defaultValue:1},{tfName:"dtype",name:"dtype",type:"dtype"},{tfName:"seed",name:"seed",type:"number",defaultValue:0},{tfName:"seed2",name:"seed2",type:"number",defaultValue:0,notSupported:!0},{tfName:"T",name:"T",type:"number",notSupported:!0}]},{tfOpName:"Range",category:"creation",inputs:[{start:0,name:"start",type:"number"},{start:1,name:"stop",type:"number"},{start:2,name:"step",type:"number",defaultValue:0}],attrs:[{tfName:"Tidx",name:"dtype",type:"dtype"}]},{tfOpName:"TruncatedNo
rmal",category:"creation",inputs:[{start:0,name:"shape",type:"number[]"}],attrs:[{tfName:"means",name:"mean",type:"number",defaultValue:0},{tfName:"stddev",name:"stdDev",type:"number",defaultValue:1},{tfName:"seed",name:"seed",type:"number"},{tfName:"seed2",name:"seed2",type:"number",defaultValue:0,notSupported:!0},{tfName:"dtype",name:"dtype",type:"dtype"},{tfName:"T",name:"T",type:"number",notSupported:!0}]},{tfOpName:"Zeros",category:"creation",inputs:[{start:0,name:"shape",type:"number[]"}],attrs:[{tfName:"T",name:"dtype",type:"dtype"}]},{tfOpName:"ZerosLike",category:"creation",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype"}]},{tfOpName:"Multinomial",category:"creation",inputs:[{start:0,name:"logits",type:"tensor"},{start:1,name:"numSamples",type:"number"}],attrs:[{tfName:"seed",name:"seed",type:"number"},{tfName:"seed2",name:"seed2",type:"number"},{tfName:"T",name:"dtype",type:"dtype"},{tfName:"output_dtype",name:"output_dtype",type:"dtype"}]}],nb=[{tfOpName:"NonMaxSuppressionV2",category:"dynamic",inputs:[{start:0,name:"boxes",type:"tensor"},{start:1,name:"scores",type:"tensor"},{start:2,name:"maxOutputSize",type:"number"},{start:3,name:"iouThreshold",type:"number"}]},{tfOpName:"NonMaxSuppressionV3",category:"dynamic",inputs:[{start:0,name:"boxes",type:"tensor"},{start:1,name:"scores",type:"tensor"},{start:2,name:"maxOutputSize",type:"number"},{start:3,name:"iouThreshold",type:"number"},{start:4,name:"scoreThreshold",type:"number"}]},{tfOpName:"NonMaxSuppressionV4",category:"dynamic",inputs:[{start:0,name:"boxes",type:"tensor"},{start:1,name:"scores",type:"tensor"},{start:2,name:"maxOutputSize",type:"number"},{start:3,name:"iouThreshold",type:"number"},{start:4,name:"scoreThreshold",type:"number"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0},{tfName:"T_threshold",name:"threshold",type:"dtype",notSupported:!0},{tfName:"pad_to_max_output_size",name:"padToMaxOutputSize",type:"bool"}]},{tfOpName:"NonMaxSuppressionV5",category:"dynamic",inputs:[{start:0,name:"boxes",type:"tensor"},{start:1,name:"scores",type:"tensor"},{start:2,name:"maxOutputSize",type:"number"},{start:3,name:"iouThreshold",type:"number"},{start:4,name:"scoreThreshold",type:"number"},{start:5,name:"softNmsSigma",type:"number"}]},{tfOpName:"Where",category:"dynamic",inputs:[{start:0,name:"condition",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"ListDiff",category:"dynamic",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"y",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]}],sb=[{tfOpName:"TopKV2",category:"evaluation",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"k",type:"number"}],attrs:[{tfName:"sorted",name:"sorted",type:"bool"}]},{tfOpName:"Unique",category:"evaluation",inputs:[{start:0,name:"x",type:"tensor"}]},{tfOpName:"UniqueV2",category:"evaluation",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number"}]}],ab=[{tfOpName:"PlaceholderWithDefault",category:"graph",inputs:[{start:0,name:"default",type:"tensor"}],attrs:[{tfName:"shape",name:"shape",type:"shape"},{tfName:"dtype",name:"dtype",type:"dtype"}]},{tfOpName:"Placeholder",category:"graph",attrs:[{tfName:"shape",name:"shape",type:"shape"},{tfName:"dtype",name:"dtype",type:"dtype"}]},{tfOpName:"Const",category:"graph"},{tfOpName:"Identity",category:"graph",inputs:[{start:0,name:"x",type:"tensor"}]},{tfOpName:"IdentityN",category:"graph",inputs:[{start:0,end:0,name:"x",type:"tensors"}]},{tfOpNam
e:"Snapshot",category:"graph",inputs:[{start:0,name:"x",type:"tensor"}]},{tfOpName:"Rank",category:"graph",inputs:[{start:0,name:"x",type:"tensor"}]},{tfOpName:"Size",category:"graph",inputs:[{start:0,name:"x",type:"tensor"}]},{tfOpName:"Shape",category:"graph",inputs:[{start:0,name:"x",type:"tensor"}]},{tfOpName:"ShapeN",category:"graph",inputs:[{start:0,end:0,name:"x",type:"tensors"}]},{tfOpName:"Print",category:"graph",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"data",type:"tensors"}],attrs:[{tfName:"message",name:"message",type:"string"},{tfName:"first_n",name:"firstN",type:"number",notSupported:!0},{tfName:"summarize",name:"summarize",type:"number",defaultValue:3}]},{tfOpName:"NoOp",category:"graph",inputs:[]},{tfOpName:"StopGradient",category:"graph",inputs:[{start:0,name:"x",type:"tensor"}]},{tfOpName:"FakeQuantWithMinMaxVars",category:"graph",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"min",name:"min",type:"number"},{tfName:"max",name:"max",type:"number"}]}],rb=[{tfOpName:"HashTable",category:"hash_table",inputs:[],attrs:[{tfName:"shared_name",name:"sharedName",type:"string"},{tfName:"use_node_name_sharing",name:"useNodeNameSharing",type:"bool"},{tfName:"key_dtype",name:"keyDType",type:"dtype"},{tfName:"value_dtype",name:"valueDType",type:"dtype"}]},{tfOpName:"HashTableV2",category:"hash_table",inputs:[],attrs:[{tfName:"shared_name",name:"sharedName",type:"string"},{tfName:"use_node_name_sharing",name:"useNodeNameSharing",type:"bool"},{tfName:"key_dtype",name:"keyDType",type:"dtype"},{tfName:"value_dtype",name:"valueDType",type:"dtype"}]},{tfOpName:"LookupTableImport",category:"hash_table",inputs:[{start:0,name:"tableHandle",type:"tensor"},{start:1,name:"keys",type:"tensor"},{start:2,name:"values",type:"tensor"}],attrs:[{tfName:"Tin",name:"tIn",type:"dtype",notSupported:!0},{tfName:"Tout",name:"tOut",type:"dtype",notSupported:!0}]},{tfOpName:"LookupTableImportV2",category:"hash_table",inputs:[{start:0,name:"tableHandle",type:"tensor"},{start:1,name:"keys",type:"tensor"},{start:2,name:"values",type:"tensor"}],attrs:[{tfName:"Tin",name:"tIn",type:"dtype",notSupported:!0},{tfName:"Tout",name:"tOut",type:"dtype",notSupported:!0}]},{tfOpName:"LookupTableFind",category:"hash_table",inputs:[{start:0,name:"tableHandle",type:"tensor"},{start:1,name:"keys",type:"tensor"},{start:2,name:"defaultValue",type:"tensor"}],attrs:[{tfName:"Tin",name:"tIn",type:"dtype",notSupported:!0},{tfName:"Tout",name:"tOut",type:"dtype",notSupported:!0}]},{tfOpName:"LookupTableFindV2",category:"hash_table",inputs:[{start:0,name:"tableHandle",type:"tensor"},{start:1,name:"keys",type:"tensor"},{start:2,name:"defaultValue",type:"tensor"}],attrs:[{tfName:"Tin",name:"tIn",type:"dtype",notSupported:!0},{tfName:"Tout",name:"tOut",type:"dtype",notSupported:!0}]},{tfOpName:"LookupTableSize",category:"hash_table",inputs:[{start:0,name:"tableHandle",type:"tensor"}]},{tfOpName:"LookupTableSizeV2",category:"hash_table",inputs:[{start:0,name:"tableHandle",type:"tensor"}]}],ib=[{tfOpName:"ResizeBilinear",category:"image",inputs:[{start:0,name:"images",type:"tensor"},{start:1,name:"size",type:"number[]"}],attrs:[{tfName:"align_corners",name:"alignCorners",type:"bool"},{tfName:"half_pixel_centers",name:"halfPixelCenters",type:"bool"},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"ResizeNearestNeighbor",category:"image",inputs:[{start:0,name:"images",type:"tensor"},{start:1,name:"size",type:"number[]"}],attrs:[{tfName:"align_corners",name:"alignCorners",type:"bool"},{tfName:"half_pixel
_centers",name:"halfPixelCenters",type:"bool"},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"CropAndResize",category:"image",inputs:[{start:0,name:"image",type:"tensor"},{start:1,name:"boxes",type:"tensor"},{start:2,name:"boxInd",type:"tensor"},{start:3,name:"cropSize",type:"number[]"}],attrs:[{tfName:"method",name:"method",type:"string"},{tfName:"extrapolation_value",name:"extrapolationValue",type:"number"}]}],ob=[{tfOpName:"Equal",category:"logical",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"NotEqual",category:"logical",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Greater",category:"logical",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"GreaterEqual",category:"logical",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Less",category:"logical",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"LessEqual",category:"logical",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"LogicalAnd",category:"logical",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"LogicalNot",category:"logical",inputs:[{start:0,name:"a",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"LogicalOr",category:"logical",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Select",category:"logical",inputs:[{start:0,name:"condition",type:"tensor"},{start:1,name:"a",type:"tensor"},{start:2,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"SelectV2",category:"logical",inputs:[{start:0,name:"condition",type:"tensor"},{start:1,name:"a",type:"tensor"},{start:2,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]}],lb=[{tfOpName:"_FusedMatMul",category:"matrices",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"},{start:2,end:0,name:"args",type:"tensors"}],attrs:[{tfName:"num_args",name:"numArgs",type:"number"},{tfName:"fused_ops",name:"fusedOps",type:"string[]",defaultValue:[]},{tfName:"epsilon",name:"epsilon",type:"number",defaultValue:1e-4},{tfName:"transpose_a",name:"transposeA",type:"bool",defaultValue:!1},{tfName:"transpose_b",name:"transposeB",type:"bool",defaultValue:!1},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"MatMul",category:"matrices",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"transpose_a",name:"transposeA",type:"bool",defaultValue:!1},{tfName:"transpose_b",name:"transposeB",type:"bool",defaultValue:!1},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"BatchMatMul",category:"matrices",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"adj_x",name:"transposeA",type:"bool",defaultValue:!1},{tfName:"adj_y",name:"transposeB",typ
e:"bool",defaultValue:!1},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"BatchMatMulV2",category:"matrices",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"adj_x",name:"transposeA",type:"bool",defaultValue:!1},{tfName:"adj_y",name:"transposeB",type:"bool",defaultValue:!1},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Transpose",category:"matrices",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"perm",type:"number[]"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]}],ub=[{tfOpName:"FusedBatchNorm",category:"normalization",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"scale",type:"tensor"},{start:2,name:"offset",type:"tensor"},{start:3,name:"mean",type:"tensor"},{start:4,name:"variance",type:"tensor"}],attrs:[{tfName:"epsilon",name:"epsilon",type:"number",defaultValue:.001},{tfName:"data_format",name:"dataFormat",type:"string",notSupported:!0}]},{tfOpName:"FusedBatchNormV2",category:"normalization",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"scale",type:"tensor"},{start:2,name:"offset",type:"tensor"},{start:3,name:"mean",type:"tensor"},{start:4,name:"variance",type:"tensor"}],attrs:[{tfName:"epsilon",name:"epsilon",type:"number",defaultValue:.001},{tfName:"data_format",name:"dataFormat",type:"string",notSupported:!0}]},{tfOpName:"FusedBatchNormV3",category:"normalization",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"scale",type:"tensor"},{start:2,name:"offset",type:"tensor"},{start:3,name:"mean",type:"tensor"},{start:4,name:"variance",type:"tensor"}],attrs:[{tfName:"epsilon",name:"epsilon",type:"number",defaultValue:.001},{tfName:"data_format",name:"dataFormat",type:"string",notSupported:!0}]},{tfOpName:"LRN",category:"normalization",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"depth_radius",name:"radius",type:"number",defaultValue:5},{tfName:"bias",name:"bias",type:"number",defaultValue:1},{tfName:"alpha",name:"alpha",type:"number",defaultValue:1},{tfName:"beta",name:"beta",type:"number",defaultValue:.5}]},{tfOpName:"Softmax",category:"normalization",inputs:[{start:0,name:"x",type:"tensor"}]},{tfOpName:"LogSoftmax",category:"normalization",inputs:[{start:0,name:"x",type:"tensor"}]},{tfOpName:"SparseToDense",category:"normalization",inputs:[{start:0,name:"sparseIndices",type:"tensor"},{start:1,name:"outputShape",type:"number[]"},{start:2,name:"sparseValues",type:"tensor"},{start:3,name:"defaultValue",type:"tensor"}],attrs:[{tfName:"validate_indices",name:"validateIndices",type:"bool",defaultValue:!0,notSupported:!0}]}],cb=[{tfOpName:"Bincount",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"size",type:"number"},{start:2,name:"weights",type:"tensor"}]},{tfOpName:"DenseBincount",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"size",type:"number"},{start:2,name:"weights",type:"tensor"}],attrs:[{tfName:"binary_output",name:"binaryOutput",type:"bool"}]},{tfOpName:"Max",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number[]"}],attrs:[{tfName:"keep_dims",name:"keepDims",type:"bool"}]},{tfOpName:"Mean",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number[]"}],attrs:[{tfName:"keep_dims",name:"keepDims",type:"bool"}]},{tfOpName:"Min",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number[]"}],attrs:[{tfName:"keep_dims",name:"keepDims",type:"bool"}]},{tfOpName:"Sum",ca
tegory:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number[]"}],attrs:[{tfName:"keep_dims",name:"keepDims",type:"bool"}]},{tfOpName:"All",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number[]"}],attrs:[{tfName:"keep_dims",name:"keepDims",type:"bool"}]},{tfOpName:"Any",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number[]"}],attrs:[{tfName:"keep_dims",name:"keepDims",type:"bool"}]},{tfOpName:"ArgMax",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number"}]},{tfOpName:"ArgMin",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number"}]},{tfOpName:"Prod",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number[]"}],attrs:[{tfName:"keep_dims",name:"keepDims",type:"bool"}]},{tfOpName:"Cumsum",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number"}],attrs:[{tfName:"exclusive",name:"exclusive",type:"bool"},{tfName:"reverse",name:"reverse",type:"bool"}]}],hb=[{tfOpName:"ConcatV2",category:"slice_join",inputs:[{start:0,end:-1,name:"tensors",type:"tensors"},{start:-1,name:"axis",type:"number"}],attrs:[{tfName:"N",name:"n",type:"number",defaultValue:2}]},{tfOpName:"Concat",category:"slice_join",inputs:[{start:1,end:0,name:"tensors",type:"tensors"},{start:0,name:"axis",type:"number"}],attrs:[{tfName:"N",name:"n",type:"number",defaultValue:2}]},{tfOpName:"GatherV2",category:"slice_join",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"indices",type:"tensor"},{start:2,name:"axis",type:"number",defaultValue:0}],attrs:[{tfName:"batch_dims",name:"batchDims",type:"number",defaultValue:0}]},{tfOpName:"Gather",category:"slice_join",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"indices",type:"tensor"}],attrs:[{tfName:"validate_indices",name:"validateIndices",type:"bool",notSupported:!0}]},{tfOpName:"Reverse",category:"slice_join",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"dims",type:"bool[]"}]},{tfOpName:"ReverseV2",category:"slice_join",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number[]"}]},{tfOpName:"Slice",category:"slice_join",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"begin",type:"number[]"},{start:2,name:"size",type:"number[]"}]},{tfOpName:"StridedSlice",category:"slice_join",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"begin",type:"number[]"},{start:2,name:"end",type:"number[]"},{start:3,name:"strides",type:"number[]"}],attrs:[{tfName:"begin_mask",name:"beginMask",type:"number",defaultValue:0},{tfName:"end_mask",name:"endMask",type:"number",defaultValue:0},{tfName:"new_axis_mask",name:"newAxisMask",type:"number",defaultValue:0},{tfName:"ellipsis_mask",name:"ellipsisMask",type:"number",defaultValue:0},{tfName:"shrink_axis_mask",name:"shrinkAxisMask",type:"number",defaultValue:0}]},{tfOpName:"Pack",category:"slice_join",inputs:[{start:0,end:0,name:"tensors",type:"tensors"}],attrs:[{tfName:"axis",name:"axis",type:"number",defaultValue:0}]},{tfOpName:"Unpack",category:"slice_join",inputs:[{start:0,name:"tensor",type:"tensor"}],attrs:[{tfName:"axis",name:"axis",type:"number",defaultValue:0},{tfName:"num",name:"num",type:"number",defaultValue:0,notSupported:!0}]},{tfOpName:"Tile",category:"slice_join",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"reps",type:"number[]"}]},{tfOpName:"Split",category:"slice_join",inputs:[{start:0,name:"axis",typ
e:"number",defaultValue:0},{start:1,name:"x",type:"tensor"}],attrs:[{tfName:"num_split",name:"numOrSizeSplits",type:"number",defaultValue:1}]},{tfOpName:"SplitV",category:"slice_join",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"numOrSizeSplits",type:"number[]"},{start:2,name:"axis",type:"number",defaultValue:0}]},{tfOpName:"ScatterNd",category:"slice_join",inputs:[{start:0,name:"indices",type:"tensor"},{start:1,name:"values",type:"tensor"},{start:2,name:"shape",type:"number[]"}]},{tfOpName:"GatherNd",category:"slice_join",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"indices",type:"tensor"}]},{tfOpName:"SparseToDense",category:"slice_join",inputs:[{start:0,name:"sparseIndices",type:"tensor"},{start:1,name:"outputShape",type:"number[]"},{start:2,name:"sparseValues",type:"tensor"},{start:3,name:"defaultValue",type:"tensor"}],attrs:[{tfName:"validate_indices",name:"validateIndices",type:"bool",defaultValue:!1,notSupported:!0}]}],pb=[{tfOpName:"FFT",category:"spectral",inputs:[{start:0,name:"x",type:"tensor"}]},{tfOpName:"IFFT",category:"spectral",inputs:[{start:0,name:"x",type:"tensor"}]},{tfOpName:"RFFT",category:"spectral",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"fft_length",type:"number",notSupported:!0}]},{tfOpName:"IRFFT",category:"spectral",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"fft_length",type:"number",notSupported:!0}]}],db=[{tfOpName:"Cast",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"SrcT",name:"sdtype",type:"dtype",notSupported:!0},{tfName:"DstT",name:"dtype",type:"dtype"}]},{tfOpName:"ExpandDims",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number"}]},{tfOpName:"MirrorPad",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"padding",type:"number[]"}],attrs:[{tfName:"mode",name:"mode",type:"string"}]},{tfOpName:"Pad",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"padding",type:"number[]"}],attrs:[{tfName:"constant_value",name:"constantValue",type:"number",defaultValue:0}]},{tfOpName:"PadV2",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"padding",type:"number[]"},{start:2,name:"constantValue",type:"number",defaultValue:0}]},{tfOpName:"Reshape",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"shape",type:"number[]"}]},{tfOpName:"Squeeze",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"axis",tfDeprecatedName:"squeeze_dims",name:"axis",type:"number[]"}]},{tfOpName:"SpaceToBatchND",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"blockShape",type:"number[]"},{start:2,name:"paddings",type:"number[]"}]},{tfOpName:"BatchToSpaceND",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"blockShape",type:"number[]"},{start:2,name:"crops",type:"number[]"}]},{tfOpName:"DepthToSpace",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"block_size",name:"blockSize",type:"number"},{tfName:"data_format",name:"dataFormat",type:"string"}]},{tfOpName:"BroadcastTo",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"shape",type:"number[]"}],attrs:[]}];class fb{static get Instance(){return this._instance||(this._instance=new this)}constructor(){const 
e=[].concat(...[r,i,o,l,u,c,h,m,f,p,g,y,b,x,w,k,d].map((e=>e.json)));this.opMappers=e.reduce(((e,t)=>(e[t.tfOpName]=t,e)),{})}transformGraph(e,t={}){const n=e.node,s=[],a=[],r=[],i=n.reduce(((e,t)=>(e[t.name]=this.mapNode(t),t.op.startsWith("Placeholder")?s.push(e[t.name]):"Const"===t.op?a.push(e[t.name]):null!=t.input&&0!==t.input.length||r.push(e[t.name]),e)),{});let o=[];const l=[];let u={},c={};null!=t&&(u=this.mapSignatureEntries(t.inputs),c=this.mapSignatureEntries(t.outputs));const h=Object.keys(i);h.forEach((e=>{const t=i[e];t.inputNames.forEach((e=>{const[n]=jy(e);t.inputs.push(i[n]),i[n].children.push(t)}))})),0===Object.keys(c).length?h.forEach((e=>{const t=i[e];0===t.children.length&&l.push(t)})):Object.keys(c).forEach((e=>{const[t]=jy(e),n=i[t];null!=n&&(n.signatureKey=c[e],l.push(n))})),Object.keys(u).length>0?Object.keys(u).forEach((e=>{const[t]=jy(e),n=i[t];n&&(n.signatureKey=u[e],o.push(n))})):o=s;let p={};null!=e.library&&null!=e.library.function&&(p=e.library.function.reduce(((e,t)=>(e[t.signature.name]=this.mapFunction(t),e)),{}));const d={nodes:i,inputs:o,outputs:l,weights:a,placeholders:s,signature:t,functions:p};return r.length>0&&(d.initNodes=r),d}mapSignatureEntries(e){return Object.keys(e||{}).reduce(((t,n)=>(t[e[n].name]=n,t)),{})}mapNode(e){const t=Uy(e.op)||this.opMappers[e.op]||{};null==e.attr&&(e.attr={});const n={name:e.name,op:e.op,category:t.category,inputNames:(e.input||[]).map((e=>e.startsWith("^")?e.substr(1):e)),inputs:[],children:[],inputParams:{},attrParams:{},rawAttrs:e.attr};return null!=t.inputs&&(n.inputParams=t.inputs.reduce(((e,t)=>(e[t.name]={type:t.type,inputIndexStart:t.start,inputIndexEnd:t.end},e)),{})),null!=t.attrs&&(n.attrParams=t.attrs.reduce(((t,n)=>{const s=n.type;let a;switch(n.type){case"string":a=gb(e.attr,n.tfName,n.defaultValue),void 0===a&&n.tfDeprecatedName&&(a=gb(e.attr,n.tfDeprecatedName,n.defaultValue));break;case"string[]":a=Cb(e.attr,n.tfName,n.defaultValue),void 0===a&&n.tfDeprecatedName&&(a=Cb(e.attr,n.tfDeprecatedName,n.defaultValue));break;case"number":a=bb(e.attr,n.tfName,n.defaultValue||0),void 0===a&&n.tfDeprecatedName&&(a=bb(e.attr,n.tfDeprecatedName,n.defaultValue));break;case"number[]":a=$b(e.attr,n.tfName,n.defaultValue),void 0===a&&n.tfDeprecatedName&&(a=$b(e.attr,n.tfDeprecatedName,n.defaultValue));break;case"bool":a=yb(e.attr,n.tfName,n.defaultValue),void 0===a&&n.tfDeprecatedName&&(a=yb(e.attr,n.tfDeprecatedName,n.defaultValue));break;case"bool[]":a=Tb(e.attr,n.tfName,n.defaultValue),void 0===a&&n.tfDeprecatedName&&(a=Tb(e.attr,n.tfDeprecatedName,n.defaultValue));break;case"shape":a=Ib(e.attr,n.tfName,n.defaultValue),void 0===a&&n.tfDeprecatedName&&(a=Ib(e.attr,n.tfDeprecatedName,n.defaultValue));break;case"shape[]":a=Sb(e.attr,n.tfName,n.defaultValue),void 0===a&&n.tfDeprecatedName&&(a=Sb(e.attr,n.tfDeprecatedName,n.defaultValue));break;case"dtype":a=kb(e.attr,n.tfName,n.defaultValue),void 0===a&&n.tfDeprecatedName&&(a=kb(e.attr,n.tfDeprecatedName,n.defaultValue));break;case"dtype[]":a=vb(e.attr,n.tfName,n.defaultValue),void 0===a&&n.tfDeprecatedName&&(a=vb(e.attr,n.tfDeprecatedName,n.defaultValue));break;case"func":a=wb(e.attr,n.tfName,n.defaultValue),void 0===a&&n.tfDeprecatedName&&(a=wb(e.attr,n.tfDeprecatedName,n.defaultValue));break;case"tensor":case"tensors":break;default:throw new Error(`Unsupported param type: ${n.type} for op: ${e.op}`)}return t[n.name]={value:a,type:s},t}),{})),n}mapFunction(e){const t=e.nodeDef,n=[];let 
s={};null!=t&&(s=t.reduce(((e,t)=>(e[t.name]=this.mapNode(t),"Const"===t.op&&n.push(e[t.name]),e)),{}));const a=[],r=[];e.signature.inputArg.forEach((e=>{const[t]=jy(e.name),n={name:t,op:"Placeholder",inputs:[],inputNames:[],category:"graph",inputParams:{},attrParams:{dtype:{value:xb(e.type),type:"dtype"}},children:[]};n.signatureKey=e.name,a.push(n),s[t]=n})),Object.keys(s).forEach((e=>{const t=s[e];t.inputNames.forEach((e=>{const[n]=jy(e);t.inputs.push(s[n]),s[n].children.push(t)}))}));const i=e.ret;e.signature.outputArg.forEach((e=>{const[t,n]=jy(i[e.name]),a=s[t];null!=a&&(a.defaultOutput=n,r.push(a))}));const o=this.mapArgsToSignature(e);return{nodes:s,inputs:a,outputs:r,weights:n,placeholders:[],signature:o}}mapArgsToSignature(e){return{methodName:e.signature.name,inputs:e.signature.inputArg.reduce(((e,t)=>(e[t.name]=this.mapArgToTensorInfo(t),e)),{}),outputs:e.signature.outputArg.reduce(((t,n)=>(t[n.name]=this.mapArgToTensorInfo(n,e.ret),t)),{})}}mapArgToTensorInfo(e,t){let n=e.name;return null!=t&&(n=t[n]),{name:n,dtype:e.type}}}function mb(e,t){const n=Array.isArray(e)?String.fromCharCode.apply(null,e):function(e){const t=ue().global;if(void 0!==t.atob)return t.atob(e);if("undefined"!=typeof Buffer)return new Buffer(e,"base64").toString();throw new Error("Unable to decode base64 in this environment. Missing built-in atob() or Buffer()")}(e);return t?n:n.toLowerCase()}function gb(e,t,n,s=!1){const a=e[t];return null!=a?mb(a.s,s):n}function yb(e,t,n){const s=e[t];return s?s.b:n}function bb(e,t,n){const s=e[t]||{},a=null!=s.i?s.i:null!=s.f?s.f:n;return"number"==typeof a?a:parseInt(a,10)}function xb(e){switch("string"==typeof e&&(e=Hg[e]),e){case Hg.DT_FLOAT:return"float32";case Hg.DT_INT32:case Hg.DT_INT64:case Hg.DT_INT8:case Hg.DT_UINT8:return"int32";case Hg.DT_BOOL:return"bool";case Hg.DT_DOUBLE:return"float32";case Hg.DT_STRING:return"string";default:return null}}function wb(e,t,n){const s=e[t];return s&&s.func?s.func.name:n}function kb(e,t,n){const s=e[t];return s&&s.type?xb(s.type):n}function vb(e,t,n){const s=e[t];return s&&s.list&&s.list.type?s.list.type.map((e=>xb(e))):n}function Nb(e){if(!e.unknownRank)return null!=e.dim?e.dim.map((e=>"number"==typeof e.size?e.size:parseInt(e.size,10))):[]}function Ib(e,t,n){const s=e[t];return s&&s.shape?Nb(s.shape):n}function $b(e,t,n){const s=e[t];return s?((s.list.f&&s.list.f.length?s.list.f:s.list.i)||[]).map((e=>"number"==typeof e?e:parseInt(e,10))):n}function Cb(e,t,n,s=!1){const a=e[t];return a&&a.list&&a.list.s?a.list.s.map((e=>mb(e,s))):n}function Sb(e,t,n){const s=e[t];return s&&s.list&&s.list.shape?s.list.shape.map((e=>Nb(e))):n}function Tb(e,t,n){const s=e[t];return s&&s.list&&s.list.b?s.list.b:n}class Eb{constructor(e,t,n){this.node=e,this.tensorMap=t,this.context=n,this.inputs=[],this.attrs={},this.inputs=e.inputNames.map((e=>this.getInput(e))),null!=e.rawAttrs&&(this.attrs=Object.keys(e.rawAttrs).reduce(((e,t)=>(e[t]=this.getAttr(t),e)),{}))}getInput(e){return Hy(e,this.tensorMap,this.context)}getAttr(e,t){const n=this.node.rawAttrs[e];if(null!=n.tensor)return Hy(e,this.tensorMap,this.context);if(null!=n.i||null!=n.f)return bb(this.node.rawAttrs,e,t);if(null!=n.s)return gb(this.node.rawAttrs,e,t);if(null!=n.b)return yb(this.node.rawAttrs,e,t);if(null!=n.shape)return Ib(this.node.rawAttrs,e,t);if(null!=n.type)return kb(this.node.rawAttrs,e,t);if(null!=n.list){if(null!=n.list.i||null!=n.list.f)return $b(this.node.rawAttrs,e,t);if(null!=n.list.s)return Cb(this.node.rawAttrs,e,t);if(null!=n.list.shape)return 
Sb(this.node.rawAttrs,e,t);if(null!=n.list.b)return Tb(this.node.rawAttrs,e,t);if(null!=n.list.type)return vb(this.node.rawAttrs,e,t)}return t}}const Ab=la({addN_:function(e){E(Array.isArray(e),(()=>"The argument passed to tf.addN() must be a list of tensors")),E(e.length>=1,(()=>`Must pass at least one tensor to tf.addN(), but got ${e.length}`));const t=e.map(((e,t)=>ia(e,`tensors${t}`,"addN"))),n=t[0];t.forEach((e=>{if(e.dtype!==n.dtype)throw new Error("All tensors passed to tf.addN() must have the same dtype")})),t.forEach((e=>{if(!_(e.shape,n.shape))throw new Error("All tensors passed to tf.addN() must have the same shape")}));const s=t;return Zs.runKernel(be,s)}});function Rb(e,t,n=""){if("number"!=typeof e&&"number"!=typeof t){E(e.length===t.length,(()=>n+` Shapes ${e} and ${t} must match`));for(let s=0;s<e.length;s++){const a=e[s],r=t[s];E(a<0||r<0||a===r,(()=>n+` Shapes ${e} and ${t} must match`))}}}function Fb(e){return"number"!=typeof e&&!e.some((e=>e<0))}function Db(e,t,n){let s=_b(e,n);const a=!Fb(s);if(a&&0===t.length)throw new Error(`Tried to calculate elements of an empty list with non-fully-defined elementShape: ${s}`);if(a&&t.forEach((e=>{s=_b(e.shape,s)})),!Fb(s))throw new Error(`Non-fully-defined elementShape: ${s}`);return s}function _b(e,t){if("number"==typeof e)return t;if("number"==typeof t)return e;if(e.length!==t.length)throw new Error(`Incompatible ranks during merge: ${e} vs. ${t}`);const n=[];for(let s=0;s<e.length;++s){const a=e[s],r=t[s];if(a>=0&&r>=0&&a!==r)throw new Error(`Incompatible shape during merge: ${e} vs. ${t}`);n[s]=a>=0?a:r}return n}class Ob{constructor(e,t,n,s,a,r,i){this.name=e,this.dtype=t,this.maxSize=n,this.elementShape=s,this.identicalElementShapes=a,this.dynamicSize=r,this.clearAfterRead=i,this.tensors=[],this.closed_=!1,this.idTensor=Fr(0),Tr(this.idTensor)}get id(){return this.idTensor.id}get closed(){return this.closed_}clearAndClose(e){this.tensors.forEach((t=>{null!=e&&e.has(t.tensor.id)||t.tensor.dispose()})),this.tensors=[],this.closed_=!0,this.idTensor.dispose()}size(){return this.tensors.length}read(e){if(this.closed_)throw new Error(`TensorArray ${this.name} has already been closed.`);if(e<0||e>=this.size())throw new Error(`Tried to read from index ${e}, but array size is: ${this.size()}`);const t=this.tensors[e];if(t.cleared)throw new Error(`TensorArray ${this.name}: Could not read index ${e} twice because it was cleared after a previous read (perhaps try setting clear_after_read = false?).`);return this.clearAfterRead&&(t.cleared=!0),t.read=!0,t.tensor}readMany(e){return e.map((e=>this.read(e)))}write(e,t){if(this.closed_)throw new Error(`TensorArray ${this.name} has already been closed.`);if(e<0||!this.dynamicSize&&e>=this.maxSize)throw new Error(`Tried to write to index ${e}, but array is not resizeable and size is: ${this.maxSize}`);const n=this.tensors[e]||{};if(t.dtype!==this.dtype)throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${e},\n because the value dtype is ${t.dtype}, but TensorArray dtype is ${this.dtype}.`);if(0!==this.size()||null!=this.elementShape&&0!==this.elementShape.length||(this.elementShape=t.shape),Rb(this.elementShape,t.shape,`TensorArray ${this.name}: Could not write to TensorArray index ${e}.`),n.read)throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${e}, because it has already been read.`);if(n.written)throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${e}, because it has already been 
written.`);n.tensor=t,Tr(t),n.written=!0,this.tensors[e]=n}writeMany(e,t){if(e.length!==t.length)throw new Error(`TensorArray ${this.name}: could not write multiple tensors,because the index size: ${e.length} is not the same as tensors size: ${t.length}.`);e.forEach(((e,n)=>this.write(e,t[n])))}gather(e,t){if(t&&t!==this.dtype)throw new Error(`TensorArray dtype is ${this.dtype} but gather requested dtype ${t}`);if(e)e=e.slice(0,this.size());else{e=[];for(let t=0;t<this.size();t++)e.push(t)}if(0===e.length)return ha([],[0].concat(this.elementShape));const n=this.readMany(e);return Rb(this.elementShape,n[0].shape,"TensorArray shape mismatch: "),Fo(n,0)}concat(e){if(e&&e!==this.dtype)throw new Error(`TensorArray dtype is ${this.dtype} but concat requested dtype ${e}`);if(0===this.size())return ha([],[0].concat(this.elementShape));const t=[];for(let e=0;e<this.size();e++)t.push(e);const n=this.readMany(t);return Rb(this.elementShape,n[0].shape,`TensorArray shape mismatch: tensor array shape (${this.elementShape}) vs first tensor shape (${n[0].shape})`),ii(n,0)}scatter(e,t){if(t.dtype!==this.dtype)throw new Error(`TensorArray dtype is ${this.dtype} but tensor has dtype ${t.dtype}`);if(e.length!==t.shape[0])throw new Error(`Expected len(indices) == tensor.shape[0], but saw: ${e.length} vs. ${t.shape[0]}`);const n=Math.max(...e);if(!this.dynamicSize&&n>=this.maxSize)throw new Error(`Max index must be < array size (${n} vs. ${this.maxSize})`);this.writeMany(e,Lo(t,0))}split(e,t){if(t.dtype!==this.dtype)throw new Error(`TensorArray dtype is ${this.dtype} but tensor has dtype ${t.dtype}`);let n=0;const s=e.map((e=>(n+=e,n)));if(n!==t.shape[0])throw new Error(`Expected sum of lengths to be equal to\n tensor.shape[0], but sum of lengths is\n ${n}, and tensor's shape is: ${t.shape}`);if(!this.dynamicSize&&e.length!==this.maxSize)throw new Error(`TensorArray's size is not equal to the size of lengths (${this.maxSize} vs. 
${e.length}), and the TensorArray is not marked as dynamically resizeable`);const a=0===n?0:t.size/n,r=[];Cr((()=>{t=Zr(t,[1,n,a]);for(let n=0;n<e.length;++n){const i=[0,0===n?0:s[n-1],0],o=[1,e[n],a];r[n]=Zr(vo(t,i,o),this.elementShape)}return r}));const i=[];for(let t=0;t<e.length;t++)i[t]=t;this.writeMany(i,r)}}class Mb{constructor(e,t,n,s=-1){this.tensors=e,this.elementShape=t,this.elementDtype=n,null!=e&&e.forEach((e=>{if(n!==e.dtype)throw new Error(`Invalid data types; op elements ${n}, but list elements ${e.dtype}`);Rb(t,e.shape,"TensorList shape mismatch: "),Tr(e)})),this.idTensor=Fr(0),this.maxNumElements=s,Tr(this.idTensor)}get id(){return this.idTensor.id}copy(){return new Mb([...this.tensors],this.elementShape,this.elementDtype)}clearAndClose(e){this.tensors.forEach((t=>{null!=e&&e.has(t.id)||t.dispose()})),this.tensors.length=0,this.idTensor.dispose()}size(){return this.tensors.length}stack(e,t,n=-1){if(t!==this.elementDtype)throw new Error(`Invalid data types; op elements ${t}, but list elements ${this.elementDtype}`);if(-1!==n&&this.tensors.length!==n)throw new Error(`Operation expected a list with ${n} elements but got a list with ${this.tensors.length} elements.`);Rb(e,this.elementShape,"TensorList shape mismatch: ");const s=Db(this.elementShape,this.tensors,e);return Cr((()=>{const e=this.tensors.map((e=>Zr(e,s)));return Fo(e,0)}))}popBack(e,t){if(t!==this.elementDtype)throw new Error(`Invalid data types; op elements ${t}, but list elements ${this.elementDtype}`);if(0===this.size())throw new Error("Trying to pop from an empty list.");const n=Db(this.elementShape,this.tensors,e),s=this.tensors.pop();return Rb(s.shape,e,"TensorList shape mismatch: "),Zr(s,n)}pushBack(e){if(e.dtype!==this.elementDtype)throw new Error(`Invalid data types; op elements ${e.dtype}, but list elements ${this.elementDtype}`);if(Rb(e.shape,this.elementShape,"TensorList shape mismatch: "),this.maxNumElements===this.size())throw new Error("Trying to push element into a full list.");Tr(e),this.tensors.push(e)}resize(e){if(e<0)throw new Error(`TensorListResize expects size to be non-negative. 
Got: ${e}`);if(-1!==this.maxNumElements&&e>this.maxNumElements)throw new Error(`TensorListResize input size ${e} is greater maxNumElement ${this.maxNumElements}.`);this.tensors.length=e}getItem(e,t,n){if(n!==this.elementDtype)throw new Error(`Invalid data types; op elements ${n}, but list elements ${this.elementDtype}`);if(e<0||e>this.tensors.length)throw new Error(`Trying to access element ${e} in a list with ${this.tensors.length} elements.`);if(null==this.tensors[e])throw new Error(`element at index ${e} is null.`);Rb(this.tensors[e].shape,t,"TensorList shape mismatch: ");const s=Db(this.elementShape,this.tensors,t);return Zr(this.tensors[e],s)}setItem(e,t){if(t.dtype!==this.elementDtype)throw new Error(`Invalid data types; op elements ${t.dtype}, but list elements ${this.elementDtype}`);if(e<0||-1!==this.maxNumElements&&e>=this.maxNumElements)throw new Error(`Trying to set element ${e} in a list with max ${this.maxNumElements} elements.`);Rb(this.elementShape,t.shape,"TensorList shape mismatch: "),Tr(t),this.tensors[e]=t}gather(e,t,n){if(t!==this.elementDtype)throw new Error(`Invalid data types; op elements ${t}, but list elements ${this.elementDtype}`);Rb(this.elementShape,n,"TensorList shape mismatch: "),e=e.slice(0,this.size());const s=Db(this.elementShape,this.tensors,n);return 0===e.length?ha([],[0].concat(s)):Cr((()=>{const t=e.map((e=>Zr(this.tensors[e],s)));return Fo(t,0)}))}concat(e,t){if(e&&e!==this.elementDtype)throw new Error(`TensorList dtype is ${this.elementDtype} but concat requested dtype ${e}`);Rb(this.elementShape,t,"TensorList shape mismatch: ");const n=Db(this.elementShape,this.tensors,t);return 0===this.size()?ha([],[0].concat(n)):Cr((()=>{const e=this.tensors.map((e=>Zr(e,n)));return ii(e,0)}))}}const Lb=la({maxPoolWithArgmax_:function(e,t,n,s,a=!1){const r={x:ia(e,"x","maxPoolWithArgmax")},i={filterSize:t,strides:n,pad:s,includeBatchInIndex:a},o=Zs.runKernel(Kt,r,i);return{result:o[0],indexes:o[1]}}});function zb(e,t,n){const[s,a]=Gy("fusedOps",e,t,n),r="biasadd"===s,i="prelu"===a,o="fusedbatchnorm"===s,l=Gy("numArgs",e,t,n);if(r){if(i&&2!==l)throw new Error("FusedConv2d and DepthwiseConv2d with BiasAdd and Prelu must have two extra arguments: bias and alpha.");if(!i&&1!==l)throw new Error("FusedConv2d and DepthwiseConv2d with BiasAdd must have one extra argument: bias.")}if(o)throw new Error("FusedConv2d and DepthwiseConv2d with FusedBatchNorm is not supported.");const u=Gy("strides",e,t,n),c=Xy(e,t,n),h=Gy("dataFormat",e,t,n).toUpperCase(),p=Gy("dilations",e,t,n),[d,f]=Gy("args",e,t,n);return{stride:u,pad:c,dataFormat:h,dilations:p,biasArg:d,preluArg:f,activationFunc:a,leakyreluAlpha:Gy("leakyreluAlpha",e,t,n)}}function Pb(e,t,n){if(n<=0)throw new Error("The number of values should be positive.");const s={start:e,stop:t,num:n};return Zs.runKernel(Dt,{},s)}const Bb=la({multinomial_:function(e,t,n,s=!1){const a=ia(e,"logits","multinomial"),r=a.size,i=a.rank;if(r<2)throw new Error(`Error in multinomial: you need at least 2 outcomes, but got ${r}.`);if(i>2)throw new Error(`Rank of probabilities must be 1 or 2, but is ${i}`);n=n||Math.random();const o={logits:1===i?Zr(a,[1,-1]):a},l={numSamples:t,seed:n,normalized:s},u=Zs.runKernel(Qt,o,l);return 1===i?Zr(u,[u.size]):u}}),Wb=async function(e){const t=ia(e,"condition","whereAsync","bool"),n=await t.data(),s=Wu(t.shape,n);return e!==t&&t.dispose(),s};function 
Vb(e,t,n){return{boxes:Gy("boxes",e,t,n),scores:Gy("scores",e,t,n),maxOutputSize:Gy("maxOutputSize",e,t,n),iouThreshold:Gy("iouThreshold",e,t,n),scoreThreshold:Gy("scoreThreshold",e,t,n),softNmsSigma:Gy("softNmsSigma",e,t,n)}}class Ub{constructor(e,t){this.keyDType=e,this.valueDType=t,this.handle=Fr(0),this.tensorMap=new Map,Tr(this.handle)}get id(){return this.handle.id}clearAndClose(){this.tensorMap.forEach((e=>e.dispose())),this.tensorMap.clear(),this.handle.dispose()}size(){return this.tensorMap.size}tensorSize(){return Fr(this.size(),"int32")}async import(e,t){this.checkKeyAndValueTensor(e,t);const n=await e.data();return this.tensorMap.forEach((e=>e.dispose())),this.tensorMap.clear(),Cr((()=>{const e=Lo(t),s=n.length,a=e.length;E(s===a,(()=>`The number of elements doesn't match, keys has ${s} elements, the values has ${a} elements.`));for(let t=0;t<s;t++){const s=n[t],a=e[t];Tr(a),this.tensorMap.set(s,a)}return this.handle}))}async find(e,t){this.checkKeyAndValueTensor(e,t);const n=await e.data();return Cr((()=>{const e=[];for(let s=0;s<n.length;s++){const a=n[s],r=this.findWithDefault(a,t);e.push(r)}return Fo(e)}))}findWithDefault(e,t){const n=this.tensorMap.get(e);return null!=n?n:t}checkKeyAndValueTensor(e,t){if(e.dtype!==this.keyDType)throw new Error(`Expect key dtype ${this.keyDType}, but got ${e.dtype}`);if(t.dtype!==this.valueDType)throw new Error(`Expect value dtype ${this.valueDType}, but got ${t.dtype}`)}}const Gb=la({sparseToDense_:function(e,t,n,s=0){const a=ia(e,"sparseIndices","sparseToDense","int32"),r=ia(t,"sparseValues","sparseToDense"),i=ia(s,"defaultValue","sparseToDense",r.dtype);!function(e,t,n,s){if("int32"!==e.dtype)throw new Error(`tf.sparseToDense() expects the indices to be int32 type, but the dtype was ${e.dtype}.`);if(e.rank>2)throw new Error(`sparseIndices should be a scalar, vector, or matrix, but got shape ${e.shape}.`);const a=e.rank>0?e.shape[0]:1,r=e.rank>1?e.shape[1]:1;if(n.length!==r)throw new Error(`outputShape has incorrect number of elements:, ${n.length}, should be: ${r}.`);const i=t.size;if(0!==t.rank&&(1!==t.rank||i!==a))throw new Error(`sparseValues has incorrect shape ${t.shape}, should be [] or [${a}]`);if(t.dtype!==s.dtype)throw new Error("sparseValues.dtype must match defaultValues.dtype")}(a,r,n,i);const o={sparseIndices:a,sparseValues:r,defaultValue:i},l={outputShape:n};return Zs.runKernel(Gn,o,l)}}),Hb=la({bincount_:function(e,t,n){const s=ia(e,"x","bincount"),a=ia(t,"weights","bincount");E("int32"===s.dtype,(()=>`Error in bincount: input dtype must be int32, but got ${s.dtype}`)),E(n>=0,(()=>`size must be non-negative, but got ${n}.`)),E(a.size===s.size||0===a.size,(()=>`Error in bincount: weights must have the same size as input or0-length, but got input shape: ${s.shape}, weights shape: ${a.shape}.`));const r={x:s,weights:a},i={size:n};return Zs.runKernel(_e,r,i)}}),jb=la({denseBincount_:function(e,t,n,s=!1){const a=ia(e,"x","denseBincount"),r=ia(t,"weights","denseBincount");E("int32"===a.dtype,(()=>`Error in denseBincount: input dtype must be int32, but got ${a.dtype}`)),E(a.rank<=2,(()=>`Error in denseBincount: input must be at most rank 2, but got rank ${a.rank}.`)),E(n>=0,(()=>`size must be non-negative, but got ${n}.`)),E(r.size===a.size||0===r.size,(()=>`Error in denseBincount: weights must have the same shape as x or 0-length, but got x shape: ${a.shape}, weights shape: ${r.shape}.`));const i={x:a,weights:r},o={size:n,binaryOutput:s};return Zs.runKernel(Je,i,o)}}),Kb=la({scatterND_:function(e,t,n){const 
s=ia(e,"indices","scatterND","int32"),a=ia(t,"updates","scatterND");mu(a,s,n);const r={indices:s,updates:a},i={shape:n};return Zs.runKernel(Sn,r,i)}}),qb=la({gatherND_:function(e,t){const n=ia(t,"indices","gatherND","int32"),s={params:ia(e,"x","gatherND"),indices:n};return Zs.runKernel(kt,s)}});function Xb(e,t,n,s){const a=((e,t,n)=>{switch(e.category){case"arithmetic":return Cr((()=>((e,t,n)=>{switch(e.op){case"BiasAdd":case"AddV2":case"Add":return[Or(Gy("a",e,t,n),Gy("b",e,t,n))];case"AddN":return[Ab(Gy("tensors",e,t,n))];case"FloorMod":case"Mod":return[pp(Gy("a",e,t,n),Gy("b",e,t,n))];case"Mul":return[Mi(Gy("a",e,t,n),Gy("b",e,t,n))];case"RealDiv":case"Div":return[bi(Gy("a",e,t,n),Gy("b",e,t,n))];case"DivNoNan":return[ep(Gy("a",e,t,n),Gy("b",e,t,n))];case"FloorDiv":return[yi(Gy("a",e,t,n),Gy("b",e,t,n))];case"Sub":return[Li(Gy("a",e,t,n),Gy("b",e,t,n))];case"Minimum":return[ji(Gy("a",e,t,n),Gy("b",e,t,n))];case"Maximum":return[Ui(Gy("a",e,t,n),Gy("b",e,t,n))];case"Pow":return[Dl(Gy("a",e,t,n),Gy("b",e,t,n))];case"SquaredDifference":return[Wl(Gy("a",e,t,n),Gy("b",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}})(e,t,n)));case"basic_math":return Cr((()=>((e,t,n)=>{switch(e.op){case"Abs":case"ComplexAbs":return[_r(Gy("x",e,t,n))];case"Acos":return[Uh(Gy("x",e,t,n))];case"Acosh":return[Gh(Gy("x",e,t,n))];case"Asin":return[jh(Gy("x",e,t,n))];case"Asinh":return[Kh(Gy("x",e,t,n))];case"Atan":return[qh(Gy("x",e,t,n))];case"Atan2":return[Xh(Gy("x",e,t,n),Gy("y",e,t,n))];case"Atanh":return[Yh(Gy("x",e,t,n))];case"Ceil":return[Jh(Gy("x",e,t,n))];case"Complex":return[ua(Gy("real",e,t,n),Gy("imag",e,t,n))];case"Cos":return[Fh(Gy("x",e,t,n))];case"Cosh":return[_h(Gy("x",e,t,n))];case"Elu":return[xi(Gy("x",e,t,n))];case"Erf":return[np(Gy("x",e,t,n))];case"Exp":return[_i(Gy("x",e,t,n))];case"Expm1":return[sp(Gy("x",e,t,n))];case"Floor":return[Ti(Gy("x",e,t,n))];case"Log":return[Di(Gy("x",e,t,n))];case"Log1p":return[Vl(Gy("x",e,t,n))];case"Imag":return[Uo(Gy("x",e,t,n))];case"Neg":return[so(Gy("x",e,t,n))];case"Reciprocal":return[mp(Gy("x",e,t,n))];case"Real":return[Go(Gy("x",e,t,n))];case"Relu":return[yo(Gy("x",e,t,n))];case"Round":return[gp(Gy("x",e,t,n))];case"Selu":return[xo(Gy("x",e,t,n))];case"Sigmoid":return[ko(Gy("x",e,t,n))];case"Sin":return[bc(Gy("x",e,t,n))];case"Sign":return[yp(Gy("x",e,t,n))];case"Sinh":return[wc(Gy("x",e,t,n))];case"Softplus":return[To(Gy("x",e,t,n))];case"Sqrt":return[Ao(Gy("x",e,t,n))];case"Square":return[to(Gy("x",e,t,n))];case"Tanh":return[Do(Gy("x",e,t,n))];case"Tan":return[xp(Gy("x",e,t,n))];case"ClipByValue":return[ri(Gy("x",e,t,n),Gy("clipValueMin",e,t,n),Gy("clipValueMax",e,t,n))];case"Relu6":return[Yo(Gy("x",e,t,n))];case"Rsqrt":return[Dc(Hy(e.inputNames[0],t,n))];case"Prod":return[fp(Gy("x",e,t,n),Gy("axes",e,t,n))];case"LeakyRelu":return[Fi(Gy("x",e,t,n),Gy("alpha",e,t,n))];case"Prelu":return[co(Gy("x",e,t,n),Gy("alpha",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}})(e,t,n)));case"control":return(async(e,t,n)=>{switch(e.op){case"If":case"StatelessIf":{const s=Gy("thenBranch",e,t,n),a=Gy("elseBranch",e,t,n),r=Gy("cond",e,t,n),i=Gy("args",e,t,n);return(await r.data())[0]?n.functionMap[s].executeFunctionAsync(i,n.tensorArrayMap,n.tensorListMap):n.functionMap[a].executeFunctionAsync(i,n.tensorArrayMap,n.tensorListMap)}case"While":case"StatelessWhile":{const s=Gy("body",e,t,n),a=Gy("cond",e,t,n),r=Gy("args",e,t,n),i=await 
n.functionMap[a].executeFunctionAsync(r,n.tensorArrayMap,n.tensorListMap),o=r.map((e=>e.id));let l=await i[0].data();i.forEach((e=>{e.kept||-1!==o.indexOf(e.id)||e.dispose()}));let u=r;for(;l[0];){const e=u;u=await n.functionMap[s].executeFunctionAsync(u,n.tensorArrayMap,n.tensorListMap);const t=u.map((e=>e.id));e.forEach((e=>{e.kept||-1!==o.indexOf(e.id)||-1!==t.indexOf(e.id)||e.dispose()}));const r=await n.functionMap[a].executeFunctionAsync(u,n.tensorArrayMap,n.tensorListMap);l=await r[0].data(),r.forEach((e=>{e.kept||-1!==o.indexOf(e.id)||-1!==t.indexOf(e.id)||e.dispose()}))}return u}case"LoopCond":return[Yy(Gy("pred",e,t,n))];case"Switch":{const s=Gy("pred",e,t,n);let a=Gy("data",e,t,n);return a.kept||(a=Yy(a)),(await s.data())[0]?[void 0,a]:[a,void 0]}case"Merge":{const s=e.inputNames.find((e=>void 0!==Hy(e,t,n)));return s?[Yy(Hy(s,t,n))]:void 0}case"Enter":{const s=Gy("frameName",e,t,n),a=Gy("tensor",e,t,n);return n.enterFrame(s),[Yy(a)]}case"Exit":{const s=Gy("tensor",e,t,n);return n.exitFrame(),[Yy(s)]}case"NextIteration":{const s=Gy("tensor",e,t,n);return n.nextIteration(),[Yy(s)]}case"TensorArrayV3":{const s=Gy("size",e,t,n),a=Gy("dtype",e,t,n),r=Gy("elementShape",e,t,n),i=Gy("dynamicSize",e,t,n),o=Gy("clearAfterRead",e,t,n),l=Gy("identicalElementShapes",e,t,n),u=Gy("name",e,t,n),c=new Ob(u,a,s,r,l,i,o);return n.addTensorArray(c),[c.idTensor,Fr(1)]}case"TensorArrayWriteV3":{const s=Gy("tensorArrayId",e,t,n),a=Gy("index",e,t,n),r=Gy("tensor",e,t,n),i=n.getTensorArray(s.id);return i.write(a,r),[i.idTensor]}case"TensorArrayReadV3":{const s=Gy("tensorArrayId",e,t,n),a=Gy("index",e,t,n);return[n.getTensorArray(s.id).read(a)]}case"TensorArrayGatherV3":{const s=Gy("tensorArrayId",e,t,n),a=Gy("indices",e,t,n),r=Gy("dtype",e,t,n);return[n.getTensorArray(s.id).gather(a,r)]}case"TensorArrayScatterV3":{const s=Gy("tensorArrayId",e,t,n),a=Gy("indices",e,t,n),r=Gy("tensor",e,t,n),i=n.getTensorArray(s.id);return i.scatter(a,r),[i.idTensor]}case"TensorArrayConcatV3":{const s=Gy("tensorArrayId",e,t,n),a=n.getTensorArray(s.id),r=Gy("dtype",e,t,n);return[a.concat(r)]}case"TensorArraySplitV3":{const s=Gy("tensorArrayId",e,t,n),a=Gy("tensor",e,t,n),r=Gy("lengths",e,t,n),i=n.getTensorArray(s.id);return i.split(r,a),[i.idTensor]}case"TensorArraySizeV3":{const s=Gy("tensorArrayId",e,t,n);return[Fr(n.getTensorArray(s.id).size(),"int32")]}case"TensorArrayCloseV3":{const s=Gy("tensorArrayId",e,t,n),a=n.getTensorArray(s.id);return a.clearAndClose(),[a.idTensor]}case"TensorListSetItem":{const s=Gy("tensorListId",e,t,n),a=Gy("index",e,t,n),r=Gy("tensor",e,t,n),i=n.getTensorList(s.id);return i.setItem(a,r),[i.idTensor]}case"TensorListGetItem":{const s=Gy("tensorListId",e,t,n),a=Gy("index",e,t,n),r=Gy("elementShape",e,t,n),i=Gy("elementDType",e,t,n);return[n.getTensorList(s.id).getItem(a,r,i)]}case"TensorListScatterV2":case"TensorListScatter":{const s=Gy("indices",e,t,n),a=function(e,t,n,s){if(t.length!==e.shape[0])throw new Error(`Expected len(indices) == tensor.shape[0], but saw: ${t.length} vs. ${e.shape[0]}`);const a=Math.max(...t);if(null!=s&&-1!==s&&a>=s)throw new Error(`Max index must be < array size (${a} vs. 
${s})`);const r=new Mb([],n,e.dtype,s),i=Lo(e,0);return t.forEach(((e,t)=>{r.setItem(e,i[t])})),r}(Gy("tensor",e,t,n),s,Gy("elementShape",e,t,n),Gy("numElements",e,t,n));return n.addTensorList(a),[a.idTensor]}case"TensorListReserve":case"EmptyTensorList":{const s=Gy("elementShape",e,t,n),a=Gy("elementDType",e,t,n);let r;r="TensorListReserve"===e.op?"numElements":"maxNumElements";const i=function(e,t,n){return new Mb([],e,t,n)}(s,a,Gy(r,e,t,n));return n.addTensorList(i),[i.idTensor]}case"TensorListGather":{const s=Gy("tensorListId",e,t,n),a=Gy("indices",e,t,n),r=Gy("elementShape",e,t,n),i=Gy("elementDType",e,t,n);return[n.getTensorList(s.id).gather(a,i,r)]}case"TensorListStack":{const s=Gy("tensorListId",e,t,n),a=Gy("elementShape",e,t,n),r=Gy("elementDType",e,t,n),i=Gy("numElements",e,t,n);return[n.getTensorList(s.id).stack(a,r,i)]}case"TensorListFromTensor":{const s=function(e,t,n){const s=e.dtype;if(e.shape.length<1)throw new Error(`Tensor must be at least a vector, but saw shape: ${e.shape}`);if(e.dtype!==n)throw new Error(`Invalid data types; op elements ${e.dtype}, but list elements ${n}`);Rb(e.shape.slice(1),t,"TensorList shape mismatch: ");const a=Lo(e);return new Mb(a,t,s)}(Gy("tensor",e,t,n),Gy("elementShape",e,t,n),Gy("elementDType",e,t,n));return n.addTensorList(s),[s.idTensor]}case"TensorListConcat":{const s=Gy("tensorListId",e,t,n),a=n.getTensorList(s.id),r=Gy("dtype",e,t,n),i=Gy("elementShape",e,t,n);return[a.concat(r,i)]}case"TensorListPushBack":{const s=Gy("tensorListId",e,t,n),a=Gy("tensor",e,t,n),r=n.getTensorList(s.id);return r.pushBack(a),[r.idTensor]}case"TensorListPopBack":{const s=Gy("tensorListId",e,t,n),a=Gy("elementShape",e,t,n),r=Gy("elementDType",e,t,n);return[n.getTensorList(s.id).popBack(a,r)]}case"TensorListSplit":{const s=Gy("tensor",e,t,n),a=Gy("elementShape",e,t,n),r=function(e,t,n){let s=0;const a=t.map((e=>(s+=e,s)));if(s!==e.shape[0])throw new Error(`Expected sum of lengths to be equal to\n tensor.shape[0], but sum of lengths is\n ${s}, and tensor's shape is: ${e.shape}`);const r=_b(e.shape.slice(1),n),i=0===s?0:e.size/s,o=Cr((()=>{const n=[];e=Zr(e,[1,s,i]);for(let s=0;s<t.length;++s){const o=[0,0===s?0:a[s-1],0],l=[1,t[s],i];n[s]=Zr(vo(e,o,l),r)}return e.dispose(),n})),l=new Mb([],n,e.dtype,t.length);for(let e=0;e<o.length;e++)l.setItem(e,o[e]);return l}(s,Gy("lengths",e,t,n),a);return n.addTensorList(r),[r.idTensor]}default:throw TypeError(`Node type ${e.op} is not implemented`)}})(e,t,n);case"convolution":return Cr((()=>((e,t,n)=>{switch(e.op){case"Conv1D":{const s=Gy("stride",e,t,n),a=Gy("pad",e,t,n),r=Gy("dataFormat",e,t,n).toUpperCase(),i=Gy("dilation",e,t,n);return[pi(Gy("x",e,t,n),Gy("filter",e,t,n),s,a,r,i)]}case"Conv2D":{const 
s=Gy("strides",e,t,n),a=Xy(e,t,n),r=Gy("dataFormat",e,t,n).toUpperCase(),i=Gy("dilations",e,t,n);return[hi(Gy("x",e,t,n),Gy("filter",e,t,n),[s[1],s[2]],a,r,[i[1],i[2]])]}case"_FusedConv2D":{const{stride:s,pad:a,dataFormat:r,dilations:i,biasArg:o,preluArg:l,activationFunc:u,leakyreluAlpha:c}=zb(e,t,n);return[nl({x:Gy("x",e,t,n),filter:Gy("filter",e,t,n),strides:[s[1],s[2]],pad:a,dataFormat:r,dilations:[i[1],i[2]],bias:o,activation:u,preluActivationWeights:l,leakyreluAlpha:c})]}case"FusedDepthwiseConv2dNative":{const{stride:s,pad:a,dataFormat:r,dilations:i,biasArg:o,preluArg:l,activationFunc:u,leakyreluAlpha:c}=zb(e,t,n);return[rl({x:Gy("x",e,t,n),filter:Gy("filter",e,t,n),strides:[s[1],s[2]],pad:a,dataFormat:r,dilations:[i[1],i[2]],bias:o,activation:u,preluActivationWeights:l,leakyreluAlpha:c})]}case"Conv2DBackpropInput":case"Conv2dTranspose":{const s=Gy("outputShape",e,t,n),a=Gy("strides",e,t,n),r=Xy(e,t,n);return[fi(Gy("x",e,t,n),Gy("filter",e,t,n),s,[a[1],a[2]],r)]}case"DepthwiseConv2dNative":case"DepthwiseConv2d":{const s=Gy("strides",e,t,n),a=Xy(e,t,n),r=Gy("dilations",e,t,n),i=Gy("dataFormat",e,t,n).toUpperCase();return[gi(Gy("input",e,t,n),Gy("filter",e,t,n),[s[1],s[2]],a,i,[r[1],r[2]])]}case"Conv3D":{const s=Gy("strides",e,t,n),a=Gy("pad",e,t,n),r=Gy("dataFormat",e,t,n).toUpperCase(),i=Gy("dilations",e,t,n);return[mi(Gy("x",e,t,n),Gy("filter",e,t,n),[s[1],s[2],s[3]],a,r,[i[1],i[2],i[3]])]}case"AvgPool":{const s=Gy("strides",e,t,n),a=Gy("pad",e,t,n),r=Gy("kernelSize",e,t,n);return[Qr(Gy("x",e,t,n),[r[1],r[2]],[s[1],s[2]],a)]}case"MaxPool":{const s=Gy("strides",e,t,n),a=Gy("pad",e,t,n),r=Gy("kernelSize",e,t,n);return[Wi(Gy("x",e,t,n),[r[1],r[2]],[s[1],s[2]],a)]}case"MaxPoolWithArgmax":{const s=Gy("strides",e,t,n),a=Gy("pad",e,t,n),r=Gy("kernelSize",e,t,n),i=Gy("includeBatchInIndex",e,t,n),{result:o,indexes:l}=Lb(Gy("x",e,t,n),[r[1],r[2]],[s[1],s[2]],a,i);return[o,l]}case"AvgPool3D":{const s=Gy("strides",e,t,n),a=Gy("pad",e,t,n),r=Gy("kernelSize",e,t,n);return[ei(Gy("x",e,t,n),[r[1],r[2],r[3]],[s[1],s[2],s[3]],a)]}case"MaxPool3D":{const s=Gy("strides",e,t,n),a=Gy("pad",e,t,n),r=Gy("kernelSize",e,t,n);return[Vi(Gy("x",e,t,n),[r[1],r[2],r[3]],[s[1],s[2],s[3]],a)]}case"Dilation2D":{const s=Gy("strides",e,t,n),a=Gy("pad",e,t,n),r=Gy("dilations",e,t,n),i=s[1],o=s[2],l=r[1],u=r[2];return[Qh(Gy("x",e,t,n),Gy("filter",e,t,n),[i,o],a,[l,u],"NHWC")]}default:throw TypeError(`Node type ${e.op} is not implemented`)}})(e,t,n)));case"creation":return Cr((()=>((e,t,n)=>{switch(e.op){case"Fill":{const s=Gy("shape",e,t,n),a=Gy("dtype",e,t,n);return[Si(s,Gy("value",e,t,n),a)]}case"LinSpace":return[Pb(Gy("start",e,t,n),Gy("stop",e,t,n),Gy("num",e,t,n))];case"Multinomial":{const s=Gy("logits",e,t,n),a=Gy("numSamples",e,t,n),r=Gy("seed",e,t,n);return[Bb(s,a,r)]}case"OneHot":{const s=Gy("indices",e,t,n),a=Gy("depth",e,t,n),r=Gy("onValue",e,t,n),i=Gy("offValue",e,t,n);return[ro(s,a,r,i)]}case"Ones":return[oo(Gy("shape",e,t,n),Gy("dtype",e,t,n))];case"OnesLike":return[lo(Gy("x",e,t,n))];case"RandomUniform":return[go(Gy("shape",e,t,n),Gy("minval",e,t,n),Gy("maxval",e,t,n),Gy("dtype",e,t,n))];case"Range":return[Rl(Gy("start",e,t,n),Gy("stop",e,t,n),Gy("step",e,t,n),Gy("dtype",e,t,n))];case"TruncatedNormal":{const s=Gy("shape",e,t,n),a=Gy("mean",e,t,n),r=Gy("stdDev",e,t,n),i=Gy("seed",e,t,n);return[Mo(s,a,r,Gy("dtype",e,t,n),i)]}case"Zeros":return[io(Gy("shape",e,t,n),Gy("dtype",e,t,n))];case"ZerosLike":return[Bo(Gy("x",e,t,n))];default:throw TypeError(`Node type ${e.op} is not 
implemented`)}})(e,t,n)));case"dynamic":return(async(e,t,n)=>{switch(e.op){case"NonMaxSuppressionV5":{const{boxes:s,scores:a,maxOutputSize:r,iouThreshold:i,scoreThreshold:o,softNmsSigma:l}=Vb(e,t,n),u=await Gl.nonMaxSuppressionWithScoreAsync(s,a,r,i,o,l);return[u.selectedIndices,u.selectedScores]}case"NonMaxSuppressionV4":{const{boxes:s,scores:a,maxOutputSize:r,iouThreshold:i,scoreThreshold:o}=Vb(e,t,n),l=Gy("padToMaxOutputSize",e,t,n),u=await Gl.nonMaxSuppressionPaddedAsync(s,a,r,i,o,l);return[u.selectedIndices,u.validOutputs]}case"NonMaxSuppressionV3":case"NonMaxSuppressionV2":{const{boxes:s,scores:a,maxOutputSize:r,iouThreshold:i,scoreThreshold:o}=Vb(e,t,n);return[await Gl.nonMaxSuppressionAsync(s,a,r,i,o)]}case"Where":{const s=ja(Gy("condition",e,t,n),"bool"),a=[await Wb(s)];return s.dispose(),a}case"ListDiff":return async function(e,t){const n=ia(e,"x","setdiff1d"),s=ia(t,"y","setdiff1d");E(n.dtype===s.dtype,(()=>`x and y should have the same dtype, but got x (${n.dtype}) and y (${s.dtype}).`)),E(1===n.rank,(()=>`x should be 1D tensor, but got x (${n.shape}).`)),E(1===s.rank,(()=>`y should be 1D tensor, but got y (${s.shape}).`));const a=await n.data(),r=await s.data(),i=new Set(r);let o=0;for(let e=0;e<a.length;e++)i.has(a[e])||o++;const l=new Es([o],n.dtype),u=new Es([o],"int32");for(let e=0,t=0;e<a.length;e++)i.has(a[e])||(l.values[t]=a[e],u.values[t]=e,t++);return[l.toTensor(),u.toTensor()]}(Gy("x",e,t,n),Gy("y",e,t,n));default:throw TypeError(`Node type ${e.op} is not implemented`)}})(e,t,n);case"evaluation":return Cr((()=>((e,t,n)=>{switch(e.op){case"TopKV2":{const s=Gy("x",e,t,n),a=Gy("k",e,t,n),r=Gy("sorted",e,t,n),i=wp(s,a,r);return[i.values,i.indices]}case"Unique":{const s=Gy("x",e,t,n),a=kp(s);return[a.values,a.indices]}case"UniqueV2":{const s=Gy("x",e,t,n),a=Gy("axis",e,t,n),r=kp(s,a);return[r.values,r.indices]}default:throw TypeError(`Node type ${e.op} is not implemented`)}})(e,t,n)));case"image":return Cr((()=>((e,t,n)=>{switch(e.op){case"ResizeBilinear":{const s=Gy("images",e,t,n),a=Gy("size",e,t,n),r=Gy("alignCorners",e,t,n),i=Gy("halfPixelCenters",e,t,n);return[Gl.resizeBilinear(s,[a[0],a[1]],r,i)]}case"ResizeNearestNeighbor":{const s=Gy("images",e,t,n),a=Gy("size",e,t,n),r=Gy("alignCorners",e,t,n),i=Gy("halfPixelCenters",e,t,n);return[Gl.resizeNearestNeighbor(s,[a[0],a[1]],r,i)]}case"CropAndResize":{const s=Gy("image",e,t,n),a=Gy("boxes",e,t,n),r=Gy("boxInd",e,t,n),i=Gy("cropSize",e,t,n),o=Gy("method",e,t,n),l=Gy("extrapolationValue",e,t,n);return[Gl.cropAndResize(s,a,r,i,o,l)]}default:throw TypeError(`Node type ${e.op} is not implemented`)}})(e,t,n)));case"graph":return Cr((()=>((e,t,n)=>{switch(e.op){case"Const":return t[e.name];case"PlaceholderWithDefault":const s=Gy("default",e,t,n);return[Hy(e.name,t,n)||s];case"Placeholder":return[Hy(e.name,t,n)];case"Identity":case"StopGradient":case"FakeQuantWithMinMaxVars":return[Yy(Gy("x",e,t,n))];case"IdentityN":return Gy("x",e,t,n).map((e=>Yy(e)));case"Snapshot":return[Yy(Gy("x",e,t,n))];case"Shape":return[_o(Gy("x",e,t,n).shape,"int32")];case"ShapeN":return Gy("x",e,t,n).map((e=>_o(e.shape)));case"Size":return[Fr(Gy("x",e,t,n).size,"int32")];case"Rank":return[Fr(Gy("x",e,t,n).rank,"int32")];case"NoOp":return[Fr(1)];case"Print":const a=Gy("x",e,t,n),r=Gy("data",e,t,n),i=Gy("message",e,t,n),o=Gy("summarize",e,t,n);console.warn("The graph has a tf.print() operation,usually used for debugging, which slows down performance."),console.log(i);for(let 
e=0;e<r.length;e++)console.log(Array.prototype.slice.call(r[e].dataSync()).slice(0,o));return[a];default:throw TypeError(`Node type ${e.op} is not implemented`)}})(e,t,n)));case"logical":return Cr((()=>((e,t,n)=>{switch(e.op){case"Equal":return[Ni(Gy("a",e,t,n),Gy("b",e,t,n))];case"NotEqual":return[ao(Gy("a",e,t,n),Gy("b",e,t,n))];case"Greater":return[Ai(Gy("a",e,t,n),Gy("b",e,t,n))];case"GreaterEqual":return[Ri(Gy("a",e,t,n),Gy("b",e,t,n))];case"Less":return[Zc(Gy("a",e,t,n),Gy("b",e,t,n))];case"LessEqual":return[Al(Gy("a",e,t,n),Gy("b",e,t,n))];case"LogicalAnd":return[Bi(Gy("a",e,t,n),Gy("b",e,t,n))];case"LogicalNot":return[Sh(Gy("a",e,t,n))];case"LogicalOr":return[up(Gy("a",e,t,n),Gy("b",e,t,n))];case"Select":case"SelectV2":return[Po(Gy("condition",e,t,n),Gy("a",e,t,n),Gy("b",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}})(e,t,n)));case"matrices":return Cr((()=>((e,t,n)=>{switch(e.op){case"BatchMatMul":case"BatchMatMulV2":case"MatMul":return[il(Gy("a",e,t,n),Gy("b",e,t,n),Gy("transposeA",e,t,n),Gy("transposeB",e,t,n))];case"Transpose":return[Wo(Gy("x",e,t,n),Gy("perm",e,t,n))];case"_FusedMatMul":const[s,a]=Gy("fusedOps",e,t,n),r="biasadd"===s,i="prelu"===a,o=Gy("numArgs",e,t,n),l=Gy("leakyreluAlpha",e,t,n);if(r){if(i&&2!==o)throw new Error("Fused MatMul with BiasAdd and Prelu must have two extra arguments: bias and alpha.");if(!i&&1!==o)throw new Error("Fused MatMul with BiasAdd must have one extra argument: bias.")}const[u,c]=Gy("args",e,t,n);return[ol({a:Gy("a",e,t,n),b:Gy("b",e,t,n),transposeA:Gy("transposeA",e,t,n),transposeB:Gy("transposeB",e,t,n),bias:u,activation:a,preluActivationWeights:c,leakyreluAlpha:l})];default:throw TypeError(`Node type ${e.op} is not implemented`)}})(e,t,n)));case"normalization":return Cr((()=>((e,t,n)=>{switch(e.op){case"FusedBatchNorm":case"FusedBatchNormV2":case"FusedBatchNormV3":return[ti(Gy("x",e,t,n),Gy("mean",e,t,n),Gy("variance",e,t,n),Gy("offset",e,t,n),Gy("scale",e,t,n),Gy("epsilon",e,t,n))];case"LRN":return[op(Gy("x",e,t,n),Gy("radius",e,t,n),Gy("bias",e,t,n),Gy("alpha",e,t,n),Gy("beta",e,t,n))];case"Softmax":return[So(Gy("x",e,t,n))];case"LogSoftmax":return[Pi(Gy("x",e,t,n))];case"SparseToDense":return[Gb(Gy("sparseIndices",e,t,n),Gy("outputShape",e,t,n),Gy("sparseValues",e,t,n),Gy("defaultValue",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}})(e,t,n)));case"reduction":return Cr((()=>((e,t,n)=>{switch(e.op){case"Max":{const s=Gy("axis",e,t,n),a=Gy("keepDims",e,t,n);return[Oi(Gy("x",e,t,n),s,a)]}case"Mean":{const s=Gy("axis",e,t,n),a=Gy("keepDims",e,t,n);return[Gi(Gy("x",e,t,n),s,a)]}case"Min":{const s=Gy("axis",e,t,n),a=Gy("keepDims",e,t,n);return[Hi(Gy("x",e,t,n),s,a)]}case"Sum":{const s=Gy("axis",e,t,n),a=Gy("keepDims",e,t,n);return[zi(Gy("x",e,t,n),s,a)]}case"All":{const s=Gy("axis",e,t,n),a=Gy("keepDims",e,t,n);return[Mr(Gy("x",e,t,n),s,a)]}case"Any":{const s=Gy("axis",e,t,n),a=Gy("keepDims",e,t,n);return[Lr(Gy("x",e,t,n),s,a)]}case"ArgMax":{const s=Gy("axis",e,t,n);return[zr(Gy("x",e,t,n),s)]}case"ArgMin":{const s=Gy("axis",e,t,n);return[Hh(Gy("x",e,t,n),s)]}case"Prod":{const s=Gy("axis",e,t,n),a=Gy("keepDims",e,t,n);return[fp(Gy("x",e,t,n),s,a)]}case"Cumsum":{const s=Gy("axis",e,t,n),a=Gy("exclusive",e,t,n),r=Gy("reverse",e,t,n);return[vc(Gy("x",e,t,n),s,a,r)]}case"Bincount":const s=Gy("x",e,t,n),a=Gy("weights",e,t,n),r=Gy("size",e,t,n);return[Hb(s,a,r)];case"DenseBincount":{const 
s=Gy("x",e,t,n),a=Gy("weights",e,t,n),r=Gy("size",e,t,n),i=Gy("binaryOutput",e,t,n);return[jb(s,a,r,i)]}default:throw TypeError(`Node type ${e.op} is not implemented`)}})(e,t,n)));case"slice_join":return Cr((()=>((e,t,n)=>{switch(e.op){case"ConcatV2":case"Concat":{const s=Gy("n",e,t,n),a=Gy("axis",e,t,n);let r=Gy("tensors",e,t,n);return r=r.slice(0,s),[ii(r,a)]}case"Gather":{const s=Gy("x",e,t,n),a=Gy("indices",e,t,n);return[Ei(s,ja(a,"int32"),0)]}case"GatherV2":{const s=Gy("axis",e,t,n),a=Gy("batchDims",e,t,n),r=Gy("x",e,t,n),i=Gy("indices",e,t,n);return[Ei(r,ja(i,"int32"),s,a)]}case"Reverse":{const s=Gy("dims",e,t,n),a=[];for(let e=0;e<s.length;e++)s[e]&&a.push(e);const r=Gy("x",e,t,n);return[bo(r,a)]}case"ReverseV2":{const s=Gy("axis",e,t,n),a=Gy("x",e,t,n);return[bo(a,s)]}case"Slice":{const s=Gy("begin",e,t,n),a=Gy("size",e,t,n);return[vo(Gy("x",e,t,n),s,a)]}case"StridedSlice":{const s=Gy("begin",e,t,n),a=Gy("end",e,t,n),r=Gy("strides",e,t,n),i=Gy("beginMask",e,t,n),o=Gy("endMask",e,t,n),l=Gy("ellipsisMask",e,t,n),u=Gy("newAxisMask",e,t,n),c=Gy("shrinkAxisMask",e,t,n),h=Gy("x",e,t,n);return[bp(h,s,a,r,i,o,l,u,c)]}case"Pack":return Cr((()=>{const s=Gy("axis",e,t,n),a=Gy("tensors",e,t,n),r=a[0].shape,i=Ro(a[0]).shape,o=a.map((e=>{const t=_(e.shape,r);if(!t&&!_(Ro(e).shape,i))throw new Error("the input tensors shape does not match");return t?e:Zr(e,r)}));return[Fo(o,s)]}));case"Unpack":{const s=Gy("axis",e,t,n),a=Gy("tensor",e,t,n);return Lo(a,s)}case"Tile":{const s=Gy("reps",e,t,n);return[$i(Gy("x",e,t,n),s)]}case"Split":case"SplitV":{const s=Gy("axis",e,t,n),a=Gy("numOrSizeSplits",e,t,n),r=Gy("x",e,t,n);return Eo(r,a,s)}case"ScatterNd":{const s=Gy("indices",e,t,n),a=Gy("values",e,t,n),r=Gy("shape",e,t,n);return[Kb(s,a,r)]}case"GatherNd":{const s=Gy("x",e,t,n),a=Gy("indices",e,t,n);return[qb(s,a)]}case"SparseToDense":{const s=Gy("sparseIndices",e,t,n),a=Gy("outputShape",e,t,n),r=Gy("sparseValues",e,t,n),i=Gy("defaultValue",e,t,n);return[Gb(s,r,a,r.dtype===i.dtype?i:ja(i,r.dtype))]}default:throw TypeError(`Node type ${e.op} is not implemented`)}})(e,t,n)));case"spectral":return Cr((()=>((e,t,n)=>{switch(e.op){case"FFT":return[Ho(Gy("x",e,t,n))];case"IFFT":return[Ko(Gy("x",e,t,n))];case"RFFT":return[jo(Gy("x",e,t,n))];case"IRFFT":return[qo(Gy("x",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}})(e,t,n)));case"transformation":return Cr((()=>((e,t,n)=>{switch(e.op){case"Cast":return[ja(Gy("x",e,t,n),Gy("dtype",e,t,n))];case"ExpandDims":{const s=Gy("axis",e,t,n);return[Ii(Gy("x",e,t,n),s)]}case"Squeeze":{const s=Gy("axis",e,t,n);return[Ro(Gy("x",e,t,n),s)]}case"Reshape":return[Zr(Gy("x",e,t,n),Gy("shape",e,t,n))];case"MirrorPad":return[hp(Gy("x",e,t,n),Gy("padding",e,t,n),Gy("mode",e,t,n))];case"PadV2":case"Pad":return[uo(Gy("x",e,t,n),Gy("padding",e,t,n),Gy("constantValue",e,t,n))];case"SpaceToBatchND":{const s=Gy("blockShape",e,t,n),a=Gy("paddings",e,t,n);return[rc(Gy("x",e,t,n),s,a)]}case"BatchToSpaceND":{const s=Gy("blockShape",e,t,n),a=Gy("crops",e,t,n);return[Ph(Gy("x",e,t,n),s,a)]}case"DepthToSpace":{const s=Gy("blockSize",e,t,n),a=Gy("dataFormat",e,t,n).toUpperCase();return[Zh(Gy("x",e,t,n),s,a)]}case"BroadcastTo":return[zo(Gy("x",e,t,n),Gy("shape",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}})(e,t,n)));case"hash_table":return(async(e,t,n,s)=>{switch(e.op){case"HashTable":case"HashTableV2":{const a=Gy("keyDType",e,t,n),r=Gy("valueDType",e,t,n),i=new Ub(a,r);return 
s.addHashTable(e.name,i),[i.handle]}case"LookupTableImport":case"LookupTableImportV2":{const a=Gy("tableHandle",e,t,n,s),r=Gy("keys",e,t,n),i=Gy("values",e,t,n),o=s.getHashTableById(a.id);return[await o.import(r,i)]}case"LookupTableFind":case"LookupTableFindV2":{const a=Gy("tableHandle",e,t,n,s),r=Gy("keys",e,t,n),i=Gy("defaultValue",e,t,n),o=s.getHashTableById(a.id);return[await o.find(r,i)]}case"LookupTableSize":case"LookupTableSizeV2":{const a=Gy("tableHandle",e,t,n,s);return[s.getHashTableById(a.id).tensorSize()]}default:throw TypeError(`Node type ${e.op} is not implemented`)}})(e,t,n,s);case"custom":const a=Uy(e.op);if(a&&a.customExecutor)return a.customExecutor(new Eb(e,t,n));throw TypeError(`Custom op ${e.op} is not registered.`);default:throw TypeError(`Unknown op '${e.op}'. File an issue at https://github.com/tensorflow/tfjs/issues so we can add it, or register a custom execution with tf.registerOp()`)}})(e,t,n);return oe(a)?a.then((e=>[].concat(e))):[].concat(a)}class Yb{constructor(e={},t={},n={},s={}){this.weightMap=e,this.tensorArrayMap=t,this.tensorListMap=n,this.functionMap=s,this.rootContext={id:0,frameName:"",iterationId:0},this.contexts=[this.rootContext],this.lastId=0,this.generateCurrentContextIds()}newFrame(e,t){return{id:e,frameName:t,iterationId:0}}set currentContext(e){this.contexts!==e&&(this.contexts=e,this.generateCurrentContextIds())}get currentContext(){return this.contexts}get currentContextId(){return this._currentContextIds[0]}get currentContextIds(){return this._currentContextIds}generateCurrentContextIds(){const e=[];for(let t=0;t<this.contexts.length-1;t++){const n=this.contexts.slice(0,this.contexts.length-t);e.push(this.contextIdforContexts(n))}e.push(""),this._currentContextIds=e}contextIdforContexts(e){return e?e.map((e=>0===e.id&&0===e.iterationId?"":`${e.frameName}-${e.iterationId}`)).join("/"):""}enterFrame(e){this.contexts&&(this.lastId++,this.contexts=this.contexts.slice(),this.contexts.push(this.newFrame(this.lastId,e)),this._currentContextIds.unshift(this.contextIdforContexts(this.contexts)))}exitFrame(){if(!(this.contexts&&this.contexts.length>1))throw new Error("Cannot exit frame, the context is empty");this.contexts=this.contexts.slice(),this.contexts.splice(-1),this.currentContextIds.shift()}nextIteration(){if(!(this.contexts&&this.contexts.length>0))throw new Error("Cannot increase frame iteration, the context is empty");{this.contexts=this.contexts.slice(),this.lastId++;const e=Object.assign({},this.contexts[this.contexts.length-1]);e.iterationId+=1,e.id=this.lastId,this.contexts.splice(-1,1,e),this._currentContextIds.splice(0,1,this.contextIdforContexts(this.contexts))}}getWeight(e){return this.weightMap[e]}addTensorArray(e){this.tensorArrayMap[e.id]=e}getTensorArray(e){return this.tensorArrayMap[e]}addTensorList(e){this.tensorListMap[e.id]=e}getTensorList(e){return this.tensorListMap[e]}dispose(e){for(const t in this.tensorArrayMap)this.tensorArrayMap[t].clearAndClose(e);for(const t in this.tensorListMap)this.tensorListMap[t].clearAndClose(e)}}function Jb(e,t,n,s){const a=new Set,r=[];let i=null,o=null;const l=new Set,u=Object.keys(e).map((e=>qy(e)[0]));let c=[];null!=s&&(c=s.map((e=>qy(e.name)[0])));const h=[...t];for(;h.length>0;){const 
e=h.pop();(tx(e)||nx(e)||sx(e))&&null==i&&(i=e,o=i.children.map((e=>e.name)).filter((e=>a.has(e)))),a.add(e.name),null==n[e.name]&&-1===u.indexOf(e.name)&&-1===c.indexOf(e.name)&&(0!==e.inputs.length?e.inputs.forEach((e=>{l.has(e.name)||(l.add(e.name),h.push(e))})):r.push(e.name))}return{inputs:e,outputs:t,usedNodes:a,missingInputs:r,dynamicNode:i,syncInputs:o}}const Zb=["Switch","Merge","Enter","Exit","NextIteration","StatelessIf","StatelessWhile","if","While"],Qb=["NonMaxSuppressionV2","NonMaxSuppressionV3","NonMaxSuppressionV5","Where"],ex=["HashTable","HashTableV2","LookupTableImport","LookupTableImportV2","LookupTableFind","LookupTableFindV2","LookupTableSize","LookupTableSizeV2"];function tx(e){return Zb.indexOf(e.op)>=0}function nx(e){return Qb.indexOf(e.op)>=0}function sx(e){return ex.indexOf(e.op)>=0}class ax{constructor(e,t){this.graph=e,this.parent=t,this.compiledMap=new Map,this._weightMap={},this.SEPERATOR=",",this._functions={},this._functionExecutorMap={},this._outputs=e.outputs,this._inputs=e.inputs,this._initNodes=e.initNodes,this._signature=e.signature,this._functions=e.functions,null!=e.functions&&Object.keys(e.functions).forEach((t=>{this._functionExecutorMap[t]=new ax(e.functions[t],this)}))}get weightIds(){return this.parent?this.parent.weightIds:this._weightIds}get functionExecutorMap(){return this.parent?this.parent.functionExecutorMap:this._functionExecutorMap}get weightMap(){return this.parent?this.parent.weightMap:this._weightMap}set weightMap(e){const t=Object.keys(e).map((t=>e[t].map((e=>e.id))));this._weightIds=[].concat(...t),this._weightMap=e}set resourceManager(e){this._resourceManager=e}get inputs(){return this._inputs.map((e=>({name:e.name,shape:e.attrParams.shape?e.attrParams.shape.value:void 0,dtype:e.attrParams.dtype?e.attrParams.dtype.value:void 0})))}get outputs(){return this._outputs.map((e=>({name:e.name,shape:e.attrParams.shape?e.attrParams.shape.value:void 0,dtype:e.attrParams.dtype?e.attrParams.dtype.value:void 0})))}get inputNodes(){return this._inputs.map((e=>e.signatureKey||e.name))}get outputNodes(){return this._outputs.map((e=>{const t=e.signatureKey||e.name;return e.defaultOutput?`${t}:${e.defaultOutput}`:t}))}get functions(){return Object.keys(this._functions).reduce(((e,t)=>(e[t]=this._functions[t].signature,e)),{})}getCompilationKey(e,t){const n=e.map((e=>e.name)).sort(),s=t.map((e=>e.name)).sort();return n.join(this.SEPERATOR)+"--"+s.join(this.SEPERATOR)}compile(e,t){const n=Jb(e,t,this.weightMap,this._initNodes),{missingInputs:s,dynamicNode:a,syncInputs:r}=n;if(null!=a)throw new Error(`This execution contains the node '${a.name}', which has the dynamic op '${a.op}'. Please use model.executeAsync() instead. Alternatively, to avoid the dynamic ops, specify the inputs [${r}]`);if(s.length>0){const n=t.map((e=>e.name)),a=Object.keys(e);throw new Error(`Cannot compute the outputs [${n}] from the provided inputs [${a}]. 
Missing the following inputs: [${s}]`)}return function(e,t,n){const{usedNodes:s,inputs:a}=n,r=[],i=Object.keys(a).map((e=>qy(e)[0])).map((t=>e.nodes[t])),o=e.initNodes;i.forEach((e=>{s.has(e.name)&&r.push(e)})),e.weights.forEach((e=>{s.has(e.name)&&r.push(e)})),null!=o&&o.forEach((e=>{s.has(e.name)&&r.push(e)}));const l=new Set,u=[];for(;r.length>0;){const e=r.pop();l.add(e.name),t[e.name]||u.push(e),e.children.forEach((e=>{!l.has(e.name)&&s.has(e.name)&&e.inputs.every((e=>l.has(e.name)))&&r.push(e)}))}return u}(this.graph,this.weightMap,n)}execute(e,t){e=this.mapInputs(e);const n=Object.keys(e).sort();this.checkInputs(e),this.checkInputShapeAndType(e),t=this.mapOutputs(t),this.checkOutputs(t);const s=n.map((e=>this.graph.nodes[qy(e)[0]])),a=t.map((e=>qy(e)[0]));let r=a.map((e=>this.graph.nodes[e]));0===r.length&&(r=this._outputs);const i=this.getCompilationKey(s,r);let o=this.compiledMap.get(i);null==o&&(o=this.compile(e,r),this.compiledMap.set(i,o));const l={},u={};return Cr((()=>{const n=new Yb(this.weightMap,l,u,this.functionExecutorMap),s=Object.assign({},this.weightMap);Object.keys(e).forEach((t=>{const[n,a]=qy(t),r=[];r[a]=e[t],s[n]=r}));const r=this.getFrozenTensorIds(s),i={};for(let e=0;e<o.length;e++){const t=o[e];if(!s[t.name]){const e=Xb(t,s,n,this._resourceManager);if(oe(e))throw new Error(`The execution of the op '${t.op}' returned a promise. Please use model.executeAsync() instead.`);s[t.name]=e,this.checkTensorForDisposal(t.name,t,s,n,r,a,i)}}return null==this.parent&&n.dispose(r),t.map((e=>Hy(e,s,n)))}))}getFrozenTensorIds(e){const t=[].concat.apply([],Object.keys(e).map((t=>e[t])).map((e=>e.map((e=>e.id)))));return new Set(t)}checkTensorForDisposal(e,t,n,s,a,r,i){"control"!==t.category&&-1===r.indexOf(e)&&(n[e].forEach((e=>{null!=e&&(i[e.id]=(i[e.id]||0)+t.children.length)})),t.inputs.forEach((e=>{if("control"!==e.category){const t=function(e,t,n){return t[Ky(e,n.currentContextId)]}(e.name,n,s);null!=t&&t.forEach((e=>{if(e&&!a.has(e.id)){const t=i[e.id];1===t?(e.dispose(),delete i[e.id]):null!=t&&i[e.id]--}}))}})))}async executeAsync(e,t){return this._executeAsync(e,t)}async _executeAsync(e,t,n=!1,s={},a={}){n||(e=this.mapInputs(e),this.checkInputs(e),this.checkInputShapeAndType(e),t=this.mapOutputs(t),this.checkOutputs(t));const r=new Yb(this.weightMap,s,a,this.functionExecutorMap),i=await this.executeWithControlFlow(e,r,t,n),o=t.map((e=>Hy(e,i,r))),l=o.map((e=>e.id)),u=Object.keys(e).map((t=>e[t].id)),c=new Set([...l,...u,...this.weightIds]);return Object.keys(i).forEach((e=>{i[e].forEach((e=>{!e||e.isDisposed||c.has(e.id)||e.dispose()}))})),null==this.parent&&r.dispose(c),o}async executeFunctionAsync(e,t,n){const s=e.reduce(((e,t,n)=>(e[this.inputs[n].name]=t,e)),{});return this._executeAsync(s,this.outputNodes,!0,t,n)}async executeWithControlFlow(e,t,n,s){const a=Object.keys(e),r=a.map((e=>this.graph.nodes[qy(e)[0]])),i=n.map((e=>qy(e)[0]));let o=i.map((e=>this.graph.nodes[e]));0===o.length&&(o=this._outputs);const{usedNodes:l,missingInputs:u,dynamicNode:c,syncInputs:h}=Jb(e,o,this.weightMap,this._initNodes),p=[...r,...this.graph.weights,...this._initNodes||[]].map((e=>({node:e,contexts:t.currentContext}))),d=Object.assign({},this.weightMap);Object.keys(e).forEach((t=>{const[n,s]=qy(t),a=[];a[s]=e[t],d[n]=a}));const f={},m=this.getFrozenTensorIds(d),g={};for(;p.length>0;){const e=this.processStack(r,p,t,d,g,m,i,f,l);await Promise.all(e)}null!=c||s||console.warn("This model execution did not contain any nodes with control flow or dynamic output shapes. 
You can use model.execute() instead.");const y=o.filter((e=>!tx(e)&&!Hy(e.name,d,t))).map((e=>e.name));if(y.length>0){let e="";throw null!=c&&(e=`Alternatively, to avoid the dynamic ops, use model.execute() and specify the inputs [${h}]`),new Error(`Cannot compute the outputs [${y}] from the provided inputs [${a}]. Consider providing the following inputs: [${u}]. ${e}`)}return d}processStack(e,t,n,s,a,r,i,o,l){const u=[];for(;t.length>0;){const e=t.pop();n.currentContext=e.contexts;let c="";if("Enter"===e.node.op&&Gy("isConstant",e.node,s,n)&&([c]=jy(e.node.name,n)),null==s[e.node.name]){const h=Xb(e.node,s,n,this._resourceManager);c||([c]=jy(e.node.name,n));const p=n.currentContext;oe(h)?u.push(h.then((u=>(s[c]=u,n.currentContext=p,this.checkTensorForDisposal(c,e.node,s,n,r,i,o),this.processChildNodes(e.node,t,n,s,a,l),u)))):(s[c]=h,this.checkTensorForDisposal(c,e.node,s,n,r,i,o),this.processChildNodes(e.node,t,n,s,a,l))}else this.processChildNodes(e.node,t,n,s,a,l)}return u}processChildNodes(e,t,n,s,a,r){e.children.forEach((e=>{const[i]=jy(e.name,n);!a[i]&&r.has(e.name)&&("Merge"===e.op?e.inputNames.some((e=>!!Hy(e,s,n)))&&(a[i]=!0,t.push({contexts:n.currentContext,node:e})):e.inputNames.every((e=>!!Hy(e,s,n)))&&(a[i]=!0,t.push({contexts:n.currentContext,node:e})))}))}dispose(){Object.keys(this.weightMap).forEach((e=>this.weightMap[e].forEach((e=>e.dispose()))))}checkInputShapeAndType(e){Object.keys(e).forEach((t=>{const n=e[t],[s]=qy(t),a=this.graph.nodes[s];if(a.attrParams.shape&&a.attrParams.shape.value){const e=a.attrParams.shape.value;E(e.length===n.shape.length&&n.shape.every(((t,n)=>-1===e[n]||e[n]===t)),(()=>`The shape of dict['${a.name}'] provided in model.execute(dict) must be [${e}], but was [${n.shape}]`))}a.attrParams.dtype&&a.attrParams.dtype.value&&E(n.dtype===a.attrParams.dtype.value,(()=>`The dtype of dict['${a.name}'] provided in model.execute(dict) must be ${a.attrParams.dtype.value}, but was ${n.dtype}`))}))}mapInputs(e){const t={};for(const n in e)null!=this._signature&&null!=this._signature.inputs&&null!=this._signature.inputs[n]?t[this._signature.inputs[n].name]=e[n]:t[n]=e[n];return t}checkInputs(e){const t=Object.keys(e).filter((e=>{const[t]=qy(e);return null==this.graph.nodes[t]}));if(t.length>0)throw new Error(`The dict provided in model.execute(dict) has keys: [${t}] that are not part of graph`)}mapOutputs(e){return e.map((e=>null!=this._signature&&null!=this._signature.outputs&&null!=this._signature.outputs[e]?this._signature.outputs[e].name:e),{})}checkOutputs(e){e.forEach((e=>{const[t]=qy(e);if(!this.graph.nodes[t])throw new Error(`The output '${e}' is not found in the graph`)}))}}class rx{constructor(e={},t={}){this.hashTableNameToHandle=e,this.hashTableMap=t}addHashTable(e,t){this.hashTableNameToHandle[e]=t.handle,this.hashTableMap[t.id]=t}getHashTableHandleByName(e){return this.hashTableNameToHandle[e]}getHashTableById(e){return this.hashTableMap[e]}dispose(){for(const e in this.hashTableMap)this.hashTableMap[e].clearAndClose(),delete this.hashTableMap[e];for(const e in this.hashTableNameToHandle)this.hashTableNameToHandle[e].dispose(),delete this.hashTableNameToHandle[e]}}class ix{constructor(e,t={}){this.modelUrl=e,this.loadOptions=t,this.version="n/a",null==t&&(this.loadOptions={}),this.resourceManager=new rx}get modelVersion(){return this.version}get inputNodes(){return this.executor.inputNodes}get outputNodes(){return this.executor.outputNodes}get inputs(){return this.executor.inputs}get outputs(){return this.executor.outputs}get weights(){return 
this.executor.weightMap}get metadata(){return this.artifacts.userDefinedMetadata}get modelSignature(){return this.signature}findIOHandler(){const e=this.modelUrl;if(null!=e.load)this.handler=e;else if(null!=this.loadOptions.requestInit)this.handler=nr(e,this.loadOptions);else{const s=(t=e,n=this.loadOptions,wa.getLoadHandlers(t,n));if(0===s.length)s.push(nr(e,this.loadOptions));else if(s.length>1)throw new Error(`Found more than one (${s.length}) load handlers for URL '${[e]}'`);this.handler=s[0]}var t,n}async load(){if(this.findIOHandler(),null==this.handler.load)throw new Error("Cannot proceed with model loading because the IOHandler provided does not have the `load` method implemented.");const e=await this.handler.load();return this.loadSync(e)}loadSync(e){this.artifacts=e;const t=this.artifacts.modelTopology;let n;n=null!=this.artifacts.userDefinedMetadata&&null!=this.artifacts.userDefinedMetadata.signature?this.artifacts.userDefinedMetadata.signature:this.artifacts.signature,this.signature=n,this.version=`${t.versions.producer}.${t.versions.minConsumer}`;const s=function(e,t){const n={};let s,a=0;for(const r of t){const t=r.name,i=r.dtype,o=r.shape,l=D(o);let u;if("quantization"in r){const n=r.quantization;if("uint8"===n.dtype||"uint16"===n.dtype){if(!("min"in n)||!("scale"in n))throw new Error(`Weight ${r.name} with quantization ${n.dtype} doesn't have corresponding metadata min and scale.`)}else{if("float16"!==n.dtype)throw new Error(`Weight ${r.name} has unknown quantization dtype ${n.dtype}. Supported quantization dtypes are: 'uint8', 'uint16', and 'float16'.`);if("float32"!==i)throw new Error(`Weight ${r.name} is quantized with ${n.dtype} which only supports weights of type float32 not ${i}.`)}const o=pa[n.dtype],c=e.slice(a,a+l*o),h="uint8"===n.dtype?new Uint8Array(c):new Uint16Array(c);if("float32"===i)if("uint8"===n.dtype||"uint16"===n.dtype){u=new Float32Array(h.length);for(let e=0;e<h.length;e++){const t=h[e];u[e]=t*n.scale+n.min}}else{if("float16"!==n.dtype)throw new Error(`Unsupported quantization type ${n.dtype} for weight type float32.`);void 0===s&&(s=xa()),u=s(h)}else{if("int32"!==i)throw new Error(`Unsupported dtype in weight '${t}': ${i}`);if("uint8"!==n.dtype&&"uint16"!==n.dtype)throw new Error(`Unsupported quantization type ${n.dtype} for weight type int32.`);u=new Int32Array(h.length);for(let e=0;e<h.length;e++){const t=h[e];u[e]=Math.round(t*n.scale+n.min)}}a+=l*o}else if("string"===i){const t=D(r.shape);u=[];for(let n=0;n<t;n++){const t=new Uint32Array(e.slice(a,a+4))[0];a+=4;const n=new Uint8Array(e.slice(a,a+t));u.push(n),a+=t}}else{const s=pa[i],r=e.slice(a,a+l*s);if("float32"===i)u=new Float32Array(r);else if("int32"===i)u=new Int32Array(r);else if("bool"===i)u=new Uint8Array(r);else{if("complex64"!==i)throw new Error(`Unsupported dtype in weight '${t}': ${i}`);{u=new Float32Array(r);const e=new Float32Array(u.length/2),s=new Float32Array(u.length/2);for(let t=0;t<e.length;t++)e[t]=u[2*t],s[t]=u[2*t+1];const a=ha(e,o,"float32"),i=ha(s,o,"float32");n[t]=ua(a,i),a.dispose(),i.dispose()}}a+=l*s}"complex64"!==i&&(n[t]=ha(u,o,i))}return n}(this.artifacts.weightData,this.artifacts.weightSpecs);if(this.executor=new ax(fb.Instance.transformGraph(t,this.signature)),this.executor.weightMap=this.convertTensorMapToTensorsMap(s),this.executor.resourceManager=this.resourceManager,null!=e.modelInitializer&&null!=e.modelInitializer.node){const t=fb.Instance.transformGraph(e.modelInitializer);this.initializer=new 
ax(t),this.initializer.weightMap=this.executor.weightMap,this.initializer.resourceManager=this.resourceManager,this.initializer.executeAsync({},[])}return!0}async save(e,t){if("string"==typeof e){const t=ka(e);if(0===t.length)throw new Error(`Cannot find any save handlers for URL '${e}'`);if(t.length>1)throw new Error(`Found more than one (${t.length}) save handlers for URL '${e}'`);e=t[0]}if(null==e.save)throw new Error("GraphModel.save() cannot proceed because the IOHandler provided does not have the `save` attribute defined.");return e.save(this.artifacts)}predict(e,t){return this.execute(e,this.outputNodes)}normalizeInputs(e){if(!(e instanceof Ds||Array.isArray(e)))return e;if((e=Array.isArray(e)?e:[e]).length!==this.inputNodes.length)throw new Error(`Input tensor count mismatch,the graph model has ${this.inputNodes.length} placeholders, while there are ${e.length} input tensors.`);return this.inputNodes.reduce(((t,n,s)=>(t[n]=e[s],t)),{})}normalizeOutputs(e){return e=e||this.outputNodes,Array.isArray(e)?e:[e]}execute(e,t){e=this.normalizeInputs(e),t=this.normalizeOutputs(t);const n=this.executor.execute(e,t);return n.length>1?n:n[0]}async executeAsync(e,t){e=this.normalizeInputs(e),t=this.normalizeOutputs(t);const n=await this.executor.executeAsync(e,t);return n.length>1?n:n[0]}convertTensorMapToTensorsMap(e){return Object.keys(e).reduce(((t,n)=>(t[n]=[e[n]],t)),{})}dispose(){this.executor.dispose(),this.initializer&&this.initializer.dispose(),this.resourceManager.dispose()}}function ox(e,t,n=new Map,s=new Set){if(null==e)return null;if(s.has(e))throw new Error("Circular references are not supported.");if(n.has(e))return n.get(e);const a=t(e);if(a.recurse&&null!==a.value)throw new Error("A deep map function may not return both a value and recurse=true.");if(a.recurse){if(hx(e)){const a=Array.isArray(e)?[]:{};s.add(e);for(const r in e){const i=ox(e[r],t,n,s);a[r]=i}return s.delete(e),a}throw new Error(`Can't recurse into non-iterable type: ${e}`)}return n.set(e,a.value),a.value}function lx(e,t=cx){return ux(e,t)}function ux(e,t,n=new Set){const s=e[0];if(n.has(s))throw new Error("Circular references are not supported.");const a=t(e);if(a.recurse&&null!==a.value)throw new Error("A deep zip function may not return both a value and recurse=true.");if(a.recurse){if(hx(s)){const a=Array.isArray(s)?[]:{};n.add(s);for(const r in s){const s=ux(e.map((e=>e[r])),t,n);a[r]=s}return n.delete(s),a}throw new Error(`Can't recurse into non-iterable type: ${s}`)}return a.value}function cx(e){return null===e?null:hx(e[0])?{value:null,recurse:!0}:{value:e,recurse:!1}}function hx(e){return null!=e&&!ArrayBuffer.isView(e)&&(Array.isArray(e)||"object"==typeof e&&!(e instanceof Ds))}function px(e){return ox(e,dx)}function dx(e){return e instanceof Ds?{value:e.clone(),recurse:!1}:hx(e)?{value:null,recurse:!0}:{value:e,recurse:!1}}class fx{constructor(e){if(this.capacity=e,this.begin=0,this.end=0,null==e)throw new RangeError("Can't create a ring buffer of unknown capacity.");if(e<1)throw new RangeError("Can't create ring buffer of capacity < 1.");this.data=new Array(e),this.doubledCapacity=2*e}wrap(e){for(;e<0;)e+=this.doubledCapacity;return e%this.doubledCapacity}get(e){if(e<0)throw new RangeError("Can't get item at a negative index.");return this.data[e%this.capacity]}set(e,t){if(e<0)throw new RangeError("Can't set item at a negative index.");this.data[e%this.capacity]=t}length(){let e=this.end-this.begin;return e<0&&(e=this.doubledCapacity+e),e}isFull(){return this.length()===this.capacity}isEmpty(){return 
0===this.length()}push(e){if(this.isFull())throw new RangeError("Ring buffer is full.");this.set(this.end,e),this.end=this.wrap(this.end+1)}pushAll(e){for(const t of e)this.push(t)}pop(){if(this.isEmpty())throw new RangeError("Ring buffer is empty.");this.end=this.wrap(this.end-1);const e=this.get(this.end);return this.set(this.end,void 0),e}unshift(e){if(this.isFull())throw new RangeError("Ring buffer is full.");this.begin=this.wrap(this.begin-1),this.set(this.begin,e)}shift(){if(this.isEmpty())throw new RangeError("Ring buffer is empty.");const e=this.get(this.begin);return this.set(this.begin,void 0),this.begin=this.wrap(this.begin+1),e}shuffleExcise(e){if(this.isEmpty())throw new RangeError("Ring buffer is empty.");const t=this.wrap(this.begin+e),n=this.get(t);return this.set(t,this.pop()),n}}class mx extends fx{constructor(){super(mx.INITIAL_CAPACITY)}isFull(){return!1}push(e){super.isFull()&&this.expand(),super.push(e)}unshift(e){super.isFull()&&this.expand(),super.unshift(e)}expand(){const e=2*this.capacity,t=new Array(e),n=this.length();for(let e=0;e<n;e++)t[e]=this.get(this.wrap(this.begin+e));this.data=t,this.capacity=e,this.doubledCapacity=2*this.capacity,this.begin=0,this.end=n}}mx.INITIAL_CAPACITY=32;class gx{async toArray(){const e=[];let t=await this.next();for(;!t.done;)e.push(t.value),t=await this.next();return e}async toArrayForTest(){const e=this.prefetch(100),t=[];let n=await e.next();for(;!n.done;)t.push(n.value),n=await e.next();return t}async resolveFully(){let e=await this.next();for(;!e.done;)e=await this.next()}async resolveWhile(e){let t=await this.next(),n=e(t.value);for(;!t.done&&n;)t=await this.next(),n=e(t.value)}handleErrors(e){return new $x(this,e)}filter(e){return new Nx(this,e)}map(e){return new Ix(this,e)}mapAsync(e){return new Cx(this,e)}serialMapAsync(e){return new Cx(this,e).serial()}flatmap(e){return new Tx(this,e)}async forEachAsync(e){return this.map(e).resolveFully()}async serialForEach(e){return this.serialMapAsync(e).resolveWhile((e=>!0===e))}rowMajorBatch(e,t=!0){return new vx(this,e,t)}columnMajorBatch(e,t=!0,n=cx){return this.rowMajorBatch(e,t).map((e=>lx(e,n)))}concatenate(e,t){return new Ex(new yx([this,e]),t)}take(e){return e<0||null==e?this:new kx(this,e)}skip(e){return e<0||null==e?this:new wx(this,e)}prefetch(e){return new Fx(this,e)}shuffle(e,t){return new Dx(this,e,t)}serial(){return new xx(this)}}class yx extends gx{constructor(e){super(),this.items=e,this.trav=0}summary(){return`Array of ${this.items.length} items`}async next(){if(this.trav>=this.items.length)return{value:null,done:!0};const e=this.items[this.trav];return this.trav++,{value:px(e),done:!1}}}class bx extends gx{constructor(e){super(),this.nextFn=e}summary(){return"Function call"}async next(){try{return this.nextFn()}catch(e){throw e.message=`Error thrown while iterating through a dataset: ${e.message}`,e}}}class xx extends gx{constructor(e){super(),this.upstream=e,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Serial`}async next(){return this.lastRead=this.lastRead.then((()=>this.serialNext())),this.lastRead}async serialNext(){return this.upstream.next()}}class wx extends gx{constructor(e,t){super(),this.upstream=e,this.maxCount=t,this.count=0,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Skip`}async next(){return this.lastRead=this.lastRead.then((()=>this.serialNext())),this.lastRead}async serialNext(){for(;this.count++<this.maxCount;){const e=await 
this.upstream.next();if(e.done)return e;Sr(e.value)}return this.upstream.next()}}class kx extends gx{constructor(e,t){super(),this.upstream=e,this.maxCount=t,this.count=0}summary(){return`${this.upstream.summary()} -> Take`}async next(){return this.count++>=this.maxCount?{value:null,done:!0}:this.upstream.next()}}class vx extends gx{constructor(e,t,n=!0){super(),this.upstream=e,this.batchSize=t,this.enableSmallLastBatch=n,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> RowMajorBatch`}async next(){return this.lastRead=this.lastRead.then((()=>this.serialNext())),this.lastRead}async serialNext(){const e=[];for(;e.length<this.batchSize;){const t=await this.upstream.next();if(t.done)return this.enableSmallLastBatch&&e.length>0?{value:e,done:!1}:{value:null,done:!0};e.push(t.value)}return{value:e,done:!1}}}class Nx extends gx{constructor(e,t){super(),this.upstream=e,this.predicate=t,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Filter`}async next(){return this.lastRead=this.lastRead.then((()=>this.serialNext())),this.lastRead}async serialNext(){for(;;){const e=await this.upstream.next();if(e.done||this.predicate(e.value))return e;Sr(e.value)}}}class Ix extends gx{constructor(e,t){super(),this.upstream=e,this.transform=t}summary(){return`${this.upstream.summary()} -> Map`}async next(){const e=await this.upstream.next();if(e.done)return{value:null,done:!0};const t=js(e.value),n=this.transform(e.value),s=js(n);for(const e of t)Hs(e,s)||e.dispose();return{value:n,done:!1}}}class $x extends gx{constructor(e,t){super(),this.upstream=e,this.handler=t,this.count=0,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> handleErrors`}async next(){return this.lastRead=this.lastRead.then((()=>this.serialNext())),this.lastRead}async serialNext(){for(;;)try{return await this.upstream.next()}catch(e){if(!this.handler(e))return{value:null,done:!0}}}}class Cx extends gx{constructor(e,t){super(),this.upstream=e,this.transform=t}summary(){return`${this.upstream.summary()} -> AsyncMap`}async next(){const e=await this.upstream.next();if(e.done)return{value:null,done:!0};const t=js(e.value),n=await this.transform(e.value),s=js(n);for(const e of t)Hs(e,s)||e.dispose();return{value:n,done:!1}}}class Sx extends gx{constructor(){super(),this.outputQueue=new mx,this.lastRead=Promise.resolve({value:null,done:!1})}async next(){return this.lastRead=this.lastRead.then((()=>this.serialNext())),this.lastRead}async serialNext(){for(;0===this.outputQueue.length();)if(!await this.pump())return{value:null,done:!0};return{value:this.outputQueue.shift(),done:!1}}}class Tx extends Sx{constructor(e,t){super(),this.upstream=e,this.transform=t}summary(){return`${this.upstream.summary()} -> Flatmap`}async pump(){const e=await this.upstream.next();if(e.done)return!1;const t=js(e.value),n=this.transform(e.value),s=js(n);this.outputQueue.pushAll(n);for(const e of t)Hs(e,s)||e.dispose();return!0}}class Ex extends gx{constructor(e,t){super(),this.baseErrorHandler=t,this.lastRead=null,this.iterator=null,this.moreIterators=e}summary(){return"TODO: fill in upstream of chained summaries -> Chained"}async next(){return this.lastRead=this.readFromChain(this.lastRead),this.lastRead}async readFromChain(e){if(await e,null==this.iterator){const e=await 
this.moreIterators.next();if(e.done)return{value:null,done:!0};this.iterator=e.value,null!=this.baseErrorHandler&&(this.iterator=this.iterator.handleErrors(this.baseErrorHandler))}const t=await this.iterator.next();return t.done?(this.iterator=null,this.readFromChain(e)):t}}var Ax,Rx;(Rx=Ax||(Ax={}))[Rx.FAIL=0]="FAIL",Rx[Rx.SHORTEST=1]="SHORTEST",Rx[Rx.LONGEST=2]="LONGEST";class Fx extends gx{constructor(e,t){super(),this.upstream=e,this.bufferSize=t,this.buffer=new fx(t)}summary(){return`${this.upstream.summary()} -> Prefetch`}refill(){for(;!this.buffer.isFull();){const e=this.upstream.next();this.buffer.push(e)}}next(){return this.refill(),this.buffer.shift()}}class Dx extends Fx{constructor(e,t,n){super(e,t),this.upstream=e,this.windowSize=t,this.upstreamExhausted=!1,this.random=ho.alea(n||bs().toString()),this.lastRead=Promise.resolve({value:null,done:!1})}async next(){return this.lastRead=this.lastRead.then((()=>this.serialNext())),this.lastRead}randomInt(e){return Math.floor(this.random()*e)}chooseIndex(){return this.randomInt(this.buffer.length())}async serialNext(){for(this.upstreamExhausted||this.refill();!this.buffer.isEmpty();){const e=this.chooseIndex(),t=await this.buffer.shuffleExcise(e);if(!t.done)return this.refill(),t;this.upstreamExhausted=!0}return{value:null,done:!0}}}class _x{constructor(){this.size=null}batch(e,t=!0){const n=this;let s;return E(e>0,(()=>`batchSize needs to be positive, but it is\n ${e}`)),s=this.size===1/0||null==this.size?this.size:t?Math.ceil(this.size/e):Math.floor(this.size/e),Ox((async()=>(await n.iterator()).columnMajorBatch(e,t,Mx)),s)}concatenate(e){const t=this;let n;return n=this.size===1/0||e.size===1/0?1/0:null!=this.size&&null!=e.size?this.size+e.size:null,Ox((async()=>(await t.iterator()).concatenate(await e.iterator())),n)}filter(e){const t=this;let n;return n=this.size===1/0?1/0:null,Ox((async()=>(await t.iterator()).filter((t=>Cr((()=>e(t)))))),n)}async forEachAsync(e){return(await this.iterator()).forEachAsync(e)}map(e){const t=this;return Ox((async()=>(await t.iterator()).map((t=>Cr((()=>e(t)))))),this.size)}mapAsync(e){const t=this;return Ox((async()=>(await t.iterator()).mapAsync(e)),this.size)}prefetch(e){if(null==e)throw new RangeError("`Dataset.prefetch()` requires bufferSize to be specified.");const t=this;return Ox((async()=>(await t.iterator()).prefetch(e)),this.size)}repeat(e){const t=this;let n;return n=null!=this.size&&e>0?this.size*e:0===e?0:null!=this.size&&(void 0===e||e<0)?1/0:null,Ox((async()=>{return n=(a=async()=>({value:await t.iterator(),done:!1}),new bx(a)).take(e),new Ex(n,s);var n,s,a}),n)}skip(e){const t=this;let n;return n=null!=this.size&&e>=0&&this.size>=e?this.size-e:null!=this.size&&(this.size<e||void 0===e||e<0)?0:null,Ox((async()=>(await t.iterator()).skip(e)),n)}shuffle(e,t,n=!0){if(null==e||e<0)throw null==this.size?new RangeError("`Dataset.shuffle()` requires bufferSize to be specified."):new RangeError(`\`Dataset.shuffle()\` requires bufferSize to be specified. 
If your data fits in main memory (for regular JS objects), and/or GPU memory (for \`tf.Tensor\`s), consider setting bufferSize to the dataset size (${this.size} elements)`);const s=this,a=ho.alea(t||bs().toString());return Ox((async()=>{let t=a.int32();return n&&(t+=a.int32()),(await s.iterator()).shuffle(e,t.toString())}),this.size)}take(e){const t=this;let n;return n=null!=this.size&&this.size>e?e:null!=this.size&&this.size<=e?this.size:null,Ox((async()=>(await t.iterator()).take(e)),n)}async toArray(){if(this.size===1/0)throw new Error("Can not convert infinite data stream to array.");return(await this.iterator()).toArray()}async toArrayForTest(){if(this.size===1/0)throw new Error("Can not convert infinite data stream to array.");return(await this.iterator()).toArrayForTest()}}function Ox(e,t=null){return new class extends _x{constructor(){super(...arguments),this.size=t}async iterator(){return e()}}}function Mx(e){if(null===e)return null;return null==(t=e[0])||null===(n=t)||"object"!=typeof n&&"function"!=typeof n||Array.isArray(t)||"object"==typeof t&&t instanceof Ds||H(t)?{value:function(e){if(0===e.length)throw new Error("Can't make a batch of zero elements.");return e[0]instanceof Ds?Fo(e):ha(e)}(e),recurse:!1}:{value:null,recurse:!0};var t,n}function Lx(e,t){Array.isArray(e)||(e=[e]),e.forEach((e=>{null!=e&&E("complex64"!==e.dtype,(()=>`${t} does not support complex64 tensors in the CPU backend.`))}))}_x.MAX_BUFFER_SIZE=1e4,Symbol("out"),Symbol("field"),Symbol("quote"),Symbol("quoteafterquote"),Symbol("quoteinquote");const zx=Wu;class Px extends I{constructor(){super(),this.blockSize=48,this.firstUse=!0,this.data=new N(this,Ir())}nextDataId(){return Px.nextDataId++}write(e,t,n){this.firstUse&&(this.firstUse=!1,ue().get("IS_NODE")&&$u("\n============================\nHi there 👋. Looks like you are running TensorFlow.js in Node.js. To speed things up dramatically, install our node backend, which binds to TensorFlow C++, by running npm i @tensorflow/tfjs-node, or npm i @tensorflow/tfjs-node-gpu if you have CUDA. Then call require('@tensorflow/tfjs-node'); (-gpu suffix for CUDA) at the start of your program. 
Visit https://github.com/tensorflow/tfjs-node for more details.\n============================"));const s={id:this.nextDataId()};return this.data.set(s,{values:e,dtype:n,refCount:1}),s}makeTensorInfo(e,t,n){let s;if("string"===t&&null!=n&&n.length>0&&K(n[0])){const a=n.map((e=>xs(e)));s=this.write(a,e,t)}else s=this.write(n,e,t);return{dataId:s,shape:e,dtype:t}}refCount(e){return this.data.has(e)?this.data.get(e).refCount:0}incRef(e){this.data.get(e).refCount++}decRef(e){this.data.has(e)&&this.data.get(e).refCount--}move(e,t,n,s,a){this.data.set(e,{values:t,dtype:s,refCount:a})}numDataIds(){return this.data.numDataIds()}async read(e){return this.readSync(e)}readSync(e){const{dtype:t,complexTensorInfos:n}=this.data.get(e);return"complex64"===t?Su(this.readSync(n.real.dataId),this.readSync(n.imag.dataId)):this.data.get(e).values}bufferSync(e){const t=this.readSync(e.dataId);let n=t;if("string"===e.dtype)try{n=t.map((e=>ws(e)))}catch(e){throw new Error("Failed to decode encoded string bytes into utf-8")}return Ha(e.shape,e.dtype,n)}makeOutput(e,t,n){const s=this.write(e,t,n);return Ir().makeTensorFromDataId(s,t,n,this)}disposeData(e,t=!1){if(this.data.has(e)){if(this.data.get(e).refCount--,!t&&this.data.get(e).refCount>0)return!1;const{complexTensorInfos:n}=this.data.get(e);null!=n&&(this.disposeData(n.real.dataId,!0),this.disposeData(n.imag.dataId,!0)),this.data.delete(e)}return!0}disposeIntermediateTensorInfo(e){this.disposeData(e.dataId)}async time(e){const t=bs();return e(),{kernelMs:bs()-t}}memory(){return{unreliable:!0,reasons:["The reported memory is an upper bound. Due to automatic garbage collection, the true allocated memory may be less."]}}where(e){Lx([e],"where");const t=this.readSync(e.dataId);return zx(e.shape,t)}dispose(){}floatPrecision(){return 32}epsilon(){return super.epsilon()}}function Bx(e,t,n){return({inputs:s,attrs:a,backend:r})=>{const{x:i}=s;if(Lx(i,e),"string"===i.dtype||"string"===n)throw new Error("unaryKernelFunc does not support string input/output");const o=r,l=o.data.get(i.dataId).values,u=D(i.shape),c=n||i.dtype,h=U(c,u);for(let e=0;e<u;++e)h[e]=t(l[e],a);return o.makeTensorInfo(i.shape,c,h)}}function Wx(e,t,n){return({inputs:s,attrs:a,backend:r})=>{const{x:i}=s;if(Lx(i,e),"string"===i.dtype||"string"===n)throw new Error("unaryKernelFunc does not support string input/output");const o=r,l=o.data.get(i.dataId).values,u=n||i.dtype,c=t(l,u,a);return o.makeTensorInfo(i.shape,u,c)}}Px.nextDataId=0,Ar("cpu",(()=>new Px),1);const Vx=Bx(ot,(e=>e>=0?e:Math.exp(e)-1)),Ux={kernelName:ot,backendName:"cpu",kernelFunc:Vx};function Gx(e){const{inputs:t,backend:n}=e,{x:s}=t;return n.incRef(s.dataId),{dataId:s.dataId,shape:s.shape,dtype:s.dtype}}const Hx={kernelName:It,backendName:"cpu",kernelFunc:Gx};function jx(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{alpha:r}=s;Lx([a],"leakyRelu");const i=D(a.shape),o=n.data.get(a.dataId).values,l=V("float32",i);for(let e=0;e<o.length;e++)l[e]=o[e]<0?r*o[e]:o[e];return n.makeTensorInfo(a.shape,"float32",l)}const Kx={kernelName:At,backendName:"cpu",kernelFunc:jx};function qx(e){return(t,n,s,a,r)=>{const i=vi(t,n),o=i.length,l=Z(i),u=V(r,D(i)),c=t.length,h=n.length,p=Z(t),d=Z(n),f=wi(t,i),m=wi(n,i);if(f.length+m.length===0)for(let t=0;t<u.length;++t)u[t]=e(s[t%s.length],a[t%a.length]);else for(let t=0;t<u.length;++t){const n=ie(t,o,l),r=n.slice(-c);f.forEach((e=>r[e]=0));const i=re(r,c,p),g=n.slice(-h);m.forEach((e=>g[e]=0));const y=re(g,h,d);u[t]=e(s[i],a[y])}return[u,i]}}const Xx=qx(((e,t)=>e<0?t*e:e));function 
Yx(e){const{inputs:t,backend:n}=e,{x:s,alpha:a}=t;Lx([s,a],"prelu");const r=n.data.get(s.dataId).values,i=n.data.get(a.dataId).values,[o,l]=Xx(s.shape,a.shape,r,i,s.dtype);return n.makeTensorInfo(l,s.dtype,o)}const Jx={kernelName:pn,backendName:"cpu",kernelFunc:Yx},Zx=Bx(yn,(e=>Math.max(0,e))),Qx={kernelName:yn,backendName:"cpu",kernelFunc:Zx},ew=Bx(Nn,(e=>Math.min(Math.max(0,e),6))),tw={kernelName:Nn,backendName:"cpu",kernelFunc:ew};function nw(e,t,n,s,a){if("linear"===n)return Gx({inputs:{x:t},backend:e});if("relu"===n)return Zx({inputs:{x:t},backend:e});if("elu"===n)return Vx({inputs:{x:t},backend:e});if("relu6"===n)return ew({inputs:{x:t},backend:e});if("prelu"===n)return Yx({inputs:{x:t,alpha:s},backend:e});if("leakyrelu"===n)return jx({inputs:{x:t},backend:e,attrs:{alpha:a}});throw new Error(`Activation ${n} has not been implemented for the CPU backend.`)}function sw(e){const{inputs:t,backend:n}=e,{real:s,imag:a}=t,r=n.data.get(s.dataId).values,i=n.data.get(a.dataId).values,o=n.makeTensorInfo(s.shape,"complex64");return n.data.get(o.dataId).complexTensorInfos={real:n.makeTensorInfo(s.shape,"float32",r),imag:n.makeTensorInfo(a.shape,"float32",i)},o}const aw={kernelName:ze,backendName:"cpu",kernelFunc:sw};function rw(e,t,n="float32"){if("complex64"===n)return sw({inputs:{real:rw(e,t,"float32"),imag:rw(e,t,"float32")},backend:e});const s=ne(D(t),n);return e.makeTensorInfo(t,n,s)}function iw(e){const{inputs:t,backend:n}=e,{input:s}=t,a=n.data.get(s.dataId).complexTensorInfos.real,r=n.data.get(a.dataId).values;return n.makeTensorInfo(a.shape,a.dtype,r)}const ow={kernelName:mn,backendName:"cpu",kernelFunc:iw};function lw(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{dtype:r}=s;if("complex64"===r){if("complex64"===a.dtype)return Gx({inputs:{x:a},backend:n});const e=rw(n,a.shape,a.dtype),t=lw({inputs:{x:a},backend:n,attrs:{dtype:"float32"}}),s=sw({inputs:{real:t,imag:e},backend:n});return n.disposeIntermediateTensorInfo(e),n.disposeIntermediateTensorInfo(t),s}if("complex64"===a.dtype){const e=iw({inputs:{input:a},backend:n}),t=lw({inputs:{x:e},backend:n,attrs:{dtype:r}});return n.disposeIntermediateTensorInfo(e),t}if(!G(a.dtype,r)){const e=Gx({inputs:{x:a},backend:n});return{dataId:e.dataId,shape:e.shape,dtype:r}}if("int32"===r){const e=n.data.get(a.dataId).values,t=Int32Array.from(e);return n.makeTensorInfo(a.shape,"int32",t)}if("bool"===r){const e=n.data.get(a.dataId).values,t=ys([0],a.dtype),[s,r]=qx(((e,t)=>e!==t?1:0))(a.shape,[],e,t,"bool");return n.makeTensorInfo(r,"bool",s)}throw new Error(`Error in Cast: failed to cast ${a.dtype} to ${r}`)}const uw={kernelName:Oe,backendName:"cpu",kernelFunc:lw};function cw(e,t,n,s){return null==n?({inputs:n,backend:a})=>{const{a:r,b:i}=n,o=a;Lx([r,i],e);const l=o.data.get(r.dataId).values,u=o.data.get(i.dataId).values,c=s||r.dtype,[h,p]=t(r.shape,i.shape,l,u,c);return o.makeTensorInfo(p,c,h)}:({inputs:e,backend:a})=>{const{a:r,b:i}=e,o=a;if("complex64"===r.dtype||"complex64"===i.dtype){const e=lw({inputs:{x:r},backend:o,attrs:{dtype:"complex64"}}),t=o.data.get(e.dataId),s=t.complexTensorInfos.real,a=t.complexTensorInfos.imag,l=o.data.get(s.dataId).values,u=o.data.get(a.dataId).values,c=lw({inputs:{x:i},backend:o,attrs:{dtype:"complex64"}}),h=o.data.get(c.dataId),p=h.complexTensorInfos.real,d=h.complexTensorInfos.imag,f=o.data.get(p.dataId).values,m=o.data.get(d.dataId).values,[g,y,b]=n(r.shape,i.shape,l,u,f,m),x=o.makeTensorInfo(b,"float32",g),w=o.makeTensorInfo(b,"float32",y),k=sw({inputs:{real:x,imag:w},backend:o});return 
o.disposeIntermediateTensorInfo(e),o.disposeIntermediateTensorInfo(c),o.disposeIntermediateTensorInfo(x),o.disposeIntermediateTensorInfo(w),k}{const e=o.data.get(r.dataId).values,n=o.data.get(i.dataId).values,a=s||r.dtype,[l,u]=t(r.shape,i.shape,e,n,a);return o.makeTensorInfo(u,a,l)}}}function hw(e){return(t,n,s,a,r,i)=>{const o=vi(t,n),l=D(o),u=o.length,c=Z(o),h=V("float32",l),p=V("float32",l),d=wi(t,o),f=wi(n,o),m=Su(s,a),g=Su(r,i),y=t.length,b=Z(t),x=n.length,w=Z(n);if(d.length+f.length===0)for(let t=0;t<h.length;t++){const n=t%m.length,s=t%g.length,a=e(m[2*n],m[2*n+1],g[2*s],g[2*s+1]);h[t]=a.real,p[t]=a.imag}else for(let t=0;t<h.length;t++){const n=ie(t,u,c),s=n.slice(-y);d.forEach((e=>s[e]=0));const a=re(s,y,b),r=n.slice(-x);f.forEach((e=>r[e]=0));const i=re(r,x,w),o=e(m[2*a],m[2*a+1],g[2*i],g[2*i+1]);h[t]=o.real,p[t]=o.imag}return[h,p,o]}}const pw=qx(((e,t)=>e+t)),dw=hw(((e,t,n,s)=>({real:e+n,imag:t+s}))),fw=cw(ye,pw,dw),mw={kernelName:ye,backendName:"cpu",kernelFunc:fw};function gw(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{shape:r}=s,i=D(a.shape),o=P(r,i),l=D(o);E(i===l,(()=>`The new shape (${o}) has ${l} elements and the old shape (${a.shape}) has ${i} elements. The new shape and old shape must have the same number of elements.`)),n.incRef(a.dataId);const u=n.data.get(a.dataId);if(null!=u.complexTensorInfos){const e=u.complexTensorInfos.real,t=u.complexTensorInfos.imag;e.shape=o,t.shape=o}return{dataId:a.dataId,shape:o,dtype:a.dtype}}const yw={kernelName:bn,backendName:"cpu",kernelFunc:gw};function bw(e){const{inputs:t,backend:n,attrs:s}=e,{a,b:r}=t,{transposeA:i,transposeB:o}=s;Lx([a,r],"matMul");const l=a.shape.length,u=r.shape.length,c=i?a.shape[l-2]:a.shape[l-1],h=o?r.shape[u-1]:r.shape[u-2],p=i?a.shape[l-1]:a.shape[l-2],d=o?r.shape[u-2]:r.shape[u-1],f=a.shape.slice(0,-2),m=r.shape.slice(0,-2),g=D(f),y=D(m);E(l>=2&&u>=2&&(g===y||1===g||1===y),(()=>`Error in matMul: the input batch dimensions must either be the same or at least one input batch dimension must be 1. 
Got input batch dimensions of (${f}) and (${m}).`));const b=(g>y?a.shape.slice(0,-2):r.shape.slice(0,-2)).concat([p,d]);E(c===h,(()=>`Error in matMul: inner shapes (${c}) and (${h}) of Tensors with shapes ${a.shape} and ${r.shape} and transposeA=${i} and transposeB=${o} must match.`));const x=o?[y,d,h]:[y,h,d],w=gw({inputs:{x:a},backend:n,attrs:{shape:i?[g,c,p]:[g,p,c]}}),k=gw({inputs:{x:r},backend:n,attrs:{shape:x}}),v=i?w.shape[1]:w.shape[2],N=i?w.shape[2]:w.shape[1],I=o?k.shape[1]:k.shape[2],$=Math.max(g,y),C=n.data.get(w.dataId).values,S=n.data.get(k.dataId).values,T=Z(w.shape),A=Z(k.shape),[R,F,_]=i?[T[0],1,T[1]]:[T[0],T[1],1],[O,M,L]=o?[1,A[1],A[0]]:[A[1],1,A[0]],z=N*I,P=Ha([$,N,I],w.dtype),B=P.values,W=n.blockSize;for(let e=0;e<$;e++)for(let t=0;t<N;t+=W)for(let n=0;n<I;n+=W)for(let s=0;s<v;s+=W){const a=Math.min(t+W,N),r=Math.min(n+W,I),i=Math.min(s+W,v);for(let o=t;o<a;o++)for(let t=n;t<r;t++){let n=0;for(let a=s;a<i;a++){const s=Math.min(e,g-1)*R,r=Math.min(e,y-1)*L;n+=C[s+o*F+a*_]*S[a*O+t*M+r]}B[e*z+(o*I+t)]+=n}}return n.disposeIntermediateTensorInfo(w),n.disposeIntermediateTensorInfo(k),n.makeTensorInfo(b,P.dtype,P.values)}const xw={kernelName:Fe,backendName:"cpu",kernelFunc:bw},ww={kernelName:rs,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{a,b:r,bias:i,preluActivationWeights:o}=t,{transposeA:l,transposeB:u,activation:c,leakyreluAlpha:h}=s;let p,d,f;const m=[];p=bw({inputs:{a,b:r},attrs:{transposeA:l,transposeB:u},backend:n}),i&&(d=fw({inputs:{a:p,b:i},backend:n}),m.push(p),p=d),c&&(f=nw(n,p,c,o,h),m.push(p),p=f);for(const e of m)n.disposeIntermediateTensorInfo(e);return p}};function kw(e){const t=new Float32Array(e.length);for(let n=0;n<e.length;++n)t[n]=Math.abs(e[n]);return t}const vw={kernelName:fe,backendName:"cpu",kernelFunc:e=>{const{x:t}=e.inputs,n=e.backend;Lx(t,"abs");let s=new Float32Array(D(t.shape));return s=kw(n.data.get(t.dataId).values),n.makeOutput(s,t.shape,"float32")}},Nw=Bx(me,(e=>Math.acos(e))),Iw={kernelName:me,backendName:"cpu",kernelFunc:Nw},$w=Bx(ge,(e=>Math.acosh(e))),Cw={kernelName:ge,backendName:"cpu",kernelFunc:$w},Sw={kernelName:be,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n}=e,s=t;Lx(t,"addN");const a=s.map((e=>n.data.get(e.dataId).values)),r=Ha(s[0].shape,s[0].dtype),i=r.values;for(let e=0;e<s.length;e++){const t=a[e];for(let e=0;e<i.length;e++)i[e]+=t[e]}return n.makeTensorInfo(r.shape,r.dtype,r.values)}};function Tw(e,t,n,s,a){const r=t.length,i=D(t),o=Z(t),l=Z(a),u=V(n,D(a));for(let t=0;t<i;++t){const n=ie(t,r,o),a=new Array(n.length);for(let e=0;e<a.length;e++)a[e]=n[s[e]];u[re(a,r,l)]=e[t]}return u}function Ew(e){const{inputs:t,attrs:n,backend:s}=e,{x:a}=t,{perm:r}=n;Lx(a,"transpose");const i=a.shape.length,o=new Array(i);for(let e=0;e<o.length;e++)o[e]=a.shape[r[e]];const l=Tw(s.data.get(a.dataId).values,a.shape,a.dtype,r,o);return{dataId:s.write(l,o,a.dtype),shape:o,dtype:a.dtype}}const Aw={kernelName:Jn,backendName:"cpu",kernelFunc:Ew},Rw={kernelName:xe,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{axis:r,keepDims:i}=s;Lx(a,"all");const o=B(r,a.shape);let l=o;const u=Zi(l,a.shape.length);let c=a;null!=u&&(c=Ew({inputs:{x:a},backend:n,attrs:{perm:u}}),l=eo(l.length,a.shape.length)),Ji("all",l,c.shape.length);const[h,p]=Xi(c.shape,l),d=D(p),f=ne(D(h),c.dtype),m=n.data.get(c.dataId).values;for(let e=0;e<f.length;++e){const t=e*d;let n=m[t];for(let e=0;e<d;++e){const s=m[t+e];n=n&&s}f[e]=n}null!=u&&n.disposeIntermediateTensorInfo(c);const 
g=n.makeTensorInfo(h,c.dtype,f);if(i){const e=gw({inputs:{x:g},backend:n,attrs:{shape:Yi(h,o)}});return n.disposeIntermediateTensorInfo(g),e}return g}},Fw={kernelName:we,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{axis:r,keepDims:i}=s;Lx(a,"any");const o=B(r,a.shape);let l=o;const u=Zi(l,a.shape.length);let c=a;null!=u&&(c=Ew({inputs:{x:a},backend:n,attrs:{perm:u}}),l=eo(l.length,a.shape.length)),Ji("any",l,c.shape.length);const[h,p]=Xi(c.shape,l),d=D(p),f=ne(D(h),c.dtype),m=n.data.get(c.dataId).values;for(let e=0;e<f.length;++e){const t=e*d;let n=m[t];for(let e=0;e<d;++e){const s=m[t+e];n=n||s}f[e]=n}null!=u&&n.disposeIntermediateTensorInfo(c);const g=n.makeTensorInfo(h,c.dtype,f);if(i){const e=gw({inputs:{x:g},backend:n,attrs:{shape:Yi(h,o)}});return n.disposeIntermediateTensorInfo(g),e}return g}},Dw={kernelName:ke,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{axis:r}=s;Lx(a,"argMax");let i=B(r,a.shape);const o=Zi(i,a.shape.length);let l=a;const u=[];null!=o&&(l=Ew({inputs:{x:a},backend:n,attrs:{perm:o}}),u.push(l),i=eo(i.length,l.shape.length)),i=[i[0]],Ji("argMax",i,l.shape.length);const[c,h]=Xi(l.shape,i),p=ne(D(c),"int32"),d=D(h),f=n.data.get(l.dataId).values;for(let e=0;e<p.length;++e){const t=e*d;let n=f[t],s=0;for(let e=0;e<d;++e){const a=f[t+e];a>n&&(n=a,s=e)}p[e]=s}return u.forEach((e=>n.disposeIntermediateTensorInfo(e))),n.makeTensorInfo(c,"int32",p)}},_w={kernelName:ve,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{axis:r}=s;Lx(a,"argMin");let i=B(r,a.shape);const o=Zi(i,a.shape.length);let l=a;const u=[];null!=o&&(l=Ew({inputs:{x:a},backend:n,attrs:{perm:o}}),u.push(l),i=eo(i.length,l.shape.length)),i=[i[0]],Ji("argMin",i,l.shape.length);const[c,h]=Xi(l.shape,i),p=ne(D(c),"int32"),d=D(h),f=n.data.get(l.dataId).values;for(let e=0;e<p.length;++e){const t=e*d;let n=f[t],s=0;for(let e=0;e<d;++e){const a=f[t+e];a<n&&(n=a,s=e)}p[e]=s}return u.forEach((e=>n.disposeIntermediateTensorInfo(e))),n.makeTensorInfo(c,"int32",p)}},Ow=Bx(Ne,(e=>Math.asin(e))),Mw={kernelName:Ne,backendName:"cpu",kernelFunc:Ow},Lw=Bx(Ie,(e=>Math.asinh(e))),zw={kernelName:Ie,backendName:"cpu",kernelFunc:Lw},Pw=Bx($e,(e=>Math.atan(e))),Bw={kernelName:$e,backendName:"cpu",kernelFunc:Pw},Ww=qx(((e,t)=>Math.atan2(e,t))),Vw=cw(Se,Ww),Uw={kernelName:Se,backendName:"cpu",kernelFunc:Vw},Gw=Bx(Ce,(e=>Math.atanh(e))),Hw={kernelName:Ce,backendName:"cpu",kernelFunc:Gw};function jw(e,t,n,s,a,r){const i=a.strideHeight,o=a.strideWidth,l=a.dilationHeight,u=a.dilationWidth,c=a.effectiveFilterHeight,h=a.effectiveFilterWidth,p=a.padInfo.top,d=a.padInfo.left,f="max"===r?Number.NEGATIVE_INFINITY:Number.POSITIVE_INFINITY,m=Ha(a.outShape,n),g=m.values,y=a.outShape[1]*a.outShape[2]*a.outShape[3],b=a.outShape[2]*a.outShape[3],x=a.outShape[3];for(let t=0;t<a.batchSize;++t){const n=t*y,m=t*s[0];for(let t=0;t<a.inChannels;++t)for(let y=0;y<a.outHeight;++y){const w=y*i-p,k=Math.max(0,w),v=Math.min(a.inHeight,c+w),N=n+y*b;for(let n=0;n<a.outWidth;++n){const i=n*o-d,c=Math.max(0,i),p=Math.min(a.inWidth,h+i);let y=f,b=0,w=0;for(let n=k;n<v;n+=l){const a=m+n*s[1];for(let n=c;n<p;n+=u){const i=e[a+n*s[2]+t];"max"===r&&i>y?y=i:"avg"===r&&(b+=i,w++)}if(isNaN(y))break}g[N+n*x+t]="avg"===r?b/w:y}}}return m}function Kw(e,t,n,s,a=!1,r=!1){const 
i=Ha(s.outShape,"int32"),o=s.strideHeight,l=s.strideWidth,u=s.dilationHeight,c=s.dilationWidth,h=s.effectiveFilterHeight,p=s.effectiveFilterWidth,d=s.padInfo.top,f=s.padInfo.left,m=Ha(t,n,e);for(let e=0;e<s.batchSize;++e)for(let t=0;t<s.inChannels;++t)for(let n=0;n<s.outHeight;++n){const g=n*o-d;let y=g;for(;y<0;)y+=u;const b=Math.min(s.inHeight,h+g);for(let o=0;o<s.outWidth;++o){const h=o*l-f;let d=h;for(;d<0;)d+=c;const x=Math.min(s.inWidth,p+h);let w=Number.NEGATIVE_INFINITY,k=-1;for(let n=y;n<b;n+=u){const i=n-g;for(let o=d;o<x;o+=c){const l=o-h,u=m.get(e,n,o,t);u>w&&(w=u,k=a?r?((e*s.inHeight+n)*s.inWidth+o)*s.inChannels+t:(n*s.inWidth+o)*s.inChannels+t:i*p+l)}}i.set(k,e,n,o,t)}}return i}function qw(e,t,n,s,a,r){const i=a.strideDepth,o=a.strideHeight,l=a.strideWidth,u=a.dilationDepth,c=a.dilationHeight,h=a.dilationWidth,p=a.effectiveFilterDepth,d=a.effectiveFilterHeight,f=a.effectiveFilterWidth,m=a.padInfo.front,g=a.padInfo.top,y=a.padInfo.left,b="max"===r?Number.NEGATIVE_INFINITY:Number.POSITIVE_INFINITY,x=Ha(a.outShape,n),w=x.values,k=a.outShape[1]*a.outShape[2]*a.outShape[3]*a.outShape[4],v=a.outShape[2]*a.outShape[3]*a.outShape[4],N=a.outShape[3]*a.outShape[4],I=a.outShape[4];for(let t=0;t<a.batchSize;++t){const n=t*k,x=t*s[0];for(let t=0;t<a.inChannels;++t)for(let k=0;k<a.outDepth;++k){const $=k*i-m;let C=$;for(;C<0;)C+=u;const S=Math.min(a.inDepth,p+$),T=n+k*v;for(let n=0;n<a.outHeight;++n){const i=n*o-g;let p=i;for(;p<0;)p+=c;const m=Math.min(a.inHeight,d+i),k=T+n*N;for(let n=0;n<a.outWidth;++n){const i=n*l-y;let o=i;for(;o<0;)o+=h;const d=Math.min(a.inWidth,f+i),g=k+n*I;let v=b,N=0,$=0;for(let n=C;n<S;n+=u){const a=x+n*s[1];for(let n=p;n<m;n+=c){const i=a+n*s[2];for(let n=o;n<d;n+=h){const a=e[i+n*s[3]+t];if("max"===r&&a>v?v=a:"avg"===r&&(N+=a,$++),isNaN(v))break}if(isNaN(v))break}if(isNaN(v))break}w[g+t]="avg"===r?N/$:v}}}}return x}const Xw={kernelName:Te,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t;Lx(a,"avgPool");const{filterSize:r,strides:i,pad:o,dimRoundingMode:l}=s;E(Yr(i,1),(()=>`Error in avgPool: Either strides or dilations must be 1. 
Got strides ${i} and dilations '1'`));const u=Br(a.shape,r,i,1,o,l);let c;if(1===u.filterWidth&&1===u.filterHeight&&_(u.inShape,u.outShape))c=Gx({inputs:{x:a},backend:n});else{const e=n.data.get(a.dataId).values,t=Z(a.shape),s=jw(e,a.shape,a.dtype,t,u,"avg");c=n.makeTensorInfo(u.outShape,a.dtype,s.values)}return c}},Yw={kernelName:Ae,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{filterSize:r,strides:i,pad:o,dimRoundingMode:l,dataFormat:u}=s;Lx(a,"avgPool3d");const c=Wr(a.shape,r,i,1,o,l,u),h=qw(n.data.get(a.dataId).values,a.shape,a.dtype,Z(a.shape),c,"avg");return n.makeTensorInfo(h.shape,"float32",h.values)}},Jw={kernelName:Re,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{dy:a,input:r}=t,{filterSize:i,strides:o,pad:l,dimRoundingMode:u}=s;Lx([a,r],"avgPool3DGrad");const c=Wr(r.shape,i,o,1,l,u),h=c.strideDepth,p=c.strideHeight,d=c.strideWidth,f=c.filterDepth,m=c.filterHeight,g=c.filterWidth,y=c.dilationDepth,b=c.dilationHeight,x=c.dilationWidth,w=c.effectiveFilterDepth,k=c.effectiveFilterHeight,v=c.effectiveFilterWidth,N=w-1-c.padInfo.front,I=v-1-c.padInfo.left,$=k-1-c.padInfo.top,C=Ha(r.shape,"float32"),S=1/(f*m*g),T=n.bufferSync(a);for(let e=0;e<c.batchSize;++e)for(let t=0;t<c.inChannels;++t)for(let n=0;n<c.inDepth;++n)for(let s=0;s<c.inHeight;++s)for(let a=0;a<c.inWidth;++a){const r=n-N,i=s-$,o=a-I;let l=0;for(let n=0;n<w;n+=y){const s=(r+n)/h;if(!(s<0||s>=c.outDepth||Math.floor(s)!==s))for(let n=0;n<k;n+=b){const a=(i+n)/p;if(!(a<0||a>=c.outHeight||Math.floor(a)!==a))for(let n=0;n<v;n+=x){const r=(o+n)/d;r<0||r>=c.outWidth||Math.floor(r)!==r||(l+=T.get(e,s,a,r,t))}}}C.set(l*S,e,n,s,a,t)}return n.makeTensorInfo(C.shape,C.dtype,C.values)}},Zw={kernelName:Ee,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{dy:a,input:r}=t,i=r;Lx([a,r],"avgPoolGrad");const{filterSize:o,strides:l,pad:u}=s,c=Br(i.shape,o,l,1,u),h=c.strideHeight,p=c.strideWidth,d=c.filterHeight,f=c.filterWidth,m=c.dilationHeight,g=c.dilationWidth,y=c.effectiveFilterHeight,b=c.effectiveFilterWidth,x=b-1-c.padInfo.left,w=y-1-c.padInfo.top,k=Ha(i.shape,"float32"),v=1/(d*f),N=n.data.get(a.dataId).values,I=Ha(a.shape,"float32",N);for(let e=0;e<c.batchSize;++e)for(let t=0;t<c.inChannels;++t)for(let n=0;n<c.inHeight;++n)for(let s=0;s<c.inWidth;++s){const a=n-w,r=s-x;let i=0;for(let n=0;n<y;n+=m){const s=(a+n)/h;if(!(s<0||s>=c.outHeight||Math.floor(s)!==s))for(let n=0;n<b;n+=g){const a=(r+n)/p;a<0||a>=c.outWidth||Math.floor(a)!==a||(i+=I.get(e,s,a,t))}}k.set(i*v,e,n,s,t)}return n.makeTensorInfo(k.shape,k.dtype,k.values)}},Qw={kernelName:xt,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,scale:r,offset:i,mean:o,variance:l}=t;E(o.shape.length===l.shape.length,(()=>"Batch normalization gradient requires mean and variance to have equal ranks.")),E(null==i||o.shape.length===i.shape.length,(()=>"Batch normalization gradient requires mean and offset to have equal ranks.")),E(null==r||o.shape.length===r.shape.length,(()=>"Batch normalization gradient requires mean and scale to have equal ranks.")),Lx([a,o,l,r,i],"batchNorm");let{varianceEpsilon:u}=s;null==u&&(u=.001);const c=n.data.get(a.dataId).values,h=n.data.get(o.dataId).values,p=n.data.get(l.dataId).values,d=r?n.data.get(r.dataId).values:new Float32Array([1]),f=i?n.data.get(i.dataId).values:new Float32Array([0]),m=new Float32Array(c.length),g=f.length,y=d.length,b=p.length,x=h.length;let w=0,k=0,v=0,N=0;for(let 
e=0;e<c.length;++e)m[e]=f[w++]+(c[e]-h[k++])*d[v++]/Math.sqrt(p[N++]+u),w>=g&&(w=0),k>=x&&(k=0),v>=y&&(v=0),N>=b&&(N=0);return n.makeTensorInfo(a.shape,a.dtype,m)}};function ek(e,t,n,s,a){const r=yr(s,t,n),i=D(n),o=Z(s);if(r){const n=br(t,o);return"string"===a?e.slice(n,n+i):e.subarray(n,n+i)}const l=Ha(s,a,"string"===a?Pu(e):e),u=Ha(n,a);for(let e=0;e<u.size;++e){const n=u.indexToLoc(e),s=n.map(((e,n)=>e+t[n]));u.set(l.get(...s),...n)}return"string"===a?Bu(u.values):u.values}function tk(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{begin:r,size:i}=s;Lx(a,"slice");const[o,l]=xr(a,r,i);rr(a,o,l);const u=ek(n.data.get(a.dataId).values,o,l,a.shape,a.dtype);return n.makeTensorInfo(l,a.dtype,u)}const nk={kernelName:An,backendName:"cpu",kernelFunc:tk},sk={kernelName:De,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{blockShape:r,crops:i}=s;Lx([a],"batchToSpaceND");const o=r.reduce(((e,t)=>e*t)),l=lu(a.shape,r,o),u=uu(l.length,r.length),c=cu(a.shape,r,o),h=hu(i,r.length),p=pu(c,i,r.length),d=gw({inputs:{x:a},backend:n,attrs:{shape:l}}),f=Ew({inputs:{x:d},backend:n,attrs:{perm:u}}),m=gw({inputs:{x:f},backend:n,attrs:{shape:c}}),g=tk({inputs:{x:m},backend:n,attrs:{begin:h,size:p}});return n.disposeIntermediateTensorInfo(d),n.disposeIntermediateTensorInfo(f),n.disposeIntermediateTensorInfo(m),g}};function ak(e,t,n,s,a){const r=D(s),i=ne(a,n);for(let n=0;n<e.length;n++){const s=e[n];if(s<0)throw new Error("Input x must be non-negative!");s>=a||(i[s]+=r>0?t[n]:1)}return i}function rk(e,t,n,s=!1){const a=e.shape[0],r=e.shape[1],i=Ha([a,n],t.dtype);for(let o=0;o<a;o++)for(let a=0;a<r;a++){const r=e.get(o,a);if(r<0)throw new Error("Input x must be non-negative!");r>=n||(s?i.set(1,o,r):t.size>0?i.set(i.get(o,r)+t.get(o,a),o,r):i.set(i.get(o,r)+1,o,r))}return i}const ik={kernelName:_e,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,weights:r}=t,{size:i}=s,o=ak(n.data.get(a.dataId).values,n.data.get(r.dataId).values,r.dtype,r.shape,i);return n.makeTensorInfo([i],r.dtype,o)}};function ok(e){return(t,n,s)=>{const a=V(n,t.length);for(let n=0;n<t.length;++n)a[n]=e(t[n],s);return a}}const lk=ok((e=>Math.ceil(e))),uk=Wx(Me,lk),ck={kernelName:Me,backendName:"cpu",kernelFunc:uk},hk=Bx(Le,((e,t)=>{const n=t;return e>n.clipValueMax?n.clipValueMax:e<n.clipValueMin?n.clipValueMin:e})),pk={kernelName:Le,backendName:"cpu",kernelFunc:hk},dk={kernelName:Pe,backendName:"cpu",kernelFunc:e=>{const{x:t}=e.inputs,n=e.backend,s=new Float32Array(D(t.shape)),a=n.data.get(t.dataId),r=a.complexTensorInfos.real,i=a.complexTensorInfos.imag,o=n.data.get(r.dataId).values,l=n.data.get(i.dataId).values;for(let e=0;e<o.length;e++){const t=o[e],n=l[e];s[e]=Math.hypot(t,n)}return n.makeOutput(s,t.shape,"float32")}};function fk(e,t,n,s){const a=U(n,D(t));if(s&&"string"!==n){let t=0;e.forEach((e=>{const n=D(e.shape);a.set(e.vals,t),t+=n}))}else{let s=0;e.forEach((e=>{const r="string"===n?Pu(e.vals):e.vals;let i=0;for(let n=0;n<e.shape[0];++n){const o=n*t[1]+s;for(let t=0;t<e.shape[1];++t)a[o+t]=r[i++]}s+=e.shape[1]}))}return a}function mk(e){const{inputs:t,backend:n}=e,{input:s}=t,a=n.data.get(s.dataId).complexTensorInfos.imag,r=n.data.get(a.dataId).values;return n.makeTensorInfo(a.shape,a.dtype,r)}const gk={kernelName:Ct,backendName:"cpu",kernelFunc:mk};function yk(e){const{inputs:t,backend:n,attrs:s}=e,{axis:a}=s,r=B(a,t[0].shape)[0];let i=au(t.map((e=>e.shape)),r);if(0===D(i))return n.makeTensorInfo(i,t[0].dtype,[]);const 
o=t.filter((e=>D(e.shape)>0));if(1===o.length)return Gx({inputs:{x:o[0]},backend:n});if(su(o.map((e=>e.shape)),r),"complex64"===o[0].dtype){const e=o.map((e=>iw({inputs:{input:e},backend:n}))),t=o.map((e=>mk({inputs:{input:e},backend:n}))),s=yk({inputs:e,backend:n,attrs:{axis:r}}),a=yk({inputs:t,backend:n,attrs:{axis:r}}),i=sw({inputs:{real:s,imag:a},backend:n});return e.forEach((e=>n.disposeIntermediateTensorInfo(e))),t.forEach((e=>n.disposeIntermediateTensorInfo(e))),n.disposeIntermediateTensorInfo(s),n.disposeIntermediateTensorInfo(a),i}const l=o.map((e=>{const t=D(e.shape.slice(r));return gw({inputs:{x:e},backend:n,attrs:{shape:[-1,t]}})})),u=l.map((e=>({vals:n.data.get(e.dataId).values,shape:e.shape})));i=au(l.map((e=>e.shape)),1);const c=1===l[0].shape[0],h=fk(u,i,t[0].dtype,c),p=au(o.map((e=>e.shape)),r),d=n.makeTensorInfo(p,t[0].dtype,h);return l.forEach((e=>n.disposeIntermediateTensorInfo(e))),d}const bk={kernelName:Be,backendName:"cpu",kernelFunc:yk};function xk(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,filter:r}=t,{strides:i,pad:o,dataFormat:l,dilations:u,dimRoundingMode:c}=s;Lx([a,r],"conv2d");const h=Jr(l),p=Vr(a.shape,r.shape,i,u,o,c,!1,h),d=p.filterHeight,f=p.filterWidth,m=p.dilationHeight,g=p.dilationWidth,y=p.padInfo.left,b=p.padInfo.top,x="channelsLast"===p.dataFormat,w=new Es(p.outShape,a.dtype),k=Z(a.shape),v=Z(r.shape),N=k[0],I=x?k[1]:k[2],$=x?k[2]:1,C=x?1:k[1],S=w.strides[0],T=x?w.strides[1]:w.strides[2],E=x?w.strides[2]:1,A=x?1:w.strides[1],R=n.data.get(a.dataId).values,F=n.data.get(r.dataId).values,D=w.values;for(let e=0;e<p.batchSize;++e){const t=e*N,n=e*S;for(let e=0;e<p.outHeight;++e){const s=n+e*T,a=e*p.strideHeight-b;for(let e=0;e<d;++e){const n=a+e*m;if(n<0||n>=p.inHeight)continue;const r=e*v[0],i=t+n*I;for(let e=0;e<p.outWidth;++e){const t=s+e*E,n=e*p.strideWidth-y;for(let e=0;e<f;++e){const s=n+e*g;if(s<0||s>=p.inWidth)continue;const a=i+s*$;let o=r+e*v[1];for(let e=0;e<p.inChannels;++e){const n=R[a+e*C];for(let e=0;e<p.outChannels;++e)D[t+e*A]+=n*F[o+e];o+=p.outChannels}}}}}}return n.makeTensorInfo(w.shape,w.dtype,D)}const wk={kernelName:We,backendName:"cpu",kernelFunc:xk},kk={kernelName:Ve,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,dy:r}=t,{strides:i,pad:o,dataFormat:l,dimRoundingMode:u,filterShape:c}=s;Lx([a,r],"conv2dBackpropFilter");const h=Jr(l),p=Vr(a.shape,c,i,1,o,u,!1,h),{strideHeight:d,strideWidth:f,filterHeight:m,filterWidth:g}=p,y="channelsLast"===p.dataFormat,b=new Es(p.filterShape,"float32"),x=p.padInfo.left,w=p.padInfo.top,k=n.data.get(a.dataId).values,v=n.data.get(r.dataId).values,N=new Es(a.shape,a.dtype,k),I=new Es(r.shape,r.dtype,v);for(let e=0;e<m;++e){const t=Math.max(0,Math.ceil((w-e)/d)),n=Math.min(p.outHeight,(p.inHeight+w-e)/d);for(let s=0;s<g;++s){const a=Math.max(0,Math.ceil((x-s)/f)),r=Math.min(p.outWidth,(p.inWidth+x-s)/f);for(let i=0;i<p.inChannels;++i)for(let o=0;o<p.outChannels;++o){let l=0;for(let u=0;u<p.batchSize;++u)for(let c=t;c<n;++c){const t=e+c*d-w;for(let e=a;e<r;++e){const n=s+e*f-x;l+=y?N.get(u,t,n,i)*I.get(u,c,e,o):N.get(u,i,t,n)*I.get(u,o,c,e)}}b.set(l,e,s,i,o)}}}return n.makeTensorInfo(b.shape,b.dtype,b.values)}},vk={kernelName:Ue,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{dy:a,filter:r}=t,{inputShape:i,strides:o,pad:l,dataFormat:u,dimRoundingMode:c}=s;Lx([a,r],"conv2dBackpropInput");const h=Z(r.shape),p=Z(a.shape);let d=Jr(u);const f=Vr(i,r.shape,o,1,l,c,!1,d),m=new 
Es(f.inShape,"float32"),g=m.values,y=n.data.get(a.dataId).values,b=n.data.get(r.dataId).values,[x,w,k]=h,{batchSize:v,filterHeight:N,filterWidth:I,inChannels:$,inHeight:C,inWidth:S,outChannels:T,outHeight:E,outWidth:A,strideHeight:R,strideWidth:F}=f;d=f.dataFormat;const D=N-1-f.padInfo.top,_=I-1-f.padInfo.left,O="channelsLast"===d,M=m.strides[0],L=O?m.strides[1]:m.strides[2],z=O?m.strides[2]:1,P=O?1:m.strides[1],B=p[0],W=O?p[1]:p[2],V=O?p[2]:1,U=O?1:p[1];for(let e=0;e<v;++e)for(let t=0;t<$;++t)for(let n=0;n<C;++n){const s=n-D,a=Math.max(0,Math.ceil(s/R)),r=Math.min(E,(N+s)/R);for(let i=0;i<S;++i){const o=i-_,l=Math.max(0,Math.ceil(o/F)),u=Math.min(A,(I+o)/F);let c=0;for(let n=a;n<r;++n){const a=n*R-s;for(let s=l;s<u;++s){const r=B*e+W*n+V*s,i=x*(N-1-a)+w*(I-1-(s*F-o))+k*t;for(let e=0;e<T;++e)c+=y[r+U*e]*b[i+e]}}g[M*e+L*n+z*i+P*t]=c}}return n.makeTensorInfo(m.shape,m.dtype,m.values)}},Nk={kernelName:Ge,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,filter:r}=t,{strides:i,pad:o,dilations:l}=s;Lx([a,r],"conv3d");const u=Ur(a.shape,r.shape,i,l,o),{filterDepth:c,filterHeight:h,filterWidth:p,dilationDepth:d,dilationHeight:f,dilationWidth:m,padInfo:g}=u,y=g.front,b=g.left,x=g.top,w=new Es(u.outShape,a.dtype),k=n.data.get(a.dataId).values,v=n.data.get(r.dataId).values,N=w.values,I=Z(a.shape),$=Z(r.shape);for(let e=0;e<u.batchSize;++e){const t=e*I[0],n=e*w.strides[0];for(let e=0;e<u.outDepth;++e){const s=n+e*w.strides[1],a=e*u.strideDepth-y;for(let e=0;e<c;++e){const n=a+e*d;if(n<0||n>=u.inDepth)continue;const r=e*$[0],i=t+n*I[1];for(let e=0;e<u.outHeight;++e){const t=s+e*w.strides[2],n=e*u.strideHeight-x;for(let e=0;e<h;++e){const s=n+e*f;if(s<0||s>=u.inHeight)continue;const a=r+e*$[1],o=i+s*I[2];for(let e=0;e<u.outWidth;++e){const n=t+e*u.outChannels,s=e*u.strideWidth-b;for(let e=0;e<p;++e){const t=s+e*m;if(t<0||t>=u.inWidth)continue;const r=a+e*$[2],i=o+t*u.inChannels;let l=r;for(let e=0;e<u.inChannels;++e){const t=k[i+e];for(let e=0;e<u.outChannels;++e)N[n+e]+=t*v[l+e];l+=u.outChannels}}}}}}}}return n.makeTensorInfo(w.shape,w.dtype,w.values)}},Ik={kernelName:He,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,dy:r}=t,{strides:i,pad:o,filterShape:l}=s;Lx([a,r],"conv3dBackpropFilterV2");const u=Z(a.shape),c=Z(r.shape),h=Ur(a.shape,l,i,1,o),p=h.strideDepth,d=h.strideHeight,f=h.strideWidth,m=h.filterDepth,g=h.filterHeight,y=h.filterWidth,b=new Es(h.filterShape,"float32"),x=b.values,[w,k,v,N]=b.strides,I=n.data.get(r.dataId).values,[$,C,S,T]=c,E=n.data.get(a.dataId).values,[A,R,F,D]=u,_=h.padInfo.front,O=h.padInfo.left,M=h.padInfo.top;for(let e=0;e<m;++e){const t=Math.max(0,Math.ceil((_-e)/p)),n=Math.min(h.outDepth,(h.inDepth+_-e)/p),s=e*w;for(let a=0;a<g;++a){const r=Math.max(0,Math.ceil((M-a)/d)),i=Math.min(h.outHeight,(h.inHeight+M-a)/d),o=a*k+s;for(let s=0;s<y;++s){const l=Math.max(0,Math.ceil((O-s)/f)),u=Math.min(h.outWidth,(h.inWidth+O-s)/f),c=s*v+o;for(let o=0;o<h.inChannels;++o){const m=o*N+c;for(let c=0;c<h.outChannels;++c){let g=0;for(let m=0;m<h.batchSize;++m){const h=m*A,y=m*$;for(let m=t;m<n;++m){const t=(e+m*p-_)*R+h,n=m*C+y;for(let e=r;e<i;++e){const r=(a+e*d-M)*F+t,i=e*S+n;for(let e=l;e<u;++e){const t=e*T+i;g+=E[(s+e*f-O)*D+r+o]*I[t+c]}}}}x[m+c]=g}}}}}return n.makeTensorInfo(b.shape,b.dtype,b.values)}},$k={kernelName:je,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{dy:a,filter:r}=t,{pad:i,strides:o,inputShape:l}=s;Lx([a],"conv3dBackpropInputV2");const 
u=Z(a.shape),c=Z(r.shape),h=Ur(l,r.shape,o,1,i),p=new Es(h.inShape,"float32"),d=p.values,[f,m,g,y]=p.strides,b=n.data.get(a.dataId).values,[x,w,k,v]=u,N=n.data.get(r.dataId).values,[I,$,C,S]=c,{batchSize:T,filterDepth:E,filterHeight:A,filterWidth:R,inChannels:F,inDepth:D,inHeight:_,inWidth:O,outChannels:M,outDepth:L,outHeight:z,outWidth:P,strideDepth:B,strideHeight:W,strideWidth:V}=h,U=E-1-h.padInfo.front,G=A-1-h.padInfo.top,H=R-1-h.padInfo.left;for(let e=0;e<T;++e)for(let t=0;t<F;++t)for(let n=0;n<D;++n){const s=n-U,a=Math.max(0,Math.ceil(s/B)),r=Math.min(L,(E+s)/B);for(let i=0;i<_;++i){const o=i-G,l=Math.max(0,Math.ceil(o/W)),u=Math.min(z,(A+o)/W);for(let c=0;c<O;++c){const h=c-H,p=Math.max(0,Math.ceil(h/V)),T=Math.min(P,(R+h)/V);let F=0;for(let n=a;n<r;++n){const a=n*B-s;for(let s=l;s<u;++s){const r=s*W-o;for(let i=p;i<T;++i){const o=x*e+w*n+k*s+v*i,l=I*(E-1-a)+$*(A-1-r)+C*(R-1-(i*V-h))+S*t;for(let e=0;e<M;++e)F+=b[o+e]*N[l+e]}}}d[f*e+m*n+g*i+y*c+t]=F}}}return n.makeTensorInfo(p.shape,p.dtype,p.values)}},Ck=Bx(Ke,(e=>Math.cos(e))),Sk={kernelName:Ke,backendName:"cpu",kernelFunc:Ck},Tk=Bx(qe,(e=>Math.cosh(e))),Ek={kernelName:qe,backendName:"cpu",kernelFunc:Tk},Ak={kernelName:Ye,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{image:a,boxes:r,boxInd:i}=t,{cropSize:o,method:l,extrapolationValue:u}=s,[c,h,p,d]=a.shape,f=r.shape[0],[m,g]=o,y=Ha([f,m,g,d],"float32"),b=n.data.get(r.dataId).values,x=n.data.get(i.dataId).values,w=n.data.get(a.dataId).values,k=Z(a.shape),v=Z(y.shape);for(let e=0;e<f;e++){const t=4*e,n=b[t],s=b[t+1],a=b[t+2],r=b[t+3],i=x[e];if(i>=c)continue;const o=m>1?(a-n)*(h-1)/(m-1):0,f=g>1?(r-s)*(p-1)/(g-1):0;for(let t=0;t<m;t++){const c=m>1?n*(h-1)+t*o:.5*(n+a)*(h-1);if(c<0||c>h-1)for(let n=0;n<g;n++)for(let s=0;s<d;s++){const a=s+n*v[2]+t*v[1]+e*v[0];y.values[a]=u}else if("bilinear"===l){const n=Math.floor(c),a=Math.ceil(c),o=c-n;for(let l=0;l<g;l++){const c=g>1?s*(p-1)+l*f:.5*(s+r)*(p-1);if(c<0||c>p-1){for(let n=0;n<d;n++){const s=n+l*v[2]+t*v[1]+e*v[0];y.values[s]=u}continue}const h=Math.floor(c),m=Math.ceil(c),b=c-h;for(let s=0;s<d;s++){let r=s+h*k[2]+n*k[1]+i*k[0];const u=w[r];r=s+m*k[2]+n*k[1]+i*k[0];const c=w[r];r=s+h*k[2]+a*k[1]+i*k[0];const p=w[r];r=s+m*k[2]+a*k[1]+i*k[0];const d=u+(c-u)*b,f=p+(w[r]-p)*b;r=s+l*v[2]+t*v[1]+e*v[0],y.values[r]=d+(f-d)*o}}}else for(let n=0;n<g;++n){const a=g>1?s*(p-1)+n*f:.5*(s+r)*(p-1);if(a<0||a>p-1){for(let s=0;s<d;s++){const a=s+n*v[2]+t*v[1]+e*v[0];y.values[a]=u}continue}const o=Math.round(a),l=Math.round(c);for(let s=0;s<d;s++){const a=s+o*k[2]+l*k[1]+i*k[0],r=s+n*v[2]+t*v[1]+e*v[0];y.values[r]=w[a]}}}}return n.makeTensorInfo(y.shape,y.dtype,y.values)}},Rk={kernelName:Xe,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{axis:r,exclusive:i,reverse:o}=s;Lx(a,"cumsum");const l=Zi([r],a.shape.length);let u=a;null!=l&&(u=Ew({inputs:{x:a},backend:n,attrs:{perm:l}}));const c=eo(1,a.shape.length)[0];if(c!==u.shape.length-1)throw new Error(`backend.cumsum in CPU expects an inner-most axis=${u.shape.length-1} but got axis=${c}`);const h=Vs(u.dtype,"int32"),p=ne(D(u.shape),h),d=n.data.get(u.dataId).values,f=u.shape[u.shape.length-1],m=o?(e,t)=>e+f-t-1:(e,t)=>e+t;for(let e=0;e<d.length;e+=f)for(let t=0;t<f;t++){const n=m(e,t);if(0===t)p[n]=i?0:d[n];else{const s=m(e,t-1);p[n]=i?d[s]+p[s]:d[n]+p[s]}}const g=n.makeTensorInfo(u.shape,h,p);if(null!=l){const e=Ew({inputs:{x:g},backend:n,attrs:{perm:Qi(l)}});return 
n.disposeIntermediateTensorInfo(g),n.disposeIntermediateTensorInfo(u),e}return g}},Fk={kernelName:Je,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,weights:r}=t,{size:i,binaryOutput:o}=s;if(1===a.shape.length){const e=ak(n.data.get(a.dataId).values,n.data.get(r.dataId).values,r.dtype,r.shape,i);return n.makeTensorInfo([i],r.dtype,e)}if(2===a.shape.length){const e=rk(n.bufferSync(a),n.bufferSync(r),i,o);return n.makeTensorInfo(e.shape,r.dtype,e.values)}throw new Error(`Error in denseBincount: input must be at most rank 2, but got rank${a.shape.length}.`)}},Dk={kernelName:Ze,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{blockSize:r,dataFormat:i}=s;E("NHWC"===i,(()=>`Only NHWC dataFormat supported on CPU for depthToSpace. Got ${i}`)),E(r>1,(()=>`blockSize should be > 1 for depthToSpace, but was: ${r}`));const o=a.shape[0],l=a.shape[1],u=a.shape[2],c=a.shape[3],h=l*r,p=u*r,d=c/(r*r),f=n.data.get(a.dataId).values,m=new Float32Array(o*h*p*d);let g=0;for(let e=0;e<o;++e)for(let t=0;t<h;++t){const n=Math.floor(t/r),s=t%r;for(let t=0;t<p;++t){const a=Math.floor(t/r),i=(s*r+t%r)*d;for(let t=0;t<d;++t){const s=t+i+c*(a+u*(n+l*e));m[g++]=f[s]}}}return n.makeTensorInfo([o,h,p,d],a.dtype,m)}};function _k(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,filter:r}=t,{strides:i,pad:o,dilations:l,dimRoundingMode:u}=s;Lx([a,r],"depthwiseConv2DNative");const c=Z(a.shape),h=Z(r.shape);let p=l;null==p&&(p=[1,1]),E(Yr(i,p),(()=>`Error in depthwiseConv2d: Either strides or dilations must be 1. Got strides ${i} and dilations '${p}'`));const d=Vr(a.shape,r.shape,i,p,o,u,!0),{filterHeight:f,filterWidth:m,dilationHeight:g,dilationWidth:y,padInfo:b}=d,x=b.left,w=b.top,k=d.outChannels/d.inChannels,v=new Es(d.outShape,a.dtype),N=n.data.get(a.dataId).values,I=n.data.get(r.dataId).values,$=v.values;for(let e=0;e<d.batchSize;++e){const t=e*c[0],n=e*v.strides[0];for(let e=0;e<d.outHeight;++e){const s=n+e*v.strides[1],a=e*d.strideHeight-x;for(let e=0;e<f;++e){const n=a+e*g;if(n<0||n>=d.inHeight)continue;const r=e*h[0],i=t+n*c[1];for(let e=0;e<d.outWidth;++e){const t=s+e*v.strides[2],n=e*d.strideWidth-w;for(let e=0;e<m;++e){const s=n+e*y;if(s<0||s>=d.inWidth)continue;const a=r+e*h[1],o=i+s*d.inChannels;let l=t,u=a;for(let e=0;e<d.inChannels;++e){const t=N[o+e];for(let e=0;e<k;++e)$[l+e]+=t*I[u+e];l+=k,u+=k}}}}}}return n.makeTensorInfo(v.shape,v.dtype,v.values)}const Ok={kernelName:Qe,backendName:"cpu",kernelFunc:_k},Mk={kernelName:et,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,dy:r}=t,{strides:i,dilations:o,pad:l,dimRoundingMode:u,filterShape:c}=s;Lx([a,r],"depthwiseConv2dNativeBackpropFilter");const h=Vr(a.shape,c,i,o,l,u,!0),{strideHeight:p,strideWidth:d,filterHeight:f,filterWidth:m}=h,g=new Es(h.filterShape,"float32"),y=h.padInfo.left,b=h.padInfo.top,x=h.outChannels/h.inChannels,w=n.data.get(a.dataId).values,k=new Es(a.shape,a.dtype,w),v=n.data.get(r.dataId).values,N=new Es(r.shape,r.dtype,v);for(let e=0;e<f;++e){const t=Math.max(0,Math.ceil((b-e)/p)),n=Math.min(h.outHeight,(h.inHeight+b-e)/p);for(let s=0;s<m;++s){const a=Math.max(0,Math.ceil((y-s)/d)),r=Math.min(h.outWidth,(h.inWidth+y-s)/d);for(let i=0;i<h.outChannels;++i){const o=Math.trunc(i/x),l=i%x;let u=0;for(let l=0;l<h.batchSize;++l)for(let c=t;c<n;++c){const t=e+c*p-b;for(let e=a;e<r;++e){const n=s+e*d-y;u+=k.get(l,t,n,o)*N.get(l,c,e,i)}}g.set(u,e,s,o,l)}}}return 
n.makeTensorInfo(g.shape,g.dtype,g.values)}},Lk={kernelName:tt,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{dy:a,filter:r}=t,{strides:i,dilations:o,pad:l,dimRoundingMode:u,inputShape:c}=s;Lx([a,r],"depthwiseConv2DNativeBackpropInput");const h=Z(a.shape),p=Z(r.shape),d=Vr(c,r.shape,i,o,l,u,!0),f=new Es(d.inShape,"float32"),m=f.values,[g,y,b]=f.strides,x=n.data.get(a.dataId).values,[w,k,v]=h,N=n.data.get(r.dataId).values,[I,$,C]=p,{batchSize:S,filterHeight:T,filterWidth:E,inChannels:A,inHeight:R,inWidth:F,outChannels:D,outHeight:_,outWidth:O,strideHeight:M,strideWidth:L}=d,z=T-1-d.padInfo.top,P=E-1-d.padInfo.left,B=D/A;for(let e=0;e<S;++e)for(let t=0;t<A;++t)for(let n=0;n<R;++n){const s=n-z,a=Math.max(0,Math.ceil(s/M)),r=Math.min(_,(T+s)/M);for(let i=0;i<F;++i){const o=i-P,l=Math.max(0,Math.ceil(o/L)),u=Math.min(O,(E+o)/L);let c=0;for(let n=a;n<r;++n){const a=n*M-s;for(let s=l;s<u;++s){const r=w*e+k*n+v*s,i=I*(T-1-a)+$*(E-1-(s*L-o))+C*t;for(let e=0;e<B;++e)c+=x[r+(t*B+e)]*N[i+e]}}m[g*e+y*n+b*i+t]=c}}return n.makeTensorInfo(f.shape,f.dtype,f.values)}},zk={kernelName:nt,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n}=e,{x:s}=t,a=D(s.shape),r=n.data.get(s.dataId).values,i=Ha([a,a],s.dtype),o=i.values;for(let e=0;e<r.length;e++)o[e*a+e]=r[e];const l=[...s.shape,...s.shape];return n.makeTensorInfo(l,i.dtype,i.values)}},Pk={kernelName:st,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{x:s,filter:a}=e,{strides:r,pad:i,dilations:o}=n,l=t,u=l.data.get(s.dataId).values,c=s.shape.length,h=l.data.get(a.dataId).values,p=a.shape.length,{batchSize:d,inHeight:f,inWidth:m,inChannels:g,outHeight:y,outWidth:b,padInfo:x,strideHeight:w,strideWidth:k,filterHeight:v,filterWidth:N,dilationHeight:I,dilationWidth:$,outShape:C}=Pr(s.shape,a.shape,r,i,"NHWC",o),S=D(C),T=C.length,E=U(s.dtype,S);for(let e=0;e<d;++e)for(let t=0;t<y;++t){const n=t*w-x.top;for(let r=0;r<b;++r){const i=r*k-x.left;for(let o=0;o<g;++o){let l=Number.MIN_SAFE_INTEGER;for(let t=0;t<v;++t){const r=n+t*I;if(r>=0&&r<f)for(let n=0;n<N;++n){const d=i+n*$;if(d>=0&&d<m){const i=re([e,r,d,o],c,Z(s.shape)),f=re([t,n,o],p,Z(a.shape)),m=u[i]+h[f];m>l&&(l=m)}}}E[re([e,t,r,o],T,Z(C))]=l}}}return{dataId:l.write(ys(E,s.dtype),C,s.dtype),shape:C,dtype:s.dtype}}},Bk={kernelName:rt,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{x:s,filter:a,dy:r}=e,{strides:i,pad:o,dilations:l}=n,u=t,c=ee(s.shape,u.data.get(s.dataId).values),h=ee(a.shape,u.data.get(a.dataId).values),{batchSize:p,inHeight:d,inWidth:f,inChannels:m,outHeight:g,outWidth:y,padInfo:b,strideHeight:x,strideWidth:w,filterHeight:k,filterWidth:v,dilationHeight:N,dilationWidth:I,outShape:$}=Pr(s.shape,a.shape,i,o,"NHWC",l);E(r.rank===$.length,(()=>`Error in Dilation2DBackpropFilter, dy must have the same rank as output ${$.length}, but got ${r.rank}`));const C=ee($,u.data.get(r.dataId).values),S=se(a.shape,a.dtype);for(let e=0;e<p;++e)for(let t=0;t<g;++t){const n=t*x-b.top;for(let s=0;s<y;++s){const a=s*w-b.left;for(let r=0;r<m;++r){let i=Number.MIN_SAFE_INTEGER,o=0,l=0;for(let t=0;t<k;++t){const s=n+t*N;if(s>=0&&s<d)for(let n=0;n<v;++n){const u=a+n*I;if(u>=0&&u<f){const 
a=c[e][s][u][r]+h[t][n][r];a>i&&(i=a,o=t,l=n)}}}S[o][l][r]+=C[e][t][s][r]}}}return{dataId:u.write(ys(S,s.dtype),a.shape,a.dtype),shape:a.shape,dtype:a.dtype}}},Wk={kernelName:at,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{x:s,filter:a,dy:r}=e,{strides:i,pad:o,dilations:l}=n,u=t,c=ee(s.shape,u.data.get(s.dataId).values),h=ee(a.shape,u.data.get(a.dataId).values),{batchSize:p,inHeight:d,inWidth:f,inChannels:m,outHeight:g,outWidth:y,padInfo:b,strideHeight:x,strideWidth:w,filterHeight:k,filterWidth:v,dilationHeight:N,dilationWidth:I,outShape:$}=Pr(s.shape,a.shape,i,o,"NHWC",l);E(r.rank===$.length,(()=>`Error in Dilation2DBackpropInput, dy must have the same rank as output ${$.length}, but got ${r.rank}`));const C=ee($,u.data.get(r.dataId).values),S=se(s.shape,s.dtype);for(let e=0;e<p;++e)for(let t=0;t<g;++t){const n=t*x-b.top;for(let s=0;s<y;++s){const a=s*w-b.left;for(let r=0;r<m;++r){let i=Number.MIN_SAFE_INTEGER,o=n<0?0:n,l=a<0?0:a;for(let t=0;t<k;++t){const s=n+t*N;if(s>=0&&s<d)for(let n=0;n<v;++n){const u=a+n*I;if(u>=0&&u<f){const a=c[e][s][u][r]+h[t][n][r];a>i&&(i=a,o=s,l=u)}}}S[e][o][l][r]+=C[e][t][s][r]}}}return{dataId:u.write(ys(S,s.dtype),s.shape,s.dtype),shape:s.shape,dtype:s.dtype}}},Vk={kernelName:lt,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n}=e,{dy:s,y:a}=t;Lx([s,a],"eluGrad");const r=new Float32Array(D(a.shape)),i=n.data.get(a.dataId).values,o=n.data.get(s.dataId).values;for(let e=0;e<i.length;++e){const t=i[e];r[e]=t>=1?o[e]:o[e]*(t+1)}return n.makeTensorInfo(a.shape,"float32",r)}},Uk=qx(((e,t)=>e===t?1:0)),Gk=cw(ct,Uk,null,"bool"),Hk={kernelName:ct,backendName:"cpu",kernelFunc:Gk},jk=xu,Kk=wu,qk=ku,Xk=vu,Yk=Nu,Jk=Iu,Zk=Bx(ut,(e=>{const t=Math.sign(e),n=Math.abs(e),s=1/(1+jk*n);return t*(1-((((Jk*s+Yk)*s+Xk)*s+qk)*s+Kk)*s*Math.exp(-n*n))})),Qk={kernelName:ut,backendName:"cpu",kernelFunc:Zk},ev=ok((e=>Math.exp(e))),tv=Wx(ht,ev),nv={kernelName:ht,backendName:"cpu",kernelFunc:tv};function sv(e){const{inputs:t,backend:n,attrs:s}=e,{input:a}=t,{dim:r}=s,i=a.shape.length,o=a.shape.slice();let l=r;return r<0&&(E(-(i+1)<=r,(()=>`Axis must be in the interval [${-(i+1)}, ${i}]`)),l=i+r+1),o.splice(l,0,1),gw({inputs:{x:a},backend:n,attrs:{shape:o}})}const av={kernelName:pt,backendName:"cpu",kernelFunc:sv},rv=ok((e=>Math.expm1(e))),iv=Wx(dt,rv),ov={kernelName:dt,backendName:"cpu",kernelFunc:iv},lv=qx(((e,t)=>e*t)),uv=hw(((e,t,n,s)=>({real:e*n-t*s,imag:e*s+t*n}))),cv=cw(en,lv,uv),hv={kernelName:en,backendName:"cpu",kernelFunc:cv},pv=qx(((e,t)=>e/t)),dv=cw(it,pv),fv={kernelName:it,backendName:"cpu",kernelFunc:dv},mv=qx(((e,t)=>e-t)),gv=hw(((e,t,n,s)=>({real:e-n,imag:t-s}))),yv=cw(Un,mv,gv),bv={kernelName:Un,backendName:"cpu",kernelFunc:yv};function xv(e,t,n){const s=e.shape,a=s[0],r=s[1],i=n.data.get(e.dataId),o=i.complexTensorInfos.real,l=i.complexTensorInfos.imag,u=[a,r],c=D(u),h=V("float32",c),p=V("float32",c);for(let e=0;e<a;e++){const s=tk({inputs:{x:o},backend:n,attrs:{begin:[e,0],size:[1,r]}}),a=tk({inputs:{x:l},backend:n,attrs:{begin:[e,0],size:[1,r]}}),i=sw({inputs:{real:s,imag:a},backend:n}),{real:u,imag:c}=wv(i,t,n),d=Su(u,c);for(let t=0;t<r;t++){const n=Ru(d,t);h[e*r+t]=n.real,p[e*r+t]=n.imag}n.disposeIntermediateTensorInfo(s),n.disposeIntermediateTensorInfo(a),n.disposeIntermediateTensorInfo(i)}const d=n.makeTensorInfo(u,"float32",h),f=n.makeTensorInfo(u,"float32",p),m=sw({inputs:{real:d,imag:f},backend:n});return n.disposeIntermediateTensorInfo(d),n.disposeIntermediateTensorInfo(f),m}function wv(e,t,n){const 
s=D(e.shape),a=n.data.get(e.dataId),r=n.data.get(a.complexTensorInfos.real.dataId).values,i=n.data.get(a.complexTensorInfos.imag.dataId).values;if(0==((o=s)&o-1)){const a=kv(r,i,s,t,n),o=[e.shape[0],e.shape[1]];if(t){const e=n.makeTensorInfo(o,"float32",a.real),t=n.makeTensorInfo(o,"float32",a.imag),r=n.makeTensorInfo([],"float32",gs(s,"float32")),i=Gx({inputs:{x:r},backend:n}),l=fv.kernelFunc({inputs:{a:e,b:r},backend:n}),u=fv.kernelFunc({inputs:{a:t,b:i},backend:n}),c=n.data.get(l.dataId).values,h=n.data.get(u.dataId).values;return n.disposeIntermediateTensorInfo(e),n.disposeIntermediateTensorInfo(t),n.disposeIntermediateTensorInfo(r),n.disposeIntermediateTensorInfo(i),n.disposeIntermediateTensorInfo(l),n.disposeIntermediateTensorInfo(u),{real:c,imag:h}}return a}return Tu(function(e,t,n){const s=new Float32Array(2*t);for(let a=0;a<t;a++){let r=0,i=0;for(let s=0;s<t;s++){const o=_u(a*s,t,n),l=Ru(e,s);r+=l.real*o.real-l.imag*o.imag,i+=l.real*o.imag+l.imag*o.real}n&&(r/=t,i/=t),Fu(s,r,i,a)}return s}(Su(r,i),s,t));var o}function kv(e,t,n,s,a){if(1===n)return{real:e,imag:t};const r=Su(e,t),i=n/2,o=Eu(r),l=o.real,u=o.imag,c=[l.length],h=a.makeTensorInfo(c,"float32",l),p=a.makeTensorInfo(c,"float32",u),d=sw({inputs:{real:h,imag:p},backend:a}),f=Au(r),m=f.real,g=f.imag,y=[m.length],b=a.makeTensorInfo(y,"float32",m),x=a.makeTensorInfo(y,"float32",g),w=sw({inputs:{real:b,imag:x},backend:a}),k=kv(l,u,i,s,a),v=k.real,N=k.imag,I=[v.length],$=a.makeTensorInfo(I,"float32",v),C=a.makeTensorInfo(I,"float32",N),S=sw({inputs:{real:$,imag:C},backend:a}),T=kv(m,g,i,s,a),E=T.real,A=T.imag,R=[E.length],F=a.makeTensorInfo(R,"float32",E),D=a.makeTensorInfo(R,"float32",A),_=sw({inputs:{real:F,imag:D},backend:a}),O=Du(n,s),M=[O.real.length],L=a.makeTensorInfo(M,"float32",O.real),z=a.makeTensorInfo(M,"float32",O.imag),P=sw({inputs:{real:L,imag:z},backend:a}),B=cv({inputs:{a:P,b:_},backend:a}),W=fw({inputs:{a:S,b:B},backend:a}),V=yv({inputs:{a:S,b:B},backend:a}),U=iw({inputs:{input:W},backend:a}),G=iw({inputs:{input:V},backend:a}),H=mk({inputs:{input:W},backend:a}),j=mk({inputs:{input:V},backend:a}),K=yk({inputs:[U,G],backend:a,attrs:{axis:0}}),q=yk({inputs:[H,j],backend:a,attrs:{axis:0}}),X=a.data.get(K.dataId).values,Y=a.data.get(q.dataId).values;return a.disposeIntermediateTensorInfo(h),a.disposeIntermediateTensorInfo(p),a.disposeIntermediateTensorInfo(d),a.disposeIntermediateTensorInfo(b),a.disposeIntermediateTensorInfo(x),a.disposeIntermediateTensorInfo(w),a.disposeIntermediateTensorInfo($),a.disposeIntermediateTensorInfo(C),a.disposeIntermediateTensorInfo(S),a.disposeIntermediateTensorInfo(F),a.disposeIntermediateTensorInfo(D),a.disposeIntermediateTensorInfo(_),a.disposeIntermediateTensorInfo(L),a.disposeIntermediateTensorInfo(z),a.disposeIntermediateTensorInfo(P),a.disposeIntermediateTensorInfo(B),a.disposeIntermediateTensorInfo(W),a.disposeIntermediateTensorInfo(V),a.disposeIntermediateTensorInfo(U),a.disposeIntermediateTensorInfo(H),a.disposeIntermediateTensorInfo(G),a.disposeIntermediateTensorInfo(j),a.disposeIntermediateTensorInfo(K),a.disposeIntermediateTensorInfo(q),{real:X,imag:Y}}const vv={kernelName:ft,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n}=e,{input:s}=t,a=D(s.shape),r=s.shape[s.shape.length-1],i=gw({inputs:{x:s},backend:n,attrs:{shape:[a/r,r]}}),o=xv(i,!1,n),l=gw({inputs:{x:o},backend:n,attrs:{shape:s.shape}});return n.disposeIntermediateTensorInfo(i),n.disposeIntermediateTensorInfo(o),l}};function 
Nv(e){const{backend:t,attrs:n}=e,{shape:s,value:a,dtype:r}=n,i=r||X(a),o=U(i,D(s));return function(e,t,n){e.fill(t)}(o,a),t.makeTensorInfo(s,i,o)}const Iv={kernelName:mt,backendName:"cpu",kernelFunc:Nv},$v={kernelName:gt,backendName:"cpu",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{image:s}=e,a=n,r=V(s.dtype,D(s.shape)),[i,o,l,u]=s.shape,c=a.data.get(s.dataId).values;for(let e=0;e<i;e++){const t=e*l*o*u;for(let e=0;e<o;e++){const n=e*(l*u);for(let s=0;s<l;s++){const a=s*u;for(let o=0;o<u;o++){const h=[i,e,s,o][2],p=Math.round(l-h),d=t+n+a+o;let f=c[d];p>=0&&p<l&&(f=c[t+n+p*u+o]),r[d]=f}}}}return{dataId:a.write(r,s.shape,s.dtype),shape:s.shape,dtype:s.dtype}}},Cv=ok((e=>Math.floor(e))),Sv=Wx(yt,Cv),Tv={kernelName:yt,backendName:"cpu",kernelFunc:Sv},Ev=qx(((e,t)=>Math.floor(e/t))),Av=cw(bt,Ev,null,"int32"),Rv={kernelName:bt,backendName:"cpu",kernelFunc:Av},Fv={kernelName:is,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,filter:r,bias:i,preluActivationWeights:o}=t,{strides:l,pad:u,dataFormat:c,dilations:h,dimRoundingMode:p,activation:d,leakyreluAlpha:f}=s;let m=xk({inputs:{x:a,filter:r},backend:n,attrs:{strides:l,pad:u,dataFormat:c,dilations:h,dimRoundingMode:p}});if(i){const e=m;m=fw({inputs:{a:m,b:i},backend:n}),n.disposeIntermediateTensorInfo(e)}if(d){const e=m;m=nw(n,m,d,o,f),n.disposeIntermediateTensorInfo(e)}return m}},Dv={kernelName:os,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,filter:r,bias:i,preluActivationWeights:o}=t,{strides:l,pad:u,dataFormat:c,dilations:h,dimRoundingMode:p,activation:d,leakyreluAlpha:f}=s;let m=_k({inputs:{x:a,filter:r},backend:n,attrs:{strides:l,pad:u,dataFormat:c,dilations:h,dimRoundingMode:p}});if(i){const e=m;m=fw({inputs:{a:m,b:i},backend:n}),n.disposeIntermediateTensorInfo(e)}if(d){const e=m;m=nw(n,m,d,o,f),n.disposeIntermediateTensorInfo(e)}return m}},_v={kernelName:kt,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n}=e,{params:s,indices:a}=t,r=D(s.shape),i=a.shape,o=i[i.length-1],[l,u,c,h]=du(s,a);if(0===u)return n.makeTensorInfo(l,s.dtype,[]);const p=Ha([u,c],s.dtype),d=n.data.get(a.dataId).values,f=n.data.get(s.dataId).values;for(let e=0;e<u;e++){const t=[];let n=0;for(let s=0;s<o;s++){const a=d[e*o+s];n+=a*h[s],t.push(a)}if(n<0||n>=r/c)throw new Error(`Invalid indices: ${t} does not index into ${s.shape}`);for(let t=0;t<c;t++)p.values[e*c+t]=f[n*c+t]}return n.makeTensorInfo(l,p.dtype,p.values)}};function Ov(e,t,n){const s=Ha(n,e.dtype);for(let n=0;n<s.size;++n){const a=s.indexToLoc(n).slice(),r=a[0],i=a[2],o=t.locToIndex([r,i]);a[2]=t.values[o];const l=e.locToIndex(a);s.values[n]=e.values[l]}return s}const Mv={kernelName:wt,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,indices:r}=t,{axis:i,batchDims:o}=s;Lx([a,r],"gatherV2");let l=o;null==o&&(l=0);const u=D(r.shape),c=zu(a,r,B(i,a.shape)[0],l),h=gw({inputs:{x:a},backend:n,attrs:{shape:[c.batchSize,c.outerSize,c.dimSize,c.sliceSize]}}),p=gw({inputs:{x:r},backend:n,attrs:{shape:[c.batchSize,u/c.batchSize]}}),d=[c.batchSize,c.outerSize,u/c.batchSize,c.sliceSize],f=n.bufferSync(p),m=Ov(n.bufferSync(h),f,d);return 
n.disposeIntermediateTensorInfo(h),n.disposeIntermediateTensorInfo(p),n.makeTensorInfo(c.outputShape,m.dtype,m.values)}},Lv=qx(((e,t)=>e>t?1:0)),zv=cw(vt,Lv,null,"bool"),Pv={kernelName:vt,backendName:"cpu",kernelFunc:zv},Bv=qx(((e,t)=>e>=t?1:0)),Wv=cw(Nt,Bv,null,"bool"),Vv={kernelName:Nt,backendName:"cpu",kernelFunc:Wv},Uv={kernelName:$t,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n}=e,{input:s}=t,a=D(s.shape),r=s.shape[s.shape.length-1],i=gw({inputs:{x:s},backend:n,attrs:{shape:[a/r,r]}}),o=xv(i,!0,n),l=gw({inputs:{x:o},backend:n,attrs:{shape:s.shape}});return n.disposeIntermediateTensorInfo(i),n.disposeIntermediateTensorInfo(o),l}},Gv=Bx(St,(e=>Number.isFinite(e)?1:0),"bool"),Hv={kernelName:St,backendName:"cpu",kernelFunc:Gv},jv=Bx(Tt,(e=>Math.abs(e)===1/0?1:0),"bool"),Kv={kernelName:Tt,backendName:"cpu",kernelFunc:jv},qv=Bx(Et,(e=>Number.isNaN(e)?1:0),"bool"),Xv={kernelName:Et,backendName:"cpu",kernelFunc:qv},Yv=qx(((e,t)=>e<t?1:0)),Jv=cw(Rt,Yv,null,"bool"),Zv={kernelName:Rt,backendName:"cpu",kernelFunc:Jv},Qv=qx(((e,t)=>e<=t?1:0)),eN=cw(Ft,Qv,null,"bool"),tN={kernelName:Ft,backendName:"cpu",kernelFunc:eN};function nN(e,t,n){const s=(t-e)/(n-1),a=ne(n,"float32");a[0]=e;for(let e=1;e<a.length;e++)a[e]=a[e-1]+s;return a}const sN={kernelName:Dt,backendName:"cpu",kernelFunc:function(e){const{backend:t,attrs:n}=e,{start:s,stop:a,num:r}=n,i=nN(s,a,r);return t.makeTensorInfo([i.length],"float32",i)}},aN=ok((e=>Math.log(e))),rN=Wx(_t,aN),iN={kernelName:_t,backendName:"cpu",kernelFunc:rN},oN=Bx(Ot,(e=>Math.log1p(e))),lN={kernelName:Ot,backendName:"cpu",kernelFunc:oN},uN=qx(((e,t)=>e&&t)),cN=cw(Mt,uN,null,"bool"),hN={kernelName:Mt,backendName:"cpu",kernelFunc:cN},pN=Bx(Lt,(e=>e?0:1),"bool"),dN={kernelName:Lt,backendName:"cpu",kernelFunc:pN},fN=qx(((e,t)=>e||t)),mN=cw(zt,fN,null,"bool"),gN={kernelName:zt,backendName:"cpu",kernelFunc:mN},yN={kernelName:Pt,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{depthRadius:r,bias:i,alpha:o,beta:l}=s;Lx(a,"LRN");const u=a.shape[3],c=u-1,h=n.data.get(a.dataId).values,p=D(a.shape),d=new Float32Array(p);function f(e){const t=e%u;let n=e-t+Math.max(0,t-r);const s=e-t+Math.min(t+r,c);let a=0;for(;n<=s;n++){const e=h[n];a+=e*e}return a}for(let e=0;e<p;e++){const t=f(e),n=h[e]*Math.pow(i+o*t,-l);d[e]=n}return n.makeTensorInfo(a.shape,a.dtype,d)}},bN={kernelName:Bt,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,y:r,dy:i}=t,{depthRadius:o,bias:l,alpha:u,beta:c}=s;Lx(i,"LRNGrad");const h=D(i.shape),p=i.shape[3],d=n.data.get(i.dataId).values,f=n.data.get(a.dataId).values,m=n.data.get(r.dataId).values,g=new Float32Array(h),y=h;for(let e=0;e<y;e++){const t=e%p,n=e-t+Math.max(0,t-o),s=e-t+Math.min(p,t+o+1);let a=0;for(let e=n;e<s;e++)a+=Math.pow(f[e],2);a=u*a+l;for(let t=n;t<s;t++){let n=-2*u*c*f[t]*m[e]/a;e===t&&(n+=Math.pow(a,-c)),n*=d[e],g[t]+=n}}return n.makeTensorInfo(i.shape,a.dtype,g)}};function xN(e,t,n,s){const a=V(s,D(n));for(let n=0;n<a.length;++n){const s=n*t;let r=e[s];for(let n=0;n<t;++n){const t=e[s+n];t>r&&(r=t)}a[n]=r}return a}function wN(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{reductionIndices:r,keepDims:i}=s,o=n;let l=a.shape;const u=l.length,c=B(r,l);let h=c;const p=Zi(h,u);let d=o.data.get(a.dataId).values;if(null!=p){const e=new Array(u);for(let t=0;t<e.length;t++)e[t]=l[p[t]];d=Tw(d,l,a.dtype,p,e),h=eo(h.length,u),l=e}Lx(a,"max"),Ji("max",h,u);const[f,m]=Xi(l,h),g=xN(d,D(m),f,a.dtype),y=o.write(g,f,a.dtype);let b=f;return 
i&&(b=Yi(f,c)),{dataId:y,shape:b,dtype:a.dtype}}const kN={kernelName:Wt,backendName:"cpu",kernelFunc:wN},vN=qx(((e,t)=>Math.max(e,t))),NN=cw(Vt,vN),IN={kernelName:Vt,backendName:"cpu",kernelFunc:NN},$N={kernelName:Ut,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t;Lx(a,"maxPool");const{filterSize:r,strides:i,pad:o,dimRoundingMode:l}=s;E(Yr(i,1),(()=>`Error in maxPool: Either strides or dilations must be 1. Got strides ${i} and dilations '1'`));const u=Br(a.shape,r,i,1,o,l);let c;if(1===u.filterWidth&&1===u.filterHeight&&_(u.inShape,u.outShape))c=Gx({inputs:{x:a},backend:n});else{const e=n.data.get(a.dataId).values,t=Z(a.shape),s=jw(e,a.shape,a.dtype,t,u,"max");c=n.makeTensorInfo(u.outShape,a.dtype,s.values)}return c}},CN={kernelName:Ht,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{filterSize:r,strides:i,pad:o,dimRoundingMode:l,dataFormat:u}=s;Lx(a,"maxPool3d");const c=Wr(a.shape,r,i,1,o,l,u),h=qw(n.data.get(a.dataId).values,a.shape,a.dtype,Z(a.shape),c,"max");return n.makeTensorInfo(h.shape,"float32",h.values)}},SN={kernelName:jt,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{dy:a,input:r}=t,{filterSize:i,strides:o,pad:l,dimRoundingMode:u}=s;Lx([a,r],"maxPool3DGrad");const c=Wr(r.shape,i,o,1,l,u),h=function(e,t){const n=Ha(t.outShape,"int32"),s=t.strideDepth,a=t.strideHeight,r=t.strideWidth,i=t.dilationDepth,o=t.dilationHeight,l=t.dilationWidth,u=t.effectiveFilterDepth,c=t.effectiveFilterHeight,h=t.effectiveFilterWidth,p=t.padInfo.front,d=t.padInfo.top,f=t.padInfo.left;for(let m=0;m<t.batchSize;++m)for(let g=0;g<t.inChannels;++g)for(let y=0;y<t.outDepth;++y){const b=y*s-p;let x=b;for(;x<0;)x+=i;const w=Math.min(t.inDepth,u+b);for(let s=0;s<t.outHeight;++s){const u=s*a-d;let p=u;for(;p<0;)p+=o;const k=Math.min(t.inHeight,c+u);for(let a=0;a<t.outWidth;++a){const d=a*r-f;let v=d;for(;v<0;)v+=l;const N=Math.min(t.inWidth,h+d);let I=Number.NEGATIVE_INFINITY,$=-1;for(let t=x;t<w;t+=i){const n=t-b;for(let s=p;s<k;s+=o){const a=s-u;for(let r=v;r<N;r+=l){const i=r-d,o=e.get(m,t,s,r,g);o>=I&&(I=o,$=n*c*h+a*c+i)}}}n.set($,m,y,s,a,g)}}}return n}(n.bufferSync(r),c),p=c.strideDepth,d=c.strideHeight,f=c.strideWidth,m=c.dilationDepth,g=c.dilationHeight,y=c.dilationWidth,b=c.effectiveFilterDepth,x=c.effectiveFilterHeight,w=c.effectiveFilterWidth,k=b-1-c.padInfo.front,v=w-1-c.padInfo.left,N=x-1-c.padInfo.top,I=Ha(r.shape,"float32"),$=n.bufferSync(a);for(let e=0;e<c.batchSize;++e)for(let t=0;t<c.inChannels;++t)for(let n=0;n<c.inDepth;++n)for(let s=0;s<c.inHeight;++s)for(let a=0;a<c.inWidth;++a){const r=n-k,i=s-N,o=a-v;let l=0;for(let n=0;n<b;n+=m){const s=(r+n)/p;if(!(s<0||s>=c.outDepth||Math.floor(s)!==s))for(let a=0;a<x;a+=g){const r=(i+a)/d;if(!(r<0||r>=c.outHeight||Math.floor(r)!==r))for(let i=0;i<w;i+=y){const u=(o+i)/f;if(u<0||u>=c.outWidth||Math.floor(u)!==u)continue;const p=b*x*w-1-h.get(e,s,r,u,t)===n*x*w+a*w+i?1:0;0!==p&&(l+=$.get(e,s,r,u,t)*p)}}}I.set(l,e,n,s,a,t)}return 
n.makeTensorInfo(I.shape,I.dtype,I.values)}},TN={kernelName:Gt,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{dy:a,input:r,output:i}=t,o=r;Lx([r,i],"maxPoolGrad");const{filterSize:l,strides:u,pad:c,dimRoundingMode:h}=s,p=Br(o.shape,l,u,1,c,h),d=n.data.get(o.dataId).values,f=Ha(p.outShape,o.dtype,Kw(d,o.shape,o.dtype,p).values),m=p.strideHeight,g=p.strideWidth,y=p.dilationHeight,b=p.dilationWidth,x=p.effectiveFilterHeight,w=p.effectiveFilterWidth,k=w-1-p.padInfo.left,v=x-1-p.padInfo.top,N=Ha(o.shape,"float32"),I=n.data.get(a.dataId).values,$=Ha(a.shape,"float32",I);for(let e=0;e<p.batchSize;++e)for(let t=0;t<p.inChannels;++t)for(let n=0;n<p.inHeight;++n)for(let s=0;s<p.inWidth;++s){const a=n-v,r=s-k;let i=0;for(let n=0;n<x;n+=y){const s=(a+n)/m;if(!(s<0||s>=p.outHeight||Math.floor(s)!==s))for(let a=0;a<w;a+=b){const o=(r+a)/g;if(o<0||o>=p.outWidth||Math.floor(o)!==o)continue;const l=x*w-1-f.get(e,s,o,t)===n*w+a?1:0;0!==l&&(i+=$.get(e,s,o,t)*l)}}N.set(i,e,n,s,t)}return n.makeTensorInfo(N.shape,N.dtype,N.values)}},EN={kernelName:Kt,backendName:"cpu",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{filterSize:a,strides:r,pad:i,includeBatchInIndex:o}=t,l=n;Lx(s,"MaxPoolWithArgmax");const u=l.data.get(s.dataId).values,c=Br(s.shape,a,r,[1,1],i),[h,p]=function(e,t,n,s,a){const r=jw(e,0,n,Z(t),a,"max"),i=Kw(e,t,n,a,!0,s);return[r.values,i.values]}(u,s.shape,s.dtype,o,c),d=l.write(h,c.outShape,s.dtype),f=l.write(p,c.outShape,s.dtype);return[{dataId:d,shape:c.outShape,dtype:s.dtype},{dataId:f,shape:c.outShape,dtype:"int32"}]}};function AN(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{axis:r,keepDims:i}=s;let o;Lx(a,"sum"),o="bool"===a.dtype?lw({inputs:{x:a},backend:n,attrs:{dtype:"int32"}}):Gx({inputs:{x:a},backend:n});const l=o.shape.length,u=B(r,o.shape),c=Zi(u,l);let h=u,p=o;null!=c&&(p=Ew({inputs:{x:o},backend:n,attrs:{perm:c}}),h=eo(h.length,l)),Ji("sum",h,p.shape.length);const[d,f]=Xi(p.shape,h);let m=rw(n,d,Vs(p.dtype,"int32"));const g=D(f),y=n.data.get(m.dataId).values,b=n.data.get(p.dataId).values;for(let e=0;e<y.length;++e){const t=e*g;let n=0;for(let e=0;e<g;++e)n+=b[t+e];y[e]=n}if(i){const e=m;m=gw({inputs:{x:m},backend:n,attrs:{shape:Yi(m.shape,u)}}),n.disposeIntermediateTensorInfo(e)}return n.disposeIntermediateTensorInfo(o),null!=c&&n.disposeIntermediateTensorInfo(p),m}const RN={kernelName:Ln,backendName:"cpu",kernelFunc:AN},FN={kernelName:qt,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{axis:r,keepDims:i}=s,o=B(r,a.shape),l=D(Xi(a.shape,o)[1]),u=[],c=n.makeTensorInfo([],"float32",new Float32Array([l]));u.push(c);const h=lw({inputs:{x:a},backend:n,attrs:{dtype:"float32"}});u.push(h);const p=dv({inputs:{a:h,b:c},backend:n});u.push(p);const d=AN({inputs:{x:p},backend:n,attrs:{axis:r,keepDims:i}});return u.forEach((e=>n.disposeIntermediateTensorInfo(e))),d}},DN={kernelName:Xt,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{axis:r,keepDims:i}=s;Lx(a,"min");const o=B(r,a.shape);let l=o;const u=Zi(l,a.shape.length);let c=a;null!=u&&(c=Ew({inputs:{x:a},backend:n,attrs:{perm:u}}),l=eo(l.length,a.shape.length)),Ji("min",l,c.shape.length);const[h,p]=Xi(c.shape,l),d=D(p),f=ne(D(h),c.dtype),m=n.data.get(c.dataId).values;for(let e=0;e<f.length;++e){const t=e*d;let n=m[t];for(let e=0;e<d;++e){const s=m[t+e];s<n&&(n=s)}f[e]=n}null!=u&&n.disposeIntermediateTensorInfo(c);const g=n.makeTensorInfo(h,c.dtype,f);if(i){const e=gw({inputs:{x:g},backend:n,attrs:{shape:Yi(h,o)}});return 
n.disposeIntermediateTensorInfo(g),e}return g}},_N=qx(((e,t)=>Math.min(e,t))),ON=cw(Yt,_N),MN={kernelName:Yt,backendName:"cpu",kernelFunc:ON},LN={kernelName:Jt,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{paddings:r,mode:i}=s;Lx(a,"mirrorPad");const o=r.map(((e,t)=>e[0]+a.shape[t]+e[1])),l=r.map((e=>e[0])),u=r.map(((e,t)=>e[0]+a.shape[t])),c="reflect"===i?0:1,h=n.data.get(a.dataId).values,p=a.shape.length,d=Z(a.shape),f=D(o),m=o.length,g=Z(o),y=V(a.dtype,f);for(let e=0;e<f;e++){let t=ie(e,m,g);for(let e=0;e<m;e++)t[e]<l[e]?t[e]=2*l[e]-t[e]-c:t[e]>=u[e]&&(t[e]=2*(u[e]-1)-t[e]+c);t=t.map(((e,t)=>e-l[t]));const n=re(t,p,d);y[e]=h[n]}return{dataId:n.write(y,o,a.dtype),shape:o,dtype:a.dtype}}},zN=qx(((e,t)=>{const n=e%t;return e<0&&t<0||e>=0&&t>=0?n:(n+t)%t})),PN=cw(Zt,zN),BN={kernelName:Zt,backendName:"cpu",kernelFunc:PN};function WN(e){const{inputs:t,backend:n,attrs:s}=e,{logits:a}=t,{dim:r}=s,i=a.shape.length;let o=r;if(-1===o&&(o=i-1),o!==i-1)throw Error(`Softmax along a non-last dimension is not yet supported. Logits was rank ${i} and dim was ${o}`);const l=B([o],a.shape),u=wN({inputs:{x:a},backend:n,attrs:{reductionIndices:l,keepDims:!1}}),c=Yi(u.shape,l),h=gw({inputs:{x:u},backend:n,attrs:{shape:c}}),p=yv({inputs:{a,b:h},backend:n}),d=tv({inputs:{x:p},backend:n}),f=AN({inputs:{x:d},backend:n,attrs:{axis:l,keepDims:!1}}),m=gw({inputs:{x:f},backend:n,attrs:{shape:c}}),g=dv({inputs:{a:d,b:m},backend:n});return n.disposeIntermediateTensorInfo(u),n.disposeIntermediateTensorInfo(h),n.disposeIntermediateTensorInfo(p),n.disposeIntermediateTensorInfo(d),n.disposeIntermediateTensorInfo(f),n.disposeIntermediateTensorInfo(m),g}const VN={kernelName:Bn,backendName:"cpu",kernelFunc:WN},UN={kernelName:Qt,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{logits:a}=t,{numSamples:r,seed:i,normalized:o}=s;Lx(a,"multinomial");const l=o?a:WN({inputs:{logits:a},backend:n,attrs:{dim:-1}}),u=l.shape[0],c=l.shape[1],h=n.data.get(l.dataId).values,p=[u,r],d=ne(D(p),"int32");for(let e=0;e<u;++e){const t=e*c,n=new Float32Array(c-1);n[0]=h[t];for(let e=1;e<n.length;++e)n[e]=n[e-1]+h[t+e];const s=ho.alea(i.toString()),a=e*r;for(let e=0;e<r;++e){const t=s();d[a+e]=n.length;for(let s=0;s<n.length;s++)if(t<n[s]){d[a+e]=s;break}}}return o||n.disposeIntermediateTensorInfo(l),n.makeTensorInfo(p,"int32",d)}};function GN(e,t,n){const s=gs(-1,n);return lv([],t,s,e,n)}const HN={kernelName:tn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n}=e,{x:s}=t;Lx(s,"neg");const a=n.data.get(s.dataId).values,[r,i]=GN(a,s.shape,s.dtype);return n.makeTensorInfo(i,s.dtype,r)}},jN=bl,KN={kernelName:sn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{boxes:a,scores:r}=t,{maxOutputSize:i,iouThreshold:o,scoreThreshold:l}=s;Lx(a,"NonMaxSuppression");const u=n.data.get(a.dataId).values,c=n.data.get(r.dataId).values,{selectedIndices:h}=jN(u,c,i,o,l);return n.makeTensorInfo([h.length],"int32",new Int32Array(h))}},qN=xl,XN={kernelName:an,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{boxes:a,scores:r}=t,{maxOutputSize:i,iouThreshold:o,scoreThreshold:l,padToMaxOutputSize:u}=s;Lx(a,"NonMaxSuppressionPadded");const c=n.data.get(a.dataId).values,h=n.data.get(r.dataId).values,{selectedIndices:p,validOutputs:d}=qN(c,h,i,o,l,u);return[n.makeTensorInfo([p.length],"int32",new Int32Array(p)),n.makeTensorInfo([],"int32",new 
Int32Array([d]))]}},YN=wl,JN={kernelName:rn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{boxes:a,scores:r}=t,{maxOutputSize:i,iouThreshold:o,scoreThreshold:l,softNmsSigma:u}=s;Lx(a,"NonMaxSuppressionWithScore");const c=n.data.get(a.dataId).values,h=n.data.get(r.dataId).values,p=i,d=o,f=l,m=u,{selectedIndices:g,selectedScores:y}=YN(c,h,p,d,f,m);return[n.makeTensorInfo([g.length],"int32",new Int32Array(g)),n.makeTensorInfo([y.length],"float32",new Float32Array(y))]}},ZN=qx(((e,t)=>e!==t?1:0)),QN=cw(nn,ZN,null,"bool"),eI={kernelName:nn,backendName:"cpu",kernelFunc:QN},tI={kernelName:ln,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{indices:a}=t,{depth:r,onValue:i,offValue:o}=s;Lx(a,"oneHot");const l=D(a.shape),u=new Float32Array(l*r);u.fill(o);const c=n.data.get(a.dataId).values;for(let e=0;e<l;++e)c[e]>=0&&c[e]<r&&(u[e*r+c[e]]=i);return n.makeTensorInfo([...a.shape,r],"int32",u)}};function nI(e){const{inputs:t,backend:n}=e,{x:s}=t;if("string"===s.dtype)throw new Error("zerosLike is not supported for string tensors");if("complex64"===s.dtype){const e=iw({inputs:{input:s},backend:n}),t=nI({inputs:{x:e},backend:n}),a=mk({inputs:{input:s},backend:n}),r=nI({inputs:{x:a},backend:n}),i=sw({inputs:{real:t,imag:r},backend:n});return n.disposeIntermediateTensorInfo(e),n.disposeIntermediateTensorInfo(t),n.disposeIntermediateTensorInfo(a),n.disposeIntermediateTensorInfo(r),i}return Nv({backend:n,attrs:{shape:s.shape,value:0,dtype:s.dtype}})}const sI={kernelName:ts,backendName:"cpu",kernelFunc:nI},aI={kernelName:on,backendName:"cpu",kernelFunc:function e(t){const{inputs:n,backend:s}=t,{x:a}=n;if("string"===a.dtype)throw new Error("onesLike is not supported for string tensors");if("complex64"===a.dtype){const t=iw({inputs:{input:a},backend:s}),n=e({inputs:{x:t},backend:s}),r=mk({inputs:{input:a},backend:s}),i=nI({inputs:{x:r},backend:s}),o=sw({inputs:{real:n,imag:i},backend:s});return s.disposeIntermediateTensorInfo(t),s.disposeIntermediateTensorInfo(n),s.disposeIntermediateTensorInfo(r),s.disposeIntermediateTensorInfo(i),o}return Nv({backend:s,attrs:{shape:a.shape,value:1,dtype:a.dtype}})}};function rI(e){const{inputs:t,backend:n,attrs:s}=e,{axis:a}=s;if(1===t.length)return sv({inputs:{input:t[0]},backend:n,attrs:{dim:a}});const r=t[0].shape,i=t[0].dtype;t.forEach((e=>{A(r,e.shape,"All tensors passed to stack must have matching shapes"),E(i===e.dtype,(()=>"All tensors passed to stack must have matching dtypes"))}));const o=[],l=yk({inputs:t.map((e=>{const t=sv({inputs:{input:e},backend:n,attrs:{dim:a}});return o.push(t),t})),backend:n,attrs:{axis:a}});return o.forEach((e=>n.disposeIntermediateTensorInfo(e))),l}const iI={kernelName:un,backendName:"cpu",kernelFunc:rI},oI={kernelName:cn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{paddings:r,constantValue:i}=s;Lx(a,"pad");const o=r.map(((e,t)=>e[0]+a.shape[t]+e[1])),l=r.map((e=>e[0])),u=n.data.get(a.dataId).values,c=D(a.shape),h=a.shape.length,p=Z(a.shape),d=D(o),f=o.length,m=Z(o),g=V(a.dtype,d);0!==i&&g.fill(i);for(let e=0;e<c;e++)g[re(ie(e,h,p).map(((e,t)=>e+l[t])),f,m)]=u[e];return{dataId:n.write(g,o,a.dtype),shape:o,dtype:a.dtype}}},lI=qx(((e,t)=>Math.pow(e,t))),uI=cw(hn,lI),cI={kernelName:hn,backendName:"cpu",kernelFunc:uI};function hI(e,t,n,s){const[a,r]=Xi(e,s),i=Vs(t,"int32"),o=ne(D(a),i),l=D(r);for(let e=0;e<o.length;++e){const t=e*l;let s=1;for(let e=0;e<l;++e)s*=n[t+e];o[e]=s}return{outVals:o,outShape:a,outDtype:i}}const 
pI={kernelName:dn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{axis:r,keepDims:i}=s;Lx(a,"prod");const o=a.shape.length,l=B(r,a.shape),u=Zi(l,o);let c=l,h=a;const p=[];null!=u&&(h=Ew({inputs:{x:a},backend:n,attrs:{perm:u}}),p.push(h),c=eo(c.length,o));const d=n.data.get(h.dataId).values,{outVals:f,outShape:m,outDtype:g}=hI(h.shape,h.dtype,d,c);let y=m;return i&&(y=Yi(m,l)),p.forEach((e=>n.disposeIntermediateTensorInfo(e))),n.makeTensorInfo(y,g,f)}};function dI(e,t,n,s){if(e===t||e<t&&n<0||t<e&&n>1)return ne(0,s);const a=ne(Math.abs(Math.ceil((t-e)/n)),s);t<e&&1===n&&(n=-1),a[0]=e;for(let e=1;e<a.length;e++)a[e]=a[e-1]+n;return a}const fI={kernelName:fn,backendName:"cpu",kernelFunc:function(e){const{backend:t,attrs:n}=e,{start:s,stop:a,dtype:r,step:i}=n,o=dI(s,a,i,r);return t.makeTensorInfo([o.length],r,o)}},mI=Bx(gn,(e=>1/e)),gI={kernelName:gn,backendName:"cpu",kernelFunc:mI},yI={kernelName:kn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{images:a}=t,{alignCorners:r,halfPixelCenters:i,size:o}=s;Lx(a,"resizeBilinear");const l=Z(a.shape),[u,c]=o,[h,p,d,f]=a.shape,m=n.data.get(a.dataId).values,g=new Float32Array(D([h,u,c,f])),y=[r&&u>1?p-1:p,r&&c>1?d-1:d],b=[r&&u>1?u-1:u,r&&c>1?c-1:c];let x=0;const w=y[0]/b[0],k=y[1]/b[1];for(let e=0;e<h;e++)for(let t=0;t<u;t++){let n;n=i?w*(t+.5)-.5:w*t;const s=Math.max(0,Math.floor(n)),a=n-s,r=Math.min(p-1,Math.ceil(n)),o=e*l[0]+s*l[1],u=e*l[0]+r*l[1];for(let e=0;e<c;e++){let t;t=i?k*(e+.5)-.5:k*e;const n=Math.max(0,Math.floor(t)),s=t-n,r=Math.min(d-1,Math.ceil(t)),c=o+n*l[2],h=u+n*l[2],p=o+r*l[2],y=u+r*l[2];for(let e=0;e<f;e++){const t=m[c+e],n=m[h+e],r=t+(m[p+e]-t)*s,i=r+(n+(m[y+e]-n)*s-r)*a;g[x++]=i}}}return n.makeTensorInfo([h,u,c,f],"float32",g)}},bI={kernelName:vn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{images:a,dy:r}=t,{alignCorners:i}=s;Lx([r,a],"resizeBilinearGrad");const o=Z(a.shape),[l,u,c,h]=a.shape,[,p,d]=r.shape,f=new Float32Array(l*u*c*h),m=[i&&p>1?u-1:u,i&&d>1?c-1:c],g=[i&&p>1?p-1:p,i&&d>1?d-1:d],y=m[0]/g[0],b=m[1]/g[1],x=n.data.get(r.dataId).values;let w=0;for(let e=0;e<l;e++){const t=e*o[0];for(let e=0;e<p;e++){const n=e*y,s=Math.floor(n),a=Math.min(Math.ceil(n),u-1),r=t+s*o[1],i=t+a*o[1],l=n-s,p=1-l;for(let e=0;e<d;e++){const t=e*b,n=Math.floor(t),s=Math.min(Math.ceil(t),c-1),a=t-n,u=1-a,d=r+n*o[2],m=r+s*o[2],g=i+n*o[2],y=i+s*o[2],k=p*u,v=p*a,N=l*u,I=l*a;for(let e=0;e<h;e++){const t=x[w++];f[d+e]+=t*k,f[m+e]+=t*v,f[g+e]+=t*N,f[y+e]+=t*I}}}}return n.makeTensorInfo([l,c,u,h],"float32",f)}},xI={kernelName:xn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{images:a}=t,{alignCorners:r,halfPixelCenters:i,size:o}=s;Lx(a,"resizeNearestNeighbor");const l=Z(a.shape),[u,c]=o,[h,p,d,f]=a.shape,m=n.data.get(a.dataId).values,g=new Float32Array(h*u*c*f),y=[r&&u>1?p-1:p,r&&c>1?d-1:d],b=[r&&u>1?u-1:u,r&&c>1?c-1:c],x=y[0]/b[0],w=y[1]/b[1];let k=0;for(let e=0;e<h;e++){const t=e*l[0];for(let e=0;e<u;e++){const n=i?x*(e+.5):x*e;let s=Math.min(p-1,r?Math.round(n):Math.floor(n));i&&(s=Math.max(0,s));const a=t+s*l[1];for(let e=0;e<c;e++){const t=i?w*(e+.5):w*e;let n=Math.min(d-1,r?Math.round(t):Math.floor(t));i&&(n=Math.max(0,n));const s=a+n*l[2];for(let e=0;e<f;e++){const t=m[s+e];g[k++]=t}}}}return n.makeTensorInfo([h,u,c,f],a.dtype,g)}},wI={kernelName:wn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{images:a,dy:r}=t,{alignCorners:i}=s;Lx([r,a],"resizeNearestNeighborGrad");const 
o=Z(a.shape),l=Z(r.shape),[u,c,h,p]=a.shape,[,d,f]=r.shape,m=new Float32Array(u*c*h*p),g=n.data.get(r.dataId).values,y=[i&&d>1?c-1:c,i&&f>1?h-1:h],b=[i&&d>1?d-1:d,i&&f>1?f-1:f],x=y[0]/b[0],w=y[1]/b[1],k=1/x,v=1/w,N=2*Math.ceil(k)+2,I=2*Math.ceil(v)+2;for(let e=0;e<u;e++){const t=e*o[0];for(let e=0;e<c;e++){const n=t+e*o[1],s=Math.floor(e*k),a=Math.floor(s-N/2);for(let s=0;s<h;s++){const r=n+s*o[2],u=Math.floor(s*v),y=Math.floor(u-I/2);for(let n=0;n<p;n++){let o=0;for(let r=0;r<N;r++){const u=r+a;if(u<0||u>=d)continue;const p=t+u*l[1],m=u*x;if(e===Math.min(c-1,i?Math.round(m):Math.floor(m)))for(let e=0;e<I;e++){const t=e+y;if(t<0||t>=f)continue;const a=p+t*l[2],r=t*w;s===Math.min(h-1,i?Math.round(r):Math.floor(r))&&(o+=g[a+n])}}m[r+n]=o}}}}return n.makeTensorInfo(a.shape,a.dtype,m)}},kI={kernelName:In,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{dims:r}=s;Lx(a,"reverse");const i=a.shape.length,o=B(r,a.shape);if(0===i)return Gx({inputs:{x:a},backend:n});const l=new Es(a.shape,a.dtype),u=n.bufferSync(a);for(let e=0;e<l.size;e++){const t=l.indexToLoc(e),n=t.slice();o.forEach((e=>n[e]=a.shape[e]-1-n[e])),l.set(u.get(...n),...t)}return n.makeTensorInfo(l.shape,l.dtype,l.values)}},vI={kernelName:as,backendName:"cpu",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{image:s}=e,{radians:a,fillValue:r,center:i}=t,o=n,l=V(s.dtype,D(s.shape)),[u,c,h,p]=s.shape,[d,f]=ou(i,c,h),m=Math.sin(a),g=Math.cos(a),y=o.data.get(s.dataId).values;for(let e=0;e<u;e++){const t=e*h*c*p;for(let e=0;e<c;e++){const n=e*(h*p);for(let s=0;s<h;s++){const a=s*p;for(let i=0;i<p;i++){const o=[u,e,s,i],b=o[2],x=o[1];let w=(b-d)*g-(x-f)*m,k=(b-d)*m+(x-f)*g;w=Math.round(w+d),k=Math.round(k+f);let v=r;"number"!=typeof r&&(v=3===i?255:r[i]),w>=0&&w<h&&k>=0&&k<c&&(v=y[t+k*(h*p)+w*p+i]),l[t+n+a+i]=v}}}}return{dataId:o.write(l,s.shape,s.dtype),shape:s.shape,dtype:s.dtype}}},NI=Bx($n,(e=>{const t=Math.floor(e);return e-t<.5?Math.floor(e):e-t>.5?Math.ceil(e):t%2==0?t:t+1})),II={kernelName:$n,backendName:"cpu",kernelFunc:NI},$I=ok((e=>1/Math.sqrt(e))),CI=Wx(Cn,$I),SI={kernelName:Cn,backendName:"cpu",kernelFunc:CI};function TI(e,t,n,s,a,r,i,o,l,u){const c=[s/a,a],h=e.values,p=t.values;if(0===s)return Ha(n,t.dtype);const d=Ha(c,t.dtype);d.values.fill(l);for(let e=0;e<r;e++){const r=[];let l=0;for(let t=0;t<i;t++){const n=h[e*i+t];r.push(n),l+=n*o[t]}if(l<0||l>=s/a)throw new Error(`Invalid indices: ${r} does not index into ${n}`);for(let n=0;n<a;n++)u?d.values[l*a+n]+=p[e*a+n]:d.values[l*a+n]=0===t.rank?p[0]:p[e*a+n]}return d}const EI={kernelName:Sn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{indices:a,updates:r}=t,{shape:i}=s,{sliceRank:o,numUpdates:l,sliceSize:u,strides:c,outputSize:h}=gu(0,a,i),p=TI(n.bufferSync(a),n.bufferSync(r),i,h,u,l,o,c,0,!0);return n.makeTensorInfo(i,p.dtype,p.values)}},AI={kernelName:Tn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n}=e,{condition:s,t:a,e:r}=t;Lx([s,a,r],"select");const i=s.shape.length,o=n.data.get(s.dataId).values,l=n.data.get(a.dataId).values,u=n.data.get(r.dataId).values,c=Vs(a.dtype,r.dtype),h=ne(D(a.shape),c);let p=0;const d=0===i||i>1||1===a.shape.length?1:D(a.shape.slice(1));for(let e=0;e<o.length;e++)for(let t=0;t<d;t++)1===o[e]?h[p++]=l[e]:h[p++]=u[e];return 
n.makeTensorInfo(a.shape,c,h)}},RI=yu,FI=bu,DI=Bx(En,(e=>e>=0?FI*e:RI*(Math.exp(e)-1))),_I={kernelName:En,backendName:"cpu",kernelFunc:DI},OI=Bx(_n,(e=>1/(1+Math.exp(-e)))),MI={kernelName:_n,backendName:"cpu",kernelFunc:OI},LI=Bx(Dn,(e=>e<0?-1:e>0?1:0)),zI={kernelName:Dn,backendName:"cpu",kernelFunc:LI},PI=Bx(Rn,(e=>Math.sin(e))),BI={kernelName:Rn,backendName:"cpu",kernelFunc:PI},WI=Bx(Fn,(e=>Math.sinh(e))),VI={kernelName:Fn,backendName:"cpu",kernelFunc:WI},UI=Math.log(1.1920928955078125e-7)+2,GI=Bx(On,(e=>{const t=e>-UI,n=e<UI,s=Math.exp(e);let a;return a=n?s:t?e:Math.log(1+s),a})),HI={kernelName:On,backendName:"cpu",kernelFunc:GI},jI={kernelName:zn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{blockShape:r,paddings:i}=s;Lx([a],"spaceToBatchND");const o=D(r),l=[[0,0]];l.push(...i);for(let e=1+r.length;e<a.shape.length;++e)l.push([0,0]);const u=oI.kernelFunc({inputs:{x:a},backend:n,attrs:{paddings:l,constantValue:0}}),c=lu(u.shape,r,o,!1),h=uu(c.length,r.length,!1),p=cu(u.shape,r,o,!1),d=gw({inputs:{x:u},backend:n,attrs:{shape:c}}),f=Ew({inputs:{x:d},backend:n,attrs:{perm:h}}),m=gw({inputs:{x:f},backend:n,attrs:{shape:p}});return n.disposeIntermediateTensorInfo(u),n.disposeIntermediateTensorInfo(d),n.disposeIntermediateTensorInfo(f),m}},KI={kernelName:Gn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{sparseIndices:a,sparseValues:r,defaultValue:i}=t,{outputShape:o}=s,{sliceRank:l,numUpdates:u,sliceSize:c,strides:h,outputSize:p}=gu(0,a,o),d=TI(n.bufferSync(a),n.bufferSync(r),o,p,c,u,l,h,n.data.get(i.dataId).values[0],!1);return n.makeTensorInfo(o,d.dtype,d.values)}},qI={kernelName:Pn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{numOrSizeSplits:r,axis:i}=s,o=B(i,a.shape)[0],l=Ou(a,r,o),u=new Array(a.shape.length).fill(0),c=a.shape.slice();return l.map((e=>{const t=[...c];t[o]=e;const s=tk({inputs:{x:a},backend:n,attrs:{begin:u,size:t}});return u[o]+=e,s}))}},XI=Bx(Mn,(e=>Math.sqrt(e))),YI={kernelName:Mn,backendName:"cpu",kernelFunc:XI},JI={kernelName:Vn,backendName:"cpu",kernelFunc:({inputs:e,backend:t})=>{const{x:n}=e,s=t;Lx(n,"square");const a=s.data.get(n.dataId).values,r=new Float32Array(a.length);for(let e=0;e<a.length;++e){const t=a[e];r[e]=t*t}return{dataId:s.write(r,n.shape,n.dtype),shape:n.shape,dtype:n.dtype}}},ZI=qx(((e,t)=>{const n=e-t;return n*n})),QI=cw(Wn,ZI),e$={kernelName:Wn,backendName:"cpu",kernelFunc:QI},t$=Bx(ns,((e,t)=>{const n=t;return isNaN(e)?NaN:e>0?1:n.alpha})),n$={kernelName:ns,backendName:"cpu",kernelFunc:t$};function s$(e,t,n,s){const a=Ha(e,t.dtype);for(let e=0;e<a.size;e++){const r=a.indexToLoc(e),i=new Array(r.length);for(let e=0;e<i.length;e++)i[e]=r[e]*n[e]+s[e];a.set(t.get(...i),...r)}return a}const a$={kernelName:Hn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{begin:r,end:i,strides:o,beginMask:l,endMask:u,ellipsisMask:c,newAxisMask:h,shrinkAxisMask:p}=s;Lx(a,"stridedSlice");const{nonStrided:d,$begin:f,$strides:m,size:g,newShape:y,outShape:b}=wr(a.shape,r,i,o,l,u,c,h,p),x=gw({inputs:{x:a},backend:n,attrs:{shape:y}});let w;if(d){const e=tk({inputs:{x},backend:n,attrs:{begin:f,size:g}});w=gw({inputs:{x:e},backend:n,attrs:{shape:b}}),n.disposeIntermediateTensorInfo(e)}else if(b.some((e=>0===e)))w=n.makeTensorInfo(b,a.dtype,[]);else{const e=s$(b,n.bufferSync(x),m,f);w=n.makeTensorInfo(e.shape,e.dtype,e.values)}const k=gw({inputs:{x:w},backend:n,attrs:{shape:b}});return 
n.disposeIntermediateTensorInfo(x),n.disposeIntermediateTensorInfo(w),k}},r$=Bx(jn,(e=>Math.tan(e))),i$={kernelName:jn,backendName:"cpu",kernelFunc:r$},o$=Bx(Kn,(e=>Math.tanh(e)));function l$(e,t){const n=new Array(e.rank);for(let s=0;s<n.length;s++)n[s]=e.shape[s]*t[s];const s=Ha(n,e.dtype);for(let t=0;t<s.values.length;++t){const n=s.indexToLoc(t),a=new Array(e.rank);for(let t=0;t<a.length;t++)a[t]=n[t]%e.shape[t];const r=e.locToIndex(a);s.values[t]=e.values[r]}return s}function u$(e,t,n,s,a){const r=t[t.length-1],[i,o]=[e.length/r,r],l=V(n,i*s),u=V("int32",i*s);for(let t=0;t<i;t++){const n=t*o,a=e.subarray(n,n+o),r=[];for(let e=0;e<a.length;e++)r.push({value:a[e],index:e});r.sort(((e,t)=>t.value-e.value));const i=t*s,c=l.subarray(i,i+s),h=u.subarray(i,i+s);for(let e=0;e<s;e++)c[e]=r[e].value,h[e]=r[e].index}const c=t.slice();return c[c.length-1]=s,[Ha(c,n,l),Ha(c,"int32",u)]}function c$(e,t,n){switch(n){case"reflect":return function(e,t){let n=e;if(n<0)if(t<=1)n=0;else{const e=2*t;n<e&&(n=e*Math.trunc(-n/e)+n),n=n<-t?n+e:-n-1}else if(n>t-1)if(t<=1)n=0;else{const e=2*t;n-=e*Math.trunc(n/e),n>=t&&(n=e-n-1)}return S(0,n,t-1)}(e,t);case"wrap":return function(e,t){let n=e;if(n<0)if(t<=1)n=0;else{const e=t-1;n+=t*(Math.trunc(-n/e)+1)}else if(n>t-1)if(t<=1)n=0;else{const e=t-1;n-=t*Math.trunc(n/e)}return S(0,n,t-1)}(e,t);case"nearest":return function(e,t){return S(0,e,t-1)}(e,t);case"constant":default:return function(e,t){return e}(e)}}function h$(e,t,n,s,a,r,i,o,l,u,c){return 0<=o&&o<t&&0<=l&&l<n?e[i*s+o*a+l*r+u]:c}function p$(e,t,n,s,a,r,i,o,l,u,c){return h$(e,t,n,s,a,r,i,Math.round(o),Math.round(l),u,c)}function d$(e,t,n,s,a,r,i,o,l,u,c){const h=Math.floor(o),p=Math.floor(l),d=h+1,f=p+1;return(d-o)*((f-l)*h$(e,t,n,s,a,r,i,h,p,u,c)+(l-p)*h$(e,t,n,s,a,r,i,h,f,u,c))+(o-h)*((f-l)*h$(e,t,n,s,a,r,i,d,p,u,c)+(l-p)*h$(e,t,n,s,a,r,i,d,f,u,c))}function f$(e,t,n,s){const a=B(t,n)[0],r=[1,n[0],1];for(let e=0;e<a;e++)r[0]*=n[e];r[1]=n[a];for(let e=a+1;e<n.length;e++)r[2]*=n[e];const i={},o=new Int32Array(n[a]),l=new Es(r,s,e),u=[],c=1===r[0]&&1===r[2];for(let t=0;t<n[a];t++){let n;if(c)n=e[t].toString();else{const e=[];for(let n=0;n<r[0];n++)for(let s=0;s<r[2];s++)e.push(l.get(n,t,s));n=e.join(",")}if(void 0!==i[n])o[t]=i[n];else{const e=Object.keys(i).length;i[n]=e,o[t]=e,u.push(t)}}const h=r.slice();h[1]=Object.keys(i).length;const p=new Es(h,s);u.forEach(((e,t)=>{for(let n=0;n<r[0];n++)for(let s=0;s<r[2];s++)p.set(l.get(n,e,s),n,t,s)}));const d=n.slice();return d[a]=h[1],{outputValues:p.values,outputShape:d,indices:o}}const m$=[ww,vw,Iw,Cw,mw,Sw,Rw,Fw,Dw,_w,Mw,zw,Bw,Uw,Hw,Xw,Yw,Jw,Zw,xw,Qw,sk,ik,uw,ck,pk,aw,dk,bk,kk,vk,wk,Ik,$k,Nk,Sk,Ek,Ak,Rk,Fk,Dk,Ok,Mk,Lk,zk,Pk,Wk,Bk,fv,Ux,Vk,Hk,Qk,nv,av,ov,vv,Iv,$v,Tv,Rv,Fv,Dv,_v,Mv,Pv,Vv,Hx,Uv,gk,Hv,Kv,Xv,Kx,Zv,tN,sN,iN,lN,hN,dN,gN,yN,bN,IN,$N,CN,SN,TN,EN,kN,FN,DN,MN,LN,BN,UN,hv,HN,KN,XN,JN,eI,tI,aI,iI,oI,cI,Jx,pI,fI,ow,gI,Qx,tw,yw,yI,bI,xI,wI,kI,vI,II,SI,EI,AI,_I,MI,zI,BI,VI,nk,VN,HI,jI,KI,qI,YI,JI,e$,n$,a$,bv,RN,i$,{kernelName:Kn,backendName:"cpu",kernelFunc:o$},{kernelName:qn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{reps:r}=s;Lx(a,"tile");const i=l$(n.bufferSync(a),r);return n.makeTensorInfo(i.shape,i.dtype,i.values)}},{kernelName:Xn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{k:r,sorted:i}=s;Lx(a,"topk");const 
o=n.data.get(a.dataId).values,[l,u]=u$(o,a.shape,a.dtype,r);return[n.makeTensorInfo(l.shape,l.dtype,l.values),n.makeTensorInfo(u.shape,u.dtype,u.values)]}},Aw,{kernelName:Yn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,attrs:n,backend:s}=e,{image:a,transforms:r}=t,{interpolation:i,fillMode:o,fillValue:l,outputShape:u}=n,[c,h,p,d]=a.shape,[f,m]=null!=u?u:[h,p],g=[c,f,m,d],y=Z(a.shape),b=y[0],x=y[1],w=y[2],k=V(a.dtype,D(g));k.fill(l);const v=s.data.get(a.dataId).values,N=s.data.get(r.dataId).values;for(let e=0;e<c;++e){const t=1===r.shape[0]?N:N.subarray(8*e,8*e+8);for(let n=0;n<f;++n)for(let s=0;s<m;++s)for(let a=0;a<d;++a){let r;const u=t[6]*s+t[7]*n+1;if(0===u)continue;const c=(t[0]*s+t[1]*n+t[2])/u,d=(t[3]*s+t[4]*n+t[5])/u,f=c$(c,p,o),m=c$(d,h,o);switch(i){case"nearest":r=p$(v,h,p,b,x,w,e,m,f,a,l);break;case"bilinear":r=d$(v,h,p,b,x,w,e,m,f,a,l);break;default:throw new Error(`Error in Transform: Expect 'nearest' or 'bilinear', but got ${i}`)}k[e*b+n*x+s*w+a]=r}return s.makeTensorInfo(g,a.dtype,k)}return{dataId:s.write(k,g,a.dtype),shape:a.shape,dtype:a.dtype}}},{kernelName:Zn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,attrs:n,backend:s}=e,{axis:a}=n,{x:r}=t;Lx(r,"unique");const i=s.data.get(r.dataId).values,{outputValues:o,outputShape:l,indices:u}=f$(i,a,r.shape,r.dtype);return[s.makeTensorInfo(l,r.dtype,o),s.makeTensorInfo([u.length],"int32",u)]}},{kernelName:Qn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{value:a}=t;let{axis:r}=s;r<0&&(r+=a.shape.length);const i=a.shape.length,o=a.shape[r],l=new Array(i-1);let u=0;for(let e=0;e<i;e++)e!==r&&(l[u++]=a.shape[e]);const c=new Array(i).fill(0),h=a.shape.slice();h[r]=1;const p=new Array(o);for(let e=0;e<p.length;e++){c[r]=e;const t=tk({inputs:{x:a},backend:n,attrs:{begin:c,size:h}});p[e]=gw({inputs:{x:t},backend:n,attrs:{shape:l}}),n.disposeIntermediateTensorInfo(t)}return p}},{kernelName:es,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,segmentIds:r}=t,{numSegments:i}=s;Lx(a,"unsortedSegmentSum");const o=[],l=[],u=a.shape.length-r.shape.length;let c=r;for(let e=0;e<u;++e){const t=sv({inputs:{input:c},backend:n,attrs:{dim:e+1}});c=t,l.push(t)}for(let e=0;e<i;++e){const t=gs(e,"int32"),s=n.makeTensorInfo([],"int32",t),r=Gk({inputs:{a:s,b:c},backend:n}),i=lw({inputs:{x:r},backend:n,attrs:{dtype:"float32"}}),u=cv({inputs:{a:i,b:a},backend:n}),h=AN({inputs:{x:u},backend:n,attrs:{axis:0,keepDims:!1}});o.push(h),l.push(s),l.push(r),l.push(i),l.push(u),l.push(h)}const h=rI({inputs:o,backend:n,attrs:{axis:0}});return l.forEach((e=>n.disposeIntermediateTensorInfo(e))),h}},sI];for(const e of m$)ds(e);const g$={},y$={alpha:!1,antialias:!1,premultipliedAlpha:!1,preserveDrawingBuffer:!1,depth:!1,stencil:!1,failIfMajorPerformanceCaveat:!0};function b$(e){if(!(e in g$)){const t=function(e){if(1!==e&&2!==e)throw new Error("Cannot get WebGL rendering context, WebGL is disabled.");const t=function(e){if("undefined"!=typeof OffscreenCanvas&&2===e)return new OffscreenCanvas(300,150);if("undefined"!=typeof document)return document.createElement("canvas");throw new Error("Cannot create a canvas in this context")}(e);return t.addEventListener("webglcontextlost",(t=>{t.preventDefault(),delete g$[e]}),!1),1===e?t.getContext("webgl",y$)||t.getContext("experimental-webgl",y$):t.getContext("webgl2",y$)}(e);if(null===t)return console.log("Could not get context for WebGL version",e),null;g$[e]=t}const t=g$[e];return t.isContextLost()?(delete 
g$[e],b$(e)):(t.disable(t.DEPTH_TEST),t.disable(t.STENCIL_TEST),t.disable(t.BLEND),t.disable(t.DITHER),t.disable(t.POLYGON_OFFSET_FILL),t.disable(t.SAMPLE_COVERAGE),t.enable(t.SCISSOR_TEST),t.enable(t.CULL_FACE),t.cullFace(t.BACK),g$[e])}var x$,w$,k$;function v$(e,t){return[t,e]}function N$(e){const t=D(e);return M(Math.ceil(t/4))}function I$(e,t){return[Math.max(1,Math.ceil(t/2)),Math.max(1,Math.ceil(e/2))]}function $$(e,t){const n=e;let s,a,r,i,o,l,u,c,h,p;return 2===ue().getNumber("WEBGL_VERSION")?(s=n.R32F,a=n.R16F,r=n.RGBA16F,i=n.RGBA32F,o=n.RED,u=4,c=1,h=n.HALF_FLOAT,p=n.FLOAT):(s=e.RGBA,a=e.RGBA,r=e.RGBA,i=n.RGBA,o=e.RGBA,u=4,c=4,h=null!=t?t.HALF_FLOAT_OES:null,p=e.FLOAT),l=e.RGBA,{internalFormatFloat:s,internalFormatHalfFloat:a,internalFormatPackedHalfFloat:r,internalFormatPackedFloat:i,textureFormatFloat:o,downloadTextureFormat:l,downloadUnpackNumChannels:u,defaultNumChannels:c,textureTypeHalfFloat:h,textureTypeFloat:p}}function C$(e,t){const n=t();return ue().getBool("DEBUG")&&function(e){const t=e.getError();if(t!==e.NO_ERROR)throw new Error("WebGL Error: "+function(e,t){switch(t){case e.NO_ERROR:return"NO_ERROR";case e.INVALID_ENUM:return"INVALID_ENUM";case e.INVALID_VALUE:return"INVALID_VALUE";case e.INVALID_OPERATION:return"INVALID_OPERATION";case e.INVALID_FRAMEBUFFER_OPERATION:return"INVALID_FRAMEBUFFER_OPERATION";case e.OUT_OF_MEMORY:return"OUT_OF_MEMORY";case e.CONTEXT_LOST_WEBGL:return"CONTEXT_LOST_WEBGL";default:return`Unknown error code ${t}`}}(e,t))}(e),n}function S$(e){return!!(ue().getBool("WEBGL_RENDER_FLOAT32_ENABLED")||0===e||5.96e-8<Math.abs(e)&&Math.abs(e)<65504)}function T$(e,t){return M$(e,(()=>e.getExtension(t)),'Extension "'+t+'" not supported on this browser.')}!function(e){e[e.DENSE=0]="DENSE",e[e.SHARED_BATCH=1]="SHARED_BATCH"}(x$||(x$={})),function(e){e[e.RENDER=0]="RENDER",e[e.UPLOAD=1]="UPLOAD",e[e.PIXELS=2]="PIXELS",e[e.DOWNLOAD=3]="DOWNLOAD"}(w$||(w$={})),function(e){e[e.UNPACKED_FLOAT16=0]="UNPACKED_FLOAT16",e[e.UNPACKED_FLOAT32=1]="UNPACKED_FLOAT32",e[e.PACKED_4X1_UNSIGNED_BYTE=2]="PACKED_4X1_UNSIGNED_BYTE",e[e.PACKED_2X2_FLOAT32=3]="PACKED_2X2_FLOAT32",e[e.PACKED_2X2_FLOAT16=4]="PACKED_2X2_FLOAT16"}(k$||(k$={}));const E$=/ERROR: [0-9]+:([0-9]+):/g;function A$(e,t){if(C$(e,(()=>e.validateProgram(t))),!1===e.getProgramParameter(t,e.VALIDATE_STATUS))throw console.log(e.getProgramInfoLog(t)),new Error("Shader program validation failed.")}function R$(e,t,n,s,a,r,i){const o=e.getAttribLocation(t,n);return-1!==o&&(C$(e,(()=>e.bindBuffer(e.ARRAY_BUFFER,s))),C$(e,(()=>e.vertexAttribPointer(o,a,e.FLOAT,!1,r,i))),C$(e,(()=>e.enableVertexAttribArray(o))),!0)}function F$(e,t,n,s){C$(e,(()=>function(e,t,n){(function(e,t){const n=e.MAX_COMBINED_TEXTURE_IMAGE_UNITS-1,s=t+e.TEXTURE0;if(s<e.TEXTURE0||s>n)throw new Error(`textureUnit must be in [gl.TEXTURE0, gl.TEXTURE${n}].`)})(e,n),C$(e,(()=>e.activeTexture(e.TEXTURE0+n))),C$(e,(()=>e.bindTexture(e.TEXTURE_2D,t)))}(e,t,s))),C$(e,(()=>e.uniform1i(n,s)))}function D$(e,t,n){C$(e,(()=>e.bindFramebuffer(e.FRAMEBUFFER,n))),C$(e,(()=>e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,t,0)))}function _$(e,t){C$(e,(()=>e.bindFramebuffer(e.FRAMEBUFFER,t))),C$(e,(()=>e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,null,0)))}function O$(e){const t=e.checkFramebufferStatus(e.FRAMEBUFFER);if(t!==e.FRAMEBUFFER_COMPLETE)throw new Error("Error binding framebuffer: "+function(e,t){switch(t){case e.FRAMEBUFFER_INCOMPLETE_ATTACHMENT:return"FRAMEBUFFER_INCOMPLETE_ATTACHMENT";case 
e.FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT:return"FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT";case e.FRAMEBUFFER_INCOMPLETE_DIMENSIONS:return"FRAMEBUFFER_INCOMPLETE_DIMENSIONS";case e.FRAMEBUFFER_UNSUPPORTED:return"FRAMEBUFFER_UNSUPPORTED";default:return`unknown error ${t}`}}(e,t))}function M$(e,t,n){const s=C$(e,(()=>t()));if(null==s)throw new Error(n);return s}function L$(e,t=2){return D(e.slice(0,e.length-t))}function z$(e){if(0===e.length)throw Error("Cannot get rows and columns of an empty shape array.");return[e.length>1?e[e.length-2]:1,e[e.length-1]]}function P$(e){let t=[1,1,1];return 0===e.length||1===e.length&&1===e[0]||(t=[L$(e),...z$(e)]),t}function B$(e){return e%2==0}function W$(e,t){if(_(e=e.slice(-2),t=t.slice(-2)))return!0;if(!e.length||!t.length)return!0;if(0===e[0]||0===e[1]||0===t[0]||0===t[1])return!0;if(e.length!==t.length){const n=e.slice(-1)[0],s=t.slice(-1)[0];if(n===s)return!0;if(B$(n)&&B$(s)&&(1===e[0]||1===t[0]))return!0}return e[1]===t[1]&&B$(e[0])&&B$(t[0])}let V$,U$;function G$(e,t){return null!=e.getExtension(t)}function H$(e){try{if(null!=b$(e))return!0}catch(e){return console.log("Error when getting WebGL context: ",e),!1}return!1}function j$(e){const t=$$(e),n=e.createTexture();e.bindTexture(e.TEXTURE_2D,n),e.texImage2D(e.TEXTURE_2D,0,t.internalFormatFloat,1,1,0,t.textureFormatFloat,t.textureTypeFloat,null);const s=e.createFramebuffer();e.bindFramebuffer(e.FRAMEBUFFER,s),e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,n,0);const a=e.checkFramebufferStatus(e.FRAMEBUFFER)===e.FRAMEBUFFER_COMPLETE;return e.bindTexture(e.TEXTURE_2D,null),e.bindFramebuffer(e.FRAMEBUFFER,null),e.deleteTexture(n),e.deleteFramebuffer(s),a}function K$(e,t){Array.isArray(e)||(e=[e]),e.forEach((e=>{null!=e&&E("complex64"!==e.dtype,(()=>`${t} does not support complex64 tensors in the WebGL backend.`))}))}const q$=ue();function X$(){let e,t,n,s,a,r,i,o,l,u;return 2===ue().getNumber("WEBGL_VERSION")?(e="#version 300 es",t="in",n="out",s="in",a="texture",r="outputColor",i="out vec4 outputColor;",o="\n bool isnan_custom(float val) {\n return (val > 0.0 || val < 0.0) ? false : val != 0.0;\n }\n\n bvec4 isnan_custom(vec4 val) {\n return bvec4(isnan_custom(val.x),\n isnan_custom(val.y), isnan_custom(val.z), isnan_custom(val.w));\n }\n\n #define isnan(value) isnan_custom(value)\n ",l="",u="\n #define round(value) newRound(value)\n int newRound(float value) {\n return int(floor(value + 0.5));\n }\n\n ivec4 newRound(vec4 value) {\n return ivec4(floor(value + vec4(0.5)));\n }\n "):(e="",t="attribute",n="varying",s="varying",a="texture2D",r="gl_FragColor",i="",o="\n #define isnan(value) isnan_custom(value)\n bool isnan_custom(float val) {\n return (val > 0. || val < 1. || val == 0.) ? 
false : true;\n }\n bvec4 isnan_custom(vec4 val) {\n return bvec4(isnan(val.x), isnan(val.y), isnan(val.z), isnan(val.w));\n }\n ",l="\n uniform float INFINITY;\n\n bool isinf(float val) {\n return abs(val) == INFINITY;\n }\n bvec4 isinf(vec4 val) {\n return equal(abs(val), vec4(INFINITY));\n }\n ",u="\n int round(float value) {\n return int(floor(value + 0.5));\n }\n\n ivec4 round(vec4 value) {\n return ivec4(floor(value + vec4(0.5)));\n }\n "),{version:e,attribute:t,varyingVs:n,varyingFs:s,texture2D:a,output:r,defineOutput:i,defineSpecialNaN:o,defineSpecialInf:l,defineRound:u}}function Y$(e,t,n="index"){const s=Z(t);return s.map(((t,a)=>`int ${e[a]} = ${n} / ${t}; ${a===s.length-1?`int ${e[a+1]} = ${n} - ${e[a]} * ${t}`:`index -= ${e[a]} * ${t}`};`)).join("")}function J$(e){const t=Z(e).map((e=>e.toString()));return`\n int getFlatIndex(ivec3 coords) {\n return coords.x * ${t[0]} + coords.y * ${t[1]} + coords.z;\n }\n`}q$.registerFlag("HAS_WEBGL",(()=>q$.getNumber("WEBGL_VERSION")>0)),q$.registerFlag("WEBGL_VERSION",(()=>H$(2)?2:H$(1)?1:0)),q$.registerFlag("WEBGL_CHECK_NUMERICAL_PROBLEMS",(()=>!1)),q$.registerFlag("WEBGL_BUFFER_SUPPORTED",(()=>2===q$.get("WEBGL_VERSION"))),q$.registerFlag("WEBGL_CPU_FORWARD",(()=>!0)),q$.registerFlag("WEBGL_FORCE_F16_TEXTURES",(()=>!1)),q$.registerFlag("WEBGL_PACK",(()=>q$.getBool("HAS_WEBGL"))),q$.registerFlag("WEBGL_PACK_NORMALIZATION",(()=>q$.getBool("WEBGL_PACK"))),q$.registerFlag("WEBGL_PACK_CLIP",(()=>q$.getBool("WEBGL_PACK"))),q$.registerFlag("WEBGL_PACK_DEPTHWISECONV",(()=>!1)),q$.registerFlag("WEBGL_PACK_BINARY_OPERATIONS",(()=>q$.getBool("WEBGL_PACK"))),q$.registerFlag("WEBGL_PACK_UNARY_OPERATIONS",(()=>q$.getBool("WEBGL_PACK"))),q$.registerFlag("WEBGL_PACK_ARRAY_OPERATIONS",(()=>q$.getBool("WEBGL_PACK"))),q$.registerFlag("WEBGL_PACK_IMAGE_OPERATIONS",(()=>q$.getBool("WEBGL_PACK"))),q$.registerFlag("WEBGL_PACK_REDUCE",(()=>q$.getBool("WEBGL_PACK"))),q$.registerFlag("WEBGL_LAZILY_UNPACK",(()=>q$.getBool("WEBGL_PACK"))),q$.registerFlag("WEBGL_CONV_IM2COL",(()=>q$.getBool("WEBGL_PACK"))),q$.registerFlag("WEBGL_MAX_TEXTURE_SIZE",(()=>function(e){if(null==V$){const t=b$(e);V$=t.getParameter(t.MAX_TEXTURE_SIZE)}return V$}(q$.getNumber("WEBGL_VERSION")))),q$.registerFlag("WEBGL_MAX_TEXTURES_IN_SHADER",(()=>function(e){if(null==U$){const t=b$(e);U$=t.getParameter(t.MAX_TEXTURE_IMAGE_UNITS)}return Math.min(16,U$)}(q$.getNumber("WEBGL_VERSION")))),q$.registerFlag("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION",(()=>{const e=q$.getNumber("WEBGL_VERSION");return 0===e?0:function(e){if(0===e)return 0;let t;const n=b$(e);return t=G$(n,"EXT_disjoint_timer_query_webgl2")&&2===e?2:G$(n,"EXT_disjoint_timer_query")?1:0,t}(e)})),q$.registerFlag("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE",(()=>q$.getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")>0&&!ea())),q$.registerFlag("WEBGL_RENDER_FLOAT32_CAPABLE",(()=>function(e){if(0===e)return!1;const t=b$(e);if(1===e){if(!G$(t,"OES_texture_float"))return!1}else if(!G$(t,"EXT_color_buffer_float"))return!1;return j$(t)}(q$.getNumber("WEBGL_VERSION")))),q$.registerFlag("WEBGL_RENDER_FLOAT32_ENABLED",(()=>!q$.getBool("WEBGL_FORCE_F16_TEXTURES")&&q$.getBool("WEBGL_RENDER_FLOAT32_CAPABLE"))),q$.registerFlag("WEBGL_DOWNLOAD_FLOAT_ENABLED",(()=>function(e){if(0===e)return!1;const t=b$(e);if(1!==e){if(G$(t,"EXT_color_buffer_float"))return j$(t);const e="EXT_color_buffer_half_float";if(G$(t,e)){const n=t.getExtension(e);return function(e,t){const 
n=$$(e,t),s=e.createTexture();e.bindTexture(e.TEXTURE_2D,s),e.texImage2D(e.TEXTURE_2D,0,n.internalFormatHalfFloat,1,1,0,n.textureFormatFloat,n.textureTypeHalfFloat,null);const a=e.createFramebuffer();e.bindFramebuffer(e.FRAMEBUFFER,a),e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,s,0);const r=e.checkFramebufferStatus(e.FRAMEBUFFER)===e.FRAMEBUFFER_COMPLETE;return e.bindTexture(e.TEXTURE_2D,null),e.bindFramebuffer(e.FRAMEBUFFER,null),e.deleteTexture(s),e.deleteFramebuffer(a),r}(t,n)}return!1}return!!G$(t,"OES_texture_float")&&!!G$(t,"WEBGL_color_buffer_float")&&j$(t)}(q$.getNumber("WEBGL_VERSION")))),q$.registerFlag("WEBGL_FENCE_API_ENABLED",(()=>{return 2===(e=q$.getNumber("WEBGL_VERSION"))&&null!=b$(e).fenceSync;var e})),q$.registerFlag("WEBGL_SIZE_UPLOAD_UNIFORM",(()=>q$.getBool("WEBGL_RENDER_FLOAT32_ENABLED")?4:0)),q$.registerFlag("WEBGL_DELETE_TEXTURE_THRESHOLD",(()=>-1),(e=>{if(e<0&&-1!==e)throw new Error(`WEBGL_DELETE_TEXTURE_THRESHOLD must be -1 (indicating never delete) or at least 0, but got ${e}.`)})),q$.registerFlag("WEBGL_FLUSH_THRESHOLD",(()=>ea()&&q$.getBool("IS_CHROME")?1:-1),(e=>{if(e<0&&-1!==e)throw new Error(`WEBGL_FLUSH_THRESHOLD must be -1 (indicating never manual flush) or at least 0, but got ${e}.`)}));const Z$="\n const float FLOAT_MAX = 1.70141184e38;\n const float FLOAT_MIN = 1.17549435e-38;\n\n lowp vec4 encode_float(highp float v) {\n if (isnan(v)) {\n return vec4(255, 255, 255, 255);\n }\n\n highp float av = abs(v);\n\n if(av < FLOAT_MIN) {\n return vec4(0.0, 0.0, 0.0, 0.0);\n } else if(v > FLOAT_MAX) {\n return vec4(0.0, 0.0, 128.0, 127.0) / 255.0;\n } else if(v < -FLOAT_MAX) {\n return vec4(0.0, 0.0, 128.0, 255.0) / 255.0;\n }\n\n highp vec4 c = vec4(0,0,0,0);\n\n highp float e = floor(log2(av));\n highp float m = exp2(fract(log2(av))) - 1.0;\n\n c[2] = floor(128.0 * m);\n m -= c[2] / 128.0;\n c[1] = floor(32768.0 * m);\n m -= c[1] / 32768.0;\n c[0] = floor(8388608.0 * m);\n\n highp float ebias = e + 127.0;\n c[3] = floor(ebias / 2.0);\n ebias -= c[3] * 2.0;\n c[2] += floor(ebias) * 128.0;\n\n c[3] += 128.0 * step(0.0, -v);\n\n return c / 255.0;\n }\n";class Q${constructor(e){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0,this.outPackingScheme=x$.DENSE;const t=N$(e),n=X$();this.outputShape=e,this.userCode=`\n ivec3 outCoordsFromFlatIndex(int index) {\n ${Y$(["r","c","d"],e)}\n return ivec3(r, c, d);\n }\n\n void main() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${t[0]}, ${t[1]}));\n int index = 4 * (resTexRC.x * ${t[1]} + resTexRC.y);\n\n vec4 result = vec4(0.);\n\n for (int i=0; i<4; i++) {\n int flatIndex = index + i;\n ivec3 rc = outCoordsFromFlatIndex(flatIndex);\n result[i] = getA(rc.x, rc.y, rc.z);\n }\n\n ${n.output} = result;\n }\n `}}class eC{constructor(e){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outPackingScheme=x$.DENSE;const t=N$(e),n=X$();this.outputShape=e,this.userCode=`\n ivec3 outCoordsFromFlatIndex(int index) {\n ${Y$(["r","c","d"],e)}\n return ivec3(r, c, d);\n }\n\n void main() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${t[0]}, ${t[1]}));\n int index = 4 * (resTexRC.x * ${t[1]} + resTexRC.y);\n\n vec4 result = vec4(0.);\n\n for (int i=0; i<4; i++) {\n int flatIndex = index + i;\n ivec3 rc = outCoordsFromFlatIndex(flatIndex);\n result[i] = getChannel(getA(rc.x, rc.y, rc.z), vec2(rc.y, rc.z));\n }\n\n ${n.output} = result;\n }\n `}}class tC{constructor(e){this.variableNames=["A"],this.outTexUsage=w$.DOWNLOAD;const t=X$();this.outputShape=e,this.userCode=`\n 
${Z$}\n\n void main() {\n float x = getAAtOutCoords();\n ${t.output} = encode_float(x);\n }\n `}}class nC{constructor(e){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!1,this.outTexUsage=w$.DOWNLOAD;const t=X$();this.outputShape=e,this.userCode=`\n ${Z$}\n\n void main() {\n ivec3 coords = getOutputCoords();\n float x = getChannel(getAAtOutCoords(), vec2(coords.y, coords.z));\n ${t.output} = encode_float(x);\n }\n `}}class sC{constructor(e,t,n=!1){this.variableNames=["A"];const s=X$(),[a,r]=t;this.outputShape=e;let i="result";n&&(i="floor(result * 255. + 0.5)"),this.userCode=`\n ${J$(e)}\n\n void main() {\n ivec3 coords = getOutputCoords();\n\n int flatIndex = getFlatIndex(coords);\n int offset = imod(flatIndex, 4);\n\n flatIndex = idiv(flatIndex, 4, 1.);\n\n int r = flatIndex / ${r};\n int c = imod(flatIndex, ${r});\n vec2 uv = (vec2(c, r) + halfCR) / vec2(${r}.0, ${a}.0);\n vec4 values = ${s.texture2D}(A, uv);\n\n float result;\n\n if(offset == 0) {\n result = values[0];\n } else if(offset == 1) {\n result = values[1];\n } else if(offset == 2) {\n result = values[2];\n } else {\n result = values[3];\n }\n\n ${s.output} = vec4(${i}, 0., 0., 0.);\n }\n `}}class aC{constructor(e,t,n=!1){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0;const s=X$(),[a,r]=t;this.outputShape=e;let i="",o="result";n&&(o="floor(result * 255. + 0.5)");for(let t=0;t<=1;t++)for(let n=0;n<=1;n++){const o=2*t+n;i+=`\n localCoords = coords;\n if(localCoords[2] + ${n} < ${e[2]}) {\n localCoords[2] += ${n};\n if(localCoords[1] + ${t} < ${e[1]}) {\n localCoords[1] += ${t};\n\n flatIndex = getFlatIndex(localCoords);\n offset = imod(flatIndex, 4);\n\n flatIndex = idiv(flatIndex, 4, 1.);\n\n r = flatIndex / ${r};\n c = imod(flatIndex, ${r});\n uv = (vec2(c, r) + halfCR) / vec2(${r}.0, ${a}.0);\n values = ${s.texture2D}(A, uv);\n\n if(offset == 0) {\n result[${o}] = values[0];\n } else if(offset == 1) {\n result[${o}] = values[1];\n } else if(offset == 2) {\n result[${o}] = values[2];\n } else {\n result[${o}] = values[3];\n }\n }\n }\n `}this.userCode=`\n ${J$(e)}\n\n void main() {\n ivec3 coords = getOutputCoords();\n\n vec4 result = vec4(0.);\n int flatIndex, r, c, offset;\n ivec3 localCoords;\n vec2 uv;\n vec4 values;\n\n ${i}\n\n ${s.output} = ${o};\n }\n `}}function rC(e,t,n,s,a,r){!function(e,t){const n=ue().getNumber("WEBGL_MAX_TEXTURE_SIZE");if(e<=0||t<=0)throw new Error(`Requested texture size [${e}x${t}] is invalid.`);if(e>n||t>n)throw new Error(`Requested texture size [${e}x${t}] greater than WebGL maximum on this browser / GPU [${n}x${n}].`)}(t,n);const i=function(e){return M$(e,(()=>e.createTexture()),"Unable to create WebGLTexture.")}(e),o=e.TEXTURE_2D;return C$(e,(()=>e.bindTexture(o,i))),C$(e,(()=>e.texParameteri(o,e.TEXTURE_WRAP_S,e.CLAMP_TO_EDGE))),C$(e,(()=>e.texParameteri(o,e.TEXTURE_WRAP_T,e.CLAMP_TO_EDGE))),C$(e,(()=>e.texParameteri(o,e.TEXTURE_MIN_FILTER,e.NEAREST))),C$(e,(()=>e.texParameteri(o,e.TEXTURE_MAG_FILTER,e.NEAREST))),C$(e,(()=>e.texImage2D(o,0,s,t,n,0,a,r,null))),C$(e,(()=>e.bindTexture(e.TEXTURE_2D,null))),i}function iC(e){return e.internalFormatFloat}function oC(e){return e.internalFormatHalfFloat}function lC(e){return e.downloadTextureFormat}function uC(e){return e.internalFormatPackedFloat}function cC(e){return e.internalFormatPackedHalfFloat}class hC{constructor(e){this.outputTexture=null,this.program=null,this.disposed=!1,this.vertexAttrsAreBound=!1,this.itemsToPoll=[];const 
t=ue().getNumber("WEBGL_VERSION");null!=e?(this.gl=e,function(e,t){g$[e]=t}(t,e)):this.gl=b$(t);let n="WEBGL_color_buffer_float";const s="EXT_color_buffer_half_float";if(1===ue().getNumber("WEBGL_VERSION")){const e="OES_texture_float",t="OES_texture_half_float";if(this.textureFloatExtension=T$(this.gl,e),G$(this.gl,t))this.textureHalfFloatExtension=T$(this.gl,t);else if(ue().get("WEBGL_FORCE_F16_TEXTURES"))throw new Error("GL context does not support half float textures, yet the environment flag WEBGL_FORCE_F16_TEXTURES is set to true.");if(this.colorBufferFloatExtension=this.gl.getExtension(n),G$(this.gl,s))this.colorBufferHalfFloatExtension=T$(this.gl,s);else if(ue().get("WEBGL_FORCE_F16_TEXTURES"))throw new Error("GL context does not support color renderable half floats, yet the environment flag WEBGL_FORCE_F16_TEXTURES is set to true.")}else if(n="EXT_color_buffer_float",G$(this.gl,n))this.colorBufferFloatExtension=this.gl.getExtension(n);else{if(!G$(this.gl,s))throw new Error("GL context does not support color renderable floats");this.colorBufferHalfFloatExtension=this.gl.getExtension(s)}this.vertexBuffer=function(e){return function(e,t){const n=M$(e,(()=>e.createBuffer()),"Unable to create WebGLBuffer");return C$(e,(()=>e.bindBuffer(e.ARRAY_BUFFER,n))),C$(e,(()=>e.bufferData(e.ARRAY_BUFFER,t,e.STATIC_DRAW))),n}(e,new Float32Array([-1,1,0,0,1,-1,-1,0,0,0,1,1,0,1,1,1,-1,0,1,0]))}(this.gl),this.indexBuffer=function(e){return function(e,t){const n=M$(e,(()=>e.createBuffer()),"Unable to create WebGLBuffer");return C$(e,(()=>e.bindBuffer(e.ELEMENT_ARRAY_BUFFER,n))),C$(e,(()=>e.bufferData(e.ELEMENT_ARRAY_BUFFER,t,e.STATIC_DRAW))),n}(e,new Uint16Array([0,1,2,2,1,3]))}(this.gl),this.framebuffer=function(e){return M$(e,(()=>e.createFramebuffer()),"Unable to create WebGLFramebuffer.")}(this.gl),this.textureConfig=$$(this.gl,this.textureHalfFloatExtension)}get debug(){return ue().getBool("DEBUG")}dispose(){if(this.disposed)return;null!=this.program&&console.warn("Disposing a GPGPUContext that still has a bound WebGLProgram. This is probably a resource leak, delete the program with GPGPUContext.deleteProgram before disposing."),null!=this.outputTexture&&console.warn("Disposing a GPGPUContext that still has a bound output matrix texture. 
This is probably a resource leak, delete the output matrix texture with GPGPUContext.deleteMatrixTexture before disposing.");const e=this.gl;C$(e,(()=>e.finish())),C$(e,(()=>e.bindFramebuffer(e.FRAMEBUFFER,null))),C$(e,(()=>e.deleteFramebuffer(this.framebuffer))),C$(e,(()=>e.bindBuffer(e.ARRAY_BUFFER,null))),C$(e,(()=>e.bindBuffer(e.ELEMENT_ARRAY_BUFFER,null))),C$(e,(()=>e.deleteBuffer(this.indexBuffer))),this.disposed=!0}createFloat32MatrixTexture(e,t){return this.throwIfDisposed(),function(e,t,n,s){const[a,r]=v$(t,n);return rC(e,a,r,iC(s),s.textureFormatFloat,e.FLOAT)}(this.gl,e,t,this.textureConfig)}createFloat16MatrixTexture(e,t){return this.throwIfDisposed(),function(e,t,n,s){const[a,r]=v$(t,n);return rC(e,a,r,oC(s),s.textureFormatFloat,s.textureTypeHalfFloat)}(this.gl,e,t,this.textureConfig)}createUnsignedBytesMatrixTexture(e,t){return this.throwIfDisposed(),function(e,t,n,s){const[a,r]=v$(t,n);return rC(e,a,r,lC(s),e.RGBA,e.UNSIGNED_BYTE)}(this.gl,e,t,this.textureConfig)}uploadPixelDataToTexture(e,t){this.throwIfDisposed(),function(e,t,n){C$(e,(()=>e.bindTexture(e.TEXTURE_2D,t))),n.data instanceof Uint8Array?C$(e,(()=>e.texImage2D(e.TEXTURE_2D,0,e.RGBA,n.width,n.height,0,e.RGBA,e.UNSIGNED_BYTE,n.data))):C$(e,(()=>e.texImage2D(e.TEXTURE_2D,0,e.RGBA,e.RGBA,e.UNSIGNED_BYTE,n))),C$(e,(()=>e.bindTexture(e.TEXTURE_2D,null)))}(this.gl,e,t)}uploadDenseMatrixToTexture(e,t,n,s){this.throwIfDisposed(),function(e,t,n,s,a,r){let i,o,l;C$(e,(()=>e.bindTexture(e.TEXTURE_2D,t))),a instanceof Uint8Array?(i=new Uint8Array(n*s*4),o=e.UNSIGNED_BYTE,l=e.RGBA):(i=new Float32Array(n*s*4),o=e.FLOAT,l=r.internalFormatPackedFloat),i.set(a),C$(e,(()=>e.texImage2D(e.TEXTURE_2D,0,l,n,s,0,e.RGBA,o,i))),C$(e,(()=>e.bindTexture(e.TEXTURE_2D,null)))}(this.gl,e,t,n,s,this.textureConfig)}createFloat16PackedMatrixTexture(e,t){return this.throwIfDisposed(),function(e,t,n,s){const[a,r]=I$(t,n);return rC(e,a,r,cC(s),e.RGBA,s.textureTypeHalfFloat)}(this.gl,e,t,this.textureConfig)}createPackedMatrixTexture(e,t){return this.throwIfDisposed(),function(e,t,n,s){const[a,r]=I$(t,n);return rC(e,a,r,uC(s),e.RGBA,e.FLOAT)}(this.gl,e,t,this.textureConfig)}deleteMatrixTexture(e){this.throwIfDisposed(),this.outputTexture===e&&(_$(this.gl,this.framebuffer),this.outputTexture=null),C$(this.gl,(()=>this.gl.deleteTexture(e)))}downloadByteEncodedFloatMatrixFromOutputTexture(e,t,n){return this.downloadMatrixDriver(e,(()=>function(e,t,n,s){const[a,r]=v$(t,n),i=new Uint8Array(t*n*4);return C$(e,(()=>e.readPixels(0,0,a,r,s.downloadTextureFormat,e.UNSIGNED_BYTE,i))),new Float32Array(i.buffer)}(this.gl,t,n,this.textureConfig)))}downloadPackedMatrixFromBuffer(e,t,n,s,a,r){return function(e,t,n,s,a,r,i,o){const l=e,u=new Float32Array(function(e,t){const[n,s]=I$(e,t);return n*s*4}(r,i));return l.bindBuffer(l.PIXEL_PACK_BUFFER,t),l.getBufferSubData(l.PIXEL_PACK_BUFFER,0,u),l.bindBuffer(l.PIXEL_PACK_BUFFER,null),u}(this.gl,e,0,0,0,a,r,this.textureConfig)}downloadFloat32MatrixFromBuffer(e,t){return function(e,t,n){const s=e,a=new Float32Array(n);return s.bindBuffer(s.PIXEL_PACK_BUFFER,t),s.getBufferSubData(s.PIXEL_PACK_BUFFER,0,a),s.bindBuffer(s.PIXEL_PACK_BUFFER,null),a}(this.gl,e,t)}createBufferFromTexture(e,t,n){this.bindTextureToFrameBuffer(e);const s=function(e,t,n,s){const a=e.createBuffer();C$(e,(()=>e.bindBuffer(e.PIXEL_PACK_BUFFER,a)));const r=16*t*n;return 
C$(e,(()=>e.bufferData(e.PIXEL_PACK_BUFFER,r,e.STREAM_READ))),C$(e,(()=>e.readPixels(0,0,n,t,e.RGBA,e.FLOAT,0))),C$(e,(()=>e.bindBuffer(e.PIXEL_PACK_BUFFER,null))),a}(this.gl,t,n,this.textureConfig);return this.unbindTextureToFrameBuffer(),s}createAndWaitForFence(){const e=this.createFence(this.gl);return this.pollFence(e)}createFence(e){let t,n;if(ue().getBool("WEBGL_FENCE_API_ENABLED")){const s=e,a=s.fenceSync(s.SYNC_GPU_COMMANDS_COMPLETE,0);e.flush(),n=()=>{const e=s.clientWaitSync(a,0,0);return e===s.ALREADY_SIGNALED||e===s.CONDITION_SATISFIED},t=a}else ue().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")>0?(t=this.beginQuery(),this.endQuery(),n=()=>this.isQueryAvailable(t,ue().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))):n=()=>!0;return{query:t,isFencePassed:n}}downloadMatrixFromPackedTexture(e,t,n){return this.downloadMatrixDriver(e,(()=>function(e,t,n){const s=new Float32Array(t*n*4);return C$(e,(()=>e.readPixels(0,0,n,t,e.RGBA,e.FLOAT,s))),s}(this.gl,t,n)))}createProgram(e){this.throwIfDisposed();const t=this.gl,n=function(e,t){const n=M$(e,(()=>e.createShader(e.FRAGMENT_SHADER)),"Unable to create fragment WebGLShader.");if(C$(e,(()=>e.shaderSource(n,t))),C$(e,(()=>e.compileShader(n))),!1===e.getShaderParameter(n,e.COMPILE_STATUS))throw function(e,t){const n=E$.exec(t);if(null==n)return console.log(`Couldn't parse line number in error: ${t}`),void console.log(e);const s=+n[1],a=e.split("\n"),r=a.length.toString().length+2,i=a.map(((e,t)=>L((t+1).toString(),r)+e));let o=0;for(let e=0;e<i.length;e++)o=Math.max(i[e].length,o);const l=i.slice(0,s-1),u=i.slice(s-1,s),c=i.slice(s);console.log(l.join("\n")),console.log(t.split("\n")[0]),console.log(`%c ${L(u[0],o)}`,"border:1px solid red; background-color:#e3d2d2; color:#a61717"),console.log(c.join("\n"))}(t,e.getShaderInfoLog(n)),new Error("Failed to compile fragment shader.");return n}(t,e),s=function(e){const t=X$();return function(e,t){const n=M$(e,(()=>e.createShader(e.VERTEX_SHADER)),"Unable to create vertex WebGLShader.");if(C$(e,(()=>e.shaderSource(n,t))),C$(e,(()=>e.compileShader(n))),!1===e.getShaderParameter(n,e.COMPILE_STATUS))throw console.log(e.getShaderInfoLog(n)),new Error("Failed to compile vertex shader.");return n}(e,`${t.version}\n precision highp float;\n ${t.attribute} vec3 clipSpacePos;\n ${t.attribute} vec2 uv;\n ${t.varyingVs} vec2 resultUV;\n\n void main() {\n gl_Position = vec4(clipSpacePos, 1);\n resultUV = uv;\n }`)}(t),a=function(e){return M$(e,(()=>e.createProgram()),"Unable to create WebGLProgram.")}(t);return C$(t,(()=>t.attachShader(a,s))),C$(t,(()=>t.attachShader(a,n))),function(e,t){if(C$(e,(()=>e.linkProgram(t))),!1===e.getProgramParameter(t,e.LINK_STATUS))throw console.log(e.getProgramInfoLog(t)),new Error("Failed to link vertex and fragment shaders.")}(t,a),this.debug&&A$(t,a),this.vertexAttrsAreBound||(this.setProgram(a),this.vertexAttrsAreBound=function(e,t,n){return C$(e,(()=>e.bindBuffer(e.ARRAY_BUFFER,n))),R$(e,t,"clipSpacePos",n,3,20,0)&&R$(e,t,"uv",n,2,20,12)}(t,this.program,this.vertexBuffer)),a}deleteProgram(e){this.throwIfDisposed(),e===this.program&&(this.program=null),null!=e&&C$(this.gl,(()=>this.gl.deleteProgram(e)))}setProgram(e){this.throwIfDisposed(),this.program=e,null!=this.program&&this.debug&&A$(this.gl,this.program),C$(this.gl,(()=>this.gl.useProgram(e)))}getUniformLocation(e,t,n=!0){return this.throwIfDisposed(),n?function(e,t,n){return M$(e,(()=>e.getUniformLocation(t,n)),'uniform "'+n+'" not present in program.')}(this.gl,e,t):function(e,t,n){return 
e.getUniformLocation(t,n)}(this.gl,e,t)}getAttributeLocation(e,t){return this.throwIfDisposed(),C$(this.gl,(()=>this.gl.getAttribLocation(e,t)))}getUniformLocationNoThrow(e,t){return this.throwIfDisposed(),this.gl.getUniformLocation(e,t)}setInputMatrixTexture(e,t,n){this.throwIfDisposed(),this.throwIfNoProgram(),F$(this.gl,e,t,n)}setOutputMatrixTexture(e,t,n){this.setOutputMatrixTextureDriver(e,n,t)}setOutputPackedMatrixTexture(e,t,n){this.throwIfDisposed();const[s,a]=I$(t,n);this.setOutputMatrixTextureDriver(e,s,a)}setOutputMatrixWriteRegion(e,t,n,s){this.setOutputMatrixWriteRegionDriver(n,e,s,t)}setOutputPackedMatrixWriteRegion(e,t,n,s){throw new Error("setOutputPackedMatrixWriteRegion not implemented.")}debugValidate(){null!=this.program&&A$(this.gl,this.program),O$(this.gl)}executeProgram(){this.throwIfDisposed(),this.throwIfNoProgram();const e=this.gl;this.debug&&this.debugValidate(),C$(e,(()=>e.drawElements(e.TRIANGLES,6,e.UNSIGNED_SHORT,0)))}blockUntilAllProgramsCompleted(){this.throwIfDisposed(),C$(this.gl,(()=>this.gl.finish()))}getQueryTimerExtension(){return null==this.disjointQueryTimerExtension&&(this.disjointQueryTimerExtension=T$(this.gl,2===ue().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")?"EXT_disjoint_timer_query_webgl2":"EXT_disjoint_timer_query")),this.disjointQueryTimerExtension}getQueryTimerExtensionWebGL2(){return this.getQueryTimerExtension()}getQueryTimerExtensionWebGL1(){return this.getQueryTimerExtension()}beginQuery(){if(2===ue().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")){const e=this.gl,t=this.getQueryTimerExtensionWebGL2(),n=e.createQuery();return e.beginQuery(t.TIME_ELAPSED_EXT,n),n}const e=this.getQueryTimerExtensionWebGL1(),t=e.createQueryEXT();return e.beginQueryEXT(e.TIME_ELAPSED_EXT,t),t}endQuery(){if(2===ue().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")){const e=this.gl,t=this.getQueryTimerExtensionWebGL2();return void e.endQuery(t.TIME_ELAPSED_EXT)}const e=this.getQueryTimerExtensionWebGL1();e.endQueryEXT(e.TIME_ELAPSED_EXT)}async waitForQueryAndGetTime(e){return await z((()=>this.disposed||this.isQueryAvailable(e,ue().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")))),this.getQueryTime(e,ue().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))}getQueryTime(e,t){if(0===t)return null;if(2===t){const t=this.gl;return t.getQueryParameter(e,t.QUERY_RESULT)/1e6}{const t=this.getQueryTimerExtensionWebGL1();return t.getQueryObjectEXT(e,t.QUERY_RESULT_EXT)/1e6}}isQueryAvailable(e,t){if(0===t)return!0;if(2===t){const t=this.gl,n=this.getQueryTimerExtensionWebGL2(),s=t.getQueryParameter(e,t.QUERY_RESULT_AVAILABLE);return null==this.disjoint&&(this.disjoint=this.gl.getParameter(n.GPU_DISJOINT_EXT)),s&&!this.disjoint}{const t=this.getQueryTimerExtensionWebGL1(),n=t.getQueryObjectEXT(e,t.QUERY_RESULT_AVAILABLE_EXT);return null==this.disjoint&&(this.disjoint=this.gl.getParameter(t.GPU_DISJOINT_EXT)),n&&!this.disjoint}}pollFence(e){return new Promise((t=>{this.addItemToPoll((()=>e.isFencePassed()),(()=>t()))}))}pollItems(){const e=function(e){let t=0;for(;t<e.length&&e[t]();++t);return t-1}(this.itemsToPoll.map((e=>e.isDoneFn)));for(let 
t=0;t<=e;++t){const{resolveFn:e}=this.itemsToPoll[t];e()}this.itemsToPoll=this.itemsToPoll.slice(e+1)}addItemToPoll(e,t){this.itemsToPoll.push({isDoneFn:e,resolveFn:t}),this.itemsToPoll.length>1||z((()=>(this.pollItems(),0===this.itemsToPoll.length)))}bindTextureToFrameBuffer(e){this.throwIfDisposed(),D$(this.gl,e,this.framebuffer),this.debug&&O$(this.gl)}unbindTextureToFrameBuffer(){null!=this.outputTexture?(D$(this.gl,this.outputTexture,this.framebuffer),this.debug&&O$(this.gl)):_$(this.gl,this.framebuffer)}downloadMatrixDriver(e,t){this.bindTextureToFrameBuffer(e);const n=t();return this.unbindTextureToFrameBuffer(),n}setOutputMatrixTextureDriver(e,t,n){this.throwIfDisposed();const s=this.gl;D$(s,e,this.framebuffer),this.debug&&O$(s),this.outputTexture=e,C$(s,(()=>s.viewport(0,0,t,n))),C$(s,(()=>s.scissor(0,0,t,n)))}setOutputMatrixWriteRegionDriver(e,t,n,s){this.throwIfDisposed(),C$(this.gl,(()=>this.gl.scissor(e,t,n,s)))}throwIfDisposed(){if(this.disposed)throw new Error("Attempted to use disposed GPGPUContext.")}throwIfNoProgram(){if(null==this.program)throw new Error("No GPU program is currently set.")}}const{getBroadcastDims:pC}=a;function dC(e,t,n,s){const a=[];e.forEach((e=>{const t=D(e.shapeInfo.logicalShape);e.shapeInfo.isUniform?a.push(`uniform float ${e.name}${t>1?`[${t}]`:""};`):(a.push(`uniform sampler2D ${e.name};`),a.push(`uniform int offset${e.name};`))}));const r=a.join("\n"),i=e.map((e=>function(e,t,n=!1){let s="";s+=n?mC(e):fC(e);const a=e.shapeInfo.logicalShape,r=t.logicalShape;return a.length<=r.length&&(s+=n?function(e,t){const n=e.name,s=n.charAt(0).toUpperCase()+n.slice(1),a="get"+s+"AtOutCoords",r=e.shapeInfo.logicalShape.length,i=t.logicalShape.length,o=pC(e.shapeInfo.logicalShape,t.logicalShape),l=vC(i),u=i-r;let c;const h=["x","y","z","w","u","v"];c=0===r?"":i<2&&o.length>=1?"coords = 0;":o.map((e=>`coords.${h[e+u]} = 0;`)).join("\n");let p="";p=i<2&&r>0?"coords":e.shapeInfo.logicalShape.map(((e,t)=>`coords.${h[t+u]}`)).join(", ");let d="return outputValue;";const f=1===D(e.shapeInfo.logicalShape),m=1===D(t.logicalShape);if(1!==r||f||m){if(f&&!m)d=1===i?"\n return vec4(outputValue.x, outputValue.x, 0., 0.);\n ":"\n return vec4(outputValue.x);\n ";else if(o.length){const e=r-2,t=r-1;o.indexOf(e)>-1&&o.indexOf(t)>-1?d="return vec4(outputValue.x);":o.indexOf(e)>-1?d="return vec4(outputValue.x, outputValue.y, outputValue.x, outputValue.y);":o.indexOf(t)>-1&&(d="return vec4(outputValue.xx, outputValue.zz);")}}else d="\n return vec4(outputValue.xy, outputValue.xy);\n ";return`\n vec4 ${a}() {\n ${l} coords = getOutputCoords();\n ${c}\n vec4 outputValue = get${s}(${p});\n ${d}\n }\n `}(e,t):function(e,t){const n=e.name,s=n.charAt(0).toUpperCase()+n.slice(1),a="get"+s+"AtOutCoords",r=t.texShape,i=e.shapeInfo.texShape,o=e.shapeInfo.logicalShape.length,l=t.logicalShape.length;if(!e.shapeInfo.isUniform&&o===l&&null==e.shapeInfo.flatOffset&&_(i,r))return`\n float ${a}() {\n return sampleTexture(${n}, resultUV);\n }\n `;const u=vC(l),c=pC(e.shapeInfo.logicalShape,t.logicalShape),h=l-o;let p;const d=["x","y","z","w","u","v"];p=0===o?"":l<2&&c.length>=1?"coords = 0;":c.map((e=>`coords.${d[e+h]} = 0;`)).join("\n");let f="";return f=l<2&&o>0?"coords":e.shapeInfo.logicalShape.map(((e,t)=>`coords.${d[t+h]}`)).join(", "),`\n float ${a}() {\n ${u} coords = getOutputCoords();\n ${p}\n return get${s}(${f});\n }\n `}(e,t)),s}(e,t,s))).join("\n"),o=t.texShape,l=X$(),u=function(e){return`\n float sampleTexture(sampler2D textureSampler, vec2 uv) {\n return ${e.texture2D}(textureSampler, 
uv).r;\n }\n `}(l);let c,h,p=function(e){return`${e.version}\n precision highp float;\n precision highp int;\n precision highp sampler2D;\n ${e.varyingFs} vec2 resultUV;\n ${e.defineOutput}\n const vec2 halfCR = vec2(0.5, 0.5);\n\n struct ivec5\n {\n int x;\n int y;\n int z;\n int w;\n int u;\n };\n\n struct ivec6\n {\n int x;\n int y;\n int z;\n int w;\n int u;\n int v;\n };\n\n uniform float NAN;\n ${e.defineSpecialNaN}\n ${e.defineSpecialInf}\n ${e.defineRound}\n\n int imod(int x, int y) {\n return x - y * (x / y);\n }\n\n int idiv(int a, int b, float sign) {\n int res = a / b;\n int mod = imod(a, b);\n if (sign < 0. && mod != 0) {\n res -= 1;\n }\n return res;\n }\n\n //Based on the work of Dave Hoskins\n //https://www.shadertoy.com/view/4djSRW\n #define HASHSCALE1 443.8975\n float random(float seed){\n vec2 p = resultUV * seed;\n vec3 p3 = fract(vec3(p.xyx) * HASHSCALE1);\n p3 += dot(p3, p3.yzx + 19.19);\n return fract((p3.x + p3.y) * p3.z);\n }\n\n ${gC}\n ${yC}\n ${bC}\n `}(l);return t.isPacked?(c=function(e,t){switch(e.length){case 0:return"\n int getOutputCoords() {\n return 0;\n }\n ";case 1:return function(e,t){const n=[Math.ceil(t[0]/2),Math.ceil(t[1]/2)];return 1===n[0]?`\n int getOutputCoords() {\n return 2 * int(resultUV.x * ${n[1]}.0);\n }\n `:1===n[1]?`\n int getOutputCoords() {\n return 2 * int(resultUV.y * ${n[0]}.0);\n }\n `:`\n int getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${n[0]}, ${n[1]}));\n return 2 * (resTexRC.x * ${n[1]} + resTexRC.y);\n }\n `}(0,t);case 2:return function(e,t){const n=[Math.ceil(t[0]/2),Math.ceil(t[1]/2)];if(_(e,t))return`\n ivec2 getOutputCoords() {\n return 2 * ivec2(resultUV.yx * vec2(${n[0]}, ${n[1]}));\n }\n `;const s=Math.ceil(e[1]/2);return`\n ivec2 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${n[0]}, ${n[1]}));\n\n int index = resTexRC.x * ${n[1]} + resTexRC.y;\n int r = 2 * (index / ${s});\n int c = imod(index, ${s}) * 2;\n\n return ivec2(r, c);\n }\n `}(e,t);case 3:return function(e,t){const n=[Math.ceil(t[0]/2),Math.ceil(t[1]/2)],s=Math.ceil(e[2]/2),a=s*Math.ceil(e[1]/2);return`\n ivec3 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${n[0]}, ${n[1]}));\n int index = resTexRC.x * ${n[1]} + resTexRC.y;\n\n int b = index / ${a};\n index -= b * ${a};\n\n int r = 2 * (index / ${s});\n int c = imod(index, ${s}) * 2;\n\n return ivec3(b, r, c);\n }\n `}(e,t);default:return function(e,t){const n=[Math.ceil(t[0]/2),Math.ceil(t[1]/2)],s=Math.ceil(e[e.length-1]/2),a=s*Math.ceil(e[e.length-2]/2);let r=a,i="",o="b, r, c";for(let t=2;t<e.length-1;t++)r*=e[e.length-t-1],i=`\n int b${t} = index / ${r};\n index -= b${t} * ${r};\n `+i,o=`b${t}, `+o;return`\n ivec${e.length} getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${n[0]}, ${n[1]}));\n int index = resTexRC.x * ${n[1]} + resTexRC.y;\n\n ${i}\n\n int b = index / ${a};\n index -= b * ${a};\n\n int r = 2 * (index / ${s});\n int c = imod(index, ${s}) * 2;\n\n return ivec${e.length}(${o});\n }\n `}(e,t)}}(t.logicalShape,o),h=function(e){return`\n void setOutput(vec4 val) {\n ${e.output} = val;\n }\n `}(l)):(c=function(e,t){switch(e.length){case 0:return"\n int getOutputCoords() {\n return 0;\n }\n ";case 1:return 1===(n=t)[0]?`\n int getOutputCoords() {\n return int(resultUV.x * ${n[1]}.0);\n }\n `:1===n[1]?`\n int getOutputCoords() {\n return int(resultUV.y * ${n[0]}.0);\n }\n `:`\n int getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${n[0]}, ${n[1]}));\n return resTexRC.x * ${n[1]} + resTexRC.y;\n }\n 
`;case 2:return function(e,t){return _(e,t)?`\n ivec2 getOutputCoords() {\n return ivec2(resultUV.yx * vec2(${t[0]}, ${t[1]}));\n }\n `:1===e[1]?`\n ivec2 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${t[0]}, ${t[1]}));\n int index = resTexRC.x * ${t[1]} + resTexRC.y;\n return ivec2(index, 0);\n }\n `:1===e[0]?`\n ivec2 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${t[0]}, ${t[1]}));\n int index = resTexRC.x * ${t[1]} + resTexRC.y;\n return ivec2(0, index);\n }\n `:`\n ivec2 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${t[0]}, ${t[1]}));\n int index = resTexRC.x * ${t[1]} + resTexRC.y;\n int r = index / ${e[1]};\n int c = index - r * ${e[1]};\n return ivec2(r, c);\n }\n `}(e,t);case 3:return function(e,t){const n=Y$(["r","c","d"],e);return`\n ivec3 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${t[0]}, ${t[1]}));\n int index = resTexRC.x * ${t[1]} + resTexRC.y;\n ${n}\n return ivec3(r, c, d);\n }\n `}(e,t);case 4:return function(e,t){const n=Y$(["r","c","d","d2"],e);return`\n ivec4 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${t[0]}, ${t[1]}));\n int index = resTexRC.x * ${t[1]} + resTexRC.y;\n ${n}\n return ivec4(r, c, d, d2);\n }\n `}(e,t);case 5:return function(e,t){const n=Y$(["r","c","d","d2","d3"],e);return`\n ivec5 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx * vec2(${t[0]},\n ${t[1]}));\n\n int index = resTexRC.x * ${t[1]} + resTexRC.y;\n\n ${n}\n\n ivec5 outShape = ivec5(r, c, d, d2, d3);\n return outShape;\n }\n `}(e,t);case 6:return function(e,t){const n=Y$(["r","c","d","d2","d3","d4"],e);return`\n ivec6 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${t[0]}, ${t[1]}));\n int index = resTexRC.x * ${t[1]} + resTexRC.y;\n\n ${n}\n\n ivec6 result = ivec6(r, c, d, d2, d3, d4);\n return result;\n }\n `}(e,t);default:throw new Error(`${e.length}-D output sampling is not yet supported`)}var n}(t.logicalShape,o),h=function(e){return`\n void setOutput(float val) {\n ${e.output} = vec4(val, 0, 0, 0);\n }\n `}(l)),s&&(p+=xC),[p,u,h,r,c,i,n].join("\n")}function fC(e){const t=e.shapeInfo.logicalShape;switch(t.length){case 0:return function(e){const t=e.name,n="get"+t.charAt(0).toUpperCase()+t.slice(1);if(e.shapeInfo.isUniform)return`float ${n}() {return ${t};}`;const[s,a]=e.shapeInfo.texShape;if(1===s&&1===a)return`\n float ${n}() {\n return sampleTexture(${t}, halfCR);\n }\n `;const[r,i]=e.shapeInfo.texShape;return`\n float ${n}() {\n vec2 uv = uvFromFlat(${r}, ${i}, ${wC(t)});\n return sampleTexture(${t}, uv);\n }\n `}(e);case 1:return function(e){const t=e.name,n="get"+t.charAt(0).toUpperCase()+t.slice(1);if(e.shapeInfo.isUniform)return`\n float ${n}(int index) {\n ${kC(e)}\n }\n `;const s=e.shapeInfo.texShape,a=s[0],r=s[1];if(1===r&&1===a)return`\n float ${n}(int index) {\n return sampleTexture(${t}, halfCR);\n }\n `;const i=wC(t);return 1===r?`\n float ${n}(int index) {\n vec2 uv = vec2(0.5, (float(index + ${i}) + 0.5) / ${a}.0);\n return sampleTexture(${t}, uv);\n }\n `:1===a?`\n float ${n}(int index) {\n vec2 uv = vec2((float(index + ${i}) + 0.5) / ${r}.0, 0.5);\n return sampleTexture(${t}, uv);\n }\n `:`\n float ${n}(int index) {\n vec2 uv = uvFromFlat(${a}, ${r}, index + ${i});\n return sampleTexture(${t}, uv);\n }\n `}(e);case 2:return function(e){const t=e.shapeInfo.logicalShape,n=e.name,s="get"+n.charAt(0).toUpperCase()+n.slice(1),a=e.shapeInfo.texShape;if(null!=a&&_(t,a)){const e=a[0];return`\n float ${s}(int row, int col) {\n vec2 uv = 
(vec2(col, row) + halfCR) / vec2(${a[1]}.0, ${e}.0);\n return sampleTexture(${n}, uv);\n }\n `}const{newShape:r,keptDims:i}=W(t),o=r;if(o.length<t.length){const t=["row","col"];return`\n ${fC(NC(e,o))}\n float ${s}(int row, int col) {\n return ${s}(${IC(t,i)});\n }\n `}if(e.shapeInfo.isUniform)return`\n float ${s}(int row, int col) {\n int index = round(dot(vec2(row, col), vec2(${t[1]}, 1)));\n ${kC(e)}\n }\n `;const l=a[0],u=a[1],c=wC(n);return 1===u?`\n float ${s}(int row, int col) {\n float index = dot(vec3(row, col, ${c}), vec3(${t[1]}, 1, 1));\n vec2 uv = vec2(0.5, (index + 0.5) / ${l}.0);\n return sampleTexture(${n}, uv);\n }\n `:1===l?`\n float ${s}(int row, int col) {\n float index = dot(vec3(row, col, ${c}), vec3(${t[1]}, 1, 1));\n vec2 uv = vec2((index + 0.5) / ${u}.0, 0.5);\n return sampleTexture(${n}, uv);\n }\n `:`\n float ${s}(int row, int col) {\n // Explicitly use integer operations as dot() only works on floats.\n int index = row * ${t[1]} + col + ${c};\n vec2 uv = uvFromFlat(${l}, ${u}, index);\n return sampleTexture(${n}, uv);\n }\n`}(e);case 3:return function(e){const t=e.shapeInfo.logicalShape,n=e.name,s="get"+n.charAt(0).toUpperCase()+n.slice(1),a=t[1]*t[2],r=t[2],{newShape:i,keptDims:o}=W(t),l=i;if(l.length<t.length){const t=["row","col","depth"];return`\n ${fC(NC(e,l))}\n float ${s}(int row, int col, int depth) {\n return ${s}(${IC(t,o)});\n }\n `}if(e.shapeInfo.isUniform)return`\n float ${s}(int row, int col, int depth) {\n int index = round(dot(vec3(row, col, depth),\n vec3(${a}, ${r}, 1)));\n ${kC(e)}\n }\n `;const u=e.shapeInfo.texShape,c=u[0],h=u[1],p=e.shapeInfo.flatOffset;if(h===a&&null==p)return`\n float ${s}(int row, int col, int depth) {\n float texR = float(row);\n float texC = dot(vec2(col, depth), vec2(${r}, 1));\n vec2 uv = (vec2(texC, texR) + halfCR) /\n vec2(${h}.0, ${c}.0);\n return sampleTexture(${n}, uv);\n }\n `;if(h===r&&null==p)return`\n float ${s}(int row, int col, int depth) {\n float texR = dot(vec2(row, col), vec2(${t[1]}, 1));\n float texC = float(depth);\n vec2 uv = (vec2(texC, texR) + halfCR) / vec2(${h}.0, ${c}.0);\n return sampleTexture(${n}, uv);\n }\n `;return`\n float ${s}(int row, int col, int depth) {\n // Explicitly use integer operations as dot() only works on floats.\n int index = row * ${a} + col * ${r} + depth + ${wC(n)};\n vec2 uv = uvFromFlat(${c}, ${h}, index);\n return sampleTexture(${n}, uv);\n }\n `}(e);case 4:return function(e){const t=e.shapeInfo.logicalShape,n=e.name,s="get"+n.charAt(0).toUpperCase()+n.slice(1),a=t[3],r=t[2]*a,i=t[1]*r,{newShape:o,keptDims:l}=W(t);if(o.length<t.length){const t=["row","col","depth","depth2"];return`\n ${fC(NC(e,o))}\n float ${s}(int row, int col, int depth, int depth2) {\n return ${s}(${IC(t,l)});\n }\n `}if(e.shapeInfo.isUniform)return`\n float ${s}(int row, int col, int depth, int depth2) {\n int index = round(dot(vec4(row, col, depth, depth2),\n vec4(${i}, ${r}, ${a}, 1)));\n ${kC(e)}\n }\n `;const u=e.shapeInfo.flatOffset,c=e.shapeInfo.texShape,h=c[0],p=c[1];if(p===i&&null==u)return`\n float ${s}(int row, int col, int depth, int depth2) {\n float texR = float(row);\n float texC =\n dot(vec3(col, depth, depth2),\n vec3(${r}, ${a}, 1));\n vec2 uv = (vec2(texC, texR) + halfCR) /\n vec2(${p}.0, ${h}.0);\n return sampleTexture(${n}, uv);\n }\n `;if(p===a&&null==u)return`\n float ${s}(int row, int col, int depth, int depth2) {\n float texR = dot(vec3(row, col, depth),\n vec3(${t[1]*t[2]}, ${t[2]}, 1));\n float texC = float(depth2);\n vec2 uv = (vec2(texC, texR) + halfCR) /\n 
vec2(${p}.0, ${h}.0);\n return sampleTexture(${n}, uv);\n }\n `;return`\n float ${s}(int row, int col, int depth, int depth2) {\n // Explicitly use integer operations as dot() only works on floats.\n int index = row * ${i} + col * ${r} +\n depth * ${a} + depth2;\n vec2 uv = uvFromFlat(${h}, ${p}, index + ${wC(n)});\n return sampleTexture(${n}, uv);\n }\n `}(e);case 5:return function(e){const t=e.shapeInfo.logicalShape,n=e.name,s="get"+n.charAt(0).toUpperCase()+n.slice(1),a=t[4],r=t[3]*a,i=t[2]*r,o=t[1]*i,{newShape:l,keptDims:u}=W(t);if(l.length<t.length){const t=["row","col","depth","depth2","depth3"];return`\n ${fC(NC(e,l))}\n float ${s}(int row, int col, int depth, int depth2, int depth3) {\n return ${s}(${IC(t,u)});\n }\n `}if(e.shapeInfo.isUniform)return`\n float ${s}(int row, int col, int depth, int depth2, int depth3) {\n float index = dot(\n vec4(row, col, depth, depth2),\n vec4(${o}, ${i}, ${r}, ${a})) +\n depth3;\n ${kC(e)}\n }\n `;const c=e.shapeInfo.flatOffset,h=e.shapeInfo.texShape,p=h[0],d=h[1];if(d===o&&null==c)return`\n float ${s}(int row, int col, int depth, int depth2, int depth3) {\n int texR = row;\n float texC = dot(vec4(col, depth, depth2, depth3),\n vec4(${i}, ${r}, ${a}, 1));\n vec2 uv = (vec2(texC, texR) + halfCR) /\n vec2(${d}.0, ${p}.0);\n return sampleTexture(${n}, uv);\n }\n `;if(d===a&&null==c)return`\n float ${s}(int row, int col, int depth, int depth2, int depth3) {\n float texR = dot(\n vec4(row, col, depth, depth2),\n vec4(${t[1]*t[2]*t[3]},\n ${t[2]*t[3]}, ${t[3]}, 1));\n int texC = depth3;\n vec2 uv = (vec2(texC, texR) + halfCR) /\n vec2(${d}.0, ${p}.0);\n return sampleTexture(${n}, uv);\n }\n `;return`\n float ${s}(int row, int col, int depth, int depth2, int depth3) {\n // Explicitly use integer operations as dot() only works on floats.\n int index = row * ${o} + col * ${i} + depth * ${r} +\n depth2 * ${a} + depth3 + ${wC(n)};\n vec2 uv = uvFromFlat(${p}, ${d}, index);\n return sampleTexture(${n}, uv);\n }\n `}(e);case 6:return function(e){const t=e.shapeInfo.logicalShape,n=e.name,s="get"+n.charAt(0).toUpperCase()+n.slice(1),{newShape:a,keptDims:r}=W(t);if(a.length<t.length){const t=["row","col","depth","depth2","depth3","depth4"];return`\n ${fC(NC(e,a))}\n float ${s}(int row, int col, int depth,\n int depth2, int depth3, int depth4) {\n return ${s}(${IC(t,r)});\n }\n `}const i=t[5],o=t[4]*i,l=t[3]*o,u=t[2]*l,c=t[1]*u;if(e.shapeInfo.isUniform)return`\n float ${s}(int row, int col, int depth,\n int depth2, int depth3, int depth4) {\n int index = round(dot(\n vec4(row, col, depth, depth2),\n vec4(${c}, ${u}, ${l}, ${o})) +\n dot(\n vec2(depth3, depth4),\n vec2(${i}, 1)));\n ${kC(e)}\n }\n `;const h=e.shapeInfo.flatOffset,p=e.shapeInfo.texShape,d=p[0],f=p[1];if(f===c&&null==h)return`\n float ${s}(int row, int col, int depth,\n int depth2, int depth3, int depth4) {\n int texR = row;\n float texC = dot(vec4(col, depth, depth2, depth3),\n vec4(${u}, ${l}, ${o}, ${i})) +\n float(depth4);\n vec2 uv = (vec2(texC, texR) + halfCR) /\n vec2(${f}.0, ${d}.0);\n return sampleTexture(${n}, uv);\n }\n `;if(f===i&&null==h)return`\n float ${s}(int row, int col, int depth,\n int depth2, int depth3, int depth4) {\n float texR = dot(vec4(row, col, depth, depth2),\n vec4(${t[1]*t[2]*t[3]*t[4]},\n ${t[2]*t[3]*t[4]},\n ${t[3]*t[4]},\n ${t[4]})) + float(depth3);\n int texC = depth4;\n vec2 uv = (vec2(texC, texR) + halfCR) /\n vec2(${f}.0, ${d}.0);\n return sampleTexture(${n}, uv);\n }\n `;return`\n float ${s}(int row, int col, int depth,\n int depth2, int depth3, int depth4) {\n 
// Explicitly use integer operations as dot() only works on floats.\n int index = row * ${c} + col * ${u} + depth * ${l} +\n depth2 * ${o} + depth3 * ${i} + depth4 + ${wC(n)};\n vec2 uv = uvFromFlat(${d}, ${f}, index);\n return sampleTexture(${n}, uv);\n }\n `}(e);default:throw new Error(`${t.length}-D input sampling is not yet supported`)}}function mC(e){switch(e.shapeInfo.logicalShape.length){case 0:return function(e){const t=e.name;return`\n vec4 ${"get"+t.charAt(0).toUpperCase()+t.slice(1)}() {\n return ${X$().texture2D}(${t}, halfCR);\n }\n `}(e);case 1:return function(e){const t=e.name,n="get"+t.charAt(0).toUpperCase()+t.slice(1),s=e.shapeInfo.texShape,a=[Math.ceil(s[0]/2),Math.ceil(s[1]/2)],r=X$();return`\n vec4 ${n}(int index) {\n vec2 uv = packedUVfrom1D(\n ${a[0]}, ${a[1]}, index);\n return ${r.texture2D}(${t}, uv);\n }\n `}(e);case 2:return function(e){const t=e.shapeInfo.logicalShape,n=e.name,s="get"+n.charAt(0).toUpperCase()+n.slice(1),a=e.shapeInfo.texShape,r=a[0],i=a[1],o=X$();if(null!=a&&_(t,a))return`\n vec4 ${s}(int row, int col) {\n vec2 uv = (vec2(col, row) + halfCR) / vec2(${i}.0, ${r}.0);\n\n return ${o.texture2D}(${n}, uv);\n }\n `;const l=[Math.ceil(a[0]/2),Math.ceil(a[1]/2)];return`\n vec4 ${s}(int row, int col) {\n vec2 uv = packedUVfrom2D(${Math.ceil(t[1]/2)}, ${l[0]}, ${l[1]}, row, col);\n return ${o.texture2D}(${n}, uv);\n }\n `}(e);case 3:return function(e){const t=e.shapeInfo.logicalShape,n=e.name,s="get"+n.charAt(0).toUpperCase()+n.slice(1),a=e.shapeInfo.texShape,r=[Math.ceil(a[0]/2),Math.ceil(a[1]/2)];if(1===t[0]){const n=[1,2],a=["b","row","col"];return`\n ${mC(NC(e,t.slice(1)))}\n vec4 ${s}(int b, int row, int col) {\n return ${s}(${IC(a,n)});\n }\n `}const i=r[0],o=r[1],l=Math.ceil(t[2]/2);return`\n vec4 ${s}(int b, int row, int col) {\n vec2 uv = packedUVfrom3D(\n ${i}, ${o}, ${l*Math.ceil(t[1]/2)}, ${l}, b, row, col);\n return ${X$().texture2D}(${n}, uv);\n }\n `}(e);default:return function(e){const t=e.shapeInfo.logicalShape,n=t.length,s=e.name,a="get"+s.charAt(0).toUpperCase()+s.slice(1),r=e.shapeInfo.texShape,i=[Math.ceil(r[0]/2),Math.ceil(r[1]/2)],o=i[0],l=i[1],u=Math.ceil(t[n-1]/2);let c=u*Math.ceil(t[n-2]/2),h="int b, int row, int col",p=`b * ${c} + (row / 2) * ${u} + (col / 2)`;for(let e=2;e<n-1;e++)h=`int b${e}, `+h,c*=t[n-e-1],p=`b${e} * ${c} + `+p;return`\n vec4 ${a}(${h}) {\n int index = ${p};\n int texR = index / ${l};\n int texC = index - texR * ${l};\n vec2 uv = (vec2(texC, texR) + halfCR) / vec2(${l}, ${o});\n return ${X$().texture2D}(${s}, uv);\n }\n `}(e)}}const gC="\nvec2 uvFromFlat(int texNumR, int texNumC, int index) {\n int texR = index / texNumC;\n int texC = index - texR * texNumC;\n return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR);\n}\nvec2 packedUVfrom1D(int texNumR, int texNumC, int index) {\n int texelIndex = index / 2;\n int texR = texelIndex / texNumC;\n int texC = texelIndex - texR * texNumC;\n return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR);\n}\n",yC="\nvec2 packedUVfrom2D(int texelsInLogicalRow, int texNumR,\n int texNumC, int row, int col) {\n int texelIndex = (row / 2) * texelsInLogicalRow + (col / 2);\n int texR = texelIndex / texNumC;\n int texC = texelIndex - texR * texNumC;\n return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR);\n}\n",bC="\nvec2 packedUVfrom3D(int texNumR, int texNumC,\n int texelsInBatch, int texelsInLogicalRow, int b,\n int row, int col) {\n int index = b * texelsInBatch + (row / 2) * texelsInLogicalRow + (col / 2);\n int texR = index / texNumC;\n int texC = index - 
texR * texNumC;\n return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR);\n}\n",xC="\n float getChannel(vec4 frag, vec2 innerDims) {\n vec2 modCoord = mod(innerDims, 2.);\n return modCoord.x == 0. ?\n (modCoord.y == 0. ? frag.r : frag.g) :\n (modCoord.y == 0. ? frag.b : frag.a);\n }\n float getChannel(vec4 frag, int dim) {\n float modCoord = mod(float(dim), 2.);\n return modCoord == 0. ? frag.r : frag.g;\n }\n";function wC(e){return`offset${e}`}function kC(e){const t=e.name,n=D(e.shapeInfo.logicalShape);return n<2?`return ${t};`:`\n for (int i = 0; i < ${n}; i++) {\n if (i == index) {\n return ${t}[i];\n }\n }\n `}function vC(e){if(e<=1)return"int";if(2===e)return"ivec2";if(3===e)return"ivec3";if(4===e)return"ivec4";if(5===e)return"ivec5";if(6===e)return"ivec6";throw Error(`GPU for rank ${e} is not yet supported`)}function NC(e,t){const n=JSON.parse(JSON.stringify(e));return n.shapeInfo.logicalShape=t,n}function IC(e,t){return t.map((t=>e[t])).join(", ")}function $C(e,t){if(e.length!==t.length)throw Error(`Binary was compiled with ${e.length} inputs, but was executed with ${t.length} inputs`);e.forEach(((e,n)=>{const s=e.logicalShape,a=t[n],r=a.shape;if(!_(s,r))throw Error(`Binary was compiled with different shapes than the current args. Shapes ${s} and ${r} must match`);if(e.isUniform&&a.isUniform)return;const i=e.texShape,o=a.isUniform?null:a.texData.texShape;if(!_(i,o))throw Error(`Binary was compiled with different texture shapes than the current args. Shape ${i} and ${o} must match`)}))}const{addImpl:CC,bincountImpl:SC,bincountReduceImpl:TC,ceilImpl:EC,concatImpl:AC,expImpl:RC,expm1Impl:FC,floorImpl:DC,gatherV2Impl:_C,greaterImpl:OC,lessImpl:MC,linSpaceImpl:LC,logImpl:zC,maxImpl:PC,maximumImpl:BC,minimumImpl:WC,multiplyImpl:VC,negImpl:UC,prodImpl:GC,rangeImpl:HC,rsqrtImpl:jC,simpleAbsImpl:KC,sliceImpl:qC,stridedSliceImpl:XC,subImpl:YC,tileImpl:JC,topKImpl:ZC,transposeImpl:QC,uniqueImpl:eS}=v;function tS(e,t){return["x","y","z","w","u","v"].slice(0,t).map((t=>`${e}.${t}`))}function nS(e,t){return 1===t?[e]:tS(e,t)}class sS{constructor(e){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0,this.outputShape=e;const t=e.length;if(0===t)this.userCode="\n void main() {\n setOutput(vec4(getA(), 0., 0., 0.));\n }\n ";else{const n=nS("rc",t),s=vC(t),a=function(e,t,n){if(1===e)return`rc > ${t[0]}`;let s="";for(let a=e-2;a<e;a++)s+=`${n[a]} >= ${t[a]}`,a<e-1&&(s+="||");return s}(t,e,n),r=function(e,t,n,s){if(1===e)return"";const a=s.slice(-2);return`\n int r = ${a[0]};\n int c = ${a[1]};\n int rp1 = r + 1;\n int cp1 = c + 1;\n\n bool cEdge = cp1 >= ${t};\n bool rEdge = rp1 >= ${n};\n `}(t,e[e.length-1],e[e.length-2],n),i=function(e,t){const n=e.length,s=function(e,t){const n=[];for(let s=0;s<=1;s++)for(let a=0;a<=1;a++){let r=`${0===s?"r":"rp1"}, ${0===a?"c":"cp1"}`;for(let n=2;n<e;n++)r=`${t[t.length-1-n]},`+r;n.push(r)}return n}(n,t);return 1===n?`getA(rc),\n rc + 1 >= ${e[0]} ? 0. : getA(rc + 1),\n 0, 0`:`getA(${s[0]}),\n cEdge ? 0. : getA(${s[1]}),\n rEdge ? 0. : getA(${s[2]}),\n rEdge || cEdge ? 0. 
: getA(${s[3]})`}(e,n);this.userCode=`\n void main() {\n ${s} rc = getOutputCoords();\n\n if(${a}) {\n setOutput(vec4(0));\n } else {\n ${r}\n\n setOutput(vec4(${i}));\n }\n }\n `}}}class aS{constructor(e,t){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e;let n="";for(let e=0;e<4;e++){let t="thisRC = rc;";e%2==1&&(t+="thisRC.z += 1;"),e>1&&(t+="thisRC.y += 1;"),n+=`\n ${t}\n ${e>0?"if(thisRC.y < rows && thisRC.z < cols){":""}\n int flatIndex = getFlatIndex(thisRC);\n\n ivec3 inputRC = inputCoordsFromReshapedOutCoords(flatIndex);\n vec2 inputRCInnerDims = vec2(float(inputRC.y),float(inputRC.z));\n\n result[${e}] =\n getChannel(getA(inputRC.x, inputRC.y, inputRC.z), inputRCInnerDims);\n ${e>0?"}":""}\n `}var s;this.userCode=`\n ${s=t,`\n ivec3 inputCoordsFromReshapedOutCoords(int index) {\n ${Y$(["r","c","d"],s)}\n return ivec3(r, c, d);\n }\n `}\n ${J$(e)}\n\n void main() {\n ivec3 rc = getOutputCoords();\n\n vec4 result = vec4(0.);\n\n ivec3 thisRC;\n int rows = ${e[1]};\n int cols = ${e[2]};\n\n ${n}\n\n setOutput(result);\n }\n `}}class rS{constructor(e){this.gpgpu=e,this.numUsedTextures=0,this.numFreeTextures=0,this._numBytesAllocated=0,this._numBytesFree=0,this.freeTextures={},this.logEnabled=!1,this.usedTextures={}}acquireTexture(e,t,n){const s=oS(t,n),a=lS(e,s,n);a in this.freeTextures||(this.freeTextures[a]=[]),a in this.usedTextures||(this.usedTextures[a]=[]);const r=iS(e,s,this.gpgpu.gl,this.gpgpu.textureConfig,n);if(this.freeTextures[a].length>0){this.numFreeTextures--,this.numUsedTextures++,this._numBytesFree-=r,this.log();const e=this.freeTextures[a].shift();return this.usedTextures[a].push(e),e}let i;return s===k$.PACKED_2X2_FLOAT32?i=this.gpgpu.createPackedMatrixTexture(e[0],e[1]):s===k$.PACKED_2X2_FLOAT16?i=this.gpgpu.createFloat16PackedMatrixTexture(e[0],e[1]):s===k$.UNPACKED_FLOAT32?i=this.gpgpu.createFloat32MatrixTexture(e[0],e[1]):s===k$.UNPACKED_FLOAT16?i=this.gpgpu.createFloat16MatrixTexture(e[0],e[1]):s===k$.PACKED_4X1_UNSIGNED_BYTE&&(i=this.gpgpu.createUnsignedBytesMatrixTexture(e[0],e[1])),this.usedTextures[a].push(i),this.numUsedTextures++,this._numBytesAllocated+=r,this.log(),i}releaseTexture(e,t,n,s){if(null==this.freeTextures)return;const a=oS(n,s),r=lS(t,a,s);r in this.freeTextures||(this.freeTextures[r]=[]);const i=iS(t,a,this.gpgpu.gl,this.gpgpu.textureConfig,s),o=ue().get("WEBGL_DELETE_TEXTURE_THRESHOLD");-1!==o&&this._numBytesAllocated>o?(this.gpgpu.deleteMatrixTexture(e),this._numBytesAllocated-=i):(this.freeTextures[r].push(e),this.numFreeTextures++,this._numBytesFree+=i),this.numUsedTextures--;const l=this.usedTextures[r],u=l.indexOf(e);if(u<0)throw new Error("Cannot release a texture that was never provided by this texture manager");l.splice(u,1),this.log()}log(){if(!this.logEnabled)return;const e=this.numFreeTextures+this.numUsedTextures;console.log("Free/Used",`${this.numFreeTextures} / ${this.numUsedTextures}`,`(${e})`);const t=this._numBytesFree/this._numBytesAllocated;console.log(`Bytes allocated: ${this._numBytesAllocated}`),console.log(`Bytes unused: ${this._numBytesFree} (${Math.round(100*t)}%)`)}get numBytesAllocated(){return this._numBytesAllocated}get numBytesFree(){return this._numBytesFree}getNumUsedTextures(){return this.numUsedTextures}getNumFreeTextures(){return this.numFreeTextures}dispose(){if(null!=this.freeTextures){for(const e in this.freeTextures)this.freeTextures[e].forEach((e=>{this.gpgpu.deleteMatrixTexture(e)}));for(const e in 
this.usedTextures)this.usedTextures[e].forEach((e=>{this.gpgpu.deleteMatrixTexture(e)}));this.freeTextures=null,this.usedTextures=null,this.numUsedTextures=0,this.numFreeTextures=0,this._numBytesAllocated=0,this._numBytesFree=0}}}function iS(e,t,n,s,a){const r=function(e,t){switch(e){case k$.PACKED_2X2_FLOAT32:return uC(t);case k$.PACKED_2X2_FLOAT16:return cC(t);case k$.UNPACKED_FLOAT32:return iC(t);case k$.UNPACKED_FLOAT16:return oC(t);case k$.PACKED_4X1_UNSIGNED_BYTE:return lC(t);default:throw new Error(`Unknown physical texture type ${e}`)}}(t,s);let i;if(a){const[t,n]=I$(e[0],e[1]);i=t*n}else{const[t,n]=v$(e[0],e[1]);i=t*n}return i*function(e,t){const n=e;if(t===n.R32F)return 4;if(t===n.R16F)return 2;if(t===n.RGBA32F)return 16;if(t===e.RGBA)return 16;if(t===n.RGBA16F)return 8;throw new Error(`Unknown internal format ${t}`)}(n,r)}function oS(e,t){if(e===w$.UPLOAD)return k$.PACKED_2X2_FLOAT32;if(e===w$.RENDER||null==e)return function(e){return ue().getBool("WEBGL_RENDER_FLOAT32_ENABLED")?e?k$.PACKED_2X2_FLOAT32:k$.UNPACKED_FLOAT32:e?k$.PACKED_2X2_FLOAT16:k$.UNPACKED_FLOAT16}(t);if(e===w$.DOWNLOAD||e===w$.PIXELS)return k$.PACKED_4X1_UNSIGNED_BYTE;throw new Error(`Unknown logical texture type ${e}`)}function lS(e,t,n){return`${e[0]}_${e[1]}_${t}_${n}`}class uS{constructor(e,t){this.variableNames=["A"],this.outputShape=e,this.userCode=`\n float unaryOperation(float x) {\n ${t}\n }\n\n void main() {\n float x = getAAtOutCoords();\n float y = unaryOperation(x);\n\n setOutput(y);\n }\n `}}const cS="return abs(x);",hS="return x;";class pS{constructor(e,t){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e,this.userCode=`\n vec4 unaryOperation(vec4 x) {\n ${t}\n }\n\n void main() {\n vec4 x = getAAtOutCoords();\n vec4 y = unaryOperation(x);\n\n setOutput(y);\n }\n `}}class dS{constructor(e){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!1,this.outputShape=e;const t=e.length,n=nS("rc",t),s=vC(t),a=function(e,t){if(1===e)return"rc";let n="";for(let s=0;s<e;s++)n+=t[s],s<e-1&&(n+=",");return n}(t,n),r=n.slice(-2),i=t<=1?"rc":`vec2(${r.join(",")})`;this.userCode=`\n void main() {\n ${s} rc = getOutputCoords();\n vec4 packedInput = getA(${a});\n\n setOutput(getChannel(packedInput, ${i}));\n }\n `}}const fS=Wu,mS={};class gS extends I{constructor(e){if(super(),this.pendingRead=new WeakMap,this.pendingDisposal=new WeakSet,this.dataRefCount=new WeakMap,this.numBytesInGPU=0,this.uploadWaitMs=0,this.downloadWaitMs=0,this.lastGlFlushTime=0,this.warnedAboutMemory=!1,this.warnedAboutCPUBackend=!1,this.pendingDeletes=0,this.disposed=!1,!ue().getBool("HAS_WEBGL"))throw new Error("WebGL is not supported on this device");if(null==e){const e=b$(ue().getNumber("WEBGL_VERSION"));this.binaryCache=((t=ue().getNumber("WEBGL_VERSION"))in mS||(mS[t]={}),mS[t]),this.gpgpu=new hC(e),this.canvas=e.canvas,this.gpgpuCreatedLocally=!0}else this.gpgpu=e,this.binaryCache={},this.gpgpuCreatedLocally=!1,this.canvas=e.gl.canvas;var t;this.textureManager=new rS(this.gpgpu),this.numMBBeforeWarning=null==ue().global.screen?1024:ue().global.screen.height*ue().global.screen.width*window.devicePixelRatio*600/1024/1024,this.texData=new N(this,Ir())}nextDataId(){return gS.nextDataId++}numDataIds(){return this.texData.numDataIds()+(this.cpuBackend?this.cpuBackend.numDataIds():0)-this.pendingDeletes}write(e,t,n){if((ue().getBool("WEBGL_CHECK_NUMERICAL_PROBLEMS")||ue().getBool("DEBUG"))&&this.checkNumericalProblems(e),"complex64"===n&&null!=e)throw new Error("Cannot write to a complex64 
dtype. Please use tf.complex(real, imag).");const s={id:this.nextDataId()};return this.texData.set(s,{shape:t,dtype:n,values:e,usage:w$.UPLOAD,refCount:1}),s}refCount(e){return this.texData.has(e)?this.texData.get(e).refCount:0}incRef(e){this.texData.get(e).refCount++}decRef(e){this.texData.has(e)&&this.texData.get(e).refCount--}move(e,t,n,s,a){if(ue().getBool("DEBUG")&&this.checkNumericalProblems(t),"complex64"===s)throw new Error("Cannot write to a complex64 dtype. Please use tf.complex(real, imag).");this.texData.set(e,{shape:n,dtype:s,values:t,usage:w$.UPLOAD,refCount:a})}disposeIntermediateTensorInfo(e){this.disposeData(e.dataId)}readSync(e){const t=this.texData.get(e),{values:n,dtype:s,complexTensorInfos:a,slice:r,shape:i,isPacked:o}=t;if(null!=r){let t;t=o?new pS(i,hS):new uS(i,hS);const n=this.runWebGLProgram(t,[{dataId:e,shape:i,dtype:s}],s),a=this.readSync(n.dataId);return this.disposeIntermediateTensorInfo(n),a}if(null!=n)return this.convertAndCacheOnCPU(e);if("string"===s)return n;const l=null!=this.activeTimers;let u,c;return l&&(u=bs()),c="complex64"===s?Su(this.readSync(a.real.dataId),this.readSync(a.imag.dataId)):this.getValuesFromTexture(e),l&&(this.downloadWaitMs+=bs()-u),this.convertAndCacheOnCPU(e,c)}async read(e){if(this.pendingRead.has(e)){const t=this.pendingRead.get(e);return new Promise((e=>t.push(e)))}const t=this.texData.get(e),{values:n,shape:s,slice:a,dtype:r,complexTensorInfos:i,isPacked:o}=t;if(null!=a){let t;t=o?new pS(s,hS):new uS(s,hS);const n=this.runWebGLProgram(t,[{dataId:e,shape:s,dtype:r}],r),a=this.read(n.dataId);return this.disposeIntermediateTensorInfo(n),a}if(null!=n)return this.convertAndCacheOnCPU(e);if(!ue().getBool("WEBGL_DOWNLOAD_FLOAT_ENABLED")&&2===ue().getNumber("WEBGL_VERSION"))throw new Error("tensor.data() with WEBGL_DOWNLOAD_FLOAT_ENABLED=false and WEBGL_VERSION=2 not yet supported.");let l,u,c=null;if("complex64"!==r&&ue().get("WEBGL_BUFFER_SUPPORTED")){l=this.decode(e);const t=this.texData.get(l.dataId);c=this.gpgpu.createBufferFromTexture(t.texture,...N$(s))}if(this.pendingRead.set(e,[]),"complex64"!==r&&await this.gpgpu.createAndWaitForFence(),"complex64"===r){const e=await Promise.all([this.read(i.real.dataId),this.read(i.imag.dataId)]);u=Su(e[0],e[1])}else if(null==c)u=this.getValuesFromTexture(e);else{const e=D(s);u=this.gpgpu.downloadFloat32MatrixFromBuffer(c,e)}null!=l&&this.disposeIntermediateTensorInfo(l);const h=this.convertAndCacheOnCPU(e,u),p=this.pendingRead.get(e);return this.pendingRead.delete(e),p.forEach((e=>e(h))),this.pendingDisposal.has(e)&&(this.pendingDisposal.delete(e),this.disposeData(e)&&Ir().removeDataId(e,this),this.pendingDeletes--),h}bufferSync(e){const t=this.readSync(e.dataId);let n=t;if("string"===e.dtype)try{n=t.map((e=>ws(e)))}catch(e){throw new Error("Failed to decode encoded string bytes into utf-8")}return Ha(e.shape,e.dtype,n)}checkNumericalProblems(e){if(null!=e)for(let t=0;t<e.length;t++){const n=e[t];if(!S$(n)){if(ue().getBool("WEBGL_RENDER_FLOAT32_CAPABLE"))throw Error(`The value ${n} cannot be represented with your current settings. 
Consider enabling float32 rendering: 'tf.env().set('WEBGL_RENDER_FLOAT32_ENABLED', true);'`);throw Error(`The value ${n} cannot be represented on this device.`)}}}getValuesFromTexture(e){const{shape:t,dtype:n,isPacked:s}=this.texData.get(e),a=D(t);if(ue().getBool("WEBGL_DOWNLOAD_FLOAT_ENABLED")){const n=this.decode(e),s=this.texData.get(n.dataId),r=this.gpgpu.downloadMatrixFromPackedTexture(s.texture,...N$(t)).subarray(0,a);return this.disposeIntermediateTensorInfo(n),r}const r=ue().getBool("WEBGL_PACK")&&!0===s,i=r?P$(t):t,o=r?new nC(i):new tC(i),l=this.runWebGLProgram(o,[{shape:i,dtype:n,dataId:e}],"float32"),u=this.texData.get(l.dataId),c=this.gpgpu.downloadByteEncodedFloatMatrixFromOutputTexture(u.texture,u.texShape[0],u.texShape[1]).subarray(0,a);return this.disposeIntermediateTensorInfo(l),c}timerAvailable(){return ue().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0}async time(e){const t=this.activeTimers,n=[];let s=!1;null==this.programTimersStack?(this.programTimersStack=n,s=!0):this.activeTimers.push(n),this.activeTimers=n,e();const a=F(this.activeTimers.map((e=>e.query))).filter((e=>null!=e)),r=F(this.activeTimers.map((e=>e.name))).filter((e=>null!=e));this.activeTimers=t,s&&(this.programTimersStack=null);const i={uploadWaitMs:this.uploadWaitMs,downloadWaitMs:this.downloadWaitMs,kernelMs:null,wallMs:null};if(ue().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0){const e=await Promise.all(a);i.kernelMs=function(e){let t=0;for(let n=0;n<e.length;n++)t+=e[n];return t}(e),i.getExtraProfileInfo=()=>e.map(((e,t)=>({name:r[t],ms:e}))).map((e=>`${e.name}: ${e.ms}`)).join(", ")}else i.kernelMs={error:"WebGL query timers are not supported in this environment."};return this.uploadWaitMs=0,this.downloadWaitMs=0,i}memory(){return{unreliable:!1,numBytesInGPU:this.numBytesInGPU,numBytesInGPUAllocated:this.textureManager.numBytesAllocated,numBytesInGPUFree:this.textureManager.numBytesFree}}startTimer(){return ue().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0?this.gpgpu.beginQuery():{startMs:bs(),endMs:null}}endTimer(e){return ue().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0?(this.gpgpu.endQuery(),e):(e.endMs=bs(),e)}async getQueryTime(e){if(ue().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0)return this.gpgpu.waitForQueryAndGetTime(e);const t=e;return t.endMs-t.startMs}disposeData(e,t=!1){if(this.pendingDisposal.has(e))return!1;if(!this.texData.has(e))return!0;if(t?this.texData.get(e).refCount=0:this.texData.get(e).refCount--,!t&&this.texData.get(e).refCount>0)return!1;if(this.pendingRead.has(e))return this.pendingDisposal.add(e),this.pendingDeletes++,!1;this.releaseGPUData(e);const{complexTensorInfos:n}=this.texData.get(e);return null!=n&&(this.disposeData(n.real.dataId,t),this.disposeData(n.imag.dataId,t)),this.texData.delete(e),!0}releaseGPUData(e){const{texture:t,dtype:n,texShape:s,usage:a,isPacked:r,slice:i}=this.texData.get(e),o=i&&i.origDataId||e,l=this.dataRefCount.get(o);l>1?this.dataRefCount.set(o,l-1):(this.dataRefCount.delete(o),null!=t&&(this.numBytesInGPU-=this.computeBytes(s,n),this.textureManager.releaseTexture(t,s,a,r)));const u=this.texData.get(e);u.texture=null,u.texShape=null,u.isPacked=!1,u.slice=null}getTexture(e){return this.uploadToGPU(e),this.texData.get(e).texture}getDataInfo(e){return this.texData.get(e)}getCPUBackend(){return ue().getBool("WEBGL_CPU_FORWARD")?(null==this.cpuBackend&&(this.cpuBackend=Ir().findBackend("cpu")),this.cpuBackend):null}shouldExecuteOnCPU(e,t=128){const 
n=this.getCPUBackend();return ue().getBool("IS_TEST")||this.warnedAboutCPUBackend||null!=n||(console.warn("Your application contains ops that are small enough to be executed on the CPU backend, however the CPU backend cannot be found. Consider importing the CPU backend (@tensorflow/tfjs-backend-cpu) for better performance."),this.warnedAboutCPUBackend=!0),null!=n&&e.every((e=>null==this.texData.get(e.dataId).texture&&D(e.shape)<t))}getGPGPUContext(){return this.gpgpu}where(e){$u("tf.where() in webgl locks the UI thread. Call tf.whereAsync() instead");const t=e.dataSync();return fS(e.shape,t)}packedUnaryOp(e,t,n){const s=new pS(e.shape,t),a=this.compileAndRun(s,[e],n);return Ir().makeTensorFromDataId(a.dataId,a.shape,a.dtype)}abs(e){if(this.shouldExecuteOnCPU([e])&&"complex64"!==e.dtype){const t=KC(this.texData.get(e.dataId).values);return this.makeOutput(e.shape,e.dtype,t)}if(ue().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,cS,e.dtype);const t=new uS(e.shape,cS),n=this.compileAndRun(t,[e]);return Ir().makeTensorFromDataId(n.dataId,n.shape,n.dtype)}makeTensorInfo(e,t,n){let s;if("string"===t&&null!=n&&n.length>0&&K(n[0])){const a=n.map((e=>xs(e)));s=this.write(a,e,t)}else s=this.write(n,e,t);return this.texData.get(s).usage=null,{dataId:s,shape:e,dtype:t}}makeOutput(e,t,n){const{dataId:s}=this.makeTensorInfo(e,t,n);return Ir().makeTensorFromDataId(s,e,t,this)}unpackTensor(e){const t=new dS(e.shape);return this.runWebGLProgram(t,[e],e.dtype)}packTensor(e){const t=new sS(e.shape);return this.runWebGLProgram(t,[e],e.dtype,null,!0)}packedReshape(e,t){const n=[L$(e.shape),...z$(e.shape)],s={dtype:e.dtype,shape:n,dataId:e.dataId},a=[L$(t),...z$(t)],r=new aS(a,n),i=this.runWebGLProgram(r,[s],e.dtype,null,!0);return{dataId:i.dataId,shape:t,dtype:i.dtype}}decode(e){const t=this.texData.get(e),{isPacked:n,shape:s,dtype:a}=t,r=P$(s);let i;return i=n?new eC(r):new Q$(r),{dtype:a,shape:s,dataId:this.runWebGLProgram(i,[{shape:r,dtype:a,dataId:e}],a,null,!0).dataId}}runWebGLProgram(e,t,n,s,a=!1){const r=this.makeTensorInfo(e.outputShape,n),i=this.texData.get(r.dataId);if(e.packedOutput&&(i.isPacked=!0),e.outPackingScheme===x$.DENSE){const t=N$(e.outputShape);i.texShape=t.map((e=>2*e))}if(null!=e.outTexUsage&&(i.usage=e.outTexUsage),0===D(r.shape))return i.values=V(r.dtype,0),r;const o=[],l=t.map((t=>{if("complex64"===t.dtype)throw new Error("GPGPUProgram does not support complex64 input. 
For complex64 dtypes, please separate the program into real and imaginary parts.");let n=this.texData.get(t.dataId);if(null==n.texture){if(!e.packedInputs&&D(t.shape)<=ue().getNumber("WEBGL_SIZE_UPLOAD_UNIFORM"))return{shape:t.shape,texData:null,isUniform:!0,uniformValues:n.values};e.packedInputs&&(n.isPacked=!0,n.shape=t.shape)}else if(!!n.isPacked!=!!e.packedInputs)t=n.isPacked?this.unpackTensor(t):this.packTensor(t),o.push(t),n=this.texData.get(t.dataId);else if(n.isPacked&&!W$(n.shape,t.shape)){const e=t,s=t.shape;t.shape=n.shape,t=this.packedReshape(t,s),o.push(t),n=this.texData.get(t.dataId),e.shape=s}return this.uploadToGPU(t.dataId),{shape:t.shape,texData:n,isUniform:!1}}));this.uploadToGPU(r.dataId);const u={shape:r.shape,texData:i,isUniform:!1},c=function(e,t,n){let s="";t.concat(n).forEach((e=>{const t=null!=e.texData&&null!=e.texData.slice&&e.texData.slice.flatOffset>0,n=e.isUniform?"uniform":e.texData.texShape;s+=`${e.shape}_${n}_${t}`}));const a=e.userCode;let r=e.constructor.name;return r+="_"+s+"_"+a,r}(e,l,u),h=this.getAndSaveBinary(c,(()=>function(e,t,n,s){const a=t.userCode,r=n.map(((e,n)=>{const s={logicalShape:e.shape,texShape:e.isUniform?null:e.texData.texShape,isUniform:e.isUniform,isPacked:!e.isUniform&&e.texData.isPacked,flatOffset:null};return null!=e.texData&&null!=e.texData.slice&&e.texData.slice.flatOffset>0&&(s.flatOffset=e.texData.slice.flatOffset),{name:t.variableNames[n],shapeInfo:s}})),i=r.map((e=>e.shapeInfo)),o={logicalShape:s.shape,texShape:s.texData.texShape,isUniform:!1,isPacked:s.texData.isPacked,flatOffset:null},l=dC(r,o,a,t.packedInputs),u=e.createProgram(l);let c=null;const h=e.getUniformLocation(u,"NAN",!1);1===ue().getNumber("WEBGL_VERSION")&&(c=e.getUniformLocation(u,"INFINITY",!1));const p={};for(let n=0;n<t.variableNames.length;n++){const s=t.variableNames[n],a=!1;p[s]=e.getUniformLocation(u,s,a),p[`offset${s}`]=e.getUniformLocation(u,`offset${s}`,a)}return{program:t,source:l,webGLProgram:u,uniformLocations:p,inShapeInfos:i,outShapeInfo:o,infLoc:c,nanLoc:h}}(this.gpgpu,e,l,u))),p=null!=this.activeTimers;let d;p&&(d=this.startTimer()),function(e,t,n,s,a){$C(t.inShapeInfos,n),$C([t.outShapeInfo],[s]);const r=s.texData.texture,i=s.texData.texShape;s.texData.isPacked?e.setOutputPackedMatrixTexture(r,i[0],i[1]):e.setOutputMatrixTexture(r,i[0],i[1]),e.setProgram(t.webGLProgram),1===ue().getNumber("WEBGL_VERSION")&&null!==t.infLoc&&e.gl.uniform1f(t.infLoc,1/0),null!==t.nanLoc&&e.gl.uniform1f(t.nanLoc,NaN),n.forEach(((n,s)=>{const a=t.program.variableNames[s],r=t.uniformLocations[a],i=t.uniformLocations[`offset${a}`];if(null!=r)if(n.isUniform)if(D(n.shape)<2)e.gl.uniform1f(r,n.uniformValues[0]);else{let t=n.uniformValues;t instanceof Float32Array||(t=new Float32Array(t)),e.gl.uniform1fv(r,t)}else null!=n.texData.slice&&null!=i&&e.gl.uniform1i(i,n.texData.slice.flatOffset),e.setInputMatrixTexture(n.texData.texture,r,s)})),null!=a&&a(e,t.webGLProgram),e.executeProgram()}(this.gpgpu,h,l,u,s),o.forEach((e=>this.disposeIntermediateTensorInfo(e))),p&&(d=this.endTimer(d),this.activeTimers.push({name:e.constructor.name,query:this.getQueryTime(d)}));const f=ue().get("WEBGL_FLUSH_THRESHOLD");if(f>0){const e=bs();e-this.lastGlFlushTime>f&&(this.gpgpu.gl.flush(),this.lastGlFlushTime=e)}if(!ue().getBool("WEBGL_LAZILY_UNPACK")&&i.isPacked&&!1===a){const e=this.unpackTensor(r);return this.disposeIntermediateTensorInfo(r),e}return r}compileAndRun(e,t,n,s,a=!1){return n=n||t[0].dtype,this.runWebGLProgram(e,t,n,s,a)}getAndSaveBinary(e,t){return e in 
this.binaryCache||(this.binaryCache[e]=t()),this.binaryCache[e]}getTextureManager(){return this.textureManager}dispose(){this.disposed||(ue().getBool("IS_TEST")||Object.keys(this.binaryCache).forEach((e=>{this.gpgpu.deleteProgram(this.binaryCache[e].webGLProgram),delete this.binaryCache[e]})),this.textureManager.dispose(),null!=this.canvas&&"undefined"!=typeof HTMLCanvasElement&&this.canvas instanceof HTMLCanvasElement?this.canvas.remove():this.canvas=null,this.gpgpuCreatedLocally&&(this.gpgpu.program=null,this.gpgpu.dispose()),this.disposed=!0)}floatPrecision(){return null==this.floatPrecisionValue&&(this.floatPrecisionValue=Cr((()=>{if(!ue().get("WEBGL_RENDER_FLOAT32_ENABLED")){const e=ue().getBool("DEBUG");ue().set("DEBUG",!1);const t=this.abs(Fr(1e-8)).dataSync()[0];if(ue().set("DEBUG",e),t>0)return 32}return 16}))),this.floatPrecisionValue}epsilon(){return 32===this.floatPrecision()?1e-7:1e-4}uploadToGPU(e){const t=this.texData.get(e),{shape:n,dtype:s,values:a,texture:r,usage:i,isPacked:o}=t;if(null!=r)return;const l=null!=this.activeTimers;let u;l&&(u=bs());let c=t.texShape;if(null==c&&(c=function(e,t=!1){let n=ue().getNumber("WEBGL_MAX_TEXTURE_SIZE");if(t&&(n*=2,1===(e=e.map(((t,n)=>n>=e.length-2?T(e[n]):e[n]))).length&&(e=[2,e[0]])),2!==e.length){const t=W(e);e=t.newShape}let s=D(e);if(e.length<=1&&s<=n)return[1,s];if(2===e.length&&e[0]<=n&&e[1]<=n)return e;if(3===e.length&&e[0]*e[1]<=n&&e[2]<=n)return[e[0]*e[1],e[2]];if(3===e.length&&e[0]<=n&&e[1]*e[2]<=n)return[e[0],e[1]*e[2]];if(4===e.length&&e[0]*e[1]*e[2]<=n&&e[3]<=n)return[e[0]*e[1]*e[2],e[3]];if(4===e.length&&e[0]<=n&&e[1]*e[2]*e[3]<=n)return[e[0],e[1]*e[2]*e[3]];if(t){const t=L$(e);let n=2,a=2;return e.length&&([n,a]=z$(e)),s=t*(n/2)*(a/2),M(s).map((e=>2*e))}return M(s)}(n,o),t.texShape=c),null!=a){const e=P$(n);let r,i=c[1],h=c[0];const p=a instanceof Uint8Array;o?([i,h]=I$(c[0],c[1]),r=new aC(e,[h,i],p)):r=new sC(e,[h,i],p);const d=this.makeTensorInfo([h,i],s);this.texData.get(d.dataId).usage=p?w$.PIXELS:w$.UPLOAD,this.gpgpu.uploadDenseMatrixToTexture(this.getTexture(d.dataId),i,h,a);const f=!0,m=this.runWebGLProgram(r,[d],s,null,f),g=this.texData.get(m.dataId);t.texture=g.texture,t.texShape=g.texShape,t.isPacked=g.isPacked,t.usage=g.usage,this.disposeIntermediateTensorInfo(d),this.texData.delete(m.dataId),t.values=null,l&&(this.uploadWaitMs+=bs()-u)}else{const e=this.acquireTexture(c,i,s,o);t.texture=e}}convertAndCacheOnCPU(e,t){const n=this.texData.get(e),{dtype:s}=n;return this.releaseGPUData(e),null!=t&&(n.values=function(e,t){if("float32"===t||"complex64"===t)return e;if("int32"===t||"bool"===t){const n="int32"===t?new Int32Array(e.length):new Uint8Array(e.length);for(let t=0;t<n.length;++t)n[t]=Math.round(e[t]);return n}throw new Error(`Unknown dtype ${t}`)}(t,s)),n.values}acquireTexture(e,t,n,s){if(this.numBytesInGPU+=this.computeBytes(e,n),!this.warnedAboutMemory&&this.numBytesInGPU>1024*this.numMBBeforeWarning*1024){const e=(this.numBytesInGPU/1024/1024).toFixed(2);this.warnedAboutMemory=!0,console.warn(`High memory usage in GPU: ${e} MB, most likely due to a memory leak`)}return this.textureManager.acquireTexture(e,t,s)}computeBytes(e,t){return e[0]*e[1]*j(t)}}gS.nextDataId=0,ta()&&Ar("webgl",(()=>new gS),2);class yS{constructor(e,t,n){this.variableNames=["A","B"],this.outputShape=vi(t,n),this.userCode=`\n float binaryOperation(float a, float b) {\n ${e}\n }\n\n void main() {\n float a = getAAtOutCoords();\n float b = getBAtOutCoords();\n setOutput(binaryOperation(a, b));\n }\n `}}class 
bS{constructor(e,t,n,s=!1){this.variableNames=["A","B"],this.supportsBroadcasting=!0,this.packedInputs=!0,this.packedOutput=!0,this.outputShape=vi(t,n);const a=this.outputShape.length;let r="";if(s)if(0===a||1===D(this.outputShape))r="\n result.y = 0.;\n result.z = 0.;\n result.w = 0.;\n ";else if(r=`\n ${vC(a)} coords = getOutputCoords();\n `,1===a)r+=`\n result.y = (coords + 1) >= ${this.outputShape[0]} ? 0. : result.y;\n result.z = 0.;\n result.w = 0.;\n `;else{const e=nS("coords",a);r+=`\n bool nextRowOutOfBounds =\n (${e[a-2]} + 1) >= ${this.outputShape[a-2]};\n bool nextColOutOfBounds =\n (${e[a-1]} + 1) >= ${this.outputShape[a-1]};\n result.y = nextColOutOfBounds ? 0. : result.y;\n result.z = nextRowOutOfBounds ? 0. : result.z;\n result.w = nextColOutOfBounds || nextRowOutOfBounds ? 0. : result.w;\n `}this.userCode=`\n vec4 binaryOperation(vec4 a, vec4 b) {\n ${e}\n }\n\n void main() {\n vec4 a = getAAtOutCoords();\n vec4 b = getBAtOutCoords();\n\n vec4 result = binaryOperation(a, b);\n ${r}\n\n setOutput(result);\n }\n `}}function xS(e){const{inputs:t,backend:n}=e,{x:s}=t;return n.incRef(s.dataId),{dataId:s.dataId,shape:s.shape,dtype:s.dtype}}const wS={kernelName:It,backendName:"webgl",kernelFunc:xS};function kS(e){const{inputs:t,backend:n}=e,{real:s,imag:a}=t,r=n.makeTensorInfo(s.shape,"complex64"),i=n.texData.get(r.dataId),o=xS({inputs:{x:s},backend:n}),l=xS({inputs:{x:a},backend:n});return i.complexTensorInfos={real:o,imag:l},r}const vS={kernelName:ze,backendName:"webgl",kernelFunc:kS},NS="return (a < 0.) ? b * a : a;",IS="\n vec4 aLessThanZero = vec4(lessThan(a, vec4(0.)));\n return (aLessThanZero * (b * a)) + ((vec4(1.0) - aLessThanZero) * a);\n",$S={kernelName:At,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{alpha:r}=s,i=n.makeTensorInfo([],"float32",gs(r,"float32")),o=ue().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new bS(IS,a.shape,i.shape):new yS(NS,a.shape,i.shape),l=n.runWebGLProgram(o,[a,i],a.dtype);return n.disposeIntermediateTensorInfo(i),l}},CS="return (a < 0.) ? 
b * a : a;",SS="\n vec4 aLessThanZero = vec4(lessThan(a, vec4(0.)));\n return (aLessThanZero * (b * a)) + ((vec4(1.0) - aLessThanZero) * a);\n",TS={kernelName:pn,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n}=e,{x:s,alpha:a}=t,r=ue().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new bS(SS,s.shape,a.shape):new yS(CS,s.shape,a.shape);return n.runWebGLProgram(r,[s,a],s.dtype)}};function ES({opSnippet:e,packedOpSnippet:t,cpuKernelImpl:n,dtype:s}){return({inputs:a,backend:r})=>{const{x:i}=a,o=r,l=s||i.dtype;if(o.shouldExecuteOnCPU([i])&&null!=n){const e=o.texData.get(i.dataId),t=n(e.values,l);return o.makeTensorInfo(i.shape,l,t)}let u;return u=ue().getBool("WEBGL_PACK_UNARY_OPERATIONS")&&null!=t?new pS(i.shape,t):new uS(i.shape,e),o.runWebGLProgram(u,[i],l)}}function AS({opSnippet:e,packedOpSnippet:t,checkOutOfBounds:n=!1,supportsComplex:s=!1,cpuKernelImpl:a,dtype:r}){return({inputs:i,backend:o})=>{const{a:l,b:u}=i,c=o;if(s&&"complex64"===l.dtype){const t=c.texData.get(l.dataId),n=c.texData.get(u.dataId),[s,a]=[[t.complexTensorInfos.real,n.complexTensorInfos.real],[t.complexTensorInfos.imag,n.complexTensorInfos.imag]].map((t=>{const[n,s]=t,a={dataId:n.dataId,dtype:n.dtype,shape:l.shape},r={dataId:s.dataId,dtype:s.dtype,shape:u.shape},i=new yS(e,l.shape,u.shape);return c.runWebGLProgram(i,[a,r],Vs(n.dtype,s.dtype))})),r=kS({inputs:{real:s,imag:a},backend:c});return c.disposeIntermediateTensorInfo(s),c.disposeIntermediateTensorInfo(a),r}const h=r||Vs(l.dtype,u.dtype);if(c.shouldExecuteOnCPU([l,u])&&null!=a){const e=c.texData.get(l.dataId),t=c.texData.get(u.dataId),[n,s]=a(l.shape,u.shape,e.values,t.values,h),r=c.makeTensorInfo(s,h);return c.texData.get(r.dataId).values=n,r}let p;return p=ue().getBool("WEBGL_PACK_BINARY_OPERATIONS")&&null!=t?new bS(t,l.shape,u.shape,n):new yS(e,l.shape,u.shape),c.runWebGLProgram(p,[l,u],h)}}function RS(e,t=!1){if("linear"===e)return"return x;";if("relu"===e)return t?"\n vec4 result = x * vec4(greaterThanEqual(x, vec4(0.0)));\n bvec4 isNaN = isnan(x);\n\n result.r = isNaN.r ? x.r : result.r;\n result.g = isNaN.g ? x.g : result.g;\n result.b = isNaN.b ? x.b : result.b;\n result.a = isNaN.a ? x.a : result.a;\n\n return result;\n":"if (isnan(x)) return x;\n return (x < 0.0) ? 0.0 : x;\n";if("elu"===e)return t?"\n vec4 result;\n\n result.r = (x.r >= 0.0) ? x.r : (exp(x.r) - 1.0);\n result.g = (x.g >= 0.0) ? x.g : (exp(x.g) - 1.0);\n result.b = (x.b >= 0.0) ? x.b : (exp(x.b) - 1.0);\n result.a = (x.a >= 0.0) ? x.a : (exp(x.a) - 1.0);\n\n return result;\n":"return (x >= 0.0) ? x : (exp(x) - 1.0);";if("relu6"===e)return t?"\n vec4 result = min(x, vec4(6.)) * vec4(greaterThanEqual(x, vec4(0.0)));\n bvec4 isNaN = isnan(x);\n\n result.r = isNaN.r ? x.r : result.r;\n result.g = isNaN.g ? x.g : result.g;\n result.b = isNaN.b ? x.b : result.b;\n result.a = isNaN.a ? x.a : result.a;\n\n return result;\n":"if (isnan(x)) return x;\n return (x < 0.0) ? 
0.0 : min(6.0, x);\n";if("prelu"===e)return t?SS:CS;if("leakyrelu"===e)return t?IS:NS;throw new Error(`Activation ${e} has not been implemented for the WebGL backend.`)}class FS{constructor(e,t,n,s=!1,a=!1,r=!1,i=null,o=!1,l=!1){this.variableNames=["matrixA","matrixB"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=n;const u=s?e[1]:e[2],c=Math.ceil(u/2),h=s?"i * 2, rc.y":"rc.y, i * 2",p=a?"rc.z, i * 2":"i * 2, rc.z",d=s?["a.xxyy","a.zzww"]:["a.xxzz","a.yyww"],f=a?["b.xzxz","b.ywyw"]:["b.xyxy","b.zwzw"];let m="",g="";i&&(m=o?`vec4 activation(vec4 a) {\n vec4 b = getPreluActivationWeightsAtOutCoords();\n ${i}\n }`:l?`vec4 activation(vec4 a) {\n vec4 b = getLeakyreluAlphaAtOutCoords();\n ${i}\n }`:`vec4 activation(vec4 x) {\n ${i}\n }`,g="result = activation(result);");const y=r?"result += getBiasAtOutCoords();":"";r&&this.variableNames.push("bias"),o&&this.variableNames.push("preluActivationWeights"),l&&this.variableNames.push("leakyreluAlpha");let b="rc.x",x="rc.x";e[0]<t[0]?b=`int(min(float(rc.x), ${e[0]-1}.))`:t[0]<e[0]&&(x=`int(min(float(rc.x), ${t[0]-1}.))`),this.userCode=`\n ${m}\n\n const float sharedDimension = ${c}.0;\n\n vec4 dot2x2ARowBCol(ivec3 rc) {\n vec4 result = vec4(0);\n for (int i = 0; i < ${c}; i++) {\n int batchA = ${b};\n int batchB = ${x};\n vec4 a = getMatrixA(batchA, ${h});\n vec4 b = getMatrixB(batchB, ${p});\n\n // These swizzled products need to be separately added.\n // See: https://github.com/tensorflow/tfjs/issues/1735\n result += (${d[0]} * ${f[0]});\n result += (${d[1]} * ${f[1]});\n }\n return result;\n }\n\n void main() {\n ivec3 rc = getOutputCoords();\n vec4 result = dot2x2ARowBCol(rc);\n\n ${y}\n\n ${g}\n\n setOutput(result);\n }\n `}}class DS{constructor(e,t,n){this.variableNames=["AReal","AImag","BReal","BImag"],this.outputShape=vi(t,n),this.userCode=`\n float binaryOpComplex(\n float areal, float aimag, float breal, float bimag) {\n ${e}\n }\n\n void main() {\n float areal = getARealAtOutCoords();\n float aimag = getAImagAtOutCoords();\n float breal = getBRealAtOutCoords();\n float bimag = getBImagAtOutCoords();\n setOutput(binaryOpComplex(areal, aimag, breal, bimag));\n }\n `}}const _S="return a * b;";function OS(e){const{inputs:t,backend:n}=e,{a:s,b:a}=t,r=Vs(s.dtype,a.dtype);if("complex64"===s.dtype){const e=n.texData.get(s.dataId),t=n.texData.get(a.dataId),r=new DS("return areal * breal - aimag * bimag;",s.shape,a.shape),i=new DS("return areal * bimag + aimag * breal;",s.shape,a.shape),o=[{dataId:e.complexTensorInfos.real.dataId,dtype:e.complexTensorInfos.real.dtype,shape:s.shape},{dataId:e.complexTensorInfos.imag.dataId,dtype:e.complexTensorInfos.imag.dtype,shape:s.shape},{dataId:t.complexTensorInfos.real.dataId,dtype:t.complexTensorInfos.real.dtype,shape:a.shape},{dataId:t.complexTensorInfos.imag.dataId,dtype:t.complexTensorInfos.imag.dtype,shape:a.shape}],l=n.runWebGLProgram(r,o,"float32"),u=n.runWebGLProgram(i,o,"float32"),c=kS({inputs:{real:l,imag:u},backend:n});return n.disposeIntermediateTensorInfo(l),n.disposeIntermediateTensorInfo(u),c}if(n.shouldExecuteOnCPU([s,a])){const e=n.texData.get(s.dataId),t=n.texData.get(a.dataId),[i,o]=VC(s.shape,a.shape,e.values,t.values,r),l=n.makeTensorInfo(o,r);return n.texData.get(l.dataId).values=i,l}let i;return i=ue().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new bS(_S,s.shape,a.shape):new yS(_S,s.shape,a.shape),n.runWebGLProgram(i,[s,a],r)}const MS={kernelName:en,backendName:"webgl",kernelFunc:OS};function 
LS(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{shape:r}=s,i=n,o=D(a.shape),l=P(r,o),u=D(l);E(o===u,(()=>`The new shape (${l}) has ${u} elements and the old shape (${a.shape}) has ${o} elements. The new shape and old shape must have the same number of elements.`));const c=i.texData.get(a.dataId);return!c.isPacked||W$(a.shape,l)||null!==c.texture&&W$(c.shape,l)?(i.incRef(a.dataId),{dataId:a.dataId,shape:l,dtype:a.dtype}):function(e,t,n){const s=[L$(e.shape),...z$(e.shape)],a={dtype:e.dtype,shape:s,dataId:e.dataId},r=[L$(t),...z$(t)],i=new aS(r,s),o=n.runWebGLProgram(i,[a],e.dtype,null,!0);return{dataId:o.dataId,shape:t,dtype:o.dtype}}(a,l,i)}const zS={kernelName:bn,backendName:"webgl",kernelFunc:LS};class PS{constructor(e,t){this.variableNames=["x"];const{windowSize:n,batchSize:s,inSize:a,outSize:r}=e;this.outputShape=[s,r];const i=4*Math.floor(n/4),o=n%4;let l="sumValue += dot(values, ones);";if(null!=t){const e=1/t;l=`sumValue += dot(values * ${O(e)?e.toPrecision(2):e}, ones);`}let u="";a%n>0&&(u=`\n if (inIdx < 0 || inIdx >= ${a}) {\n return 0.0;\n }\n `),this.userCode=`\n const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0);\n\n float getValue(int batch, int inIdx) {\n ${u}\n return getX(batch, inIdx);\n }\n\n void main() {\n ivec2 coords = getOutputCoords();\n int batch = coords[0];\n int outIdx = coords[1];\n int inOffset = outIdx * ${n};\n\n float sumValue = 0.0;\n\n for (int i = 0; i < ${i}; i += 4) {\n int inIdx = inOffset + i;\n vec4 values = vec4(\n getValue(batch, inIdx),\n getValue(batch, inIdx + 1),\n getValue(batch, inIdx + 2),\n getValue(batch, inIdx + 3)\n );\n\n ${l}\n }\n\n int inIdx = inOffset + ${i};\n if (${1===o}) {\n vec4 values = vec4(getValue(batch, inIdx), 0.0, 0.0, 0.0);\n\n ${l}\n } else if (${2===o}) {\n vec4 values = vec4(\n getValue(batch, inIdx),\n getValue(batch, inIdx + 1), 0.0, 0.0);\n\n ${l}\n } else if (${3===o}) {\n vec4 values = vec4(\n getValue(batch, inIdx),\n getValue(batch, inIdx + 1),\n getValue(batch, inIdx + 2), 0.0);\n\n ${l}\n }\n setOutput(sumValue);\n }\n `}}class BS{constructor(e,t){this.variableNames=["x"];const{windowSize:n,batchSize:s,inSize:a,outSize:r}=e;this.outputShape=[s,r];let i="0.0",o="";"prod"===t?i="1.0":"min"===t?(i="1.0 / 1e-20",o="min"):"max"===t&&(i="-1.0 / 1e-20",o="max");let l=`${t}(${t}(${t}(minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])`;"sum"===t?l="sumValue":"prod"===t?l="prodValue":"all"===t?l="allValue":"any"===t&&(l="anyValue");const u=4*Math.floor(n/4),c=n%4;let h=`\n if (${"sum"===t}) {\n sumValue += dot(values, ones);\n } else if (${"prod"===t}) {\n vec2 tmp = vec2(values[0], values[1]) * vec2(values[2], values[3]);\n prodValue *= tmp[0] * tmp[1];\n } else {\n minMaxValue = ${o}(values, minMaxValue);\n }\n `,p="vec4";"all"===t?(i="1.0",h="\n bool reducedAllValue = all(values);\n float floatedReducedAllValue = float(reducedAllValue);\n allValue = float(allValue >= 1.0 && floatedReducedAllValue >= 1.0);\n ",p="bvec4"):"any"===t&&(i="0.0",h="\n bool reducedAnyValue = any(values);\n float floatedReducedAnyValue = float(reducedAnyValue);\n anyValue = float(anyValue >= 1.0 || floatedReducedAnyValue >= 1.0);\n ",p="bvec4");let d="";a%n>0&&(d=`\n if (inIdx < 0 || inIdx >= ${a}) {\n return initializationValue;\n }\n `),this.userCode=`\n const float initializationValue = ${i};\n const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0);\n\n float getValue(int batch, int inIdx) {\n ${d}\n return getX(batch, inIdx);\n }\n\n void main() {\n ivec2 coords = getOutputCoords();\n int batch = coords[0];\n int outIdx = coords[1];\n 
int inOffset = outIdx * ${n};\n\n vec4 minMaxValue = vec4(${i});\n float prodValue = 1.0;\n float sumValue = 0.0;\n float allValue = 1.0;\n float anyValue = 0.0;\n\n for (int i = 0; i < ${u}; i += 4) {\n int inIdx = inOffset + i;\n ${p} values = ${p}(\n getValue(batch, inIdx),\n getValue(batch, inIdx + 1),\n getValue(batch, inIdx + 2),\n getValue(batch, inIdx + 3)\n );\n\n ${h}\n }\n\n int inIdx = inOffset + ${u};\n if (${1===c}) {\n ${p} values = ${p}(\n getValue(batch, inIdx),\n initializationValue,\n initializationValue,\n initializationValue\n );\n\n ${h}\n } else if (${2===c}) {\n ${p} values = ${p}(\n getValue(batch, inIdx),\n getValue(batch, inIdx + 1),\n initializationValue,\n initializationValue\n );\n\n ${h}\n } else if (${3===c}) {\n ${p} values = ${p}(\n getValue(batch, inIdx),\n getValue(batch, inIdx + 1),\n getValue(batch, inIdx + 2),\n initializationValue\n );\n\n ${h}\n }\n setOutput(${l});\n }\n `}}function WS(e,t,n,s){const a=function(e){const t=[];for(;0===t.length||1!==t[t.length-1].outSize;){const n=t.length?t[t.length-1].outSize:e[1],s=iu(n);t.push({inSize:n,windowSize:s,outSize:Math.ceil(n/s)})}return t}(e.shape);let r=e;for(let i=0;i<a.length;i++){const{inSize:o,windowSize:l,outSize:u}=a[i];let c,h;c="mean"===n?0===i?new PS({windowSize:l,inSize:o,batchSize:e.shape[0],outSize:u},o):new PS({windowSize:l,inSize:o,batchSize:e.shape[0],outSize:u}):new BS({windowSize:l,inSize:o,batchSize:e.shape[0],outSize:u},n),h=r,r=s.runWebGLProgram(c,[r],t),h.dataId!==e.dataId&&s.disposeIntermediateTensorInfo(h)}return r}class VS{constructor(e,t){this.variableNames=["A"];const n=new Array(e.length);for(let s=0;s<n.length;s++)n[s]=e[t[s]];this.outputShape=n,this.rank=n.length;const s=vC(this.rank),a=function(e){const t=e.length;if(t>6)throw Error(`Transpose for rank ${t} is not yet supported`);const n=["resRC.x","resRC.y","resRC.z","resRC.w","resRC.u","resRC.v"],s=new Array(t);for(let t=0;t<e.length;t++)s[e[t]]=n[t];return s.join()}(t);this.userCode=`\n void main() {\n ${s} resRC = getOutputCoords();\n setOutput(getA(${a}));\n }\n `}}class US{constructor(e,t){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0;const n=new Array(e.length);for(let s=0;s<n.length;s++)n[s]=e[t[s]];if(this.outputShape=n,this.rank=n.length,this.rank>6)throw Error(`Packed transpose for rank ${this.rank} is not yet supported.`);const s=vC(this.rank),a=tS("rc",this.rank),r=new Array(this.rank);for(let e=0;e<t.length;e++)r[t[e]]=a[e];const i=`vec2(${r.slice(-2).join()})`,o=`++${a[this.rank-1]} < ${n[this.rank-1]}`,l=`getChannel(getA(${r.join()}), ${i})`;this.userCode=`\n void main() {\n ${s} rc = getOutputCoords();\n vec4 result = vec4(0.);\n result[0] = ${l};\n if(${o}) {\n result[1] = ${l};\n }\n --${a[this.rank-1]};\n if(++${a[this.rank-2]} < ${n[this.rank-2]}) {\n result[2] = ${l};\n if(${o}) {\n result[3] = ${l};\n }\n }\n setOutput(result);\n }\n `}}function GS(e,t,n){const s=ue().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new US(e.shape,t):new VS(e.shape,t);return n.runWebGLProgram(s,[e],e.dtype)}function HS(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{axis:r,keepDims:i}=s;return function(e,t,n,s){const a=t,r=e.shape.length,i=B(a,e.shape);let o=i;const l=Zi(o,r),u=null!=l;let c=e;u&&(c=GS(e,l,s),o=eo(o.length,r)),Ji("sum",o,r);const[h,p]=Xi(c.shape,o);let d=h;n&&(d=Yi(h,i));const f=D(p),m=LS({inputs:{x:c},attrs:{shape:[D(e.shape)/f,f]},backend:s}),g=WS(m,Us(e.dtype),"sum",s),y=LS({inputs:{x:g},attrs:{shape:d},backend:s});return 
s.disposeIntermediateTensorInfo(m),s.disposeIntermediateTensorInfo(g),u&&s.disposeIntermediateTensorInfo(c),y}(a,r,i,n)}const jS={kernelName:Ln,backendName:"webgl",kernelFunc:HS};function KS(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{perm:r}=s,i=n,o=a.shape.length,l=new Array(o);for(let e=0;e<l.length;e++)l[e]=a.shape[r[e]];let u;if(i.shouldExecuteOnCPU([a])){const e=i.texData.get(a.dataId).values,t=QC(e,a.shape,a.dtype,r,l);u=i.makeTensorInfo(l,a.dtype),i.texData.get(u.dataId).values=t}else u=GS(a,r,i);return u}const qS={kernelName:Jn,backendName:"webgl",kernelFunc:KS};function XS({a:e,b:t,transposeA:n,transposeB:s,backend:a,bias:r=null,preluActivationWeights:i=null,leakyreluAlpha:o=0,activation:l=null}){const u=e.shape.length,c=t.shape.length,h=n?e.shape[u-2]:e.shape[u-1],p=s?t.shape[c-1]:t.shape[c-2],d=n?e.shape[u-1]:e.shape[u-2],f=s?t.shape[c-2]:t.shape[c-1],m=e.shape.slice(0,-2),g=t.shape.slice(0,-2),y=D(m),b=D(g);E(u>=2&&c>=2&&(y===b||1===y||1===b),(()=>`Error in matMul: the input batch dimensions must either be the same or at least one input batch dimension must be 1. Got input batch dimensions of (${m}) and (${g}).`));const x=(y>b?e.shape.slice(0,-2):t.shape.slice(0,-2)).concat([d,f]);E(h===p,(()=>`Error in matMul: inner shapes (${h}) and (${p}) of Tensors with shapes ${e.shape} and ${t.shape} and transposeA=${n} and transposeB=${s} must match.`));const w=n?[y,h,d]:[y,d,h],k=s?[b,f,p]:[b,p,f],v=LS({inputs:{x:e},backend:a,attrs:{shape:w}}),N=LS({inputs:{x:t},backend:a,attrs:{shape:k}}),I=[v,N],$=Math.max(y,b),C=n?v.shape[1]:v.shape[2],S=null!=r,T=null!=i,A="leakyrelu"===l,R=null!=l?RS(l,!0):null;let F;if((1===d||1===f)&&C>1e3&&!1===(S||T||A||null!=R)){let e=v,t=N;n&&(e=KS({inputs:{x:v},backend:a,attrs:{perm:[0,2,1]}}),I.push(e)),s&&(t=KS({inputs:{x:N},backend:a,attrs:{perm:[0,2,1]}}),I.push(t));const r=1===f;let i=e;1!==f&&(i=LS({inputs:{x:e},backend:a,attrs:{shape:[$,C,1]}}),I.push(i));const o=1===f?2:1;let l=t;r&&(l=LS({inputs:{x:t},backend:a,attrs:{shape:[$,1,C]}}),I.push(l));const u=OS({inputs:{a:i,b:l},backend:a});F=HS({inputs:{x:u},backend:a,attrs:{axis:o,keepDims:!0}}),I.push(u)}else{const l=Vs(e.dtype,t.dtype),u=new FS(w,k,[$,d,f],n,s,S,R,T,A),c=[v,N];if(null!=r&&c.push(r),T&&c.push(i),A){const e=a.makeTensorInfo([],"float32",gs(o,"float32"));c.push(e),I.push(e)}F=a.runWebGLProgram(u,c,l)}const _=LS({inputs:{x:F},backend:a,attrs:{shape:x}});I.push(F);for(const e of I)a.disposeIntermediateTensorInfo(e);return _}const YS={kernelName:rs,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{a,b:r,bias:i,preluActivationWeights:o}=t,{transposeA:l,transposeB:u,activation:c,leakyreluAlpha:h}=s;return XS({a,b:r,transposeA:l,transposeB:u,backend:n,bias:i,preluActivationWeights:o,leakyreluAlpha:h,activation:c})}},JS="return abs(x);",ZS={kernelName:fe,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n}=e,{x:s}=t;if(n.shouldExecuteOnCPU([s])&&"complex64"!==s.dtype){const e=n.texData.get(s.dataId),t=KC(e.values);return n.makeTensorInfo(s.shape,s.dtype,t)}let a;return a=ue().getBool("WEBGL_PACK_UNARY_OPERATIONS")?new pS(s.shape,JS):new uS(s.shape,JS),n.runWebGLProgram(a,[s],s.dtype)}},QS=ES({opSnippet:"if (isnan(x)) return x;\n if (abs(x) > 1.) 
{\n return NAN;\n }\n return acos(x);\n"}),eT={kernelName:me,backendName:"webgl",kernelFunc:QS},tT=ES({opSnippet:"if (isnan(x)) return x;\n if (x < 1.0) return NAN;\nreturn log(x + sqrt(x * x - 1.0));"}),nT={kernelName:ge,backendName:"webgl",kernelFunc:tT},sT="return a + b;",aT=AS({opSnippet:sT,packedOpSnippet:sT,supportsComplex:!0,cpuKernelImpl:CC}),rT={kernelName:ye,backendName:"webgl",kernelFunc:aT};class iT{constructor(e,t){this.outputShape=[],this.outputShape=e,this.variableNames=t.map(((e,t)=>`T${t}`));const n=[];this.variableNames.forEach((e=>{n.push(`float v${e} = get${e}AtOutCoords();`)}));const s=this.variableNames.map((e=>`v${e}`)).join(" + ");this.userCode=`\n void main() {\n ${n.join("\n ")}\n\n float result = ${s};\n setOutput(result);\n }\n `}}class oT{constructor(e,t){this.outputShape=[],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e,this.variableNames=t.map(((e,t)=>`T${t}`));const n=[];this.variableNames.forEach((e=>{n.push(`vec4 v${e} = get${e}AtOutCoords();`)}));const s=this.variableNames.map((e=>`v${e}`)).join(" + ");this.userCode=`\n void main() {\n ${n.join("\n ")}\n\n vec4 result = ${s};\n setOutput(result);\n }\n `}}const lT={kernelName:be,backendName:"webgl",kernelFunc:function e(t){const{inputs:n,backend:s}=t,a=n;if(1===a.length)return xS({inputs:{x:a[0]},backend:s});if(a.length>ue().get("WEBGL_MAX_TEXTURES_IN_SHADER")){const t=Math.floor(a.length/2),n=e({inputs:a.slice(0,t),backend:s}),r=e({inputs:a.slice(t),backend:s});return e({inputs:[n,r],backend:s})}const r=a.map((e=>e.dtype)).reduce(((e,t)=>Vs(e,t))),i=a.map((e=>e.shape)),o=ue().getBool("WEBGL_PACK")?new oT(a[0].shape,i):new iT(a[0].shape,i);return s.runWebGLProgram(o,a,r)}},uT={kernelName:xe,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{axis:r,keepDims:i}=s,o=a.shape.length,l=B(r,a.shape);let u=l;const c=Zi(u,o);let h=a;null!=c&&(h=KS({inputs:{x:a},backend:n,attrs:{perm:c}}),u=eo(u.length,o)),Ji("all",u,o);const[p,d]=Xi(h.shape,u),f=LS({inputs:{x:h},backend:n,attrs:{shape:[-1,D(d)]}}),m=WS(f,f.dtype,"all",n);let g;return g=LS(i?{inputs:{x:m},backend:n,attrs:{shape:Yi(p,l)}}:{inputs:{x:m},backend:n,attrs:{shape:p}}),n.disposeIntermediateTensorInfo(f),n.disposeIntermediateTensorInfo(m),null!=c&&n.disposeIntermediateTensorInfo(h),g}},cT={kernelName:we,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{axis:r,keepDims:i}=s,o=a.shape.length,l=B(r,a.shape);let u=l;const c=Zi(u,o);let h=a;null!=c&&(h=KS({inputs:{x:a},backend:n,attrs:{perm:c}}),u=eo(u.length,o)),Ji("any",u,o);const[p,d]=Xi(h.shape,u),f=LS({inputs:{x:h},backend:n,attrs:{shape:[-1,D(d)]}}),m=WS(f,f.dtype,"any",n);let g;return g=LS(i?{inputs:{x:m},backend:n,attrs:{shape:Yi(p,l)}}:{inputs:{x:m},backend:n,attrs:{shape:p}}),n.disposeIntermediateTensorInfo(f),n.disposeIntermediateTensorInfo(m),null!=c&&n.disposeIntermediateTensorInfo(h),g}};class hT{constructor(e,t,n){this.variableNames=["A"];const{windowSize:s,batchSize:a,outSize:r}=e;n||this.variableNames.push("bestIndicesA"),this.outputShape=[a,r];const i="max"===t?">":"<",o=n?"inOffset + i;":"round(getBestIndicesA(batch, inOffset + i));";this.userCode=`\n void main() {\n ivec2 coords = getOutputCoords();\n int batch = coords[0];\n int outIdx = coords[1];\n int inOffset = outIdx * ${s};\n\n int bestIndex = inOffset;\n float bestValue = getA(batch, bestIndex);\n\n for (int i = 0; i < ${s}; i++) {\n int inIdx = ${o};\n float candidate = getA(batch, inIdx);\n if (candidate ${i} bestValue) {\n bestValue = 
candidate;\n bestIndex = inIdx;\n }\n }\n setOutput(float(bestIndex));\n }\n `}}class pT{constructor(e,t,n,s){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,E(e.length>2,(()=>`Packed arg${n.charAt(0).toUpperCase()+n.slice(1)} supports only inputs with rank above 2.`));const a=e[e.length-1],r=Math.ceil(a/t);this.outputShape=e.slice(0,-1),r>1&&this.outputShape.push(r),s||this.variableNames.push("bestIndicesA");const i=this.outputShape,o=i.length,l=vC(o),u=nS("coords",o);let c,h;if(1===r){h=o+1;const e=vC(h);c=`\n ${e} sourceLocR = ${e}(${u.join()}, 0);\n ++${u[o-1]};\n ${e} sourceLocG = ${e}(${u.join()}, 0);\n ++${u[o-2]};\n ${e} sourceLocA = ${e}(${u.join()}, 0);\n --${u[o-1]};\n ${e} sourceLocB = ${e}(${u.join()}, 0);\n --${u[o-2]};`}else h=o,c=`\n ${l} sourceLocR = coords;\n ++${u[o-1]};\n ${l} sourceLocG = coords;\n ++${u[o-2]};\n ${l} sourceLocA = coords;\n --${u[o-1]};\n ${l} sourceLocB = coords;\n --${u[o-2]};`;const p=["x","y","z","w","u","v"].slice(0,h),d="."+p[h-1],f=p.map((e=>"int "+e)),m=nS("sourceLocR",h-1).concat("inIdx.r"),g=nS("sourceLocG",h-1).concat("inIdx.g"),y=nS("sourceLocB",h-1).concat("inIdx.b"),b=nS("sourceLocA",h-1).concat("inIdx.a"),x="max"===n?"greaterThan":"lessThan",w=s?"":`\n inIdx = round(vec4(getBestIndicesAChannel(${m.join()}),\n getBestIndicesAChannel(${g.join()}),\n getBestIndicesAChannel(${y.join()}),\n getBestIndicesAChannel(${b.join()})));`,k=`vec4(\n getAChannel(${m.join()}),\n hasNextCol ? getAChannel(${g.join()}) : 0.,\n hasNextRow ? getAChannel(${y.join()}) : 0.,\n hasNextRow && hasNextCol ? getAChannel(${b.join()}) : 0.)`,v=s?"":`\n float getBestIndicesAChannel(${f.join()}) {\n return getChannel(getBestIndicesA(${p.join()}),\n vec2(${p.slice(-2).join()}));\n }`;this.userCode=`\n float getAChannel(${f.join()}) {\n return getChannel(getA(${p.join()}),\n vec2(${p.slice(-2).join()}));\n }\n ${v}\n void main() {\n ${l} coords = getOutputCoords();\n bool hasNextCol = ${u[o-1]} < ${i[o-1]-1};\n bool hasNextRow = ${u[o-2]} < ${i[o-2]-1};\n ${c}\n ivec4 srcIdx = ivec4(sourceLocR${d}, sourceLocG${d},\n sourceLocB${d}, sourceLocA${d}) * ${t};\n ivec4 inIdx = srcIdx;\n vec4 bestIndex = vec4(inIdx);\n vec4 bestValue = ${k};\n\n for (int i = 0; i < ${t}; i++) {\n inIdx = srcIdx;\n ${w}\n vec4 candidate = ${k};\n bvec4 nan = isnan(candidate);\n bvec4 replace = bvec4(\n vec4(${x}(candidate, bestValue)) * (vec4(1.0) - vec4(nan)));\n\n bestValue = vec4(replace.x ? candidate.x : bestValue.x,\n replace.y ? candidate.y : bestValue.y,\n replace.z ? candidate.z : bestValue.z,\n replace.w ? 
candidate.w : bestValue.w);\n bestIndex = mix(bestIndex, vec4(inIdx), vec4(replace));\n srcIdx++;\n }\n setOutput(bestIndex);\n }\n `}}function dT(e,t,n,s=null){let a=t.shape[0],r=t.shape[1];null!=s&&(a=s.shape[0],r=s.shape[1]);const i=iu(r),o={windowSize:i,inSize:r,batchSize:a,outSize:Math.ceil(r/i)},l=new hT(o,n,null==s),u=[t];null!=s&&u.push(s);const c=e.runWebGLProgram(l,u,"int32");if(1===c.shape[1])return c;const h=dT(e,t,n,c);return e.disposeIntermediateTensorInfo(c),h}function fT(e,t,n,s=null){const a=null!=s?s.shape:t.shape,r=iu(a[a.length-1]),i=new pT(a,r,n,null==s),o=null==s?[t]:[t,s],l=e.runWebGLProgram(i,o,"int32");if(l.shape.length===t.shape.length){const s=fT(e,t,n,l);return e.disposeIntermediateTensorInfo(l),s}return l}function mT(e,t,n,s){const a=[n];if(Ji("arg"+s.charAt(0).toUpperCase()+s.slice(1),a,t.shape.length),!ue().getBool("WEBGL_PACK_REDUCE")||t.shape.length<=2){const n=[],[r,i]=Xi(t.shape,a),o=D(i),l=LS({inputs:{x:t},backend:e,attrs:{shape:[-1,o]}});n.push(l);const u=dT(e,l,s);n.push(u);const c=LS({inputs:{x:u},backend:e,attrs:{shape:r}});return n.forEach((t=>e.disposeIntermediateTensorInfo(t))),c}return fT(e,t,s)}const gT={kernelName:ke,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{axis:r}=s;let i=B(r,a.shape);const o=Zi(i,a.shape.length);let l=a;const u=[];null!=o&&(l=KS({inputs:{x:a},backend:n,attrs:{perm:o}}),u.push(l),i=eo(i.length,l.shape.length)),Ji("argMax",[i[0]],l.shape.length);const c=mT(n,l,i[0],"max");return u.forEach((e=>n.disposeIntermediateTensorInfo(e))),c}},yT={kernelName:ve,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{axis:r}=s;let i=B(r,a.shape);const o=Zi(i,a.shape.length);let l=a;const u=[];null!=o&&(l=KS({inputs:{x:a},backend:n,attrs:{perm:o}}),u.push(l),i=eo(i.length,l.shape.length)),Ji("argMin",[i[0]],l.shape.length);const c=mT(n,l,i[0],"min");return u.forEach((e=>n.disposeIntermediateTensorInfo(e))),c}},bT=ES({opSnippet:"if (isnan(x)) return x;\n if (abs(x) > 1.) {\n return NAN;\n }\n return asin(x);\n"}),xT={kernelName:Ne,backendName:"webgl",kernelFunc:bT},wT=ES({opSnippet:"if (isnan(x)) return x;return log(x + sqrt(x * x + 1.0));"}),kT={kernelName:Ie,backendName:"webgl",kernelFunc:wT},vT=ES({opSnippet:"if (isnan(x)) return x;\n return atan(x);\n"}),NT={kernelName:$e,backendName:"webgl",kernelFunc:vT},IT=AS({opSnippet:"\n if (isnan(a)) return a;\n if (isnan(b)) return b;\n\n return atan(a, b);\n",packedOpSnippet:"\n vec4 result = atan(a, b);\n vec4 isNaN = min(vec4(isnan(a)) + vec4(isnan(b)), vec4(1.0));\n \n result.r = isNaN.r > 0. ? NAN : result.r;\n result.g = isNaN.g > 0. ? NAN : result.g;\n result.b = isNaN.b > 0. ? NAN : result.b;\n result.a = isNaN.a > 0. ? 
NAN : result.a;\n\n return result;\n"}),$T={kernelName:Se,backendName:"webgl",kernelFunc:IT},CT=ES({opSnippet:"if (isnan(x)) return x;\n if ((x < -1.0) || (x > 1.0)) return NAN;\nreturn (log(1.0 + x) - log(1.0 - x)) / 2.0;"}),ST={kernelName:Ce,backendName:"webgl",kernelFunc:CT};class TT{constructor(e,t,n,s=!1,a=!1){if(this.variableNames=["x"],"avg"===t&&n)throw new Error("Cannot compute positions for average pool.");const r=e.filterWidth,i=e.strideHeight,o=e.strideWidth,l=e.dilationHeight,u=e.dilationWidth,c=e.effectiveFilterHeight,h=e.effectiveFilterWidth,p=e.padInfo.top,d=e.padInfo.left;this.outputShape=e.outShape;const f="avg"===t,m=`((batch * ${e.inHeight} + xR) * ${e.inWidth} + xC) * ${e.inChannels} + d`,g=`(xR * ${e.inWidth} + xC) * ${e.inChannels} + d`;let y="0.0";if(f||(y="-1.0 / 1e-20"),n){const t=">=";return void(this.userCode=`\n const ivec2 strides = ivec2(${i}, ${o});\n const ivec2 pads = ivec2(${p}, ${d});\n\n void main() {\n ivec4 coords = getOutputCoords();\n int batch = coords[0];\n int d = coords[3];\n\n ivec2 xRCCorner = coords.yz * strides - pads;\n int xRCorner = xRCCorner.x;\n int xCCorner = xRCCorner.y;\n\n // max/min x(?, ?, d) to get y(yR, yC, d).\n // ? = to be determined\n float minMaxValue = 0.0;\n float minMaxValueFound = 0.0;\n int minMaxPosition = 0;\n float avgValue = 0.0;\n\n for (int wR = 0; wR < ${c};\n wR += ${l}) {\n int xR = xRCorner + wR;\n\n if (xR < 0 || xR >= ${e.inHeight}) {\n continue;\n }\n\n for (int wC = 0; wC < ${h};\n wC += ${u}) {\n int xC = xCCorner + wC;\n\n if (xC < 0 || xC >= ${e.inWidth}) {\n continue;\n }\n\n float value = getX(batch, xR, xC, d);\n\n // If a min / max value has already been found, use it. If not,\n // use the current value.\n float currMinMaxValue = mix(\n value, minMaxValue, minMaxValueFound);\n if (value ${t} currMinMaxValue) {\n minMaxValue = value;\n minMaxValueFound = 1.0;\n minMaxPosition = ${s?a?m:g:`wR * ${h} + wC`};\n }\n }\n }\n setOutput(float(minMaxPosition));\n }\n `)}let b=`${t}(${t}(${t}(minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])`;"avg"===t&&(b="avgValue / count");const x=4*Math.floor(r/4),w=r%4,k=`\n if (${f}) {\n avgValue += dot(values, ones);\n } else {\n minMaxValue = max(values, minMaxValue);\n }\n `;this.userCode=`\n const ivec2 strides = ivec2(${i}, ${o});\n const ivec2 pads = ivec2(${p}, ${d});\n const float initializationValue = ${y};\n const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0);\n\n float count = 0.0;\n\n float getValue(int batch, int xR, int xC, int d) {\n if (xC < 0 || xC >= ${e.inWidth}) {\n return initializationValue;\n }\n count += 1.0;\n return getX(batch, xR, xC, d);\n }\n\n void main() {\n ivec4 coords = getOutputCoords();\n int batch = coords[0];\n int d = coords[3];\n\n ivec2 xRCCorner = coords.yz * strides - pads;\n int xRCorner = xRCCorner.x;\n int xCCorner = xRCCorner.y;\n\n // max/min x(?, ?, d) to get y(yR, yC, d).\n // ? 
= to be determined\n vec4 minMaxValue = vec4(${y});\n float avgValue = 0.0;\n count = 0.0;\n\n for (int wR = 0; wR < ${c};\n wR += ${l}) {\n int xR = xRCorner + wR;\n\n if (xR < 0 || xR >= ${e.inHeight}) {\n continue;\n }\n\n for (int wC = 0; wC < ${x}; wC += 4) {\n int xC = xCCorner + wC * ${u};\n\n vec4 values = vec4(\n getValue(batch, xR, xC, d),\n getValue(batch, xR, xC + ${u}, d),\n getValue(batch, xR, xC + 2 * ${u}, d),\n getValue(batch, xR, xC + 3 * ${u}, d)\n );\n\n ${k}\n }\n\n int xC = xCCorner + ${x};\n if (${1===w}) {\n vec4 values = vec4(\n getValue(batch, xR, xC, d),\n initializationValue,\n initializationValue,\n initializationValue\n );\n\n ${k}\n } else if (${2===w}) {\n vec4 values = vec4(\n getValue(batch, xR, xC, d),\n getValue(batch, xR, xC + ${u}, d),\n initializationValue,\n initializationValue\n );\n\n ${k}\n } else if (${3===w}) {\n vec4 values = vec4(\n getValue(batch, xR, xC, d),\n getValue(batch, xR, xC + ${u}, d),\n getValue(batch, xR, xC + 2 * ${u}, d),\n initializationValue\n );\n\n ${k}\n }\n }\n setOutput(${b});\n }\n `}}class ET{constructor(e,t,n,s=!1,a=!1){if(this.variableNames=["x"],"avg"===t&&n)throw new Error("Cannot compute positions for average pool.");const r=e.filterWidth,i=e.strideDepth,o=e.strideHeight,l=e.strideWidth,u=e.dilationDepth,c=e.dilationHeight,h=e.dilationWidth,p=e.effectiveFilterDepth,d=e.effectiveFilterHeight,f=e.effectiveFilterWidth,m=e.padInfo.front,g=e.padInfo.top,y=e.padInfo.left;this.outputShape=e.outShape;const b="avg"===t;let x="0.0";if(b||(x="-1.0 / 1e-20"),n){const t=">=";return void(this.userCode=`\n const ivec3 strides =\n ivec3(${i}, ${o}, ${l});\n const ivec3 pads = ivec3(${m}, ${g}, ${y});\n\n void main() {\n ivec5 coords = getOutputCoords();\n int batch = coords.x;\n int ch = coords.u;\n\n ivec3 xCorner = ivec3(coords.y, coords.z, coords.w) * strides - pads;\n int xDCorner = xCorner.x;\n int xRCorner = xCorner.y;\n int xCCorner = xCorner.z;\n\n // max/min x(?, ?, ?, ch) to get y(yD, yR, yC, ch).\n // ? = to be determined\n float minMaxValue = 0.0;\n float minMaxValueFound = 0.0;\n int minMaxPosition = 0;\n\n for (int wD = 0; wD < ${p};\n wD += ${u}) {\n int xD = xDCorner + wD;\n\n if (xD < 0 || xD >= ${e.inDepth}) {\n continue;\n }\n\n for (int wR = 0; wR < ${d};\n wR += ${c}) {\n int xR = xRCorner + wR;\n\n if (xR < 0 || xR >= ${e.inHeight}) {\n continue;\n }\n\n for (int wC = 0; wC < ${f};\n wC += ${h}) {\n int xC = xCCorner + wC;\n\n if (xC < 0 || xC >= ${e.inWidth}) {\n continue;\n }\n\n float value = getX(batch, xD, xR, xC, ch);\n\n // If a min / max value has already been found, use it. 
If not,\n // use the current value.\n float currMinMaxValue = mix(\n value, minMaxValue, minMaxValueFound);\n if (value ${t} currMinMaxValue) {\n minMaxValue = value;\n minMaxValueFound = 1.0;\n minMaxPosition = ${s?a?`(((batch * ${e.inDepth} + xD) * ${e.inHeight} + xR) * ${e.inWidth} + xC) * ${e.inChannels} + ch`:`((xD * ${e.inHeight} + xR) * ${e.inWidth} + xC) * ${e.inChannels} + ch`:`wD * ${d} * ${f} +\n wR * ${f} + wC`};\n }\n }\n }\n }\n setOutput(float(minMaxPosition));\n }\n `)}let w=`${t}(${t}(${t}(minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])`;"avg"===t&&(w="avgValue / count");const k=4*Math.floor(r/4),v=r%4,N=`\n if (${b}) {\n avgValue += dot(values, ones);\n } else {\n minMaxValue = max(values, minMaxValue);\n }\n `;this.userCode=`\n const ivec3 strides =\n ivec3(${i}, ${o}, ${l});\n const ivec3 pads = ivec3(${m}, ${g}, ${y});\n const float initializationValue = ${x};\n const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0);\n\n float count = 0.0;\n\n float getValue(int batch, int xD, int xR, int xC, int ch) {\n if (xC < 0 || xC >= ${e.inWidth}) {\n return initializationValue;\n }\n count += 1.0;\n return getX(batch, xD, xR, xC, ch);\n }\n\n void main() {\n ivec5 coords = getOutputCoords();\n int batch = coords.x;\n int ch = coords.u;\n\n ivec3 xCorner = ivec3(coords.y, coords.z, coords.w) * strides - pads;\n int xDCorner = xCorner.x;\n int xRCorner = xCorner.y;\n int xCCorner = xCorner.z;\n\n // max/min x(?, ?, ?, d) to get y(yD, yR, yC, ch).\n // ? = to be determined\n vec4 minMaxValue = vec4(${x});\n float avgValue = 0.0;\n count = 0.0;\n\n for (int wD = 0; wD < ${p};\n wD += ${u}) {\n int xD = xDCorner + wD;\n\n if (xD < 0 || xD >= ${e.inDepth}) {\n continue;\n }\n\n for (int wR = 0; wR < ${d};\n wR += ${c}) {\n int xR = xRCorner + wR;\n\n if (xR < 0 || xR >= ${e.inHeight}) {\n continue;\n }\n\n for (int wC = 0; wC < ${k}; wC += 4) {\n int xC = xCCorner + wC * ${h};\n\n vec4 values = vec4(\n getValue(batch, xD, xR, xC, ch),\n getValue(batch, xD, xR, xC + ${h}, ch),\n getValue(batch, xD, xR, xC + 2 * ${h}, ch),\n getValue(batch, xD, xR, xC + 3 * ${h}, ch)\n );\n\n ${N}\n }\n\n int xC = xCCorner + ${k};\n if (${1===v}) {\n vec4 values = vec4(\n getValue(batch, xD, xR, xC, ch),\n initializationValue,\n initializationValue,\n initializationValue\n );\n\n ${N}\n } else if (${2===v}) {\n vec4 values = vec4(\n getValue(batch, xD, xR, xC, ch),\n getValue(batch, xD, xR, xC + ${h}, ch),\n initializationValue,\n initializationValue\n );\n\n ${N}\n } else if (${3===v}) {\n vec4 values = vec4(\n getValue(batch, xD, xR, xC, ch),\n getValue(batch, xD, xR, xC + ${h}, ch),\n getValue(batch, xD, xR, xC + 2 * ${h}, ch),\n initializationValue\n );\n\n ${N}\n }\n }\n setOutput(${w});\n }\n }\n `}}const AT={kernelName:Te,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t;K$(a,"avgPool");const{filterSize:r,strides:i,pad:o,dimRoundingMode:l}=s;E(Yr(i,1),(()=>`Error in avgPool: Either strides or dilations must be 1. 
Got strides ${i} and dilations '1'`));const u=Br(a.shape,r,i,1,o,l);if(1===u.filterWidth&&1===u.filterHeight&&_(u.inShape,u.outShape))return xS({inputs:{x:a},backend:n});const c=new TT(u,"avg",!1);return n.runWebGLProgram(c,[a],"float32")}},RT={kernelName:Ae,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{filterSize:r,strides:i,pad:o,dimRoundingMode:l,dataFormat:u}=s,c=Wr(a.shape,r,i,[1,1,1],o,l,u),h=new ET(c,"avg",!1);return n.runWebGLProgram(h,[a],"float32")}};class FT{constructor(e){this.variableNames=["dy"],this.outputShape=e.inShape;const t=e.filterHeight,n=e.filterWidth,s=e.strideHeight,a=e.strideWidth,r=e.dilationHeight,i=e.dilationWidth,o=e.effectiveFilterHeight,l=e.effectiveFilterWidth,u=o-1-e.padInfo.top,c=l-1-e.padInfo.left,h=1/(t*n);this.userCode=`\n const ivec2 pads = ivec2(${u}, ${c});\n const float avgMultiplier = float(${h});\n\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int d = coords[3];\n\n ivec2 dyRCCorner = coords.yz - pads;\n int dyRCorner = dyRCCorner.x;\n int dyCCorner = dyRCCorner.y;\n\n // Convolve dy(?, ?, d) with pos mask(:, :, d) to get dx(xR, xC, d).\n // ? = to be determined. : = across all values in that axis.\n float dotProd = 0.0;\n for (int wR = 0; wR < ${o};\n wR += ${r}) {\n float dyR = float(dyRCorner + wR) / ${s}.0;\n\n if (dyR < 0.0 || dyR >= ${e.outHeight}.0 || fract(dyR) > 0.0) {\n continue;\n }\n int idyR = int(dyR);\n\n for (int wC = 0; wC < ${l};\n wC+= ${i}) {\n float dyC = float(dyCCorner + wC) / ${a}.0;\n\n if (dyC < 0.0 || dyC >= ${e.outWidth}.0 ||\n fract(dyC) > 0.0) {\n continue;\n }\n int idyC = int(dyC);\n\n float dyValue = getDy(b, idyR, idyC, d);\n\n dotProd += dyValue * avgMultiplier;\n }\n }\n setOutput(dotProd);\n }\n `}}class DT{constructor(e){this.variableNames=["dy"],this.outputShape=e.inShape;const t=e.filterDepth,n=e.filterHeight,s=e.filterWidth,a=e.strideDepth,r=e.strideHeight,i=e.strideWidth,o=e.dilationDepth,l=e.dilationHeight,u=e.dilationWidth,c=e.effectiveFilterDepth,h=e.effectiveFilterHeight,p=e.effectiveFilterWidth,d=c-1-e.padInfo.front,f=h-1-e.padInfo.top,m=p-1-e.padInfo.left,g=1/(t*n*s);this.userCode=`\n const ivec3 pads = ivec3(${d}, ${f}, ${m});\n const float avgMultiplier = float(${g});\n\n void main() {\n ivec5 coords = getOutputCoords();\n int batch = coords.x;\n int ch = coords.u;\n\n ivec3 dyCorner = ivec3(coords.y, coords.z, coords.w) - pads;\n int dyDCorner = dyCorner.x;\n int dyRCorner = dyCorner.y;\n int dyCCorner = dyCorner.z;\n\n // Convolve dy(?, ?, ?, d) with pos mask(:, :, :, ch) to get\n // dx(xD, xR, xC, ch).\n // ? = to be determined. 
: = across all values in that axis.\n float dotProd = 0.0;\n\n for (int wD = 0; wD < ${c};\n wD += ${o}) {\n float dyD = float(dyDCorner + wD) / ${a}.0;\n\n if (dyD < 0.0 || dyD >= ${e.outDepth}.0 || fract(dyD) > 0.0) {\n continue;\n }\n int idyD = int(dyD);\n\n for (int wR = 0; wR < ${h};\n wR += ${l}) {\n float dyR = float(dyRCorner + wR) / ${r}.0;\n\n if (dyR < 0.0 || dyR >= ${e.outHeight}.0 ||\n fract(dyR) > 0.0) {\n continue;\n }\n int idyR = int(dyR);\n\n for (int wC = 0; wC < ${p};\n wC += ${u}) {\n float dyC = float(dyCCorner + wC) / ${i}.0;\n\n if (dyC < 0.0 || dyC >= ${e.outWidth}.0 ||\n fract(dyC) > 0.0) {\n continue;\n }\n int idyC = int(dyC);\n\n float dyValue = getDy(batch, idyD, idyR, idyC, ch);\n\n dotProd += dyValue * avgMultiplier;\n }\n }\n }\n setOutput(dotProd);\n }\n `}}const _T={kernelName:Re,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{dy:a,input:r}=t,i=r,{filterSize:o,strides:l,pad:u,dimRoundingMode:c}=s,h=Wr(i.shape,o,l,[1,1,1],u,c),p=new DT(h);return n.runWebGLProgram(p,[a],i.dtype)}},OT={kernelName:Ee,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{dy:a,input:r}=t,i=r;K$([a,r],"avgPoolGrad");const{filterSize:o,strides:l,pad:u}=s,c=Br(i.shape,o,l,1,u),h=new FT(c);return n.runWebGLProgram(h,[a],i.dtype)}},MT={kernelName:Fe,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{a,b:r}=t,{transposeA:i,transposeB:o}=s;return XS({a,b:r,transposeA:i,transposeB:o,backend:n})}};class LT{constructor(e,t,n,s,a,r){this.outputShape=[],this.variableNames=["x","mean","variance"],vi(e,t),vi(e,n);let i="0.0";null!=s&&(vi(e,s),this.variableNames.push("offset"),i="getOffsetAtOutCoords()");let o="1.0";null!=a&&(vi(e,a),this.variableNames.push("scale"),o="getScaleAtOutCoords()"),this.outputShape=e,this.userCode=`\n void main() {\n float x = getXAtOutCoords();\n float mean = getMeanAtOutCoords();\n float variance = getVarianceAtOutCoords();\n float offset = ${i};\n float scale = ${o};\n float inv = scale * inversesqrt(variance + float(${r}));\n setOutput(dot(vec3(x, -mean, offset), vec3(inv, inv, 1)));\n }\n `}}class zT{constructor(e,t,n,s,a,r){this.packedInputs=!0,this.packedOutput=!0,this.variableNames=["x","mean","variance"],vi(e,t),vi(e,n);let i="vec4(0.0)";null!=s&&(vi(e,s),this.variableNames.push("offset"),i="getOffsetAtOutCoords()");let o="vec4(1.0)";null!=a&&(vi(e,a),this.variableNames.push("scale"),o="getScaleAtOutCoords()"),this.outputShape=e,this.userCode=`\n void main() {\n vec4 offset = ${i};\n vec4 scale = ${o};\n\n vec4 x = getXAtOutCoords();\n vec4 mean = getMeanAtOutCoords();\n vec4 variance = getVarianceAtOutCoords();\n\n vec4 inv = scale * inversesqrt(variance + vec4(${r}));\n\n setOutput((x - mean) * inv + offset);\n }\n `}}const PT={kernelName:xt,backendName:"webgl",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{x:s,mean:a,variance:r,offset:i,scale:o}=e;E(a.shape.length===r.shape.length,(()=>"Batch normalization gradient requires mean and variance to have equal ranks.")),E(null==i||a.shape.length===i.shape.length,(()=>"Batch normalization gradient requires mean and offset to have equal ranks.")),E(null==o||a.shape.length===o.shape.length,(()=>"Batch normalization gradient requires mean and scale to have equal ranks."));let{varianceEpsilon:l}=n;null==l&&(l=.001);const u=[s,a,r];let c=null;null!=i&&(c=i.shape,u.push(i));let h=null;null!=o&&(h=o.shape,u.push(o));const p=ue().getBool("WEBGL_PACK_NORMALIZATION")?new zT(s.shape,a.shape,r.shape,c,h,l):new 
LT(s.shape,a.shape,r.shape,c,h,l);return t.runWebGLProgram(p,u,u[0].dtype)}};class BT{constructor(e){this.variableNames=["source"],this.outputShape=e,this.rank=e.length;const t=vC(this.rank),n=`uniform int start[${this.rank}];`,s=function(e){if(1===e)return"sourceLoc";if(e<=6)return WT.slice(0,e).map((e=>"sourceLoc."+e)).join(",");throw Error(`Slicing for rank ${e} is not yet supported`)}(this.rank);let a;a=`\n ${t} sourceLoc;\n ${t} coords = getOutputCoords();\n ${e.map(((e,t)=>`sourceLoc.${WT[t]} = start[${t}] + coords.${WT[t]};`)).join("\n")}\n `,this.userCode=`\n ${n}\n void main() {\n ${a}\n setOutput(getSource(${s}));\n }\n `}getCustomSetupFunc(e){if(e.length!==this.rank)throw Error(`The rank (${this.rank}) of the program must match the length of start (${e.length})`);return(t,n)=>{null==this.startLoc&&(this.startLoc=t.getUniformLocationNoThrow(n,"start"),null==this.startLoc)||t.gl.uniform1iv(this.startLoc,e)}}}const WT=["x","y","z","w","u","v"];class VT{constructor(e){this.variableNames=["source"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e,this.rank=e.length;const t=vC(this.rank),n=nS("coords",this.rank),s=nS("sourceLoc",this.rank),a=1===this.rank?"sourceLoc":`vec2(${s.slice(-2).join()})`,r=`getChannel(getSource(${s.join()}), ${a})`,i=`\n result.x = ${r};\n if (++${n[this.rank-1]} < ${e[this.rank-1]}) {\n ++${s[this.rank-1]};\n result.y = ${r};\n --${s[this.rank-1]};\n }\n `,o=1===this.rank?"":`\n --${n[this.rank-1]};\n if (++${n[this.rank-2]} < ${e[this.rank-2]}) {\n ++${s[this.rank-2]};\n result.z = ${r};\n if (++${n[this.rank-1]} < ${e[this.rank-1]}) {\n ++${s[this.rank-1]};\n result.w = ${r};\n }\n }\n `,l=this.rank<=4?`sourceLoc = coords +\n ${t}(${e.map(((e,t)=>`start[${t}]`)).join()});`:e.map(((e,t)=>`${s[t]} = ${n[t]} + start[${t}];`)).join("\n");this.userCode=`\n uniform int start[${this.rank}];\n void main() {\n ${t} coords = getOutputCoords();\n ${t} sourceLoc;\n ${l}\n vec4 result = vec4(0.);\n ${i}\n ${o}\n setOutput(result);\n }\n `}getCustomSetupFunc(e){if(e.length!==this.rank)throw Error(`The rank (${this.rank}) of the program must match the length of start (${e.length})`);return(t,n)=>{null==this.startLoc&&(this.startLoc=t.getUniformLocationNoThrow(n,"start"),null==this.startLoc)||t.gl.uniform1iv(this.startLoc,e)}}}function UT(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{begin:r,size:i}=s,[o,l]=xr(a,r,i);if(rr(a,o,l),0===D(l))return n.makeTensorInfo(l,a.dtype,[]);if(n.shouldExecuteOnCPU([a])||"string"===a.dtype){const e=n.texData.get(a.dataId),t=qC(e.values,o,l,a.shape,a.dtype);return n.makeTensorInfo(l,a.dtype,t)}const{isPacked:u}=n.texData.get(a.dataId),c=yr(a.shape,o,l);if(u||!c){const e=ue().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new VT(l):new BT(l),t=e.getCustomSetupFunc(o);return n.runWebGLProgram(e,[a],a.dtype,t)}return n.uploadToGPU(a.dataId),function(e,t,n,s){const a=s.texData.get(e.dataId),r=s.makeTensorInfo(n,e.dtype),i=s.texData.get(r.dataId);Object.assign(i,a),i.refCount=1,i.shape=n,i.dtype=e.dtype;let o=br(t,Z(e.shape));a.slice&&(o+=a.slice.flatOffset),i.slice={flatOffset:o,origDataId:a.slice&&a.slice.origDataId||e.dataId};const l=s.dataRefCount.get(i.slice.origDataId)||1;return s.dataRefCount.set(i.slice.origDataId,l+1),r}(a,o,l,n)}const GT={kernelName:An,backendName:"webgl",kernelFunc:UT},HT={kernelName:De,backendName:"webgl",kernelFunc:e=>{const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{blockShape:r,crops:i}=s;E(a.shape.length<=4,(()=>"batchToSpaceND for rank > 4 with a WebGL backend not implemented yet"));const 
o=r.reduce(((e,t)=>e*t)),l=lu(a.shape,r,o),u=uu(l.length,r.length),c=cu(a.shape,r,o),h=hu(i,r.length),p=pu(c,i,r.length),d=[],f=LS({inputs:{x:a},backend:n,attrs:{shape:l}}),m=KS({inputs:{x:f},backend:n,attrs:{perm:u}}),g=LS({inputs:{x:m},backend:n,attrs:{shape:c}}),y=UT({inputs:{x:g},backend:n,attrs:{begin:h,size:p}});return d.push(f),d.push(m),d.push(g),d.forEach((e=>n.disposeIntermediateTensorInfo(e))),y}},jT={kernelName:_e,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,weights:r}=t,{size:i}=s,o=n.readSync(a.dataId),l=n.readSync(r.dataId),u=SC(o,l,r.dtype,r.shape,i);return n.makeTensorInfo([i],r.dtype,u)}},KT=AS({opSnippet:"return float(a != b);",dtype:"bool"}),qT={kernelName:nn,backendName:"webgl",kernelFunc:KT};function XT(e){const{inputs:t,backend:n}=e,{input:s}=t;return xS({inputs:{x:n.texData.get(s.dataId).complexTensorInfos.real},backend:n})}const YT={kernelName:mn,backendName:"webgl",kernelFunc:XT},JT={kernelName:Oe,backendName:"webgl",kernelFunc:function e(t){const{inputs:n,backend:s,attrs:a}=t,{x:r}=n,{dtype:i}=a;if("complex64"===i){if("complex64"===r.dtype)return xS({inputs:{x:r},backend:s});const t=io(r.shape),n=e({inputs:{x:r},backend:s,attrs:{dtype:"float32"}}),a=kS({inputs:{real:n,imag:t},backend:s});return t.dispose(),s.disposeIntermediateTensorInfo(n),a}if("complex64"===r.dtype){const t=XT({inputs:{input:r},backend:s}),n=e({inputs:{x:t},backend:s,attrs:{dtype:i}});return s.disposeIntermediateTensorInfo(t),n}if(!G(r.dtype,i)){const e=xS({inputs:{x:r},backend:s});return{dataId:e.dataId,shape:e.shape,dtype:i}}if("int32"===i)return function(e,t){const n=new uS(e.shape,"return float(int(x));"),s=t.runWebGLProgram(n,[e],"int32");return{dataId:s.dataId,shape:s.shape,dtype:s.dtype}}(r,s);if("bool"===i){const e=s.makeTensorInfo([],"bool",V("bool",1)),t=KT({inputs:{a:r,b:e},backend:s});return s.disposeIntermediateTensorInfo(e),t}throw new Error(`Error in Cast: failed to cast ${r.dtype} to ${i}`)}},ZT="return ceil(x);",QT=ES({opSnippet:ZT,packedOpSnippet:ZT,cpuKernelImpl:EC}),eE={kernelName:Me,backendName:"webgl",kernelFunc:QT};class tE{constructor(e){this.variableNames=["A"],this.outputShape=e,this.userCode="\n uniform float minVal;\n uniform float maxVal;\n\n void main() {\n float value = getAAtOutCoords();\n if (isnan(value)) {\n setOutput(value);\n return;\n }\n\n setOutput(clamp(value, minVal, maxVal));\n }\n "}getCustomSetupFunc(e,t){return(n,s)=>{null==this.minLoc&&(this.minLoc=n.getUniformLocationNoThrow(s,"minVal"),this.maxLoc=n.getUniformLocationNoThrow(s,"maxVal")),n.gl.uniform1f(this.minLoc,e),n.gl.uniform1f(this.maxLoc,t)}}}class nE{constructor(e){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e,this.userCode="\n uniform float minVal;\n uniform float maxVal;\n\n void main() {\n vec4 value = getAAtOutCoords();\n\n if (any(isnan(value))) {\n setOutput(value);\n return;\n }\n\n setOutput(clamp(value, vec4(minVal), vec4(maxVal)));\n }\n "}getCustomSetupFunc(e,t){return(n,s)=>{null==this.minLoc&&(this.minLoc=n.getUniformLocationNoThrow(s,"minVal"),this.maxLoc=n.getUniformLocationNoThrow(s,"maxVal")),n.gl.uniform1f(this.minLoc,e),n.gl.uniform1f(this.maxLoc,t)}}}const sE={kernelName:Le,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{clipValueMin:r,clipValueMax:i}=s;let o;o=ue().getBool("WEBGL_PACK_CLIP")?new nE(a.shape):new tE(a.shape);const l=o.getCustomSetupFunc(r,i);return n.runWebGLProgram(o,[a],a.dtype,l)}};class 
aE{constructor(e){this.variableNames=["real","imag"],this.outputShape=e,this.userCode="\n void main() {\n float re = abs(getRealAtOutCoords());\n float im = abs(getImagAtOutCoords());\n float mx = max(re, im);\n\n // sadly the length function in glsl is not underflow-safe\n // (at least not on Intel GPUs). So the safe solution is\n // to ensure underflow-safety in all cases.\n setOutput(\n mx == 0.0 ? 0.0 : mx * length(vec2(1, min(re, im)/mx))\n );\n }\n "}}function rE(e,t){return{dataId:t.dataId,dtype:t.dtype,shape:e.shape}}const iE={kernelName:Pe,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n}=e,{x:s}=t,a=n.texData.get(s.dataId),r=new aE(s.shape),i=[rE(s,a.complexTensorInfos.real),rE(s,a.complexTensorInfos.imag)];return n.runWebGLProgram(r,i,i[0].dtype)}};class oE{constructor(e){this.outputShape=[],this.outputShape=au(e,1),this.variableNames=e.map(((e,t)=>`T${t}`));const t=new Array(e.length-1);t[0]=e[0][1];for(let n=1;n<t.length;n++)t[n]=t[n-1]+e[n][1];const n=[`if (yC < ${t[0]}) setOutput(getT0(yR, yC));`];for(let e=1;e<t.length;e++){const s=t[e-1];n.push(`else if (yC < ${t[e]}) setOutput(getT${e}(yR, yC-${s}));`)}const s=t.length,a=t[t.length-1];n.push(`else setOutput(getT${s}(yR, yC-${a}));`),this.userCode=`\n void main() {\n ivec2 coords = getOutputCoords();\n int yR = coords.x;\n int yC = coords.y;\n\n ${n.join("\n ")}\n }\n `}}class lE{constructor(e,t){this.packedInputs=!0,this.packedOutput=!0,this.outputShape=[],this.outputShape=au(e,t);const n=this.outputShape,s=n.length,a=vC(s),r=nS("coords",s),i=["x","y","z","w","u","v"].slice(0,s);this.variableNames=e.map(((e,t)=>`T${t}`));const o=new Array(e.length-1);o[0]=e[0][t];for(let n=1;n<o.length;n++)o[n]=o[n-1]+e[n][t];const l=i[t],u=i.slice(-2),c=i.join();let h=`if (${l} < ${o[0]}) {\n return getChannel(\n getT0(${c}), vec2(${u.join()}));\n }`;for(let e=1;e<o.length;e++){const t=o[e-1];h+=`\n if (${l} < ${o[e]} && ${l} >= ${o[e-1]}) {\n return getChannel(\n getT${e}(${uE(i,l,t)}),\n vec2(${uE(u,l,t)}));\n }`}const p=o.length,d=o[o.length-1];h+=`\n return getChannel(\n getT${p}(${uE(i,l,d)}),\n vec2(${uE(u,l,d)}));`,this.userCode=`\n float getValue(${i.map((e=>"int "+e))}) {\n ${h}\n }\n\n void main() {\n ${a} coords = getOutputCoords();\n vec4 result = vec4(getValue(${r}), 0., 0., 0.);\n\n ${r[s-1]} = ${r[s-1]} + 1;\n if (${r[s-1]} < ${n[s-1]}) {\n result.g = getValue(${r});\n }\n\n ${r[s-2]} = ${r[s-2]} + 1;\n if (${r[s-2]} < ${n[s-2]}) {\n result.a = getValue(${r});\n }\n\n ${r[s-1]} = ${r[s-1]} - 1;\n if (${r[s-2]} < ${n[s-2]} &&\n ${r[s-1]} < ${n[s-1]}) {\n result.b = getValue(${r});\n }\n setOutput(result);\n }\n `}}function uE(e,t,n){const s=e.indexOf(t);return e.map(((e,t)=>t===s?`${e} - ${n}`:e)).join()}function cE(e){const{inputs:t,backend:n}=e,{input:s}=t;return xS({inputs:{x:n.texData.get(s.dataId).complexTensorInfos.imag},backend:n})}const hE={kernelName:Ct,backendName:"webgl",kernelFunc:cE};function pE(e,t,n){const s=e[0].dtype;if("complex64"===s){const s=e.map((e=>XT({inputs:{input:e},backend:n}))),a=e.map((e=>cE({inputs:{input:e},backend:n}))),r=pE(s,t,n),i=pE(a,t,n),o=kS({inputs:{real:r,imag:i},backend:n});return s.forEach((e=>n.disposeIntermediateTensorInfo(e))),a.forEach((e=>n.disposeIntermediateTensorInfo(e))),n.disposeIntermediateTensorInfo(r),n.disposeIntermediateTensorInfo(i),o}if("string"===s){const{tensors2D:a,outShape:r}=dE(e,t,n),i=a.map((e=>({vals:n.readSync(e.dataId),shape:e.shape}))),o=1===a[0].shape[0],l=AC(i,r,s,o),u=au(e.map((e=>e.shape)),t),c=n.makeTensorInfo(u,s,l);return 
a.forEach((e=>n.disposeIntermediateTensorInfo(e))),c}if(e.length>ue().getNumber("WEBGL_MAX_TEXTURES_IN_SHADER")){const s=Math.floor(e.length/2),a=pE(e.slice(0,s),t,n),r=pE(e.slice(s),t,n),i=pE([a,r],t,n);return n.disposeIntermediateTensorInfo(a),n.disposeIntermediateTensorInfo(r),i}if(ue().getBool("WEBGL_PACK_ARRAY_OPERATIONS")&&e[0].shape.length>1){const a=new lE(e.map((e=>e.shape)),t);return n.runWebGLProgram(a,e,s)}const{tensors2D:a,outShape:r}=dE(e,t,n),i=new oE(a.map((e=>e.shape))),o=n.runWebGLProgram(i,a,s);a.forEach((e=>n.disposeIntermediateTensorInfo(e)));const l=LS({inputs:{x:o},attrs:{shape:r},backend:n});return n.disposeIntermediateTensorInfo(o),l}function dE(e,t,n){const s=au(e.map((e=>e.shape)),t);return{tensors2D:e.map((e=>LS({inputs:{x:e},attrs:{shape:[-1,D(e.shape.slice(t))]},backend:n}))),outShape:s}}function fE(e){const{inputs:t,backend:n,attrs:s}=e,{axis:a}=s,r=B(a,t[0].shape)[0],i=au(t.map((e=>e.shape)),r);if(0===D(i))return n.makeTensorInfo(i,t[0].dtype,[]);const o=t.filter((e=>D(e.shape)>0));return 1===o.length?xS({inputs:{x:o[0]},backend:n}):(su(o.map((e=>e.shape)),r),pE(o,r,n))}const mE={kernelName:Be,backendName:"webgl",kernelFunc:fE};class gE{constructor(e,t=!1,n=null,s=!1,a=!1){this.variableNames=["x","W"],this.outputShape=e.outShape;const r=e.padInfo.top,i=e.padInfo.left,o=e.strideHeight,l=e.strideWidth,u=e.dilationHeight,c=e.dilationWidth,h=e.filterHeight,p=e.filterWidth,d=4*Math.floor(e.inChannels/4),f=e.inChannels%4,m="channelsLast"===e.dataFormat,g=m?1:2,y=m?2:3,b=m?3:1;let x="",w="";n&&(x=s?`float activation(float a) {\n float b = getPreluActivationWeightsAtOutCoords();\n ${n}\n }`:a?`float activation(float a) {\n float b = getLeakyreluAlphaAtOutCoords();\n ${n}\n }`:`\n float activation(float x) {\n ${n}\n }\n `,w="result = activation(result);");const k=t?"result += getBiasAtOutCoords();":"";t&&this.variableNames.push("bias"),s&&this.variableNames.push("preluActivationWeights"),a&&this.variableNames.push("leakyreluAlpha"),this.userCode=`\n ${x}\n\n const ivec2 strides = ivec2(${o}, ${l});\n const ivec2 pads = ivec2(${r}, ${i});\n\n void main() {\n ivec4 coords = getOutputCoords();\n int batch = coords[0];\n int d2 = coords[${b}];\n\n ivec2 xRCCorner =\n ivec2(coords[${g}], coords[${y}]) * strides - pads;\n int xRCorner = xRCCorner.x;\n int xCCorner = xRCCorner.y;\n\n // Convolve x(?, ?, d1) with w(:, :, d1, d2) to get y(yR, yC, d2).\n // ? = to be determined. 
: = across all values in that axis.\n float dotProd = 0.0;\n for (int wR = 0; wR < ${h}; wR++) {\n int xR = xRCorner + wR * ${u};\n\n if (xR < 0 || xR >= ${e.inHeight}) {\n continue;\n }\n\n for (int wC = 0; wC < ${p}; wC++) {\n int xC = xCCorner + wC * ${c};\n\n if (xC < 0 || xC >= ${e.inWidth}) {\n continue;\n }\n\n for (int d1 = 0; d1 < ${d}; d1 += 4) {\n vec4 wValues = vec4(\n getW(wR, wC, d1, d2),\n getW(wR, wC, d1 + 1, d2),\n getW(wR, wC, d1 + 2, d2),\n getW(wR, wC, d1 + 3, d2)\n );\n\n if (${m}) {\n vec4 xValues = vec4(\n getX(batch, xR, xC, d1),\n getX(batch, xR, xC, d1 + 1),\n getX(batch, xR, xC, d1 + 2),\n getX(batch, xR, xC, d1 + 3)\n );\n dotProd += dot(xValues, wValues);\n } else {\n vec4 xValues = vec4(\n getX(batch, d1, xR, xC),\n getX(batch, d1 + 1, xR, xC),\n getX(batch, d1 + 2, xR, xC),\n getX(batch, d1 + 3, xR, xC)\n );\n dotProd += dot(xValues, wValues);\n }\n }\n\n if (${1===f}) {\n\n if (${m}) {\n dotProd +=\n getX(batch, xR, xC, ${d}) *\n getW(wR, wC, ${d}, d2);\n } else {\n dotProd +=\n getX(batch, ${d}, xR, xC) *\n getW(wR, wC, ${d}, d2);\n }\n\n } else if (${2===f}) {\n vec2 wValues = vec2(\n getW(wR, wC, ${d}, d2),\n getW(wR, wC, ${d} + 1, d2)\n );\n\n if (${m}) {\n vec2 xValues = vec2(\n getX(batch, xR, xC, ${d}),\n getX(batch, xR, xC, ${d} + 1)\n );\n dotProd += dot(xValues, wValues);\n } else {\n vec2 xValues = vec2(\n getX(batch, ${d}, xR, xC),\n getX(batch, ${d} + 1, xR, xC)\n );\n dotProd += dot(xValues, wValues);\n }\n\n } else if (${3===f}) {\n vec3 wValues = vec3(\n getW(wR, wC, ${d}, d2),\n getW(wR, wC, ${d} + 1, d2),\n getW(wR, wC, ${d} + 2, d2)\n );\n\n if (${m}) {\n vec3 xValues = vec3(\n getX(batch, xR, xC, ${d}),\n getX(batch, xR, xC, ${d} + 1),\n getX(batch, xR, xC, ${d} + 2)\n );\n dotProd += dot(xValues, wValues);\n } else {\n vec3 xValues = vec3(\n getX(batch, ${d}, xR, xC),\n getX(batch, ${d} + 1, xR, xC),\n getX(batch, ${d} + 2, xR, xC)\n );\n dotProd += dot(xValues, wValues);\n }\n\n }\n }\n }\n\n float result = dotProd;\n ${k}\n ${w}\n setOutput(result);\n }\n `}}class yE{constructor(e){this.variableNames=["x","W"],this.outputShape=e.outShape;const t=e.padInfo.front,n=e.padInfo.top,s=e.padInfo.left,a=e.strideDepth,r=e.strideHeight,i=e.strideWidth,o=e.dilationDepth,l=e.dilationHeight,u=e.dilationWidth,c=e.filterDepth,h=e.filterHeight,p=e.filterWidth,d=4*Math.floor(e.inChannels/4),f=e.inChannels%4;this.userCode=`\n const ivec3 strides = ivec3(${a}, ${r}, ${i});\n const ivec3 pads = ivec3(${t}, ${n}, ${s});\n\n void main() {\n ivec5 coords = getOutputCoords();\n int batch = coords.x;\n int d2 = coords.u;\n\n ivec3 xFRCCorner = ivec3(coords.y, coords.z, coords.w) * strides - pads;\n int xFCorner = xFRCCorner.x;\n int xRCorner = xFRCCorner.y;\n int xCCorner = xFRCCorner.z;\n\n // Convolve x(?, ?, ?, d1) with w(:, :, :, d1, d2) to get\n // y(yF, yR, yC, d2). ? = to be determined. 
: = across all\n // values in that axis.\n float dotProd = 0.0;\n for (int wF = 0; wF < ${c}; wF++) {\n int xF = xFCorner + wF * ${o};\n\n if (xF < 0 || xF >= ${e.inDepth}) {\n continue;\n }\n\n for (int wR = 0; wR < ${h}; wR++) {\n int xR = xRCorner + wR * ${l};\n\n if (xR < 0 || xR >= ${e.inHeight}) {\n continue;\n }\n\n for (int wC = 0; wC < ${p}; wC++) {\n int xC = xCCorner + wC * ${u};\n\n if (xC < 0 || xC >= ${e.inWidth}) {\n continue;\n }\n\n for (int d1 = 0; d1 < ${d}; d1 += 4) {\n vec4 xValues = vec4(\n getX(batch, xF, xR, xC, d1),\n getX(batch, xF, xR, xC, d1 + 1),\n getX(batch, xF, xR, xC, d1 + 2),\n getX(batch, xF, xR, xC, d1 + 3)\n );\n vec4 wValues = vec4(\n getW(wF, wR, wC, d1, d2),\n getW(wF, wR, wC, d1 + 1, d2),\n getW(wF, wR, wC, d1 + 2, d2),\n getW(wF, wR, wC, d1 + 3, d2)\n );\n\n dotProd += dot(xValues, wValues);\n }\n\n if (${1===f}) {\n dotProd +=\n getX(batch, xF, xR, xC, ${d}) *\n getW(wF, wR, wC, ${d}, d2);\n } else if (${2===f}) {\n vec2 xValues = vec2(\n getX(batch, xF, xR, xC, ${d}),\n getX(batch, xF, xR, xC, ${d} + 1)\n );\n vec2 wValues = vec2(\n getW(wF, wR, wC, ${d}, d2),\n getW(wF, wR, wC, ${d} + 1, d2)\n );\n dotProd += dot(xValues, wValues);\n } else if (${3===f}) {\n vec3 xValues = vec3(\n getX(batch, xF, xR, xC, ${d}),\n getX(batch, xF, xR, xC, ${d} + 1),\n getX(batch, xF, xR, xC, ${d} + 2)\n );\n vec3 wValues = vec3(\n getW(wF, wR, wC, ${d}, d2),\n getW(wF, wR, wC, ${d} + 1, d2),\n getW(wF, wR, wC, ${d} + 2, d2)\n );\n dotProd += dot(xValues, wValues);\n }\n }\n }\n }\n setOutput(dotProd);\n }\n `}}class bE{constructor(e,t,n){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e;const{filterWidth:s,inChannels:a,strideWidth:r,strideHeight:i,padInfo:o,outWidth:l,dilationWidth:u,dilationHeight:c,dataFormat:h}=n,{left:p,top:d}=o,f=a*s,m=X$(),g="channelsLast"===h,y=g?0:1,b=g?1:2;let x="";for(let n=0;n<=1;n++)for(let s=0;s<=1;s++)x+=`\n blockIndex = rc.y + ${s};\n pos = rc.x + ${n};\n\n if(blockIndex < ${e[1]} && pos < ${e[0]}) {\n offsetY = int(blockIndex / (${l})) * ${i} - ${d};\n d0 = offsetY + ${c} * (pos / ${f});\n\n if(d0 < ${t[y]} && d0 >= 0) {\n\n offsetX = int(mod(float(blockIndex), ${l}.) * ${r}. - ${p}.);\n d1 = offsetX + ${u} * (int(mod(float(pos), ${f}.) 
/ ${a}.));\n\n if(d1 < ${t[b]} && d1 >= 0) {\n\n ch = int(mod(float(pos), ${a}.));\n\n if (${g}) {\n innerDims = vec2(d1, ch);\n result[${2*n+s}] = getChannel(\n getA(d0, int(innerDims.x),\n int(innerDims.y)), innerDims);\n } else {\n innerDims = vec2(d0, d1);\n result[${2*n+s}] = getChannel(\n getA(ch, int(innerDims.x),\n int(innerDims.y)), innerDims);\n }\n }\n }\n }\n `;this.userCode=`\n void main() {\n ivec2 rc = getOutputCoords();\n\n vec4 result = vec4(0);\n\n int blockIndex, pos, offsetY, d0, offsetX, d1, ch;\n vec2 innerDims;\n\n ${x}\n\n ${m.output} = result;\n }\n `}}function xE({x:e,filter:t,convInfo:n,backend:s,bias:a=null,preluActivationWeights:r=null,leakyreluAlpha:i=0,activation:o=null}){const l=e.shape,u=s.texData.get(e.dataId),c=n.inChannels,h=l[0]*l[1]*l[2],p=n.outChannels,d="channelsLast"===n.dataFormat;let f;const m=[],g=(1===h||1===p)&&c>1e3,y=l[2]%2!=0&&!!u.isPacked;if(!g&&ue().getBool("WEBGL_LAZILY_UNPACK")&&ue().getBool("WEBGL_PACK_BINARY_OPERATIONS")&&y){const c=d?l[0]*l[1]*(l[2]+1):l[0]*l[2]*(l[3]+1),h={dataId:e.dataId,shape:[1,c,n.inChannels],dtype:e.dtype},p=u.shape;u.shape=u.shape.slice(),u.shape[u.shape.length-2]++,E(W$(u.shape,h.shape),(()=>`packed reshape ${u.shape} to ${h.shape} isn't free`));const g=LS({inputs:{x:t},backend:s,attrs:{shape:[1,n.inChannels,n.outChannels]}});m.push(g);const y=XS({a:h,b:g,backend:s,transposeA:!1,transposeB:!1,bias:a,activation:o,preluActivationWeights:r,leakyreluAlpha:i}),b=s.texData.get(y.dataId);E(b.isPacked,(()=>"batchMatMul result is expected to be packed")),u.shape=p,b.shape=n.outShape,f=xS({inputs:{x:y},backend:s}),f.shape=n.outShape,m.push(y)}else{const u=LS({inputs:{x:e},backend:s,attrs:{shape:[1,d?l[0]*l[1]*l[2]:l[0]*l[2]*l[3],n.inChannels]}}),c=LS({inputs:{x:t},backend:s,attrs:{shape:[1,n.inChannels,n.outChannels]}}),h=XS({a:u,b:c,transposeA:!1,transposeB:!1,backend:s,bias:a,activation:o,preluActivationWeights:r,leakyreluAlpha:i});f=LS({inputs:{x:h},backend:s,attrs:{shape:n.outShape}}),m.push(u),m.push(c),m.push(h)}for(const e of m)s.disposeIntermediateTensorInfo(e);return f}function wE({x:e,filter:t,convInfo:n,backend:s,bias:a=null,preluActivationWeights:r=null,leakyreluAlpha:i=0,activation:o=null}){const{filterWidth:l,filterHeight:u,inChannels:c,outWidth:h,outHeight:p,dataFormat:d}=n,f="channelsLast"===d,m=l*u*c,g=p*h,y=[m,g],b=[],x=LS({inputs:{x:e},backend:s,attrs:{shape:e.shape.slice(1)}}),w=LS({inputs:{x:t},backend:s,attrs:{shape:[1,m,D(t.shape)/m]}});b.push(x),b.push(w);const k=new bE(y,x.shape,n),v=s.runWebGLProgram(k,[x],"float32"),N=LS({inputs:{x:v},backend:s,attrs:{shape:[1,y[0],y[1]]}});b.push(v),b.push(N);const I=null!=a,$=null!=r,C="leakyrelu"===o,S=o?RS(o,!0):null,T=new FS(N.shape,w.shape,[1,g,n.outChannels],!0,!1,I,S,$,C),E=[N,w];if(a&&E.push(a),$&&E.push(r),C){const e=s.makeTensorInfo([],"float32",gs(i,"float32"));E.push(e),b.push(e)}const A=s.runWebGLProgram(T,E,"float32"),R=LS({inputs:{x:A},backend:s,attrs:{shape:f?[1,p,h,n.outChannels]:[1,n.outChannels,p,h]}});b.push(A);for(const e of b)s.disposeIntermediateTensorInfo(e);return R}const kE={kernelName:We,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,filter:r}=t,{strides:i,pad:o,dataFormat:l,dilations:u,dimRoundingMode:c}=s,h=Jr(l),p=Vr(a.shape,r.shape,i,u,o,c,!1,h);let 
d;if(1!==p.filterHeight||1!==p.filterWidth||1!==p.dilationHeight||1!==p.dilationWidth||1!==p.strideHeight||1!==p.strideWidth||"SAME"!==p.padInfo.type&&"VALID"!==p.padInfo.type)if(ue().getBool("WEBGL_CONV_IM2COL")&&1===a.shape[0])d=wE({x:a,filter:r,convInfo:p,backend:n});else{const e=new gE(p);d=n.runWebGLProgram(e,[a,r],"float32")}else d=xE({x:a,filter:r,convInfo:p,backend:n});const f=LS({inputs:{x:d},backend:n,attrs:{shape:p.outShape}});return n.disposeIntermediateTensorInfo(d),f}};class vE{constructor(e){this.variableNames=["x","dy"],this.outputShape=e.filterShape;const t=e.strideHeight,n=e.strideWidth,s=e.padInfo.top,a=e.padInfo.left,r="channelsLast"===e.dataFormat;this.userCode=`\n void main() {\n ivec4 coords = getOutputCoords();\n int wR = coords.x;\n int wC = coords.y;\n int d1 = coords.z;\n int d2 = coords.w;\n\n // Convolve x(?, ?, d1) with dy(:, :, d2) to get dw(wR, wC, d1, d2).\n // ? = to be determined. : = across all values in that axis.\n float dotProd = 0.0;\n\n for (int b = 0; b < ${e.batchSize}; b++) {\n for (int yR = 0; yR < ${e.outHeight}; yR++) {\n int xR = wR + yR * ${t} - ${s};\n\n if (xR < 0 || xR >= ${e.inHeight}) {\n continue;\n }\n\n for (int yC = 0; yC < ${e.outWidth}; yC++) {\n int xC = wC + yC * ${n} - ${a};\n\n if (xC < 0 || xC >= ${e.inWidth}) {\n continue;\n }\n\n if (${r}) {\n float dyValue = getDy(b, yR, yC, d2);\n float xValue = getX(b, xR, xC, d1);\n dotProd += (xValue * dyValue);\n } else {\n float dyValue = getDy(b, d2, yR, yC);\n float xValue = getX(b, d1, xR, xC);\n dotProd += (xValue * dyValue);\n }\n\n }\n }\n }\n setOutput(dotProd);\n }\n `}}class NE{constructor(e){this.variableNames=["dy","W"],this.outputShape=e.inShape;const t=e.filterHeight,n=e.filterWidth,s=e.strideHeight,a=e.strideWidth,r="channelsLast"===e.dataFormat,i=t-1-e.padInfo.top,o=n-1-e.padInfo.left,l=r?1:2,u=r?2:3,c=r?3:1;this.userCode=`\n const ivec2 pads = ivec2(${i}, ${o});\n\n void main() {\n ivec4 coords = getOutputCoords();\n int batch = coords[0];\n int d1 = coords[${c}];\n\n ivec2 dyCorner = ivec2(coords[${l}], coords[${u}]) - pads;\n int dyRCorner = dyCorner.x;\n int dyCCorner = dyCorner.y;\n\n // Convolve dy(?, ?, d2) with w(:, :, d1, d2) to compute dx(xR, xC, d1).\n // ? = to be determined. 
: = across all values in that axis.\n float dotProd = 0.0;\n for (int wR = 0; wR < ${t}; wR++) {\n float dyR = float(dyRCorner + wR) / ${s}.0;\n\n if (dyR < 0.0 || dyR >= ${e.outHeight}.0 || fract(dyR) > 0.0) {\n continue;\n }\n int idyR = int(dyR);\n\n int wRPerm = ${t} - 1 - wR;\n\n for (int wC = 0; wC < ${n}; wC++) {\n float dyC = float(dyCCorner + wC) / ${a}.0;\n\n if (dyC < 0.0 || dyC >= ${e.outWidth}.0 ||\n fract(dyC) > 0.0) {\n continue;\n }\n int idyC = int(dyC);\n\n int wCPerm = ${n} - 1 - wC;\n\n for (int d2 = 0; d2 < ${e.outChannels}; d2++) {\n\n if (${r}) {\n float xValue = getDy(batch, idyR, idyC, d2);\n float wValue = getW(wRPerm, wCPerm, d1, d2);\n dotProd += xValue * wValue;\n } else {\n float xValue = getDy(batch, d2, idyR, idyC);\n float wValue = getW(wRPerm, wCPerm, d1, d2);\n dotProd += xValue * wValue;\n }\n\n }\n }\n }\n setOutput(dotProd);\n }\n `}}class IE{constructor(e){this.variableNames=["x","dy"],this.outputShape=e.filterShape;const t=e.strideDepth,n=e.strideHeight,s=e.strideWidth,a=e.padInfo.front,r=e.padInfo.top,i=e.padInfo.left;this.userCode=`\n void main() {\n ivec5 coords = getOutputCoords();\n int wF = coords.x;\n int wR = coords.y;\n int wC = coords.z;\n int d1 = coords.w;\n int d2 = coords.u;\n\n float dotProd = 0.0;\n\n for (int b = 0; b < ${e.batchSize}; b++) {\n for (int yF = 0; yF < ${e.outDepth}; yF++) {\n int xF = wF + yF * ${t} - ${a};\n\n if (xF < 0 || xF >= ${e.inDepth}) {\n continue;\n }\n\n for (int yR = 0; yR < ${e.outHeight}; yR++) {\n int xR = wR + yR * ${n} - ${r};\n\n if (xR < 0 || xR >= ${e.inHeight}) {\n continue;\n }\n\n for (int yC = 0; yC < ${e.outWidth}; yC++) {\n int xC = wC + yC * ${s} - ${i};\n\n if (xC < 0 || xC >= ${e.inWidth}) {\n continue;\n }\n\n float dyValue = getDy(b, yF, yR, yC, d2);\n float xValue = getX(b, xF, xR, xC, d1);\n dotProd += (xValue * dyValue);\n }\n }\n }\n }\n setOutput(dotProd);\n }\n `}}class $E{constructor(e){this.variableNames=["dy","W"],this.outputShape=e.inShape;const t=e.filterDepth,n=e.filterHeight,s=e.filterWidth,a=e.strideDepth,r=e.strideHeight,i=e.strideWidth,o=t-1-e.padInfo.front,l=n-1-e.padInfo.top,u=s-1-e.padInfo.left;this.userCode=`\n const ivec3 pads = ivec3(${o}, ${l}, ${u});\n\n void main() {\n ivec5 coords = getOutputCoords();\n int batch = coords.x;\n int d1 = coords.u;\n\n\n ivec3 dyCorner = ivec3(coords.y, coords.z, coords.w) - pads;\n int dyFCorner = dyCorner.x;\n int dyRCorner = dyCorner.y;\n int dyCCorner = dyCorner.z;\n\n float dotProd = 0.0;\n for (int wF = 0; wF < ${t}; wF++) {\n float dyF = float(dyFCorner + wF) / ${a}.0;\n\n if (dyF < 0.0 || dyF >= ${e.outDepth}.0 || fract(dyF) > 0.0) {\n continue;\n }\n int idyF = int(dyF);\n\n int wFPerm = ${t} - 1 - wF;\n\n for (int wR = 0; wR < ${n}; wR++) {\n float dyR = float(dyRCorner + wR) / ${r}.0;\n\n if (dyR < 0.0 || dyR >= ${e.outHeight}.0 ||\n fract(dyR) > 0.0) {\n continue;\n }\n int idyR = int(dyR);\n\n int wRPerm = ${n} - 1 - wR;\n\n for (int wC = 0; wC < ${s}; wC++) {\n float dyC = float(dyCCorner + wC) / ${i}.0;\n\n if (dyC < 0.0 || dyC >= ${e.outWidth}.0 ||\n fract(dyC) > 0.0) {\n continue;\n }\n int idyC = int(dyC);\n\n int wCPerm = ${s} - 1 - wC;\n\n for (int d2 = 0; d2 < ${e.outChannels}; d2++) {\n float xValue = getDy(batch, idyF, idyR, idyC, d2);\n float wValue = getW(wFPerm, wRPerm, wCPerm, d1, d2);\n dotProd += xValue * wValue;\n }\n }\n }\n }\n setOutput(dotProd);\n }\n `}}const 
CE={kernelName:Ve,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,dy:r}=t,{strides:i,pad:o,dataFormat:l,dimRoundingMode:u,filterShape:c}=s,h=Jr(l),p=Vr(a.shape,c,i,1,o,u,!1,h),d=new vE(p);return n.runWebGLProgram(d,[a,r],"float32")}},SE={kernelName:Ue,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{dy:a,filter:r}=t,{inputShape:i,strides:o,pad:l,dataFormat:u,dimRoundingMode:c}=s,h=Jr(u),p=Vr(i,r.shape,o,1,l,c,!1,h),d=new NE(p);return n.runWebGLProgram(d,[a,r],"float32")}},TE={kernelName:Ge,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,filter:r}=t,{strides:i,pad:o,dilations:l}=s,u=Ur(a.shape,r.shape,i,l,o),c=new yE(u);return n.runWebGLProgram(c,[a,r],"float32")}},EE={kernelName:He,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,dy:r}=t,{strides:i,pad:o,filterShape:l}=s,u=Ur(a.shape,l,i,1,o),c=new IE(u);return n.runWebGLProgram(c,[a,r],"float32")}},AE={kernelName:je,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{dy:a,filter:r}=t,{pad:i,strides:o,inputShape:l}=s,u=Ur(l,r.shape,o,1,i),c=new $E(u);return n.runWebGLProgram(c,[a,r],"float32")}},RE=ES({opSnippet:"if (isnan(x)) return x;\n return cos(x);\n"}),FE={kernelName:Ke,backendName:"webgl",kernelFunc:RE},DE=ES({opSnippet:"\n float e2x = exp(-x);\n return (e2x + 1.0 / e2x) / 2.0;\n"}),_E={kernelName:qe,backendName:"webgl",kernelFunc:DE};class OE{constructor(e,t,n,s,a){this.variableNames=["Image","Boxes","BoxInd"],this.outputShape=[];const[r,i,o,l]=e,[u]=t,[c,h]=n;this.outputShape=[u,c,h,l];const p="bilinear"===s?1:0,[d,f]=[i-1+".0",o-1+".0"],[m,g,y]=c>1?[""+(i-1)/(c-1),"(y2-y1) * height_ratio",`y1*${d} + float(y)*(height_scale)`]:["0.0","0.0",`0.5 * (y1+y2) * ${d}`],[b,x,w]=h>1?[""+(o-1)/(h-1),"(x2-x1) * width_ratio",`x1*${f} + float(x)*(width_scale)`]:["0.0","0.0",`0.5 * (x1+x2) * ${f}`];this.userCode=`\n const float height_ratio = float(${m});\n const float width_ratio = float(${b});\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int y = coords[1];\n int x = coords[2];\n int d = coords[3];\n\n // get box vals\n float y1 = getBoxes(b,0);\n float x1 = getBoxes(b,1);\n float y2 = getBoxes(b,2);\n float x2 = getBoxes(b,3);\n\n // get image in batch index\n int bInd = round(getBoxInd(b));\n if(bInd < 0 || bInd >= ${r}) {\n return;\n }\n\n float height_scale = ${g};\n float width_scale = ${x};\n\n float in_y = ${y};\n if( in_y < 0.0 || in_y > ${d} ) {\n setOutput(float(${a}));\n return;\n }\n float in_x = ${w};\n if( in_x < 0.0 || in_x > ${f} ) {\n setOutput(float(${a}));\n return;\n }\n\n vec2 sourceFracIndexCR = vec2(in_x,in_y);\n if(${p} == 1) {\n // Compute the four integer indices.\n ivec2 sourceFloorCR = ivec2(sourceFracIndexCR);\n ivec2 sourceCeilCR = ivec2(ceil(sourceFracIndexCR));\n\n float topLeft = getImage(b, sourceFloorCR.y, sourceFloorCR.x, d);\n float bottomLeft = getImage(b, sourceCeilCR.y, sourceFloorCR.x, d);\n float topRight = getImage(b, sourceFloorCR.y, sourceCeilCR.x, d);\n float bottomRight = getImage(b, sourceCeilCR.y, sourceCeilCR.x, d);\n\n vec2 fracCR = sourceFracIndexCR - vec2(sourceFloorCR);\n\n float top = topLeft + (topRight - topLeft) * fracCR.x;\n float bottom = bottomLeft + (bottomRight - bottomLeft) * fracCR.x;\n float newValue = top + (bottom - top) * fracCR.y;\n setOutput(newValue);\n } else {\n // Compute the coordinators of nearest neighbor point.\n ivec2 sourceNearestCR = ivec2(floor(\n sourceFracIndexCR + 
vec2(0.5,0.5)));\n float newValue = getImage(b, sourceNearestCR.y, sourceNearestCR.x, d);\n setOutput(newValue);\n }\n }\n `}}const ME={kernelName:Ye,backendName:"webgl",kernelFunc:e=>{const{inputs:t,backend:n,attrs:s}=e,{image:a,boxes:r,boxInd:i}=t,{cropSize:o,method:l,extrapolationValue:u}=s,c=new OE(a.shape,r.shape,o,l,u);return n.runWebGLProgram(c,[a,r,i],"float32")}};class LE{constructor(e,t,n){this.variableNames=["x"],this.outputShape=e;const s=e.length,a=t?"0.0":`getX(${zE(s,"coords")})`,r=e[e.length-1];let i="",o="";t?(i=n?"end != "+(r-1):"end != 0",o=n?"end + 1":"end - 1"):(i=n?`end + pow2 < ${r}`:"end >= pow2",o=n?"end + pow2":"end - pow2"),this.userCode=`\n uniform float index;\n void main() {\n ${vC(s)} coords = getOutputCoords();\n int end = ${PE(s,"coords")};\n float val = ${a};\n int pow2 = int(pow(2.0, index));\n if (${i}) {\n int idx = ${o};\n ${PE(s,"coords")} = idx;\n val += getX(${zE(s,"coords")});\n }\n setOutput(val);\n }\n `}getCustomSetupFunc(e){return(t,n)=>{null==this.index&&(this.index=t.getUniformLocation(n,"index")),t.gl.uniform1f(this.index,e)}}}function zE(e,t){if(1===e)return`${t}`;if(2===e)return`${t}.x, ${t}.y`;if(3===e)return`${t}.x, ${t}.y, ${t}.z`;if(4===e)return`${t}.x, ${t}.y, ${t}.z, ${t}.w`;throw Error(`Cumulative sum for rank ${e} is not yet supported`)}function PE(e,t){if(1===e)return`${t}`;if(2===e)return`${t}.y`;if(3===e)return`${t}.z`;if(4===e)return`${t}.w`;throw Error(`Cumulative sum for rank ${e} is not yet supported`)}const BE={kernelName:Xe,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{axis:r,exclusive:i,reverse:o}=s,l=a.shape.length,u=Zi([r],l);let c=a;null!=u&&(c=KS({inputs:{x:a},backend:n,attrs:{perm:u}}));const h=eo(1,l)[0];if(h!==l-1)throw new Error(`WebGL cumsum shader expects an inner-most axis=${a.shape.length-1} but got axis=${r}`);const p=c.shape[h];let d=xS({inputs:{x:c},backend:n});for(let e=0;e<=Math.ceil(Math.log2(p))-1;e++){const t=new LE(c.shape,!1,o),s=t.getCustomSetupFunc(e),a=d;d=n.runWebGLProgram(t,[d],d.dtype,s),n.disposeIntermediateTensorInfo(a)}if(i){const e=new LE(c.shape,i,o),t=d;d=n.runWebGLProgram(e,[d],d.dtype),n.disposeIntermediateTensorInfo(t)}if(null!=u){const e=KS({inputs:{x:d},backend:n,attrs:{perm:Qi(u)}});return n.disposeIntermediateTensorInfo(d),n.disposeIntermediateTensorInfo(c),e}return d}},WE={kernelName:Je,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,weights:r}=t,{size:i,binaryOutput:o}=s;if(1===a.shape.length){const e=n.readSync(a.dataId),t=n.readSync(r.dataId),s=SC(e,t,r.dtype,r.shape,i);return n.makeTensorInfo([i],r.dtype,s)}if(2===a.shape.length){const e=n.bufferSync(a),t=n.bufferSync(r),s=TC(e,t,i,o);return n.makeTensorInfo(s.shape,r.dtype,s.values)}throw new Error(`Error in denseBincount: input must be at most rank 2, but got rank${a.shape.length}.`)}};class VE{constructor(e,t,n){this.variableNames=["x"],this.outputShape=[],this.outputShape=e,this.blockSize=t,this.dataFormat=n,this.userCode=`\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int h = ${this.getHeightCoordString()};\n int w = ${this.getWidthCoordString()};\n int d = ${this.getDepthCoordString()};\n\n int in_h = h / ${t};\n int offset_h = imod(h, ${t});\n int in_w = w / ${t};\n int offset_w = imod(w, ${t});\n int offset_d = (offset_h * ${t} + offset_w) *\n ${this.getOutputDepthSize()};\n int in_d = d + offset_d;\n\n float result = ${this.getInputSamplingString()};\n setOutput(result);\n }\n 
`}getHeightCoordString(){return"NHWC"===this.dataFormat?"coords[1]":"coords[2]"}getWidthCoordString(){return"NHWC"===this.dataFormat?"coords[2]":"coords[3]"}getDepthCoordString(){return"NHWC"===this.dataFormat?"coords[3]":"coords[1]"}getOutputDepthSize(){return"NHWC"===this.dataFormat?this.outputShape[3]:this.outputShape[1]}getInputSamplingString(){return"NHWC"===this.dataFormat?"getX(b, in_h, in_w, in_d)":"getX(b, in_d, in_h, in_w)"}}const UE={kernelName:Ze,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{blockSize:r,dataFormat:i}=s;E(r>1,(()=>`blockSize should be > 1 for depthToSpace, but was: ${r}`));const o=a.shape[0],l="NHWC"===i?a.shape[1]:a.shape[2],u="NHWC"===i?a.shape[2]:a.shape[3],c="NHWC"===i?a.shape[3]:a.shape[1],h=l*r,p=u*r,d=c/(r*r),f=new VE("NHWC"===i?[o,h,p,d]:[o,d,h,p],r,i);return n.runWebGLProgram(f,[a],a.dtype)}};class GE{constructor(e,t=!1,n=null,s=!1,a=!1){this.variableNames=["x","W"],this.outputShape=e.outShape;const r=e.inHeight,i=e.inWidth,o=e.padInfo.top,l=e.padInfo.left,u=e.strideHeight,c=e.strideWidth,h=e.dilationHeight,p=e.dilationWidth,d=e.filterHeight,f=e.filterWidth,m=e.outChannels/e.inChannels;let g="",y="";n&&(g=s?`float activation(float a) {\n float b = getPreluActivationWeightsAtOutCoords();\n ${n}\n }`:a?`float activation(float a) {\n float b = getLeakyreluAlphaAtOutCoords();\n ${n}\n }`:`\n float activation(float x) {\n ${n}\n }\n `,y="result = activation(result);");const b=t?"result += getBiasAtOutCoords();":"";t&&this.variableNames.push("bias"),s&&this.variableNames.push("preluActivationWeights"),a&&this.variableNames.push("leakyreluAlpha"),this.userCode=`\n ${g}\n\n const ivec2 strides = ivec2(${u}, ${c});\n const ivec2 pads = ivec2(${o}, ${l});\n\n void main() {\n ivec4 coords = getOutputCoords();\n int batch = coords.x;\n ivec2 xRCCorner = coords.yz * strides - pads;\n int d2 = coords.w;\n int d1 = d2 / ${m};\n int q = d2 - d1 * ${m};\n\n int xRCorner = xRCCorner.x;\n int xCCorner = xRCCorner.y;\n\n // Convolve x(?, ?, d1) with w(:, :, d1, q) to get y(yR, yC, d2).\n // ? = to be determined. 
: = across all values in that axis.\n float dotProd = 0.0;\n // TO DO(dsmilkov): Flatten the two for loops and vec4 the operations.\n for (int wR = 0; wR < ${d}; wR++) {\n int xR = xRCorner + wR * ${h};\n\n if (xR < 0 || xR >= ${r}) {\n continue;\n }\n\n for (int wC = 0; wC < ${f}; wC++) {\n int xC = xCCorner + wC * ${p};\n\n if (xC < 0 || xC >= ${i}) {\n continue;\n }\n\n float xVal = getX(batch, xR, xC, d1);\n float wVal = getW(wR, wC, d1, q);\n dotProd += xVal * wVal;\n }\n }\n\n float result = dotProd;\n ${b}\n ${y}\n setOutput(result);\n }\n `}}class HE{constructor(e,t=!1,n=null,s=!1,a=!1){this.variableNames=["x","W"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e.outShape;const r=e.inHeight,i=e.inWidth,o=e.padInfo.top,l=e.padInfo.left,u=e.strideHeight,c=e.strideWidth,h=e.dilationHeight,p=e.dilationWidth,d=e.filterHeight,f=e.filterWidth,m=f;let g="int xR; int xC; int xCOffset;";for(let e=0;e<d;e++)for(let t=0;t<f;t++)g+=`\n vec4 xTexelR${e}C${2*t} = vec4(0.);\n vec4 wR${e}C${t} = vec4(0.);\n vec4 xR${e}C${t} = vec4(0.);`;for(let e=0;e<d;e++)for(let t=0;t<m;t++){const n=2*t;if(g+=`\n xR = xRCorner + ${e*h};\n xC = xCCorner + ${n*p};\n `,1===c){if(n<f&&(g+=l%2==1?`\n xCOffset = xC + 1;\n if(xR >= 0 && xR < ${r} && xCOffset >= 0 && xCOffset < ${i}) {\n xTexelR${e}C${n} = getX(batch, xR, xCOffset, d1);\n\n // Need to manually clear unused channels in case\n // we're reading from recycled texture.\n if(xCOffset + 1 >= ${i}) {\n xTexelR${e}C${n}.zw = vec2(0.);\n }\n } else {\n xTexelR${e}C${n} = vec4(0.);\n }\n\n xCOffset = xC + 1 - 2;\n if(xR >= 0 && xR < ${r} && xCOffset >= 0 && xCOffset < ${i}) {\n vec4 previous = getX(batch, xR, xCOffset, d1);\n\n // Need to manually clear unused channels in case\n // we're reading from recycled texture.\n if(xCOffset + 1 >= ${i}) {\n previous.zw = vec2(0.);\n }\n\n xR${e}C${n} = vec4(previous.zw, xTexelR${e}C${n}.xy);\n } else {\n xR${e}C${n} = vec4(0, 0, xTexelR${e}C${n}.xy);\n }\n `:`\n if(xR >= 0 && xR < ${r} && xC >= 0 && xC < ${i}) {\n xTexelR${e}C${n} = getX(batch, xR, xC, d1);\n } else {\n xTexelR${e}C${n} = vec4(0.);\n }\n\n xR${e}C${n} = xTexelR${e}C${n};\n `,n+1<f)){const t=l%2==0?T(p):p;p%2==0&&l%2==1||p%2!=0&&l%2!=1?(g+=`\n xCOffset = xC + ${l%2} + ${t};\n\n if(xR >= 0 && xR < ${r} &&\n xCOffset >= 0 && xCOffset < ${i}) {\n xTexelR${e}C${n+2} = getX(batch, xR, xCOffset, d1);\n }\n `,p>1&&(g+=`\n xCOffset -= 2;\n if(xR >= 0 && xR < ${r} &&\n xCOffset >= 0 && xCOffset < ${i}) {\n xTexelR${e}C${n} = getX(batch, xR, xCOffset, d1);\n } else {\n xTexelR${e}C${n} = vec4(0.);\n }\n `),g+=`\n xR${e}C${n+1} = vec4(\n xTexelR${e}C${n}.zw, xTexelR${e}C${n+2}.xy);\n `):g+=`\n xCOffset = xC + ${t};\n\n if(xR >= 0 && xR < ${r} &&\n xCOffset >= 0 && xCOffset < ${i}) {\n xTexelR${e}C${n+2} = getX(batch, xR, xCOffset, d1);\n }\n\n xR${e}C${n+1} = xTexelR${e}C${n+2};\n `}}else n<f&&(g+=`\n if(xR >= 0 && xR < ${r}) {\n `,l%2==1?(g+=`\n xCOffset = xC + 1 - ${c};\n if(xCOffset >= 0 && xCOffset < ${i}) {\n xTexelR${e}C${n} = getX(batch, xR, xCOffset, d1);\n } else {\n xTexelR${e}C${n} = vec4(0.);\n }\n\n if(xC + 1 >= 0 && xC + 1 < ${i}) {\n xTexelR${e}C${n+2} = getX(batch, xR, xC + 1, d1);\n } else {\n xTexelR${e}C${n+2} = vec4(0.);\n }\n\n xR${e}C${n} = vec4(\n xTexelR${e}C${n}.zw, xTexelR${e}C${n+2}.zw);\n `,n+1<f&&(g+=`\n vec4 final = vec4(0.);\n xCOffset = xC + 1 + ${c};\n if(xCOffset >= 0 && xCOffset < ${i}) {\n final = getX(batch, xR, xCOffset, d1);\n }\n xR${e}C${n+1} = vec4(xTexelR${e}C${n+2}.xy, final.xy);\n `)):(g+=`\n if(xC >= 0 && xC < 
${i}) {\n xTexelR${e}C${n} = getX(batch, xR, xC, d1);\n } else {\n xTexelR${e}C${n} = vec4(0.);\n }\n\n xCOffset = xC + ${c};\n if(xCOffset >= 0 && xCOffset < ${i}) {\n xTexelR${e}C${n+2} = getX(batch, xR, xCOffset, d1);\n } else {\n xTexelR${e}C${n+2} = vec4(0.);\n }\n\n xR${e}C${n} = vec4(\n xTexelR${e}C${n}.xy, xTexelR${e}C${n+2}.xy);\n `,n+1<f&&(g+=`\n xR${e}C${n+1} = vec4(\n xTexelR${e}C${n}.zw, xTexelR${e}C${n+2}.zw);\n `)),g+="}");n<f&&(g+=`\n vec4 wTexelR${e}C${n} = getW(${e}, ${n}, d1, q);\n wR${e}C${n} = vec4(wTexelR${e}C${n}.xz, wTexelR${e}C${n}.xz);\n `,n+1<f&&(g+=`\n vec4 wTexelR${e}C${n+1} = getW(${e}, ${n+1}, d1, q);\n wR${e}C${n+1} =\n vec4(wTexelR${e}C${n+1}.xz, wTexelR${e}C${n+1}.xz);`))}for(let e=0;e<d;e++)for(let t=0;t<f;t++)g+=`dotProd += xR${e}C${t} * wR${e}C${t};`;let y="",b="";n&&(y=s?`vec4 activation(vec4 a) {\n vec4 b = getPreluActivationWeightsAtOutCoords();\n ${n}\n }`:a?`vec4 activation(vec4 a) {\n vec4 b = getLeakyreluAlphaAtOutCoords();\n ${n}\n }`:`vec4 activation(vec4 x) {\n ${n}\n }`,b="result = activation(result);");const x=t?"result += getBiasAtOutCoords();":"";t&&this.variableNames.push("bias"),s&&this.variableNames.push("preluActivationWeights"),a&&this.variableNames.push("leakyreluAlpha"),this.userCode=`\n ${y}\n\n const ivec2 strides = ivec2(${u}, ${c});\n const ivec2 pads = ivec2(${o}, ${l});\n\n void main() {\n\n ivec4 coords = getOutputCoords();\n int batch = coords.x;\n ivec2 xRCCorner = coords.yz * strides - pads;\n int d2 = coords.w;\n int d1 = d2;\n int q = 0;\n int xRCorner = xRCCorner.x;\n int xCCorner = xRCCorner.y;\n\n vec4 dotProd = vec4(0.);\n\n ${g}\n\n vec4 result = dotProd;\n ${x}\n ${b}\n setOutput(result);\n }\n `}}const jE={kernelName:Qe,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,filter:r}=t,{strides:i,pad:o,dilations:l,dimRoundingMode:u}=s;let c=l;null==c&&(c=[1,1]),E(Yr(i,c),(()=>`Error in depthwiseConv2d: Either strides or dilations must be 1. 
Got strides ${i} and dilations '${c}'`));const h=Vr(a.shape,r.shape,i,c,o,u,!0);let p;return p=ue().getBool("WEBGL_PACK_DEPTHWISECONV")&&h.strideWidth<=2&&h.outChannels/h.inChannels==1?new HE(h):new GE(h),n.runWebGLProgram(p,[a,r],"float32")}};class KE{constructor(e){this.variableNames=["x","dy"],this.outputShape=e.filterShape;const t=e.strideHeight,n=e.strideWidth,s=e.padInfo.top,a=e.padInfo.left,r=e.outChannels/e.inChannels;this.userCode=`\n void main() {\n ivec4 coords = getOutputCoords();\n int wR = coords.x;\n int wC = coords.y;\n int d1 = coords.z;\n int dm = coords.w;\n int d2 = d1 * ${r} + dm;\n\n float dotProd = 0.0;\n\n // TO DO: Vec4 over the batch size\n for (int b = 0; b < ${e.batchSize}; b++) {\n for (int yR = 0; yR < ${e.outHeight}; yR++) {\n int xR = wR + yR * ${t} - ${s};\n\n if (xR < 0 || xR >= ${e.inHeight}) {\n continue;\n }\n\n for (int yC = 0; yC < ${e.outWidth}; yC++) {\n int xC = wC + yC * ${n} - ${a};\n\n if (xC < 0 || xC >= ${e.inWidth}) {\n continue;\n }\n\n float dyValue = getDy(b, yR, yC, d2);\n float xValue = getX(b, xR, xC, d1);\n dotProd += (xValue * dyValue);\n }\n }\n }\n setOutput(dotProd);\n }\n `}}class qE{constructor(e){this.variableNames=["dy","W"],this.outputShape=e.inShape;const t=e.filterHeight,n=e.filterWidth,s=e.strideHeight,a=e.strideWidth,r=t-1-e.padInfo.top,i=n-1-e.padInfo.left,o=e.outChannels/e.inChannels;this.userCode=`\n const ivec2 pads = ivec2(${r}, ${i});\n\n void main() {\n ivec4 coords = getOutputCoords();\n int batch = coords[0];\n int d1 = coords[3];\n ivec2 dyCorner = coords.yz - pads;\n int dyRCorner = dyCorner.x;\n int dyCCorner = dyCorner.y;\n\n float dotProd = 0.0;\n\n for (int wR = 0; wR < ${t}; wR++) {\n float dyR = float(dyRCorner + wR) / ${s}.0;\n\n if (dyR < 0.0 || dyR >= ${e.outHeight}.0 || fract(dyR) > 0.0) {\n continue;\n }\n int idyR = int(dyR);\n\n int wRPerm = ${t} - 1 - wR;\n\n for (int wC = 0; wC < ${n}; wC++) {\n float dyC = float(dyCCorner + wC) / ${a}.0;\n\n if (dyC < 0.0 || dyC >= ${e.outWidth}.0 ||\n fract(dyC) > 0.0) {\n continue;\n }\n int idyC = int(dyC);\n\n int wCPerm = ${n} - 1 - wC;\n\n // TO DO: Vec4 over the channelMul\n for (int dm = 0; dm < ${o}; dm++) {\n int d2 = d1 * ${o} + dm;\n float xValue = getDy(batch, idyR, idyC, d2);\n float wValue = getW(wRPerm, wCPerm, d1, dm);\n dotProd += xValue * wValue;\n }\n }\n }\n setOutput(dotProd);\n }\n `}}const XE={kernelName:et,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,dy:r}=t,{strides:i,dilations:o,pad:l,dimRoundingMode:u,filterShape:c}=s,h=Vr(a.shape,c,i,o,l,u,!0),p=new KE(h);return n.runWebGLProgram(p,[a,r],"float32")}},YE={kernelName:tt,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{dy:a,filter:r}=t,{strides:i,dilations:o,pad:l,dimRoundingMode:u,inputShape:c}=s,h=Vr(c,r.shape,i,o,l,u,!0),p=new qE(h);return n.runWebGLProgram(p,[a,r],"float32")}};class JE{constructor(e){this.variableNames=["X"],this.outputShape=[e,e],this.userCode="\n void main() {\n ivec2 coords = getOutputCoords();\n float val = coords[0] == coords[1] ? 
getX(coords[0]) : 0.0;\n setOutput(val);\n }\n "}}const ZE={kernelName:nt,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n}=e,{x:s}=t,a=[...s.shape,...s.shape],r=D(s.shape),i=LS({inputs:{x:s},backend:n,attrs:{shape:[r]}}),o=new JE(r),l=n.runWebGLProgram(o,[i],i.dtype),u=LS({inputs:{x:l},backend:n,attrs:{shape:a}});return n.disposeIntermediateTensorInfo(i),n.disposeIntermediateTensorInfo(l),u}};class QE{constructor(e){this.variableNames=["x","W"],this.outputShape=e.outShape;const{inHeight:t,inWidth:n,padInfo:s,strideHeight:a,strideWidth:r,filterHeight:i,filterWidth:o,dilationHeight:l,dilationWidth:u}=e,{top:c,left:h}=s;this.userCode=`\n const ivec2 strides = ivec2(${a}, ${r});\n const ivec2 pads = ivec2(${c}, ${h});\n const float neg_infinity = -3.4e38;\n\n void main() {\n ivec4 coords = getOutputCoords();\n int batch = coords.x;\n int d1 = coords.w;\n ivec2 outTopLeftCorner =\n coords.yz * strides - pads;\n int hBeg = outTopLeftCorner.x;\n int wBeg = outTopLeftCorner.y;\n\n float curVal = neg_infinity;\n for (int h = 0; h < ${i}; h++) {\n int hIn = hBeg + h * ${l};\n\n if (hIn >= 0 && hIn < ${t}) {\n for (int w = 0; w < ${o}; w++) {\n int wIn = wBeg + w * ${u};\n\n if (wIn >= 0 && wIn < ${n}) {\n float xVal = getX(batch, hIn, wIn, d1);\n float wVal = getW(h, w, d1);\n\n float val = xVal + wVal;\n if (val > curVal) {\n curVal = val;\n }\n }\n }\n }\n }\n\n float result = curVal;\n setOutput(result);\n }\n `}}const eA={kernelName:st,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,filter:r}=t,{strides:i,pad:o,dilations:l}=s,u=Pr(a.shape,r.shape,i,o,"NHWC",l);let c;const h=new QE(u);c=n.runWebGLProgram(h,[a,r],"float32");const p=LS({inputs:{x:c},backend:n,attrs:{shape:u.outShape}});return n.disposeIntermediateTensorInfo(c),p}},tA=ES({opSnippet:"return (x >= 0.0) ? x : (exp(x) - 1.0);",packedOpSnippet:"\n vec4 result;\n\n result.r = (x.r >= 0.0) ? x.r : (exp(x.r) - 1.0);\n result.g = (x.g >= 0.0) ? x.g : (exp(x.g) - 1.0);\n result.b = (x.b >= 0.0) ? x.b : (exp(x.b) - 1.0);\n result.a = (x.a >= 0.0) ? x.a : (exp(x.a) - 1.0);\n\n return result;\n"}),nA={kernelName:ot,backendName:"webgl",kernelFunc:tA},sA={kernelName:lt,backendName:"webgl",kernelFunc:e=>{const{inputs:t,backend:n}=e,{dy:s,y:a}=t,r=ue().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new bS("\n vec4 bGTEZero = vec4(greaterThanEqual(b, vec4(0.)));\n return (bGTEZero * a) + ((vec4(1.0) - bGTEZero) * (a * (b + vec4(1.0))));\n",s.shape,a.shape):new yS("return (b >= 1.0) ? 
a : a * (b + 1.0);",s.shape,a.shape);return n.runWebGLProgram(r,[s,a],s.dtype)}},aA=AS({opSnippet:"return float(a == b);",packedOpSnippet:"\n return vec4(equal(a, b));\n",dtype:"bool"}),rA={kernelName:ct,backendName:"webgl",kernelFunc:aA},iA=ES({opSnippet:`\n // Error function is calculated approximately with elementary function.\n // See "Handbook of Mathematical Functions with Formulas,\n // Graphs, and Mathematical Tables", Abramowitz and Stegun.\n float p = ${xu};\n float a1 = ${wu};\n float a2 = ${ku};\n float a3 = ${vu};\n float a4 = ${Nu};\n float a5 = ${Iu};\n\n float sign = sign(x);\n x = abs(x);\n float t = 1.0 / (1.0 + p * x);\n return sign * (1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*exp(-x*x));\n`}),oA={kernelName:ut,backendName:"webgl",kernelFunc:iA},lA="return exp(x);",uA=ES({opSnippet:lA,packedOpSnippet:lA,cpuKernelImpl:RC}),cA={kernelName:ht,backendName:"webgl",kernelFunc:uA};function hA(e){const{inputs:t,attrs:n,backend:s}=e,{dim:a}=n,{input:r}=t,i=r.shape.length,o=r.shape.slice();let l=a;return a<0&&(E(-(i+1)<=a,(()=>`Axis must be in the interval [${-(i+1)}, ${i}]`)),l=i+a+1),o.splice(l,0,1),LS({inputs:{x:r},backend:s,attrs:{shape:o}})}const pA={kernelName:pt,backendName:"webgl",kernelFunc:hA},dA="return exp(x) - 1.0;",fA=ES({opSnippet:dA,packedOpSnippet:dA,cpuKernelImpl:FC}),mA={kernelName:dt,backendName:"webgl",kernelFunc:fA};class gA{constructor(e,t,n){this.variableNames=["real","imag"];const s=t[1];this.outputShape=t;const a=n?`2.0 * ${Math.PI}`:`-2.0 * ${Math.PI}`,r=n?`${s}.0`:"1.0";let i;if("real"===e)i="return real * expR - imag * expI;";else{if("imag"!==e)throw new Error(`FFT component must be either "real" or "imag", got ${e}.`);i="return real * expI + imag * expR;"}this.userCode=`\n const float exponentMultiplier = ${a};\n\n float unaryOpComplex(float real, float expR, float imag, float expI) {\n ${i}\n }\n\n float mulMatDFT(int batch, int index) {\n float indexRatio = float(index) / float(${s});\n float exponentMultiplierTimesIndexRatio =\n exponentMultiplier * indexRatio;\n\n float result = 0.0;\n\n for (int i = 0; i < ${s}; i++) {\n // x = (-2|2 * PI / N) * index * i;\n float x = exponentMultiplierTimesIndexRatio * float(i);\n float expR = cos(x);\n float expI = sin(x);\n float real = getReal(batch, i);\n float imag = getImag(batch, i);\n\n result +=\n unaryOpComplex(real, expR, imag, expI) / ${r};\n }\n\n return result;\n }\n\n void main() {\n ivec2 coords = getOutputCoords();\n setOutput(mulMatDFT(coords[0], coords[1]));\n }\n `}}function yA(e,t,n){const s=n.texData.get(e.dataId),a=D(e.shape),r=e.shape[e.shape.length-1],i=LS({inputs:{x:e},backend:n,attrs:{shape:[a/r,r]}}),o=i.shape,l=new gA("real",o,t),u=new gA("imag",o,t),c=[{dataId:s.complexTensorInfos.real.dataId,dtype:s.complexTensorInfos.real.dtype,shape:o},{dataId:s.complexTensorInfos.imag.dataId,dtype:s.complexTensorInfos.imag.dtype,shape:o}],h=n.runWebGLProgram(l,c,"float32"),p=n.runWebGLProgram(u,c,"float32"),d=kS({inputs:{real:h,imag:p},backend:n});n.disposeIntermediateTensorInfo(h),n.disposeIntermediateTensorInfo(p);const f=LS({inputs:{x:d},backend:n,attrs:{shape:e.shape}});return n.disposeIntermediateTensorInfo(i),n.disposeIntermediateTensorInfo(d),f}const bA={kernelName:ft,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n}=e,{input:s}=t;return yA(s,!1,n)}};class xA{constructor(e,t){this.outputShape=[],this.variableNames=["x"],this.outputShape=e,this.userCode="\n uniform float value;\n void main() {\n // Input can be obtained from uniform value.\n setOutput(value);\n 
}\n "}getCustomSetupFunc(e){return(t,n)=>{null==this.valueLoc&&(this.valueLoc=t.getUniformLocationNoThrow(n,"value")),t.gl.uniform1f(this.valueLoc,e)}}}function wA(e){const{backend:t,attrs:n}=e,{shape:s,value:a}=n;let{dtype:r}=n;if(r=r||X(a),"string"===r){const e=U(r,D(s));return e.fill(a),t.makeTensorInfo(s,r,e)}{const e=new xA(s,a),n=e.getCustomSetupFunc(a);return t.runWebGLProgram(e,[],r,n)}}const kA={kernelName:mt,backendName:"webgl",kernelFunc:wA};class vA{constructor(e){this.variableNames=["Image"],this.outputShape=[];const t=e[2];this.outputShape=e,this.userCode=`\n void main() {\n ivec4 coords = getOutputCoords();\n int x = coords[2];\n\n int coordX = ${t} - x;\n float outputValue;\n if(coordX >= 0 && coordX < ${t}) {\n outputValue = getImage(coords[0], coords[1], coordX, coords[3]);\n } else {\n outputValue = getImage(coords[0], coords[1], coords[2], coords[3]);\n }\n setOutput(outputValue);\n }\n `}}const NA={kernelName:gt,backendName:"webgl",kernelFunc:({inputs:e,backend:t})=>{const{image:n}=e,s=t,a=new vA(n.shape);return s.runWebGLProgram(a,[n],n.dtype)}},IA="return floor(x);",$A=ES({opSnippet:IA,packedOpSnippet:IA,cpuKernelImpl:DC}),CA={kernelName:yt,backendName:"webgl",kernelFunc:$A},SA=AS({opSnippet:"\n float s = sign(a) * sign(b);\n int ia = round(a);\n int ib = round(b);\n if (ib != 0) {\n // Windows (D3D) wants guaranteed non-zero int division at compile-time.\n return float(idiv(ia, ib, s));\n } else {\n return NAN;\n }\n",packedOpSnippet:"\n ivec4 ia = round(a);\n ivec4 ib = round(b);\n bvec4 cond = notEqual(ib, ivec4(0));\n ivec4 result = ivec4(0);\n vec4 s = sign(a) * sign(b);\n\n // Windows (D3D) wants guaranteed non-zero int division at compile-time.\n if (cond[0]) {\n result[0] = idiv(ia[0], ib[0], s[0]);\n }\n if (cond[1]) {\n result[1] = idiv(ia[1], ib[1], s[1]);\n }\n if (cond[2]) {\n result[2] = idiv(ia[2], ib[2], s[2]);\n }\n if (cond[3]) {\n result[3] = idiv(ia[3], ib[3], s[3]);\n }\n return vec4(result);\n",dtype:"int32"}),TA={kernelName:bt,backendName:"webgl",kernelFunc:SA};class EA{constructor(e){this.variableNames=["A"];const t=X$(),[n,s]=e;this.outputShape=e,this.userCode=`\n void main() {\n ivec3 coords = getOutputCoords();\n int texR = coords[0];\n int texC = coords[1];\n int depth = coords[2];\n vec2 uv = (vec2(texC, texR) + halfCR) / vec2(${s}.0, ${n}.0);\n\n vec4 values = ${t.texture2D}(A, uv);\n float value;\n if (depth == 0) {\n value = values.r;\n } else if (depth == 1) {\n value = values.g;\n } else if (depth == 2) {\n value = values.b;\n } else if (depth == 3) {\n value = values.a;\n }\n\n setOutput(floor(value * 255.0 + 0.5));\n }\n `}}class AA{constructor(e){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0;const t=X$(),[n,s]=e;this.outputShape=e,this.userCode=`\n void main() {\n ivec3 coords = getOutputCoords();\n int texR = coords[0];\n int texC = coords[1];\n int depth = coords[2];\n\n vec4 result = vec4(0.);\n\n for(int row=0; row<=1; row++) {\n for(int col=0; col<=1; col++) {\n texC = coords[1] + row;\n depth = coords[2] + col;\n\n vec2 uv = (vec2(texC, texR) + halfCR) /\n vec2(${s}.0, ${n}.0);\n vec4 values = ${t.texture2D}(A, uv);\n float value;\n if (depth == 0) {\n value = values.r;\n } else if (depth == 1) {\n value = values.g;\n } else if (depth == 2) {\n value = values.b;\n } else if (depth == 3) {\n value = values.a;\n }\n\n result[row * 2 + col] = floor(value * 255.0 + 0.5);\n }\n }\n\n ${t.output} = result;\n }\n `}}const 
RA={kernelName:ss,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e;let{pixels:a}=t;const{numChannels:r}=s,i="undefined"!=typeof HTMLVideoElement&&a instanceof HTMLVideoElement,o="undefined"!=typeof HTMLImageElement&&a instanceof HTMLImageElement,[l,u]=i?[a.videoWidth,a.videoHeight]:[a.width,a.height],c=[u,l],h=[u,l,r];(o||i)&&(null==FA&&(FA=document.createElement("canvas").getContext("2d")),FA.canvas.width=l,FA.canvas.height=u,FA.drawImage(a,0,0,l,u),a=FA.canvas);const p=n.makeTensorInfo(c,"int32");n.texData.get(p.dataId).usage=w$.PIXELS,n.gpgpu.uploadPixelDataToTexture(n.getTexture(p.dataId),a);const d=ue().getBool("WEBGL_PACK")?new AA(h):new EA(h),f=n.runWebGLProgram(d,[p],"int32");return n.disposeData(p.dataId),f}};let FA;const DA={kernelName:is,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,filter:r,bias:i,preluActivationWeights:o}=t,{strides:l,pad:u,dataFormat:c,dilations:h,dimRoundingMode:p,activation:d,leakyreluAlpha:f}=s,m=Jr(c),g=Vr(a.shape,r.shape,l,h,u,p,!1,m);let y;const b=[];if(1!==g.filterHeight||1!==g.filterWidth||1!==g.dilationHeight||1!==g.dilationWidth||1!==g.strideHeight||1!==g.strideWidth||"SAME"!==g.padInfo.type&&"VALID"!==g.padInfo.type)if(ue().getBool("WEBGL_CONV_IM2COL")&&1===a.shape[0])y=wE({x:a,filter:r,convInfo:g,backend:n,bias:i,activation:d,preluActivationWeights:o,leakyreluAlpha:f});else{const e=null!=i,t=null!=o,s="leakyrelu"===d,l=d?RS(d,!1):null,u=new gE(g,e,l,t,s),c=[a,r];if(i&&c.push(i),o&&c.push(o),s){const e=n.makeTensorInfo([],"float32",gs(f,"float32"));c.push(e),b.push(e)}y=n.runWebGLProgram(u,c,"float32")}else y=xE({x:a,filter:r,convInfo:g,backend:n,bias:i,activation:d,preluActivationWeights:o,leakyreluAlpha:f});const x=LS({inputs:{x:y},backend:n,attrs:{shape:g.outShape}});return b.push(y),b.forEach((e=>n.disposeIntermediateTensorInfo(e))),x}},_A={kernelName:os,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,filter:r,bias:i,preluActivationWeights:o}=t,{strides:l,pad:u,dilations:c,dimRoundingMode:h,activation:p,leakyreluAlpha:d}=s,f=[];let m=c;null==m&&(m=[1,1]),E(Yr(l,m),(()=>`Error in depthwiseConv2d: Either strides or dilations must be 1. 
Got strides ${l} and dilations '${m}'`));const g=Vr(a.shape,r.shape,l,m,u,h,!0),y=ue().getBool("WEBGL_PACK_DEPTHWISECONV")&&g.strideWidth<=2&&g.outChannels/g.inChannels==1,b=p?RS(p,y):null,x=[a,r],w=null!=i,k=null!=o,v="leakyrelu"===p;if(w&&x.push(i),k&&x.push(o),v){const e=n.makeTensorInfo([],"float32",gs(d,"float32"));x.push(e),f.push(e)}let N;N=y?new HE(g,w,b,k,v):new GE(g,w,b,k,v);const I=n.runWebGLProgram(N,x,"float32");return f.forEach((e=>n.disposeIntermediateTensorInfo(e))),I}};class OA{constructor(e,t,n){this.sliceDim=e,this.strides=t,this.variableNames=["x","indices"],this.outputShape=n;const s=vC(t.length),a=vC(n.length),r=this.sliceDim>1?"strides[j]":"strides";this.userCode=`\n ${s} strides = ${s}(${this.strides});\n void main() {\n ${a} coords = getOutputCoords();\n int flattenIndex = 0;\n for (int j = 0; j < ${this.sliceDim}; j++) {\n int index = round(getIndices(coords[0], j));\n flattenIndex += index * ${r};\n }\n setOutput(getX(flattenIndex, coords[1]));\n }\n `}}const MA={kernelName:kt,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n}=e,{params:s,indices:a}=t,r=a.shape,i=r[r.length-1],[o,l,u,c]=du(s,a),h=LS({inputs:{x:a},backend:n,attrs:{shape:[l,i]}}),p=LS({inputs:{x:s},backend:n,attrs:{shape:[D(s.shape)/u,u]}}),d=new OA(i,c,[l,u]),f=n.runWebGLProgram(d,[p,h],p.dtype),m=LS({inputs:{x:f},backend:n,attrs:{shape:o}});return n.disposeIntermediateTensorInfo(h),n.disposeIntermediateTensorInfo(p),n.disposeIntermediateTensorInfo(f),m}};class LA{constructor(e,t){this.variableNames=["A","indices"],this.outputShape=t,this.rank=t.length;const n=vC(this.rank),s=function(e,t){const n=["resRC.x","resRC.y","resRC.z","resRC.w"],s=[];for(let t=0;t<e.length;t++)2===t?s.push("int(getIndices(resRC.x, resRC.z))"):s.push(`${n[t]}`);return s.join()}(e);this.userCode=`\n void main() {\n ${n} resRC = getOutputCoords();\n setOutput(getA(${s}));\n }\n `}}const zA={kernelName:wt,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,indices:r}=t,{axis:i,batchDims:o}=s,l=zu(a,r,B(i,a.shape)[0],o),u=D(r.shape),c=[],h=LS({inputs:{x:a},backend:n,attrs:{shape:[l.batchSize,l.outerSize,l.dimSize,l.sliceSize]}}),p=LS({inputs:{x:r},backend:n,attrs:{shape:[l.batchSize,u/l.batchSize]}});c.push(h),c.push(p);const d=[l.batchSize,l.outerSize,u/l.batchSize,l.sliceSize];if(n.shouldExecuteOnCPU([a,r])||"string"===a.dtype){const e=n.bufferSync(p),t=n.bufferSync(h),s=_C(t,e,d);return c.forEach((e=>n.disposeIntermediateTensorInfo(e))),n.makeTensorInfo(l.outputShape,s.dtype,s.values)}const f=new LA(h.shape,d),m=n.runWebGLProgram(f,[h,p],h.dtype);c.push(m);const g=LS({inputs:{x:m},backend:n,attrs:{shape:l.outputShape}});return c.forEach((e=>n.disposeIntermediateTensorInfo(e))),g}},PA=AS({opSnippet:"return float(a > b);",packedOpSnippet:"\n return vec4(greaterThan(a, b));\n",cpuKernelImpl:OC,dtype:"bool"}),BA={kernelName:vt,backendName:"webgl",kernelFunc:PA},WA=AS({opSnippet:"return float(a >= b);",packedOpSnippet:"\n return vec4(greaterThanEqual(a, b));\n",dtype:"bool"}),VA={kernelName:Nt,backendName:"webgl",kernelFunc:WA},UA={kernelName:$t,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n}=e,{input:s}=t;return yA(s,!0,n)}},GA=ES({opSnippet:"return float(!isnan(x) && !isinf(x));",dtype:"bool"}),HA={kernelName:St,backendName:"webgl",kernelFunc:GA},jA=ES({opSnippet:"return float(isinf(x));",dtype:"bool"}),KA={kernelName:Tt,backendName:"webgl",kernelFunc:jA},qA=ES({opSnippet:"return 
float(isnan(x));",dtype:"bool"}),XA={kernelName:Et,backendName:"webgl",kernelFunc:qA},YA=AS({opSnippet:"return float(a < b);",packedOpSnippet:"\n return vec4(lessThan(a, b));\n",cpuKernelImpl:MC,dtype:"bool"}),JA={kernelName:Rt,backendName:"webgl",kernelFunc:YA},ZA=AS({opSnippet:"return float(a <= b);",packedOpSnippet:"\n return vec4(lessThanEqual(a, b));\n",dtype:"bool"}),QA={kernelName:Ft,backendName:"webgl",kernelFunc:ZA},eR={kernelName:Dt,backendName:"webgl",kernelFunc:function(e){const{backend:t,attrs:n}=e,{start:s,stop:a,num:r}=n,i=LC(s,a,r);return t.makeTensorInfo([i.length],"float32",i)}},tR=ES({opSnippet:"if (x < 0.0) return NAN;\n return log(x);",packedOpSnippet:"\n vec4 result = log(x);\n vec4 isNaN = vec4(lessThan(x, vec4(0.0)));\n result.r = isNaN.r == 1.0 ? NAN : result.r;\n result.g = isNaN.g == 1.0 ? NAN : result.g;\n result.b = isNaN.b == 1.0 ? NAN : result.b;\n result.a = isNaN.a == 1.0 ? NAN : result.a;\n\n return result;\n",cpuKernelImpl:zC}),nR={kernelName:_t,backendName:"webgl",kernelFunc:tR},sR=ES({opSnippet:"return log(1.0 + x);"}),aR={kernelName:Ot,backendName:"webgl",kernelFunc:sR},rR=AS({opSnippet:"return float(a >= 1.0 && b >= 1.0);",packedOpSnippet:"\n return vec4(\n vec4(greaterThanEqual(a, vec4(1.0))) *\n vec4(greaterThanEqual(b, vec4(1.0))));\n",dtype:"bool"}),iR={kernelName:Mt,backendName:"webgl",kernelFunc:rR},oR=ES({opSnippet:"return float(!(x >= 1.0));"}),lR={kernelName:Lt,backendName:"webgl",kernelFunc:oR},uR=AS({opSnippet:"return float(a >= 1.0 || b >= 1.0);",packedOpSnippet:"\n return min(\n vec4(greaterThanEqual(a, vec4(1.0))) +\n vec4(greaterThanEqual(b, vec4(1.0))),\n vec4(1.0));\n",dtype:"bool"}),cR={kernelName:zt,backendName:"webgl",kernelFunc:uR};class hR{constructor(e,t,n,s,a){this.variableNames=["x"],this.outputShape=[];const r=t,i=e[3]-1;let o;this.outputShape=e;const l=`float(${n}) + float(${s}) * sum`;o=.5===a?`inversesqrt(${l})`:1===a?`1.0/(${l})`:`exp(log(${l}) * float(-${a}));`,this.userCode=`\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int r = coords[1];\n int c = coords[2];\n int d = coords[3];\n float x = getX(b, r, c, d);\n float sum = 0.0;\n for (int j = -${r}; j <= ${r}; j++) {\n int idx = d + j;\n if (idx >= 0 && idx <= ${i}) {\n float z = getX(b, r, c, idx);\n sum += z * z;\n }\n }\n float val = x * ${o};\n setOutput(val);\n }\n `}}class pR{constructor(e,t,n,s,a){this.variableNames=["x"],this.outputShape=[],this.packedInputs=!0,this.packedOutput=!0;const r=t,i=e[3]-1;let o;this.outputShape=e;const l=`float(${n}) + float(${s}) * sum`;o=.5===a?`inversesqrt(${l})`:1===a?`1.0/(${l})`:`exp(log(${l}) * float(-${a}));`,this.userCode=`\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords.x;\n int r = coords.y;\n int c = coords.z;\n int d = coords.w;\n\n bool hasNextCol = d < ${this.outputShape[3]};\n bool hasNextRow = c < ${this.outputShape[2]};\n\n vec4 sum = vec4(0.);\n vec4 xFragAtOutputCoords = getX(b, r, c, d);\n\n vec4 xAtOutputCoords = vec4(\n getChannel(xFragAtOutputCoords, vec2(c, d)),\n hasNextCol ?\n getChannel(xFragAtOutputCoords, vec2(c, d + 1)) : 0.0,\n hasNextRow ?\n getChannel(xFragAtOutputCoords , vec2(c + 1, d)) : 0.0,\n (hasNextRow && hasNextCol) ?\n getChannel(xFragAtOutputCoords, vec2(c + 1, d + 1)) : 0.0\n );\n\n int firstChannel = d - ${r};\n vec2 cache = vec2(0.);\n if(firstChannel >= 0){\n vec4 firstChannelFrag = getX(b, r, c, firstChannel);\n cache.x = getChannel(firstChannelFrag, vec2(c, firstChannel));\n if(hasNextRow){\n cache.y = getChannel(firstChannelFrag, 
vec2(c + 1, firstChannel));\n }\n }\n\n ivec2 depth = ivec2(d, d + 1);\n for (int j = - ${r}; j <= ${r}; j++) {\n ivec2 idx = depth + j;\n bvec2 aboveLowerBound = greaterThanEqual(idx, ivec2(0));\n bvec2 belowUpperBound = lessThanEqual(idx, ivec2(${i}));\n\n bool depthInRange = aboveLowerBound.x && belowUpperBound.x;\n bool depthPlusOneInRange = aboveLowerBound.y && belowUpperBound.y;\n\n if(depthInRange || depthPlusOneInRange){\n vec4 z = vec4(0.);\n vec4 xFragAtCurrentDepth;\n z.xz = cache.xy;\n if(depthPlusOneInRange && hasNextCol){\n xFragAtCurrentDepth = idx.y != d ?\n getX(b, r, c, idx.y) : xFragAtOutputCoords;\n z.y = getChannel(xFragAtCurrentDepth, vec2(c, idx.y));\n if(hasNextRow){\n z.w = getChannel(xFragAtCurrentDepth, vec2(c + 1, idx.y));\n }\n }\n cache.xy = z.yw;\n sum += z * z;\n }\n }\n vec4 result = xAtOutputCoords * ${o};\n setOutput(result);\n }\n `}}const dR={kernelName:Pt,backendName:"webgl",kernelFunc:e=>{const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{depthRadius:r,bias:i,alpha:o,beta:l}=s,u=ue().getBool("WEBGL_PACK_NORMALIZATION")?new pR(a.shape,r,i,o,l):new hR(a.shape,r,i,o,l);return n.runWebGLProgram(u,[a],a.dtype)}};class fR{constructor(e,t,n,s,a){this.variableNames=["inputImage","outputImage","dy"],this.outputShape=[],this.outputShape=e,this.depth=e[3],this.depthRadius=t,this.bias=n,this.alpha=s,this.beta=a,this.userCode=`\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int r = coords[1];\n int c = coords[2];\n\n float result = 0.0;\n for (int d = 0; d < ${this.depth}; ++d) {\n int depthBegin = int(max(0.0, float(d - ${t})));\n int depthEnd = int(min(float(${this.depth}),\n float(d + ${t} + 1)));\n\n const int MIN_DEPTH_BEGIN = 0;\n const int MAX_DEPTH_END = ${this.depth};\n\n float norm = 0.0;\n for (int k = MIN_DEPTH_BEGIN; k < MAX_DEPTH_END; ++k) {\n if (k < depthBegin){\n continue;\n }\n else if (k >= depthBegin && k < depthEnd) {\n norm += getInputImage(b, r, c, k) * getInputImage(b, r, c, k);\n }\n else {\n break;\n }\n }\n\n norm = float(${s}) * norm + float(${n});\n\n for(int k = MIN_DEPTH_BEGIN; k < MAX_DEPTH_END; ++k){\n if (k < depthBegin){\n continue;\n }\n else if (k >= depthBegin && k < depthEnd){\n float dyi = -2.0 * float(${s})\n * float(${a})\n * getInputImage(b ,r ,c, k) * getOutputImage(b, r, c, d)\n / norm;\n if (k == d) {\n dyi += pow(norm, -1.0 * ${a});\n }\n if (k == coords[3]) {\n dyi *= getDy(b, r, c, d);\n result += dyi;\n }\n }\n else {\n break;\n }\n }\n }\n setOutput(result);\n }\n `}}const mR={kernelName:Bt,backendName:"webgl",kernelFunc:e=>{const{inputs:t,backend:n,attrs:s}=e,{x:a,y:r,dy:i}=t,{depthRadius:o,bias:l,alpha:u,beta:c}=s,h=new fR(a.shape,o,l,u,c);return n.runWebGLProgram(h,[a,r,i],a.dtype)}};function gR(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{reductionIndices:r,keepDims:i}=s,o=a.shape.length,l=B(r,a.shape);let u=l;const c=Zi(u,o),h=null!=c,p=n.shouldExecuteOnCPU([a]);let d=a;if(h){if(p){const e=n.texData.get(d.dataId).values,t=new Array(o);for(let e=0;e<t.length;e++)t[e]=a.shape[c[e]];const s=QC(e,a.shape,a.dtype,c,t);d=n.makeTensorInfo(t,a.dtype),n.texData.get(d.dataId).values=s}else d=GS(a,c,n);u=eo(u.length,o)}Ji("max",u,o);const[f,m]=Xi(d.shape,u);let g,y=f;if(i&&(y=Yi(f,l)),p){const e=n.texData.get(d.dataId).values,t=PC(e,D(m),y,a.dtype);g=n.makeTensorInfo(y,a.dtype),n.texData.get(g.dataId).values=t}else g=function(e,t,n,s){const a=D(t),r=LS({inputs:{x:e},attrs:{shape:[D(e.shape)/a,a]},backend:s}),i=WS(r,e.dtype,"max",s),o=LS({inputs:{x:i},attrs:{shape:n},backend:s});return 
s.disposeIntermediateTensorInfo(r),s.disposeIntermediateTensorInfo(i),o}(d,m,y,n);return h&&n.disposeIntermediateTensorInfo(d),g}const yR={kernelName:Wt,backendName:"webgl",kernelFunc:gR},bR=AS({opSnippet:"\n if (isnan(a)) return a;\n if (isnan(b)) return b;\n\n return max(a, b);\n",packedOpSnippet:"\n vec4 result = vec4(max(a, b));\n vec4 isNaN = min(vec4(isnan(a)) + vec4(isnan(b)), vec4(1.0));\n \n result.r = isNaN.r > 0. ? NAN : result.r;\n result.g = isNaN.g > 0. ? NAN : result.g;\n result.b = isNaN.b > 0. ? NAN : result.b;\n result.a = isNaN.a > 0. ? NAN : result.a;\n\n return result;\n",cpuKernelImpl:BC}),xR={kernelName:Vt,backendName:"webgl",kernelFunc:bR},wR={kernelName:Ut,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t;K$(a,"maxPool");const{filterSize:r,strides:i,pad:o,dimRoundingMode:l}=s;E(Yr(i,1),(()=>`Error in maxPool: Either strides or dilations must be 1. Got strides ${i} and dilations '1'`));const u=Br(a.shape,r,i,1,o,l);if(1===u.filterWidth&&1===u.filterHeight&&_(u.inShape,u.outShape))return xS({inputs:{x:a},backend:n});const c=new TT(u,"max",!1);return n.runWebGLProgram(c,[a],a.dtype)}},kR={kernelName:Ht,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{filterSize:r,strides:i,pad:o,dataFormat:l,dimRoundingMode:u}=s,c=Wr(a.shape,r,i,[1,1,1],o,u,l),h=new ET(c,"max",!1);return n.runWebGLProgram(h,[a],a.dtype)}};class vR{constructor(e){this.variableNames=["dy","maxPos"],this.outputShape=e.inShape;const t=e.strideHeight,n=e.strideWidth,s=e.dilationHeight,a=e.effectiveFilterHeight,r=e.effectiveFilterWidth,i=a-1-e.padInfo.top,o=r-1-e.padInfo.left,l=a*r-1;this.userCode=`\n const ivec2 pads = ivec2(${i}, ${o});\n\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int d = coords[3];\n\n ivec2 dyRCCorner = coords.yz - pads;\n int dyRCorner = dyRCCorner.x;\n int dyCCorner = dyRCCorner.y;\n\n // Convolve dy(?, ?, d) with pos mask(:, :, d) to get dx(xR, xC, d).\n // ? = to be determined. : = across all values in that axis.\n float dotProd = 0.0;\n for (int wR = 0; wR < ${a};\n wR += ${s}) {\n float dyR = float(dyRCorner + wR) / ${t}.0;\n\n if (dyR < 0.0 || dyR >= ${e.outHeight}.0 || fract(dyR) > 0.0) {\n continue;\n }\n int idyR = int(dyR);\n\n for (int wC = 0; wC < ${r}; wC++) {\n float dyC = float(dyCCorner + wC) / ${n}.0;\n\n if (dyC < 0.0 || dyC >= ${e.outWidth}.0 ||\n fract(dyC) > 0.0) {\n continue;\n }\n int idyC = int(dyC);\n\n float dyValue = getDy(b, idyR, idyC, d);\n int maxPosValue = ${l} - int(getMaxPos(b, idyR, idyC, d));\n\n // Get the current value, check it against the value from the\n // position matrix.\n int curPosValue = wR * ${r} + wC;\n float mask = float(maxPosValue == curPosValue ? 
1.0 : 0.0);\n\n dotProd += dyValue * mask;\n }\n }\n setOutput(dotProd);\n }\n `}}class NR{constructor(e){this.variableNames=["dy","maxPos"],this.outputShape=e.inShape;const t=e.strideDepth,n=e.strideHeight,s=e.strideWidth,a=e.dilationDepth,r=e.dilationHeight,i=e.dilationWidth,o=e.effectiveFilterDepth,l=e.effectiveFilterHeight,u=e.effectiveFilterWidth,c=o-1-e.padInfo.front,h=l-1-e.padInfo.top,p=u-1-e.padInfo.left,d=o*l*u-1;this.userCode=`\n const ivec3 pads = ivec3(${c}, ${h}, ${p});\n\n void main() {\n ivec5 coords = getOutputCoords();\n int batch = coords.x;\n int ch = coords.u;\n\n ivec3 dyCorner = ivec3(coords.y, coords.z, coords.w) - pads;\n int dyDCorner = dyCorner.x;\n int dyRCorner = dyCorner.y;\n int dyCCorner = dyCorner.z;\n\n // Convolve dy(?, ?, ?, ch) with pos mask(:, :, :, d) to get\n // dx(xD, xR, xC, ch).\n // ? = to be determined. : = across all values in that axis.\n float dotProd = 0.0;\n\n for (int wD = 0; wD < ${o};\n wD += ${a}) {\n float dyD = float(dyDCorner + wD) / ${t}.0;\n\n if (dyD < 0.0 || dyD >= ${e.outDepth}.0 || fract(dyD) > 0.0) {\n continue;\n }\n int idyD = int(dyD);\n\n for (int wR = 0; wR < ${l};\n wR += ${r}) {\n float dyR = float(dyRCorner + wR) / ${n}.0;\n\n if (dyR < 0.0 || dyR >= ${e.outHeight}.0 ||\n fract(dyR) > 0.0) {\n continue;\n }\n int idyR = int(dyR);\n\n for (int wC = 0; wC < ${u};\n wC += ${i}) {\n float dyC = float(dyCCorner + wC) / ${s}.0;\n\n if (dyC < 0.0 || dyC >= ${e.outWidth}.0 ||\n fract(dyC) > 0.0) {\n continue;\n }\n int idyC = int(dyC);\n\n float dyValue = getDy(batch, idyD, idyR, idyC, ch);\n int maxPosValue = ${d} -\n int(getMaxPos(batch, idyD, idyR, idyC, ch));\n\n // Get the current value, check it against the value from the\n // position matrix.\n int curPosValue =\n wD * ${l} * ${u} +\n wR * ${u} + wC;\n float mask = float(maxPosValue == curPosValue ? 1.0 : 0.0);\n\n dotProd += dyValue * mask;\n }\n }\n }\n setOutput(dotProd);\n }\n `}}const IR={kernelName:jt,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{dy:a,input:r}=t,i=r,{filterSize:o,strides:l,pad:u,dimRoundingMode:c}=s,h=Wr(i.shape,o,l,[1,1,1],u,c),p=new ET(h,"max",!0),d=n.runWebGLProgram(p,[i],i.dtype),f=new NR(h),m=n.runWebGLProgram(f,[a,d],i.dtype);return n.disposeIntermediateTensorInfo(d),m}},$R={kernelName:Gt,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{dy:a,input:r,output:i}=t,o=r;K$([r,i],"maxPoolGrad");const{filterSize:l,strides:u,pad:c,dimRoundingMode:h}=s,p=Br(o.shape,l,u,1,c,h),d=new TT(p,"max",!0),f=n.runWebGLProgram(d,[o],o.dtype),m=new vR(p),g=n.runWebGLProgram(m,[a,f],o.dtype);return n.disposeIntermediateTensorInfo(f),g}},CR={kernelName:Kt,backendName:"webgl",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{filterSize:a,strides:r,pad:i,includeBatchInIndex:o}=t,l=n;E(4===s.shape.length,(()=>`Error in maxPool: input must be rank 4 but got rank ${s.shape.length}.`));const u=[1,1];E(Yr(r,u),(()=>`Error in maxPool: Either strides or dilations must be 1. 
Got strides ${r} and dilations '${u}'`));const c=Br(s.shape,a,r,u,i),[h,p]=function(e,t,n,s){let a=new TT(n,"max",!1);const r=s.runWebGLProgram(a,[e],"float32");return a=new TT(n,"max",!0,!0,t),[r,s.runWebGLProgram(a,[e],"float32")]}(s,o,c,l);return[h,p]}},SR={kernelName:qt,backendName:"webgl",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{keepDims:a,axis:r}=t,i=n,o=s.shape.length,l=B(r,s.shape);let u=l;const c=Zi(u,o),h=null!=c,p=i.shouldExecuteOnCPU([s]),d=[];let f=s;if(h){if(p){const e=i.texData.get(f.dataId).values,t=new Array(o);for(let e=0;e<t.length;e++)t[e]=s.shape[c[e]];const n=QC(e,s.shape,s.dtype,c,t);f=i.makeTensorInfo(t,s.dtype),i.texData.get(f.dataId).values=n}else f=GS(s,c,i);d.push(f),u=eo(u.length,o)}Ji("sum",u,o);const[m,g]=Xi(f.shape,u);let y=m;a&&(y=Yi(m,l));const b=function(e,t,n,s){const a=D(t),r=LS({inputs:{x:e},attrs:{shape:[D(e.shape)/a,a]},backend:s}),i=WS(r,"float32","mean",s),o=LS({inputs:{x:i},attrs:{shape:n},backend:s});return s.disposeIntermediateTensorInfo(r),s.disposeIntermediateTensorInfo(i),o}(f,g,y,i);for(const e of d)i.disposeIntermediateTensorInfo(e);return b}},TR={kernelName:Xt,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{axis:r,keepDims:i}=s,o=a.shape.length,l=B(r,a.shape);let u=l;const c=Zi(u,o);let h=a;null!=c&&(h=KS({inputs:{x:a},backend:n,attrs:{perm:c}}),u=eo(u.length,a.shape.length)),Ji("min",u,o);const[p,d]=Xi(h.shape,u),f=LS({inputs:{x:h},backend:n,attrs:{shape:[-1,D(d)]}}),m=WS(f,f.dtype,"min",n);let g;return g=LS(i?{inputs:{x:m},backend:n,attrs:{shape:Yi(p,l)}}:{inputs:{x:m},backend:n,attrs:{shape:p}}),n.disposeIntermediateTensorInfo(f),n.disposeIntermediateTensorInfo(m),null!=c&&n.disposeIntermediateTensorInfo(h),g}},ER=AS({opSnippet:"\n if (isnan(a)) return a;\n if (isnan(b)) return b;\n\n return min(a, b);\n",packedOpSnippet:"\n vec4 result = vec4(min(a, b));\n vec4 isNaN = min(vec4(isnan(a)) + vec4(isnan(b)), vec4(1.0));\n \n result.r = isNaN.r > 0. ? NAN : result.r;\n result.g = isNaN.g > 0. ? NAN : result.g;\n result.b = isNaN.b > 0. ? NAN : result.b;\n result.a = isNaN.a > 0. ? 
NAN : result.a;\n\n return result;\n",cpuKernelImpl:WC}),AR={kernelName:Yt,backendName:"webgl",kernelFunc:ER};class RR{constructor(e,t,n){this.variableNames=["x"],this.outputShape=t.map(((t,n)=>t[0]+e[n]+t[1]));const s=e.length,a=vC(s),r=t.map((e=>e[0])).join(","),i=t.map(((t,n)=>t[0]+e[n])).join(","),o=["coords[0]","coords[1]","coords[2]","coords[3]"].slice(0,s),l="reflect"===n?0:1;this.userCode=1!==s?`\n ${a} start = ${a}(${r});\n ${a} end = ${a}(${i});\n\n void main() {\n ${a} outC = getOutputCoords();\n for (int i = 0; i < ${s}; i++) {\n if (outC[i] < start[i]) {\n outC[i] = start[i] * 2 - outC[i] - ${l};\n } else if(outC[i] >= end[i]) {\n outC[i] = (end[i] - 1) * 2 - outC[i] + ${l};\n }\n }\n ${a} coords = outC - start;\n setOutput(getX(${o}));\n }\n `:`\n int start = ${r};\n int end = ${i};\n\n void main() {\n int outC = getOutputCoords();\n if (outC < start) {\n outC = start * 2 - outC - ${l};\n } else if(outC >= end) {\n outC = (end - 1) * 2 - outC + ${l};\n }\n setOutput(getX(outC - start));\n }\n `}}class FR{constructor(e,t,n){this.variableNames=["x"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=t.map(((t,n)=>t[0]+e[n]+t[1]));const s=e.length,a=vC(s),r=t.map((e=>e[0])).join(","),i=t.map(((t,n)=>t[0]+e[n])).join(","),o=nS("rc",s),l=nS("source",s),u=`${o[s-1]} < ${this.outputShape[s-1]}`,c=1===s?"source":`vec2(${l.slice(-2).join()})`,h="reflect"===n?0:1;let p="";if(1===s){const e=`\n ${a} source = rc;\n if (source < start) {\n source = start * 2 - source - ${h};\n } else if (source >= end) {\n source = (end - 1) * 2 - source + ${h};\n }\n source -= start;\n `;p=`\n ${a} rc = outputLoc;\n ${e}\n result[0] = getChannel(getX(${l.join()}), ${c});\n ${o[s-1]} += 1;\n if(${u}) {\n ${e}\n result[1] = getChannel(getX(${l.join()}), ${c});\n }\n `}else{const e=`\n ${a} source = rc;\n ${a} lt = ${a}(lessThan(source, start));\n ${a} gte = ${a}(greaterThanEqual(source, end));\n ${a} orig = 1 - (lt + gte);\n source = orig * source +\n lt * (start * 2 - source - ${h}) +\n gte * ((end - 1) * 2 - source + ${h});\n source -= start;\n `;p=`\n ${a} rc = outputLoc;\n ${e}\n result[0] = getChannel(getX(${l.join()}), ${c});\n ${o[s-1]} += 1;\n if(${u}) {\n ${e}\n result[1] = getChannel(getX(${l.join()}), ${c});\n }\n rc = outputLoc;\n ${o[s-2]} += 1;\n if(${o[s-2]} < ${this.outputShape[s-2]}) {\n ${e}\n result[2] = getChannel(getX(${l.join()}), ${c});\n ${o[s-1]} += 1;\n if(${u}) {\n ${e}\n result[3] = getChannel(getX(${l.join()}), ${c});\n }\n }\n `}this.userCode=`\n const ${a} start = ${a}(${r});\n const ${a} end = ${a}(${i});\n\n void main() {\n ${a} outputLoc = getOutputCoords();\n vec4 result = vec4(0.);\n ${p}\n setOutput(result);\n }\n `}}const DR={kernelName:Jt,backendName:"webgl",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{x:s}=e,{paddings:a,mode:r}=n,i=ue().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new FR(s.shape,a,r):new RR(s.shape,a,r);return t.runWebGLProgram(i,[s],s.dtype)}},_R=AS({opSnippet:"if (b == 0.0) return NAN;\n return mod(a, b);",packedOpSnippet:"\n vec4 result = mod(a, b);\n vec4 isNaN = vec4(equal(b, vec4(0.0)));\n \n result.r = isNaN.r > 0. ? NAN : result.r;\n result.g = isNaN.g > 0. ? NAN : result.g;\n result.b = isNaN.b > 0. ? NAN : result.b;\n result.a = isNaN.a > 0. ? 
NAN : result.a;\n\n return result;\n"}),OR={kernelName:Zt,backendName:"webgl",kernelFunc:_R};class MR{constructor(e,t,n){this.variableNames=["probs"],this.outputShape=[e,n],this.userCode=`\n uniform float seed;\n\n void main() {\n ivec2 coords = getOutputCoords();\n int batch = coords[0];\n\n float r = random(seed);\n float cdf = 0.0;\n\n for (int i = 0; i < ${t-1}; i++) {\n cdf += getProbs(batch, i);\n\n if (r < cdf) {\n setOutput(float(i));\n return;\n }\n }\n\n // If no other event happened, last event happened.\n setOutput(float(${t-1}));\n }\n `}getCustomSetupFunc(e){return(t,n)=>{null==this.seedLoc&&(this.seedLoc=t.getUniformLocation(n,"seed")),t.gl.uniform1f(this.seedLoc,e)}}}const LR=AS({opSnippet:"\nif (a == b) {\n return 1.0;\n};\nreturn a / b;",packedOpSnippet:"\n // vec4 one = vec4(equal(a, b));\n // return one + (vec4(1.0) - one) * a / b;\n vec4 result = a / b;\n if(a.x == b.x) {\n result.x = 1.;\n }\n if(a.y == b.y) {\n result.y = 1.;\n }\n if(a.z == b.z) {\n result.z = 1.;\n }\n if(a.w == b.w) {\n result.w = 1.;\n }\n\n return result;\n",checkOutOfBounds:!0}),zR={kernelName:it,backendName:"webgl",kernelFunc:LR},PR="return a - b;",BR=AS({opSnippet:PR,packedOpSnippet:PR,supportsComplex:!0,cpuKernelImpl:YC}),WR={kernelName:Un,backendName:"webgl",kernelFunc:BR};function VR(e){const{inputs:t,backend:n,attrs:s}=e,{logits:a}=t,{dim:r}=s,i=B([r],a.shape),o=gR({inputs:{x:a},backend:n,attrs:{reductionIndices:i,keepDims:!1}}),l=Yi(o.shape,i),u=LS({inputs:{x:o},backend:n,attrs:{shape:l}}),c=BR({inputs:{a,b:u},backend:n}),h=uA({inputs:{x:c},backend:n}),p=HS({inputs:{x:h},backend:n,attrs:{axis:i,keepDims:!1}}),d=LS({inputs:{x:p},backend:n,attrs:{shape:l}}),f=LR({inputs:{a:h,b:d},backend:n});return n.disposeIntermediateTensorInfo(o),n.disposeIntermediateTensorInfo(u),n.disposeIntermediateTensorInfo(c),n.disposeIntermediateTensorInfo(h),n.disposeIntermediateTensorInfo(p),n.disposeIntermediateTensorInfo(d),f}const UR={kernelName:Bn,backendName:"webgl",kernelFunc:VR},GR={kernelName:Qt,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{logits:a}=t,{numSamples:r,seed:i,normalized:o}=s,l=o?a:VR({inputs:{logits:a},backend:n,attrs:{dim:a.shape.length-1}}),u=l.shape[0],c=l.shape[1],h=new MR(u,c,r),p=h.getCustomSetupFunc(i),d=n.runWebGLProgram(h,[l],"int32",p);return o||n.disposeIntermediateTensorInfo(l),d}},HR="return -x;",jR={kernelName:tn,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n}=e,{x:s}=t;if(n.shouldExecuteOnCPU([s])){const e=n.texData.get(s.dataId),[t,a]=UC(e.values,s.shape,s.dtype);return n.makeTensorInfo(a,s.dtype,t)}let a;return a=ue().getBool("WEBGL_PACK_UNARY_OPERATIONS")?new pS(s.shape,HR):new uS(s.shape,HR),n.runWebGLProgram(a,[s],s.dtype)}},KR=bl,qR={kernelName:sn,backendName:"webgl",kernelFunc:function(e){$u("tf.nonMaxSuppression() in webgl locks the UI thread. Call tf.nonMaxSuppressionAsync() instead");const{inputs:t,backend:n,attrs:s}=e,{boxes:a,scores:r}=t,{maxOutputSize:i,iouThreshold:o,scoreThreshold:l}=s,u=n.readSync(a.dataId),c=n.readSync(r.dataId),{selectedIndices:h}=KR(u,c,i,o,l);return n.makeTensorInfo([h.length],"int32",new Int32Array(h))}},XR=xl,YR={kernelName:an,backendName:"webgl",kernelFunc:function(e){$u("tf.nonMaxSuppression() in webgl locks the UI thread. 
Call tf.nonMaxSuppressionAsync() instead");const{inputs:t,backend:n,attrs:s}=e,{boxes:a,scores:r}=t,{maxOutputSize:i,iouThreshold:o,scoreThreshold:l,padToMaxOutputSize:u}=s,c=n.readSync(a.dataId),h=n.readSync(r.dataId),{selectedIndices:p,validOutputs:d}=XR(c,h,i,o,l,u);return[n.makeTensorInfo([p.length],"int32",new Int32Array(p)),n.makeTensorInfo([],"int32",new Int32Array([d]))]}},JR=wl,ZR={kernelName:rn,backendName:"webgl",kernelFunc:function(e){$u("tf.nonMaxSuppression() in webgl locks the UI thread. Call tf.nonMaxSuppressionAsync() instead");const{inputs:t,backend:n,attrs:s}=e,{boxes:a,scores:r}=t,{maxOutputSize:i,iouThreshold:o,scoreThreshold:l,softNmsSigma:u}=s,c=n.readSync(a.dataId),h=n.readSync(r.dataId),p=i,d=o,f=l,m=u,{selectedIndices:g,selectedScores:y}=JR(c,h,p,d,f,m);return[n.makeTensorInfo([g.length],"int32",new Int32Array(g)),n.makeTensorInfo([y.length],"float32",new Float32Array(y))]}};class QR{constructor(e,t,n,s){this.variableNames=["indices"],this.outputShape=[e,t],this.userCode=`\n void main() {\n ivec2 coords = getOutputCoords();\n int index = round(getIndices(coords.x));\n setOutput(mix(float(${s}), float(${n}),\n float(index == coords.y)));\n }\n `}}const eF={kernelName:ln,backendName:"webgl",kernelFunc:e=>{const{inputs:t,backend:n,attrs:s}=e,{indices:a}=t,{depth:r,onValue:i,offValue:o}=s,l=D(a.shape),u=new QR(l,r,i,o),c=LS({inputs:{x:a},backend:n,attrs:{shape:[l]}}),h=n.runWebGLProgram(u,[c],a.dtype);n.disposeIntermediateTensorInfo(c);const p=LS({inputs:{x:h},backend:n,attrs:{shape:[...a.shape,r]}});return n.disposeIntermediateTensorInfo(h),p}};function tF(e){const{inputs:t,backend:n}=e,{x:s}=t;if("complex64"===s.dtype){const e=XT({inputs:{input:s},backend:n}),t=tF({inputs:{x:e},backend:n}),a=cE({inputs:{input:s},backend:n}),r=tF({inputs:{x:a},backend:n}),i=kS({inputs:{real:t,imag:r},backend:n});return n.disposeIntermediateTensorInfo(e),n.disposeIntermediateTensorInfo(t),n.disposeIntermediateTensorInfo(a),n.disposeIntermediateTensorInfo(r),i}return wA({attrs:{shape:s.shape,dtype:s.dtype,value:"string"===s.dtype?"":0},backend:n})}const nF={kernelName:ts,backendName:"webgl",kernelFunc:tF},sF={kernelName:on,backendName:"webgl",kernelFunc:function e(t){const{inputs:n,backend:s}=t,{x:a}=n;if("string"===a.dtype)throw new Error("onesLike is not supported under string dtype");if("complex64"===a.dtype){const t=XT({inputs:{input:a},backend:s}),n=e({inputs:{x:t},backend:s}),r=cE({inputs:{input:a},backend:s}),i=tF({inputs:{x:r},backend:s}),o=kS({inputs:{real:n,imag:i},backend:s});return s.disposeIntermediateTensorInfo(t),s.disposeIntermediateTensorInfo(n),s.disposeIntermediateTensorInfo(r),s.disposeIntermediateTensorInfo(i),o}return wA({attrs:{shape:a.shape,dtype:a.dtype,value:1},backend:s})}},aF={kernelName:un,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{axis:a}=s;if(1===t.length)return hA({inputs:{input:t[0]},backend:n,attrs:{dim:a}});const r=t[0].shape,i=t[0].dtype;t.forEach((e=>{A(r,e.shape,"All tensors passed to stack must have matching shapes"),E(i===e.dtype,(()=>"All tensors passed to stack must have matching dtypes"))}));const o=[],l=fE({inputs:t.map((e=>{const t=hA({inputs:{input:e},backend:n,attrs:{dim:a}});return o.push(t),t})),backend:n,attrs:{axis:a}});return o.forEach((e=>n.disposeIntermediateTensorInfo(e))),l}};class rF{constructor(e,t,n){this.variableNames=["x"],this.outputShape=t.map(((t,n)=>t[0]+e[n]+t[1]));const 
s=e.length,a=vC(s),r=t.map((e=>e[0])).join(","),i=t.map(((t,n)=>t[0]+e[n])).join(","),o=["coords[0]","coords[1]","coords[2]","coords[3]"].slice(0,s);this.userCode=1!==s?`\n ${a} start = ${a}(${r});\n ${a} end = ${a}(${i});\n uniform float value;\n\n void main() {\n ${a} outC = getOutputCoords();\n if (any(lessThan(outC, start)) || any(greaterThanEqual(outC, end))) {\n setOutput(value);\n } else {\n ${a} coords = outC - start;\n setOutput(getX(${o}));\n }\n }\n `:`\n int start = ${r};\n int end = ${i};\n uniform float value;\n\n void main() {\n int outC = getOutputCoords();\n if (outC < start || outC >= end) {\n setOutput(value);\n } else {\n setOutput(getX(outC - start));\n }\n }\n `}getCustomSetupFunc(e){return(t,n)=>{null==this.valueLoc&&(this.valueLoc=t.getUniformLocationNoThrow(n,"value")),t.gl.uniform1f(this.valueLoc,e)}}}class iF{constructor(e,t,n){this.variableNames=["x"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=t.map(((t,n)=>t[0]+e[n]+t[1]));const s=e.length,a=vC(s),r=t.map((e=>e[0])).join(","),i=t.map(((t,n)=>t[0]+e[n])).join(","),o=nS("rc",s),l=nS("source",s),u=`${o[s-1]} < ${this.outputShape[s-1]}`,c=1===s?"source":`vec2(${l.slice(-2).join()})`,h=[`${a} rc = outputLoc;`,`${o[s-1]} += 1;\n if(${u}) {\n `,1===s?"":`}\n rc = outputLoc;\n ${o[s-2]} += 1;\n if(${o[s-2]} < ${this.outputShape[s-2]}) {`,1===s?"":` ${o[s-1]} += 1;\n if(${u}) {`],p=1===s?"rc < start || rc >= end":"any(lessThan(rc, start)) || any(greaterThanEqual(rc, end))";let d="";for(let e=0,t=1===s?2:4;e<t;e++)d+=`\n ${h[e]}\n if (${p}) {\n result[${e}] = float(value);\n } else {\n ${a} source = rc - start;\n result[${e}] = getChannel(getX(${l.join()}), ${c});\n }\n `;d+=1===s?"} ":"}}",this.userCode=`\n const ${a} start = ${a}(${r});\n const ${a} end = ${a}(${i});\n uniform float value;\n\n void main() {\n ${a} outputLoc = getOutputCoords();\n vec4 result = vec4(0.);\n ${d}\n setOutput(result);\n }\n `}getCustomSetupFunc(e){return(t,n)=>{null==this.valueLoc&&(this.valueLoc=t.getUniformLocationNoThrow(n,"value")),t.gl.uniform1f(this.valueLoc,e)}}}const oF=e=>{const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{paddings:r,constantValue:i}=s,o=ue().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new iF(a.shape,r,i):new rF(a.shape,r,i),l=o.getCustomSetupFunc(i);return n.runWebGLProgram(o,[a],a.dtype,l)},lF={kernelName:cn,backendName:"webgl",kernelFunc:oF},uF=AS({opSnippet:"\n if(a < 0.0 && floor(b) < b){\n return NAN;\n }\n if (b == 0.0) {\n return 1.0;\n }\n return (round(mod(b, 2.0)) != 1) ?\n pow(abs(a), b) : sign(a) * pow(abs(a), b);\n",packedOpSnippet:"\n // isModRound1 has 1 for components with round(mod(b, 2.0)) == 1, 0 otherwise.\n vec4 isModRound1 = vec4(equal(round(mod(b, 2.0)), ivec4(1)));\n vec4 multiplier = sign(a) * isModRound1 + (vec4(1.0) - isModRound1);\n vec4 result = multiplier * pow(abs(a), b);\n\n // Ensure that a^0 = 1, including 0^0 = 1 as this correspond to TF and JS\n bvec4 isExpZero = equal(b, vec4(0.0));\n result.r = isExpZero.r ? 1.0 : result.r;\n result.g = isExpZero.g ? 1.0 : result.g;\n result.b = isExpZero.b ? 1.0 : result.b;\n result.a = isExpZero.a ? 1.0 : result.a;\n\n vec4 isNaN = vec4(lessThan(a, vec4(0.0))) * vec4(lessThan(floor(b), b));\n \n result.r = isNaN.r > 0. ? NAN : result.r;\n result.g = isNaN.g > 0. ? NAN : result.g;\n result.b = isNaN.b > 0. ? NAN : result.b;\n result.a = isNaN.a > 0. ? 
NAN : result.a;\n\n return result;\n"}),cF={kernelName:hn,backendName:"webgl",kernelFunc:uF},hF={kernelName:dn,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{axis:r,keepDims:i}=s,o=a.shape.length,l=[],u=B(r,a.shape);let c=u;const h=Zi(c,o);let p,d=a;if(null!=h&&(d=KS({inputs:{x:a},backend:n,attrs:{perm:h}}),c=eo(c.length,o),l.push(d)),Ji("prod",c,o),n.shouldExecuteOnCPU([d])){const e=n.texData.get(d.dataId).values,{outVals:t,outShape:s,outDtype:a}=GC(d.shape,d.dtype,e,c);p=n.makeTensorInfo(s,a,t)}else{const[e,t]=Xi(d.shape,c),s=D(t),r=LS({inputs:{x:d},backend:n,attrs:{shape:[-1,s]}}),i=WS(r,Us(a.dtype),"prod",n);p=LS({inputs:{x:i},backend:n,attrs:{shape:e}}),l.push(r),l.push(i)}if(i){l.push(p);const e=Yi(p.shape,u);p=LS({inputs:{x:p},backend:n,attrs:{shape:e}})}return l.forEach((e=>n.disposeIntermediateTensorInfo(e))),p}},pF=e=>{const{backend:t,attrs:n}=e,{start:s,stop:a,step:r,dtype:i}=n,o=HC(s,a,r,i);return t.makeTensorInfo([o.length],i,o)},dF={kernelName:fn,backendName:"webgl",kernelFunc:pF},fF=ES({opSnippet:"return 1.0 / x;"}),mF={kernelName:gn,backendName:"webgl",kernelFunc:fF},gF=ES({opSnippet:"if (isnan(x)) return x;\n return (x < 0.0) ? 0.0 : x;\n",packedOpSnippet:"\n vec4 result = x * vec4(greaterThanEqual(x, vec4(0.0)));\n bvec4 isNaN = isnan(x);\n\n result.r = isNaN.r ? x.r : result.r;\n result.g = isNaN.g ? x.g : result.g;\n result.b = isNaN.b ? x.b : result.b;\n result.a = isNaN.a ? x.a : result.a;\n\n return result;\n"}),yF={kernelName:yn,backendName:"webgl",kernelFunc:gF},bF=ES({opSnippet:"if (isnan(x)) return x;\n return (x < 0.0) ? 0.0 : min(6.0, x);\n",packedOpSnippet:"\n vec4 result = min(x, vec4(6.)) * vec4(greaterThanEqual(x, vec4(0.0)));\n bvec4 isNaN = isnan(x);\n\n result.r = isNaN.r ? x.r : result.r;\n result.g = isNaN.g ? x.g : result.g;\n result.b = isNaN.b ? x.b : result.b;\n result.a = isNaN.a ? 
x.a : result.a;\n\n return result;\n"}),xF={kernelName:Nn,backendName:"webgl",kernelFunc:bF};class wF{constructor(e,t,n,s,a){this.variableNames=["A"],this.outputShape=[];const[r,i,o,l]=e;this.outputShape=[r,t,n,l];const u=[s&&t>1?i-1:i,s&&n>1?o-1:o],c=[s&&t>1?t-1:t,s&&n>1?n-1:n];let h;h=a?"(vec2(yRC) + vec2(0.5)) * effectiveInputOverOutputRatioRC - vec2(0.5)":"vec2(yRC) * effectiveInputOverOutputRatioRC",this.userCode=`\n const vec2 effectiveInputOverOutputRatioRC = vec2(\n ${u[0]/c[0]},\n ${u[1]/c[1]});\n const vec2 inputShapeRC = vec2(${i}.0, ${o}.0);\n\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int d = coords[3];\n ivec2 yRC = coords.yz;\n\n // Fractional source index.\n vec2 sourceFracIndexRC = ${h};\n\n // Compute the four integer indices.\n ivec2 sourceFloorRC = ivec2(max(sourceFracIndexRC, vec2(0.0)));\n ivec2 sourceCeilRC = ivec2(\n min(inputShapeRC - 1.0, ceil(sourceFracIndexRC)));\n\n float topLeft = getA(b, sourceFloorRC.x, sourceFloorRC.y, d);\n float bottomLeft = getA(b, sourceCeilRC.x, sourceFloorRC.y, d);\n float topRight = getA(b, sourceFloorRC.x, sourceCeilRC.y, d);\n float bottomRight = getA(b, sourceCeilRC.x, sourceCeilRC.y, d);\n\n vec2 fracRC = sourceFracIndexRC - vec2(sourceFloorRC);\n\n float top = topLeft + (topRight - topLeft) * fracRC.y;\n float bottom = bottomLeft + (bottomRight - bottomLeft) * fracRC.y;\n float newValue = top + (bottom - top) * fracRC.x;\n\n setOutput(newValue);\n }\n `}}class kF{constructor(e,t,n,s,a){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=[];const[r,i,o,l]=e;this.outputShape=[r,t,n,l];const u=[s&&t>1?i-1:i,s&&n>1?o-1:o],c=[s&&t>1?t-1:t,s&&n>1?n-1:n];let h;h=a?"(vec3(yRC) + vec3(0.5)) * effectiveInputOverOutputRatioRC - vec3(0.5)":"vec3(yRC) * effectiveInputOverOutputRatioRC",this.userCode=`\n const vec3 effectiveInputOverOutputRatioRC = vec3(\n ${u[0]/c[0]},\n ${u[1]/c[1]},\n ${u[1]/c[1]});\n const vec3 inputShapeRC = vec3(${i}.0, ${o}.0,\n ${o}.0);\n\n float getAValue(int b, int r, int c, int d) {\n return getChannel(getA(b, r, c, d), vec2(c, d));\n }\n\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int d = coords[3];\n // Calculate values for next column in yRC.z.\n ivec3 yRC = coords.yzz + ivec3(0, 0, 1);\n\n // Fractional source index.\n vec3 sourceFracIndexRC = ${h};\n\n // Compute the four integer indices.\n ivec3 sourceFloorRC = ivec3(max(sourceFracIndexRC, vec3(0.0)));\n ivec3 sourceCeilRC = ivec3(\n min(inputShapeRC - 1.0, ceil(sourceFracIndexRC)));\n\n // Should we calculate next column and row elements in 2x2 packed cell.\n bool hasNextCol = d < ${l-1};\n bool hasNextRow = coords.z < ${n-1};\n\n // In parallel, construct four corners for all four components in\n // packed 2x2 cell.\n vec4 topLeft = vec4(\n getAValue(b, sourceFloorRC.x, sourceFloorRC.y, d),\n hasNextCol ? getAValue(b, sourceFloorRC.x, sourceFloorRC.y, d + 1)\n : 0.0,\n hasNextRow ? getAValue(b, sourceFloorRC.x, sourceFloorRC.z, d)\n : 0.0,\n (hasNextRow && hasNextCol) ?\n getAValue(b, sourceFloorRC.x, sourceFloorRC.z, d + 1) : 0.0);\n\n vec4 bottomLeft = vec4(\n getAValue(b, sourceCeilRC.x, sourceFloorRC.y, d),\n hasNextCol ? getAValue(b, sourceCeilRC.x, sourceFloorRC.y, d + 1)\n : 0.0,\n hasNextRow ? getAValue(b, sourceCeilRC.x, sourceFloorRC.z, d)\n : 0.0,\n (hasNextRow && hasNextCol) ?\n getAValue(b, sourceCeilRC.x, sourceFloorRC.z, d + 1) : 0.0);\n\n vec4 topRight = vec4(\n getAValue(b, sourceFloorRC.x, sourceCeilRC.y, d),\n hasNextCol ? 
getAValue(b, sourceFloorRC.x, sourceCeilRC.y, d + 1)\n : 0.0,\n hasNextRow ? getAValue(b, sourceFloorRC.x, sourceCeilRC.z, d)\n : 0.0,\n (hasNextRow && hasNextCol) ?\n getAValue(b, sourceFloorRC.x, sourceCeilRC.z, d + 1) : 0.0);\n\n vec4 bottomRight = vec4(\n getAValue(b, sourceCeilRC.x, sourceCeilRC.y, d),\n hasNextCol ? getAValue(b, sourceCeilRC.x, sourceCeilRC.y, d + 1)\n : 0.0,\n hasNextRow ? getAValue(b, sourceCeilRC.x, sourceCeilRC.z, d)\n : 0.0,\n (hasNextRow && hasNextCol) ?\n getAValue(b, sourceCeilRC.x, sourceCeilRC.z, d + 1) : 0.0);\n\n vec3 fracRC = sourceFracIndexRC - vec3(sourceFloorRC);\n\n vec4 top = mix(topLeft, topRight, fracRC.yyzz);\n vec4 bottom = mix(bottomLeft, bottomRight, fracRC.yyzz);\n vec4 newValue = mix(top, bottom, fracRC.x);\n\n setOutput(newValue);\n }\n `}}const vF={kernelName:kn,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{images:a}=t,{alignCorners:r,halfPixelCenters:i,size:o}=s,[l,u]=o,c=ue().getBool("WEBGL_PACK_IMAGE_OPERATIONS")?new kF(a.shape,l,u,r,i):new wF(a.shape,l,u,r,i);return n.runWebGLProgram(c,[a],"float32")}};class NF{constructor(e,t,n){this.variableNames=["dy"],this.outputShape=[],this.outputShape=t;const[,s,a]=t,[,r,i]=e,o=[n&&r>1?s-1:s,n&&i>1?a-1:a],l=[n&&r>1?r-1:r,n&&i>1?i-1:i],u=o[0]/l[0],c=o[1]/l[1],h=1/u,p=1/c,d=2*Math.ceil(h)+2,f=2*Math.ceil(p)+2;this.userCode=`\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int d = coords[3];\n int r = coords[1];\n int c = coords[2];\n\n float accumulator = 0.0;\n\n const float heightScale = float(${u});\n const float widthScale = float(${c});\n\n const float invHeightScale = float(${h});\n const float invWidthScale = float(${p});\n\n const int winHeight = int(${d});\n const int winWidth = int(${f});\n\n // Compute bounds for where in dy we will look\n float startRLerp = floor(float(r) * invHeightScale);\n int startDyR = int(startRLerp - float(winHeight / 2));\n\n float startCLerp = floor(float(c) * invWidthScale);\n int startDyC = int(startCLerp - float(winWidth / 2));\n\n // Loop over dy\n for (int dyROffset = 0; dyROffset < winHeight; dyROffset++) {\n int dyR = dyROffset + startDyR;\n\n // Guard against the window exceeding the bounds of dy\n if (dyR < 0 || dyR >= ${r}) {\n continue;\n }\n\n for (int dyCOffset = 0; dyCOffset < winWidth; dyCOffset++) {\n int dyC = dyCOffset + startDyC;\n\n // Guard against the window exceeding the bounds of dy\n if (dyC < 0 || dyC >= ${i}) {\n continue;\n }\n\n float dxR = float(dyR) * heightScale;\n int topDxRIndex = int(floor(dxR));\n int bottomDxRIndex = int(min(ceil(dxR), ${s-1}.0));\n float dxRLerp = dxR - float(topDxRIndex);\n float inverseDxRLerp = 1.0 - dxRLerp;\n\n float dxC = float(dyC) * widthScale;\n int leftDxCIndex = int(floor(dxC));\n int rightDxCIndex = int(min(ceil(dxC), ${a-1}.0));\n float dxCLerp = dxC - float(leftDxCIndex);\n float inverseDxCLerp = 1.0 - dxCLerp;\n\n if (r == topDxRIndex && c == leftDxCIndex) {\n // topLeft\n accumulator +=\n getDy(b, dyR, dyC, d) * inverseDxRLerp * inverseDxCLerp;\n }\n\n if (r == topDxRIndex && c == rightDxCIndex) {\n // topRight\n accumulator += getDy(b, dyR, dyC, d) * inverseDxRLerp * dxCLerp;\n }\n\n if (r == bottomDxRIndex && c == leftDxCIndex) {\n // bottomLeft\n accumulator += getDy(b, dyR, dyC, d) * dxRLerp * inverseDxCLerp;\n }\n\n if (r == bottomDxRIndex && c == rightDxCIndex) {\n // bottomRight\n accumulator += getDy(b, dyR, dyC, d) * dxRLerp * dxCLerp;\n }\n }\n }\n // End loop over dy\n\n setOutput(accumulator);\n }\n `}}const 
IF={kernelName:vn,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{images:a,dy:r}=t,{alignCorners:i}=s,o=new NF(r.shape,a.shape,i);return n.runWebGLProgram(o,[r],r.dtype)}};class $F{constructor(e,t,n,s,a){this.variableNames=["A"],this.outputShape=[];const[r,i,o,l]=e;this.outputShape=[r,t,n,l];const u=[s&&t>1?i-1:i,s&&n>1?o-1:o],c=[s&&t>1?t-1:t,s&&n>1?n-1:n],h=s?"0.5":"0.0";let p;p=a?"max((vec2(yRC) + vec2(0.5)) * effectiveInputOverOutputRatioRC, vec2(0.0))":"vec2(yRC) * effectiveInputOverOutputRatioRC",this.userCode=`\n const vec2 effectiveInputOverOutputRatioRC = vec2(\n ${u[0]/c[0]},\n ${u[1]/c[1]});\n const vec2 inputShapeRC = vec2(${i}.0, ${o}.0);\n\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int d = coords[3];\n ivec2 yRC = coords.yz;\n\n // Fractional source index.\n vec2 sourceFracIndexRC = ${p};\n\n // Compute the coordinators of nearest neighbor point.\n ivec2 sourceNearestRC = ivec2(\n min(inputShapeRC - 1.0, floor(sourceFracIndexRC + ${h})));\n float newValue = getA(b, sourceNearestRC.x, sourceNearestRC.y, d);\n\n setOutput(newValue);\n }\n `}}const CF={kernelName:xn,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{images:a}=t,{alignCorners:r,halfPixelCenters:i,size:o}=s,[l,u]=o,c=new $F(a.shape,l,u,r,i);return n.runWebGLProgram(c,[a],a.dtype)}};class SF{constructor(e,t,n){this.variableNames=["dy"],this.outputShape=[],this.outputShape=t;const[,s,a]=t,[,r,i]=e,o=[n&&r>1?s-1:s,n&&i>1?a-1:a],l=[n&&r>1?r-1:r,n&&i>1?i-1:i],u=o[0]/l[0],c=o[1]/l[1],h=1/u,p=1/c,d=2*Math.ceil(h)+2,f=2*Math.ceil(p)+2;this.userCode=`\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int d = coords[3];\n int r = coords[1];\n int c = coords[2];\n\n float accumulator = 0.0;\n\n const float heightScale = float(${u});\n const float widthScale = float(${c});\n\n const float invHeightScale = float(${h});\n const float invWidthScale = float(${p});\n\n const int winHeight = int(${d});\n const int winWidth = int(${f});\n\n // Compute bounds for where in dy we will look\n float startRLerp = floor(float(r) * invHeightScale);\n int startDyR = int(floor(startRLerp - float(winHeight / 2)));\n\n float startCLerp = floor(float(c) * invWidthScale);\n int startDyC = int(floor(startCLerp - float(winWidth / 2)));\n\n // Loop over dy\n for (int dyROffset = 0; dyROffset < winHeight; dyROffset++) {\n int dyR = dyROffset + startDyR;\n\n // Guard against the window exceeding the bounds of dy\n if (dyR < 0 || dyR >= ${r}) {\n continue;\n }\n\n for (int dyCOffset = 0; dyCOffset < winWidth; dyCOffset++) {\n int dyC = dyCOffset + startDyC;\n\n // Guard against the window exceeding the bounds of dy\n if (dyC < 0 || dyC >= ${i}) {\n continue;\n }\n\n float sourceFracRow =\n float(${o[0]}) *\n (float(dyR) / float(${l[0]}));\n\n float sourceFracCol =\n float(${o[1]}) *\n (float(dyC) / float(${l[1]}));\n\n int sourceNearestRow = int(min(\n float(int(${s}) - 1),\n ${n} ? float(round(sourceFracRow)) :\n float(floor(sourceFracRow))));\n\n int sourceNearestCol = int(min(\n float(int(${a}) - 1),\n ${n} ? 
float(round(sourceFracCol)) :\n float(floor(sourceFracCol))));\n\n if (r == sourceNearestRow && c == sourceNearestCol) {\n accumulator += getDy(b, dyR, dyC, d);\n }\n }\n }\n // End loop over dy\n\n setOutput(accumulator);\n }\n `}}const TF={kernelName:wn,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{images:a,dy:r}=t,{alignCorners:i}=s,o=new SF(r.shape,a.shape,i);return n.runWebGLProgram(o,[r],r.dtype)}};class EF{constructor(e,t){this.variableNames=["x"];const n=e.length;if(n>4)throw new Error(`WebGL backend: Reverse of rank-${n} tensor is not yet supported`);if(this.outputShape=e,1===n)return void(this.userCode=`\n void main() {\n int coord = getOutputCoords();\n setOutput(getX(${e[0]} - coord - 1));\n }\n `);const s=e.map(((n,s)=>(n=>-1!==t.indexOf(n)&&1!==e[n]?`${e[n]} - coords[${n}] - 1`:`coords[${n}]`)(s))).join(","),a=vC(n);this.userCode=`\n void main() {\n ${a} coords = getOutputCoords();\n setOutput(getX(${s}));\n }\n `}}class AF{constructor(e,t){this.variableNames=["x"],this.packedInputs=!0,this.packedOutput=!0;const n=e.length;if(n>4)throw new Error(`WebGL backend: Reverse of rank-${n} tensor is not yet supported`);this.outputShape=e;const s=nS("rc",n),a=`${s[n-1]} + 1 < ${this.outputShape[n-1]}`,r=`${s[n-2]} + 1 < ${this.outputShape[n-2]}`,i=vC(n);function o(n){const s=e.map(((s,a)=>function(n,s){return-1!==t.indexOf(n)&&1!==e[n]?`${e[n]} - ${s[n]} - 1`:`${s[n]}`}(a,n)));return`getChannel(getX(${s.join(",")}), vec2(${s.slice(-2).join(",")}))`}this.userCode=1===n?`\n void main(){\n int rc = getOutputCoords();\n vec4 result = vec4(0.);\n result.r = getChannel(getX(${e[0]} - rc - 1),\n ${e[0]} - rc - 1);\n if(${a}){\n result.g = getChannel(getX(${e[0]} - (rc + 1) - 1),\n ${e[0]} - (rc + 1) - 1);\n }\n setOutput(result);\n }\n `:`\n void main() {\n ${i} rc = getOutputCoords();\n vec4 result = vec4(0.);\n result.r = ${function(e){return o(e)}(s.slice())};\n if(${a}){\n result.g = ${function(e){return e[n-1]="("+e[n-1]+" + 1)",o(e)}(s.slice())};\n }\n if(${r}) {\n result.b = ${function(e){return e[n-2]="("+e[n-2]+" + 1)",o(e)}(s.slice())};\n if(${a}) {\n result.a = ${function(e){return e[n-1]="("+e[n-1]+" + 1)",e[n-2]="("+e[n-2]+" + 1)",o(e)}(s.slice())};\n }\n }\n setOutput(result);\n }\n `}}const RF={kernelName:In,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{dims:r}=s,i=a.shape.length,o=B(r,a.shape);if(0===i)return xS({inputs:{x:a},backend:n});const l=ue().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new AF(a.shape,o):new EF(a.shape,o);return n.runWebGLProgram(l,[a],a.dtype)}};class FF{constructor(e,t){this.variableNames=["Image"],this.outputShape=[];const n=e[1],s=e[2];this.outputShape=e;let a="";a="number"==typeof t?`float outputValue = ${t.toFixed(2)};`:`\n vec3 fill = vec3(${t.join(",")});\n float outputValue = fill[coords[3]];`,this.userCode=`\n uniform vec4 params;\n void main() {\n ivec4 coords = getOutputCoords();\n int x = coords[2];\n int y = coords[1];\n float coordXFloat = (float(x) - params[0]) * params[3] -\n (float(y) - params[1]) * params[2];\n float coordYFloat = (float(x) - params[0]) * params[2] +\n (float(y) - params[1]) * params[3];\n int coordX = int(round(coordXFloat + params[0]));\n int coordY = int(round(coordYFloat + params[1]));\n ${a}\n if(coordX >= 0 && coordX < ${s} && coordY >= 0 && coordY < ${n}) {\n outputValue = getImage(coords[0], coordY, coordX, coords[3]);\n }\n setOutput(outputValue);\n }\n 
`}getCustomSetupFunc(e,t,n,s){return(a,r)=>{null==this.paramsLoc&&(this.paramsLoc=a.getUniformLocationNoThrow(r,"params")),a.gl.uniform4f(this.paramsLoc,e,t,n,s)}}}const DF={kernelName:as,backendName:"webgl",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{image:s}=e,{radians:a,fillValue:r,center:i}=t,o=n,l=new FF(s.shape,r),[u,c]=ou(i,s.shape[1],s.shape[2]),h=l.getCustomSetupFunc(u,c,Math.sin(a),Math.cos(a));return o.runWebGLProgram(l,[s],s.dtype,h)}},_F=ES({opSnippet:"\n // OpenGL ES does not support round function.\n // The algorithm is based on banker's rounding.\n float base = floor(x);\n if ((x - base) < 0.5) {\n return floor(x);\n } else if ((x - base) > 0.5) {\n return ceil(x);\n } else {\n if (mod(base, 2.0) == 0.0) {\n return base;\n } else {\n return base + 1.0;\n }\n }\n"}),OF={kernelName:$n,backendName:"webgl",kernelFunc:_F},MF=ES({opSnippet:"return inversesqrt(x);",cpuKernelImpl:jC}),LF={kernelName:Cn,backendName:"webgl",kernelFunc:MF};class zF{constructor(e,t,n,s,a,r,i=!0){this.variableNames=["updates","indices","defaultValue"],this.outputShape=r;const o=vC(a.length),l=vC(r.length);let u="";1===n?u="i":2===n&&(u="i, j");const c=`getIndices(${u})`;let h="";1===s?h="i":2===s&&(h="i, coords[1]");const p=`getUpdates(${h})`,d=t>1?"strides[j]":"strides";this.userCode=`\n ${o} strides = ${o}(${a});\n\n void main() {\n ${l} coords = getOutputCoords();\n float sum = 0.0;\n bool found = false;\n for (int i = 0; i < ${e}; i++) {\n int flattenedIndex = 0;\n for (int j = 0; j < ${t}; j++) {\n int index = round(${c});\n flattenedIndex += index * ${d};\n }\n if (flattenedIndex == coords[0]) {\n sum += ${p};\n found = true;\n }\n }\n setOutput(mix(getDefaultValue(), sum, float(found)));\n }\n `}}const PF={kernelName:Sn,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{indices:a,updates:r}=t,{shape:i}=s,{sliceRank:o,numUpdates:l,sliceSize:u,strides:c,outputSize:h}=gu(0,a,i),p=[h/u,u];if(0===h)return n.makeTensorInfo(i,a.dtype);const d=LS({inputs:{x:a},backend:n,attrs:{shape:[l,o]}}),f=LS({inputs:{x:r},backend:n,attrs:{shape:[l,u]}}),m=n.makeTensorInfo([],"float32",new Float32Array([0])),g=new zF(l,o,d.shape.length,f.shape.length,c,p),y=n.runWebGLProgram(g,[f,d,m],f.dtype),b=LS({inputs:{x:y},backend:n,attrs:{shape:i}});return n.disposeIntermediateTensorInfo(d),n.disposeIntermediateTensorInfo(f),n.disposeIntermediateTensorInfo(y),n.disposeIntermediateTensorInfo(m),b}};class BF{constructor(e,t,n){let s,a;if(this.variableNames=["c","a","b"],this.outputShape=t,n>4)throw Error(`Where for rank ${n} is not yet supported`);if(1===n)a="resRC",s="resRC";else{const n=["resRC.x","resRC.y","resRC.z","resRC.w"],r=[],i=[];for(let s=0;s<t.length;s++)i.push(`${n[s]}`),s<e&&r.push(`${n[s]}`);s=r.join(),a=i.join()}const r=vC(n);this.userCode=`\n void main() {\n ${r} resRC = getOutputCoords();\n float cVal = getC(${s});\n if (cVal >= 1.0) {\n setOutput(getA(${a}));\n } else {\n setOutput(getB(${a}));\n }\n }\n `}}const WF={kernelName:Tn,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n}=e,{condition:s,t:a,e:r}=t,i=new BF(s.shape.length,a.shape,a.shape.length);return n.runWebGLProgram(i,[s,a,r],Vs(a.dtype,r.dtype))}},VF=ES({opSnippet:`\n // Stable and Attracting Fixed Point (0, 1) for Normalized Weights.\n // see: https://arxiv.org/abs/1706.02515\n float scaleAlpha = ${yu};\n float scale = ${bu};\n return (x >= 0.0) ? 
scale * x : scaleAlpha * (exp(x) - 1.0);\n`}),UF={kernelName:En,backendName:"webgl",kernelFunc:VF},GF=ES({opSnippet:"return 1.0 / (1.0 + exp(-1.0 * x));"}),HF={kernelName:_n,backendName:"webgl",kernelFunc:GF},jF=ES({opSnippet:"\n if (isnan(x)) { return 0.0; }\n return sign(x);\n"}),KF={kernelName:Dn,backendName:"webgl",kernelFunc:jF},qF=ES({opSnippet:"if (isnan(x)) return x;\n return sin(x);\n"}),XF={kernelName:Rn,backendName:"webgl",kernelFunc:qF},YF=ES({opSnippet:"\n float e2x = exp(x);\n return (e2x - 1.0 / e2x) / 2.0;\n"}),JF={kernelName:Fn,backendName:"webgl",kernelFunc:YF},ZF=ES({opSnippet:"\n float epsilon = 1.1920928955078125e-7;\n float threshold = log(epsilon) + 2.0;\n\n bool too_large = x > -threshold;\n bool too_small = x < threshold;\n\n float result;\n float exp_x = exp(x);\n\n if (too_large){\n result = x;\n }\n else if (too_small){\n result = exp_x;\n }\n else{\n result = log(exp_x + 1.0);\n }\n return result;\n"}),QF={kernelName:On,backendName:"webgl",kernelFunc:ZF},eD={kernelName:zn,backendName:"webgl",kernelFunc:e=>{const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{blockShape:r,paddings:i}=s;E(a.shape.length<=4,(()=>"spaceToBatchND for rank > 4 with a WebGL backend not implemented yet"));const o=r.reduce(((e,t)=>e*t)),l=[[0,0]];l.push(...i);for(let e=1+r.length;e<a.shape.length;++e)l.push([0,0]);const u=[],c=oF({inputs:{x:a},backend:n,attrs:{paddings:l,constantValue:0}}),h=lu(c.shape,r,o,!1),p=uu(h.length,r.length,!1),d=cu(c.shape,r,o,!1),f=LS({inputs:{x:c},backend:n,attrs:{shape:h}}),m=KS({inputs:{x:f},backend:n,attrs:{perm:p}}),g=LS({inputs:{x:m},backend:n,attrs:{shape:d}});return u.push(c),u.push(f),u.push(m),u.forEach((e=>n.disposeIntermediateTensorInfo(e))),g}},tD={kernelName:Gn,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{sparseIndices:a,sparseValues:r,defaultValue:i}=t,{outputShape:o}=s,{sliceRank:l,numUpdates:u,strides:c,outputSize:h}=gu(0,a,o),p=new zF(u,l,a.shape.length,r.shape.length,c,[h,1],!1),d=n.runWebGLProgram(p,[r,a,i],r.dtype),f=LS({inputs:{x:d},backend:n,attrs:{shape:o}});return n.disposeIntermediateTensorInfo(d),f}},nD={kernelName:Pn,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{numOrSizeSplits:r,axis:i}=s,o=B(i,a.shape)[0],l=Ou(a,r,o),u=a.shape.length,c=new Array(u).fill(0),h=a.shape.slice();return l.map((e=>{const t=[...h];t[o]=e;const s=UT({inputs:{x:a},backend:n,attrs:{begin:c,size:t}});return c[o]+=e,s}))}},sD=ES({opSnippet:"return sqrt(x);"}),aD={kernelName:Mn,backendName:"webgl",kernelFunc:sD},rD={kernelName:Vn,backendName:"webgl",kernelFunc:ES({opSnippet:"return x * x;"})},iD="return (a - b) * (a - b);",oD=AS({opSnippet:iD,packedOpSnippet:iD}),lD={kernelName:Wn,backendName:"webgl",kernelFunc:oD},uD={kernelName:ns,backendName:"webgl",kernelFunc:function({inputs:e,attrs:t,backend:n}){const{x:s}=e,a=`if (isnan(x)) return x;\n return x > 0.0 ? 
1.0 : float(${t.alpha});\n `,r=new uS(s.shape,a);return n.runWebGLProgram(r,[s],s.dtype)}};class cD{constructor(e,t,n){this.variableNames=["x"],this.outputShape=n;const s=n.length,a=vC(n.length),r=vC(n.length);let i="";if(1===s)i="coords * strides + begin";else{let e=0;i=n.map(((t,s)=>(e++,1===n.length?`coords * strides[${s}] + begin[${s}]`:`coords[${e-1}] * strides[${s}] + begin[${s}]`))).join(",")}this.userCode=`\n ${a} begin = ${a}(${e});\n ${a} strides = ${a}(${t});\n\n void main() {\n ${r} coords = getOutputCoords();\n setOutput(getX(${i}));\n }\n `}}const hD={kernelName:Hn,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{begin:r,end:i,strides:o,beginMask:l,endMask:u,ellipsisMask:c,newAxisMask:h,shrinkAxisMask:p}=s,{nonStrided:d,$begin:f,$strides:m,size:g,newShape:y,outShape:b}=wr(a.shape,r,i,o,l,u,c,h,p),x=LS({inputs:{x:a},backend:n,attrs:{shape:y}});let w;if(d){const e=UT({inputs:{x},backend:n,attrs:{begin:f,size:g}});w=LS({inputs:{x:e},backend:n,attrs:{shape:b}}),n.disposeIntermediateTensorInfo(e)}else if(b.some((e=>0===e)))w=n.makeTensorInfo(b,a.dtype,[]);else if(n.shouldExecuteOnCPU([x])){const e=n.texData.get(x.dataId).values,t=Ha(x.shape,x.dtype,e),s=XC(b,t,m,f);w=n.makeTensorInfo(b,x.dtype,s.values)}else{const e=new cD(f,m,b);w=n.runWebGLProgram(e,[x],x.dtype)}const k=LS({inputs:{x:w},backend:n,attrs:{shape:b}});return n.disposeIntermediateTensorInfo(x),n.disposeIntermediateTensorInfo(w),k}},pD=ES({opSnippet:"return tan(x);"}),dD={kernelName:jn,backendName:"webgl",kernelFunc:pD},fD=ES({opSnippet:"\n float e2x = exp(-2.0 * abs(x));\n return sign(x) * (1.0 - e2x) / (1.0 + e2x);\n"}),mD={kernelName:Kn,backendName:"webgl",kernelFunc:fD};class gD{constructor(e,t){this.variableNames=["A"];const n=new Array(e.length);for(let s=0;s<n.length;s++)n[s]=e[s]*t[s];this.outputShape=n,this.rank=n.length;const s=vC(this.rank),a=function(e){const t=e.length;if(t>5)throw Error(`Tile for rank ${t} is not yet supported`);if(1===t)return`imod(resRC, ${e[0]})`;const n=["resRC.x","resRC.y","resRC.z","resRC.w","resRC.u"],s=[];for(let t=0;t<e.length;t++)s.push(`imod(${n[t]}, ${e[t]})`);return s.join()}(e);this.userCode=`\n void main() {\n ${s} resRC = getOutputCoords();\n setOutput(getA(${a}));\n }\n `}}function yD(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{reps:r}=s;if("string"===a.dtype){const e=n.readSync(a.dataId).map((e=>ws(e))),t=Ha(a.shape,a.dtype,e),s=JC(t,r);return n.makeTensorInfo(s.shape,s.dtype,s.values)}const i=new gD(a.shape,r);return n.runWebGLProgram(i,[a],a.dtype)}const bD={kernelName:qn,backendName:"webgl",kernelFunc:yD},xD={kernelName:Xn,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a}=t,{k:r,sorted:i}=s,o=n.readSync(a.dataId),[l,u]=ZC(o,a.shape,a.dtype,r,i);return[n.makeTensorInfo(l.shape,l.dtype,l.values),n.makeTensorInfo(u.shape,u.dtype,u.values)]}};class wD{constructor(e,t,n,s,a,r){this.variableNames=["Image","Transforms"],this.outputShape=r;const i="nearest"===n?1:2;let o;switch(s){case"constant":o=1;break;case"reflect":o=2;break;case"wrap":o=3;break;case"nearest":o=4;break;default:o=1}this.userCode=`\n float mapCoord(float outCoord, float len) {\n float inCoord = outCoord;\n if(${o} == 2) {\n if (inCoord < 0.0) {\n if (len <= 1.0) {\n inCoord = 0.0;\n } else {\n float sz2 = 2.0 * len;\n if (inCoord < sz2) {\n inCoord = sz2 * float(int(float(-inCoord / sz2))) +\n inCoord;\n }\n inCoord = inCoord < -len ? 
inCoord + sz2 : -inCoord - 1.0;\n }\n } else if (inCoord > len - 1.0) {\n if (len <= 1.0) {\n inCoord = 0.0;\n } else {\n float sz2 = 2.0 * len;\n inCoord -= sz2 * float(int(float(inCoord / sz2)));\n if (inCoord >= len) {\n inCoord = sz2 - inCoord - 1.0;\n }\n }\n }\n return clamp(inCoord, 0.0, len - 1.0);\n } else if (${o} == 3) {\n if (inCoord < 0.0) {\n if (len <= 1.0) {\n inCoord = 0.0;\n } else {\n float sz = len - 1.0;\n inCoord += len * (float(int(float(-inCoord / sz))) + 1.0);\n }\n } else if (inCoord > len - 1.0) {\n if (len <= 1.0) {\n inCoord = 0.0;\n } else {\n float sz = len - 1.0;\n inCoord -= len * float(int(float(inCoord / sz)));\n }\n }\n return clamp(inCoord, 0.0, len - 1.0);\n } else if (${o} == 4) {\n return clamp(outCoord, 0.0, len - 1.0);\n } else {\n return outCoord;\n }\n }\n\n float readWithFillValue(int batch, int coordY, int coordX,\n int channel) {\n float outputValue;\n if (0 <= coordY && coordY < ${e} && 0 <= coordX && coordX < ${t}) {\n outputValue = getImage(batch, coordY, coordX, channel);\n } else {\n outputValue = float(${a});\n }\n return outputValue;\n }\n\n void main() {\n ivec4 coords = getOutputCoords();\n float outputValue;\n int batch = coords[0];\n int x = coords[2];\n int y = coords[1];\n int channel = coords[3];\n float xf = float(x);\n float yf = float(y);\n float a1 = getTransforms(batch, 0);\n float a2 = getTransforms(batch, 1);\n float a3 = getTransforms(batch, 2);\n float b1 = getTransforms(batch, 3);\n float b2 = getTransforms(batch, 4);\n float b3 = getTransforms(batch, 5);\n float c1 = getTransforms(batch, 6);\n float c2 = getTransforms(batch, 7);\n float projection = c1 * xf + c2 * yf + 1.0;\n if (projection == 0.0) {\n outputValue = float(${a});\n } else {\n float inX = (a1 * xf + a2 * yf + a3) / projection;\n float inY = (b1 * xf + b2 * yf + b3) / projection;\n float mapX = mapCoord(inX, float(${t}));\n float mapY = mapCoord(inY, float(${e}));\n\n if (${i} == 1) {\n int coordY = int(round(mapY));\n int coordX = int(round(mapX));\n outputValue = readWithFillValue(batch, coordY, coordX,\n channel);\n } else {\n float yFloor = floor(mapY);\n float xFloor = floor(mapX);\n float yCeil = yFloor + 1.0;\n float xCeil = xFloor + 1.0;\n float valueYFloor = (xCeil - mapX) *\n readWithFillValue(batch, int(yFloor), int(xFloor), channel) +\n (mapX - xFloor) *\n readWithFillValue(batch, int(yFloor), int(xCeil), channel);\n float valueYCeil = (xCeil - mapX) *\n readWithFillValue(batch, int(yCeil), int(xFloor), channel) +\n (mapX - xFloor) *\n readWithFillValue(batch, int(yCeil), int(xCeil), channel);\n outputValue = (yCeil - mapY) * valueYFloor +\n (mapY - yFloor) * valueYCeil;\n }\n }\n setOutput(outputValue);\n }\n `}}const kD={kernelName:Yn,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{image:a,transforms:r}=t,{interpolation:i,fillMode:o,fillValue:l,outputShape:u}=s,[c,h,p,d]=a.shape,[f,m]=null!=u?u:[h,p],g=new wD(h,p,i,o,l,[c,f,m,d]);return n.runWebGLProgram(g,[a,r],"float32")}},vD={kernelName:Zn,backendName:"webgl",kernelFunc:function(e){const{inputs:t,attrs:n,backend:s}=e,{axis:a}=n,{x:r}=t;K$(r,"unique"),console.warn("WARNING: ","UI might be locked temporarily as data is being downloaded");const i=s.readSync(r.dataId),{outputValues:o,outputShape:l,indices:u}=eS(i,a,r.shape,r.dtype);return[s.makeTensorInfo(l,r.dtype,o),s.makeTensorInfo([u.length],"int32",u)]}},ND={kernelName:Qn,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{value:a}=t;let{axis:r}=s;r<0&&(r+=a.shape.length);const 
i=a,o=i.shape.length,l=a.shape[r],u=new Array(o-1);let c=0;for(let e=0;e<o;e++)e!==r&&(u[c++]=i.shape[e]);const h=[],p=new Array(o).fill(0),d=i.shape.slice();d[r]=1;const f=new Array(l);for(let e=0;e<f.length;e++){p[r]=e;const t=UT({inputs:{x:i},backend:n,attrs:{begin:p,size:d}}),s=LS({inputs:{x:t},backend:n,attrs:{shape:u}});f[e]=s,h.push(t)}return h.forEach((e=>n.disposeIntermediateTensorInfo(e))),f}};class ID{constructor(e,t){this.variableNames=["x","segmentIds"];const n=e.windowSize,s=e.batchSize,a=e.inSize,r=e.numSegments,i=r*Math.ceil(a/n);this.outputShape=[s,i];const o=4*Math.floor(n/4),l=n%4,u="\n sumValue += dot(values, segFilter);\n ";let c="";a%n>0&&(c=`\n if (inIdx < 0 || inIdx >= ${a}) {\n return initializationValue;\n }\n `);let h="";a%n>0&&(h=`\n if (inIdx < 0 || inIdx >= ${a}) {\n return -1.0;\n }\n `),this.userCode=`\n const float initializationValue = 0.0;\n\n float getValue(int batch, int inIdx) {\n ${c}\n return getX(batch, inIdx);\n }\n\n float getSegmentIdAtIndex(int inIdx) {\n ${h}\n return getSegmentIds(inIdx);\n }\n\n void main() {\n ivec2 coords = getOutputCoords();\n int batch = coords[0];\n int outIdx = coords[1];\n int inOffset = int(floor(float(outIdx) / float(\n ${r})) * float(${n}));\n int currentSeg = int(mod(float(outIdx), float(${r})));\n\n float sumValue = 0.0;\n\n for (int i = 0; i < ${o}; i += 4) {\n int inIdx = inOffset + i;\n vec4 values = vec4(\n getValue(batch, inIdx),\n getValue(batch, inIdx + 1),\n getValue(batch, inIdx + 2),\n getValue(batch, inIdx + 3)\n );\n\n vec4 segFilter = vec4(\n int(getSegmentIdAtIndex(inIdx)) == currentSeg ? 1 : 0,\n int(getSegmentIdAtIndex(inIdx + 1)) == currentSeg ? 1 : 0,\n int(getSegmentIdAtIndex(inIdx + 2)) == currentSeg ? 1 : 0,\n int(getSegmentIdAtIndex(inIdx + 3)) == currentSeg ? 1 : 0\n );\n\n ${u}\n }\n\n int inIdx = inOffset + ${o};\n if (${1===l}) {\n vec4 values = vec4(\n getValue(batch, inIdx),\n initializationValue,\n initializationValue,\n initializationValue\n );\n\n int inIdxSeg = int(getSegmentIdAtIndex(inIdx));\n\n vec4 segFilter = vec4(\n int(getSegmentIdAtIndex(inIdx)) == currentSeg ? 1 : 0,\n 0,\n 0,\n 0\n );\n\n ${u}\n } else if (${2===l}) {\n vec4 values = vec4(\n getValue(batch, inIdx),\n getValue(batch, inIdx + 1),\n initializationValue,\n initializationValue\n );\n\n vec4 segFilter = vec4(\n int(getSegmentIdAtIndex(inIdx)) == currentSeg ? 1 : 0,\n int(getSegmentIdAtIndex(inIdx + 1)) == currentSeg ? 1 : 0,\n 0,\n 0\n );\n\n ${u}\n } else if (${3===l}) {\n vec4 values = vec4(\n getValue(batch, inIdx),\n getValue(batch, inIdx + 1),\n getValue(batch, inIdx + 2),\n initializationValue\n );\n\n vec4 segFilter = vec4(\n int(getSegmentIdAtIndex(inIdx)) == currentSeg ? 1 : 0,\n int(getSegmentIdAtIndex(inIdx + 1)) == currentSeg ? 1 : 0,\n int(getSegmentIdAtIndex(inIdx + 2)) == currentSeg ? 
1 : 0,\n 0\n );\n\n ${u}\n }\n setOutput(sumValue);\n }\n `}}const $D=[dR,mR,YS,ZS,eT,nT,rT,lT,uT,cT,gT,yT,xT,kT,$T,NT,ST,RT,AT,_T,OT,MT,PT,HT,jT,JT,eE,sE,iE,vS,mE,CE,SE,kE,EE,AE,TE,FE,_E,ME,BE,WE,UE,XE,YE,jE,ZE,eA,nA,sA,rA,oA,cA,pA,mA,bA,kA,NA,CA,TA,RA,DA,_A,MA,zA,BA,VA,wS,UA,hE,HA,KA,XA,$S,JA,QA,eR,aR,nR,iR,lR,cR,yR,kR,wR,IR,$R,CR,xR,SR,TR,AR,DR,OR,GR,MS,jR,qR,YR,ZR,qT,eF,sF,aF,lF,cF,TS,hF,dF,YT,zR,mF,xF,yF,zS,vF,IF,CF,TF,RF,DF,OF,LF,PF,WF,UF,HF,KF,XF,JF,GT,UR,QF,eD,tD,nD,aD,rD,lD,uD,hD,WR,jS,dD,mD,bD,xD,kD,qS,vD,ND,{kernelName:es,backendName:"webgl",kernelFunc:function(e){const{inputs:t,backend:n,attrs:s}=e,{x:a,segmentIds:r}=t,{numSegments:i}=s,o=a.shape.length,l=[];let u=0;const c=Zi([u],o);let h=a;null!=c&&(h=KS({inputs:{x:a},backend:n,attrs:{perm:c}}),l.push(h),u=eo(1,o)[0]);const p=Lu(h.shape,u,i),d=D([h.shape[u]]),f=LS({inputs:{x:h},backend:n,attrs:{shape:[-1,d]}});l.push(f);const m=Us(a.dtype),g=(e,t,s,a,r)=>{const i=e.shape[0],o=e.shape[1],u=Mu(o,r),c=new ID({windowSize:u,inSize:o,batchSize:i,numSegments:r},t),h=n.compileAndRun(c,[e,s],a);if(l.push(h),h.shape[1]===r)return h;const p=pF({backend:n,attrs:{start:0,stop:r,step:1,dtype:"float32"}}),d=yD({inputs:{x:p},backend:n,attrs:{reps:[o/u]}});return l.push(p),l.push(d),g(h,t,d,a,r)},y=LS({inputs:{x:g(f,"unsortedSegmentSum",r,m,i)},backend:n,attrs:{shape:p}});let b=y;if(null!=c){l.push(y);const e=Qi(c);b=KS({inputs:{x:b},backend:n,attrs:{perm:e}})}return l.forEach((e=>n.disposeIntermediateTensorInfo(e))),b}},nF];for(const e of $D)ds(e);const CD="0.1.5",SD={flipHorizontal:!1,outputStride:16,imageScaleFactor:1,maxNumBoxes:20,iouThreshold:.2,scoreThreshold:.6,modelType:"ssd320fpnlite",modelSize:"small",bboxLineWidth:"2",fontSize:17,basePath:"https://cdn.jsdelivr.net/npm/handtrackjs@latest/models/webmodel/",labelMap:{1:"open",2:"closed",3:"pinch",4:"point",5:"face",6:"pointtip",7:"pinchtip"},renderThresholds:null},TD={large:"base",medium:"fp16",small:"int8"},ED={open:"#374151",closed:"#B91C1C",pinch:"#F59E0B",point:"#10B981",face:"#3B82F6",pointtip:"#6366F1",pinchtip:"#EC4899"},AD={ssd320fpnlite:["StatefulPartitionedCall/Postprocessor/Slice","StatefulPartitionedCall/Postprocessor/ExpandDims_1"],ssd640fpnlite:["StatefulPartitionedCall/Postprocessor/Slice","StatefulPartitionedCall/Postprocessor/ExpandDims_1"],centernet512fpn:["Identity_4:0","Identity:0","Identity_2:0"]};async function RD(e){let t=Object.assign({},SD,e);const n=new _D(t);return await n.load(),n}function FD(e){return new Promise((function(t,n){e||t({status:!1,msg:"please provide a valid video element"}),e.width=e.width||640,e.height=e.width*(e.videoHeight/e.videoWidth),e.style.width="100%",navigator.mediaDevices.getUserMedia({audio:!1,video:{facingMode:"user"}}).then((n=>{window.localStream=n,e.srcObject=n,e.onloadedmetadata=()=>{e.height=e.width*(e.videoHeight/e.videoWidth),e.style.height=parseInt(e.style.width)*(e.videoHeight/e.videoWidth).toFixed(2)+"px",e.play(),t({status:!0,msg:"webcam successfully initiated."})}})).catch((function(e){t({status:!1,msg:e})}))}))}async function DD(){if(!window.localStream)return!1;window.localStream.getTracks().forEach((e=>(e.stop(),!0)))}class _D{constructor(e){this.modelPath=e.basePath+e.modelType+"/"+(TD[e.modelSize]||"base")+"/model.json ",this.modelParams=e}async load(){this.fps=0,this.model=await async function(e,t={}){if(null==e)throw new Error("modelUrl in loadGraphModel() cannot be null. 
Please provide a url or an IOHandler that loads the model");null==t&&(t={}),t.fromTFHub&&null==e.load&&(e.endsWith("/")||(e+="/"),e=`${e}model.json?tfjs-format=file`);const n=new ix(e,t);return await n.load(),n}(this.modelPath);const e=io([1,300,300,3],"int32"),t=await this.model.executeAsync(e,AD[this.modelParams.modelType]);t.map((async e=>await e.data())),t.map((async e=>e.dispose())),Sr(e)}async detect(e){let t=Date.now();const[n,s]=function(e){return e instanceof Ds?[e.shape[0],e.shape[1]]:[e.height,e.width]}(e),a=OD(this.modelParams.imageScaleFactor,n,this.modelParams.outputStride),r=OD(this.modelParams.imageScaleFactor,s,this.modelParams.outputStride),i=Cr((()=>{const t=ar(e);return this.modelParams.flipHorizontal?t.reverse(1).resizeBilinear([a,r]).expandDims(0).toInt():t.resizeBilinear([a,r]).expandDims(0).toInt()})),o=this;return this.model.executeAsync(i,AD[this.modelParams.modelType]).then((function(e){i.dispose();let a=[];if("centernet512fpn"===o.modelParams.modelType){const t=e[0].dataSync(),r=e[1].arraySync(),i=e[2].dataSync();Sr(e),a=o.buildDetectObjectsCenternet(s,n,r[0],t,i),console.log(a)}else{const t=e[0].dataSync(),r=e[1].dataSync();Sr(e);const[i,l]=function(e,t,n){const s=[],a=[];for(let r=0;r<t;r++){let t=Number.MIN_VALUE,i=-1;for(let s=0;s<n;s++)e[r*n+s]>t&&(t=e[r*n+s],i=s);s[r]=t,a[r]=i}return[s,a]}(t,e[0].shape[1],e[0].shape[2]),u=Zs.backendName;Er("cpu");const c=Cr((()=>{const t=Oo(r,[e[1].shape[1],e[1].shape[3]]);return Gl.nonMaxSuppression(t,i,o.modelParams.maxNumBoxes,o.modelParams.iouThreshold,o.modelParams.scoreThreshold)})),h=c.dataSync();c.dispose(),Er(u),a=o.buildDetectedObjects(s,n,r,i,h,l)}let r=Date.now();return o.fps=Math.round(1e3/(r-t)),a}))}buildDetectedObjects(e,t,n,s,a,r){const i=a.length,o=[];for(let l=0;l<i;l++){const i=[];for(let e=0;e<4;e++)i[e]=n[4*a[l]+e];const u=i[0]*t,c=i[1]*e,h=i[2]*t,p=i[3]*e;i[0]=c,i[1]=u,i[2]=p-c,i[3]=h-u;const d=Math.round(r[a[l]])+1;o.push({bbox:i,class:d,label:this.modelParams.labelMap[d],score:s[a[l]].toFixed(2)})}return o}buildDetectObjectsCenternet(e,t,n,s,a){const r=[];for(let i=0;i<s.length;i++){const o=n[i],l=o[0]*t,u=o[1]*e,c=o[2]*t,h=o[3]*e;o[0]=u,o[1]=l,o[2]=h-u,o[3]=c-l;const p=Math.round(a[i])+1;r.push({bbox:o,class:p,label:this.modelParams.labelMap[p],score:s[i].toFixed(2)})}return r.slice(0,10)}getFPS(){return this.fps}setModelParameters(e){this.modelParams=Object.assign({},this.modelParams,e)}getModelParameters(){return this.modelParams}roundRect(e,t,n,s,a,r,i,o){if(void 0===o&&(o=!0),void 0===r&&(r=5),"number"==typeof r)r={tl:r,tr:r,br:r,bl:r};else{var l={tl:0,tr:0,br:0,bl:0};for(var u in l)r[u]=r[u]||l[u]}e.beginPath(),e.moveTo(t+r.tl,n),e.lineTo(t+s-r.tr,n),e.quadraticCurveTo(t+s,n,t+s,n+r.tr),e.lineTo(t+s,n+a-r.br),e.quadraticCurveTo(t+s,n+a,t+s-r.br,n+a),e.lineTo(t+r.bl,n+a),e.quadraticCurveTo(t,n+a,t,n+a-r.bl),e.lineTo(t,n+r.tl),e.quadraticCurveTo(t,n,t+r.tl,n),e.closePath(),i&&e.fill(),o&&e.stroke()}renderPredictions(e,t,n,s){n.clearRect(0,0,t.width,t.height),t.width=s.width,t.height=s.height,t.style.height=parseInt(t.style.width)*(s.height/s.width).toFixed(2)+"px",n.save(),this.modelParams.flipHorizontal&&(n.scale(-1,1),n.translate(-s.width,0)),n.drawImage(s,0,0,s.width,s.height),n.restore(),n.font="bold "+this.modelParams.fontSize+"px Arial";const a=this.modelParams.renderThresholds;for(let t=0;t<e.length;t++){const s=e[t];(!a||s.score>a[s.label])&&(n.beginPath(),n.fillStyle="rgba(255, 255, 255, 
0.6)",n.fillRect(s.bbox[0]+1,s.bbox[1]+1,s.bbox[2]-1,1.5*this.modelParams.fontSize),n.lineWidth=this.modelParams.bboxLineWidth,this.roundRect(n,s.bbox[0],s.bbox[1],s.bbox[2],s.bbox[3],5,!1,!0),n.strokeStyle=ED[s.label],n.fillStyle=ED[s.label],n.stroke(),n.beginPath(),n.arc(s.bbox[0]+s.bbox[2]/2,s.bbox[1]+s.bbox[3]/2,2,0,2*Math.PI),n.fill(),n.stroke(),n.fillText(s.score+" | "+s.label,s.bbox[0]+5,s.bbox[1]+1.1*this.modelParams.fontSize))}n.fillStyle="rgba(255, 255, 255, 0.6)",this.roundRect(n,10,10,4.6*this.modelParams.fontSize,this.modelParams.fontSize+8,5,!0,!1),n.strokeStyle="#374151",n.fillStyle="#374151",n.font="bold "+this.modelParams.fontSize+"px Arial",n.fillText("FPS: "+this.fps,18,this.modelParams.fontSize+12)}dispose(){this.model&&this.model.dispose()}}function OD(e,t,n){const s=t*e-1;return s-s%n+1}})(),s})()})); |