diff --git a/dist/pixpipe.js b/dist/pixpipe.js index 7e19c59..f4d867e 100644 --- a/dist/pixpipe.js +++ b/dist/pixpipe.js @@ -205,7 +205,7 @@ class Filter extends PixpipeObject { this._timer = {}; this._isOutputReady = false; - + this.setMetadata("time", true); } @@ -407,7 +407,7 @@ class Filter extends PixpipeObject { var that = this; var inputCategories = Object.keys( this._inputValidator ); var valid = true; - + if(inputCategories.length == 0){ valid = false; console.warn("No input validator was added. Filter cannot run. Use addInputValidator(...) to specify input types."); @@ -415,7 +415,7 @@ class Filter extends PixpipeObject { inputCategories.forEach( function(key){ var inputOfCategory = that._getInput( key ); - + if(inputOfCategory){ if("isOfType" in inputOfCategory){ valid = valid && inputOfCategory.isOfType( that._inputValidator[ key ] ); @@ -426,13 +426,13 @@ class Filter extends PixpipeObject { valid = false; } } - + } // input simply not existing! else{ valid = false; } - + }); if(!valid){ @@ -476,7 +476,7 @@ class Filter extends PixpipeObject { * @param {String} recordName - name of the record */ addTimeRecord( recordName ){ - this._timer[ recordName ] = performance.now(); + this._timer[ recordName ] = 0; } @@ -516,16 +516,16 @@ class Filter extends PixpipeObject { */ triggerEvent( eventName /* any other arguments to follow */ ){ var returnValue = null; - + if(this.hasEvent(eventName)){ if( arguments.length > 1 ){ - + // a-la-mano slicing argument array to comply with V8 JS engine optimization... var argToSend = []; for(var i=1; i>> 16) & 0xffff) |0, @@ -6734,6 +6815,24 @@ var adler32_1 = adler32; // So write code to minimize size - no pregenerated tables // and array tools dependencies. +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. // Use ordinary array, since untyped makes no boost here function makeTable() { @@ -6770,6 +6869,25 @@ function crc32(crc, buf, len, pos) { var crc32_1 = crc32; +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. 
If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + var messages = { 2: 'need dictionary', /* Z_NEED_DICT 2 */ 1: 'stream end', /* Z_STREAM_END 1 */ @@ -6782,6 +6900,31 @@ var messages = { '-6': 'incompatible version' /* Z_VERSION_ERROR (-6) */ }; +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + + + + + + + /* Public constants ==========================================================*/ /* ===========================================================================*/ @@ -8829,6 +8972,25 @@ var strings = { utf8border: utf8border }; +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + function ZStream() { /* next input byte */ this.input = null; // JS specific, because we have no pointers @@ -9255,6 +9417,25 @@ var deflate_1 = { gzip: gzip_1 }; +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. 
If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + // See state defs from inflate.js var BAD$1 = 30; /* got a data error -- remain here until reset */ var TYPE$1 = 12; /* i: waiting for type bits, including last-flag bit */ @@ -9580,6 +9761,27 @@ var inffast = function inflate_fast(strm, start) { return; }; +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + + + var MAXBITS = 15; var ENOUGH_LENS$1 = 852; var ENOUGH_DISTS$1 = 592; @@ -9901,6 +10103,31 @@ var inftrees = function inflate_table(type, lens, lens_index, codes, table, tabl return 0; }; +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + + + + + + + var CODES = 0; var LENS = 1; var DISTS = 2; @@ -11445,6 +11672,25 @@ var inflate_1$2 = { inflateInfo: inflateInfo }; +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. 
If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + var constants = { /* Allowed flush values; see deflate() and inflate() below for details */ @@ -11493,6 +11739,25 @@ var constants = { //Z_NULL: null // Use -1 or null inline, depending on var type }; +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + function GZheader() { /* true if compressed data believed to be text */ this.text = 0; @@ -20523,16 +20788,16 @@ class QeegModFileParser { } /* END of QeegModFileParser */ class EegModDecoder extends Filter { - + constructor() { super(); this.addInputValidator(0, ArrayBuffer); this.setMetadata("debug", false); - + // a soon-to-be DataView to read the input buffer this._view = null; } - + _run(){ var inputBuffer = this._getInput(0); @@ -20540,17 +20805,17 @@ class EegModDecoder extends Filter { console.warn("EegModDecoder requires an ArrayBuffer as input \"0\". Unable to continue."); return; } - + var modParser = new QeegModFileParser(); modParser.setRawData( inputBuffer ); var qeegData = modParser.parse(); - + if( qeegData ){ this._output[0] = qeegData; } - + } - + } /* END of class EegModDecoder */ /* @@ -20657,131 +20922,2384 @@ class PixBinEncoder extends Filter { } /* END of class PixBinEncoder */ -/* -* Author Jonathan Lurie - http://me.jonahanlurie.fr -* License MIT -* Link https://github.com/jonathanlurie/pixpipejs -* Lab MCIN - Montreal Neurological Institute -*/ - -/** -* A filter of type ForEachPixelImageFilter can perform a operation on evey pixel -* of an Image2D with a simple interface. For this purpose, a per-pixel-callback -* must be specified using method -* .on( "pixel" , function( coord, color ){ ... }) -* where coord is of form {x, y} and color is of form [r, g, b, a] (with possibly) -* a different number of components per pixel. -* This callback must return, or null (original color not modified), -* or a array of color (same dimension as the one in arguments). 
-* -* **Usage** -* - [examples/forEachPixel.html](../examples/forEachPixel.html) -* -* @example -* var forEachPixelFilter = new pixpipe.ForEachPixelImageFilter(); -* forEachPixelFilter.on( "pixel", function(position, color){ -* -* return [ -* color[1], // red (takes the values from green) -* color[0], // green (takes the values from red) -* color[2] * 0.5, // blue get 50% darker -* 255 // alpha, at max -* ] -* -* } -* ); -* -*/ -class ForEachPixelImageFilter extends ImageToImageFilter { - - constructor(){ - super(); - this.addInputValidator(0, Image2D); +function iota(n) { + var result = new Array(n); + for(var i=0; i + * @license MIT + */ - var inputImage2D = this._getInput(); - var firstPixel = 0; - var lastPixel = inputImage2D.getWidth() * inputImage2D.getHeight(); - var increment = 1; +// The _isBuffer check is for Safari 5-7 support, because it's missing +// Object.prototype.constructor. Remove this eventually +var index$2 = function (obj) { + return obj != null && (isBuffer$1(obj) || isSlowBuffer$1(obj) || !!obj._isBuffer) +}; - var bufferCopy = inputImage2D.getDataCopy(); +function isBuffer$1 (obj) { + return !!obj.constructor && typeof obj.constructor.isBuffer === 'function' && obj.constructor.isBuffer(obj) +} - this._forEachPixelOfSuch(bufferCopy, firstPixel, lastPixel, increment ); +// For Node v0.10 support. Remove this eventually. +function isSlowBuffer$1 (obj) { + return typeof obj.readFloatLE === 'function' && typeof obj.slice === 'function' && isBuffer$1(obj.slice(0, 0)) +} - // 1 - init the output - var outputImg = this._addOutput( Image2D ); +var hasTypedArrays = ((typeof Float64Array) !== "undefined"); - // 2 - tune the output - outputImg.setData( - bufferCopy, - inputImage2D.getWidth(), - inputImage2D.getHeight(), - inputImage2D.getComponentsPerPixel() - ); +function compare1st(a, b) { + return a[0] - b[0] +} +function order() { + var stride = this.stride; + var terms = new Array(stride.length); + var i; + for(i=0; iMath.abs(this.stride[1]))?[1,0]:[0,1]}})"); + } else if(dimension === 3) { + code.push( +"var s0=Math.abs(this.stride[0]),s1=Math.abs(this.stride[1]),s2=Math.abs(this.stride[2]);\ +if(s0>s1){\ +if(s1>s2){\ +return [2,1,0];\ +}else if(s0>s2){\ +return [1,2,0];\ +}else{\ +return [1,0,2];\ +}\ +}else if(s0>s2){\ +return [2,0,1];\ +}else if(s2>s1){\ +return [0,1,2];\ +}else{\ +return [0,2,1];\ +}}})"); + } + } else { + code.push("ORDER})"); + } } + //view.set(i0, ..., v): + code.push( +"proto.set=function "+className+"_set("+args.join(",")+",v){"); + if(useGetters) { + code.push("return this.data.set("+index_str+",v)}"); + } else { + code.push("return this.data["+index_str+"]=v}"); + } - /** - * [PRIVATE] - * generic function for painting row, colum or whole - * @param {Number} firstPixel - Index of the first pixel in 1D array - * @param {Number} lastPixel - Index of the last pixel in 1D array - * @param {Number} increment - jump gap from a pixel to another (in a 1D style) - */ - _forEachPixelOfSuch(buffer, firstPixel, lastPixel, increment ){ - // abort if no callback per pixel - //if( ! ("pixel" in this._events)){ - if( ! 
( this.hasEvent("pixel"))){ - console.warn("No function to apply per pixel was specified."); - return; + //view.get(i0, ...): + code.push("proto.get=function "+className+"_get("+args.join(",")+"){"); + if(useGetters) { + code.push("return this.data.get("+index_str+")}"); + } else { + code.push("return this.data["+index_str+"]}"); + } + + //view.index: + code.push( + "proto.index=function "+className+"_index(", args.join(), "){return "+index_str+"}"); + + //view.hi(): + code.push("proto.hi=function "+className+"_hi("+args.join(",")+"){return new "+className+"(this.data,"+ + indices.map(function(i) { + return ["(typeof i",i,"!=='number'||i",i,"<0)?this.shape[", i, "]:i", i,"|0"].join("") + }).join(",")+","+ + indices.map(function(i) { + return "this.stride["+i + "]" + }).join(",")+",this.offset)}"); + + //view.lo(): + var a_vars = indices.map(function(i) { return "a"+i+"=this.shape["+i+"]" }); + var c_vars = indices.map(function(i) { return "c"+i+"=this.stride["+i+"]" }); + code.push("proto.lo=function "+className+"_lo("+args.join(",")+"){var b=this.offset,d=0,"+a_vars.join(",")+","+c_vars.join(",")); + for(var i=0; i=0){\ +d=i"+i+"|0;\ +b+=c"+i+"*d;\ +a"+i+"-=d}"); + } + code.push("return new "+className+"(this.data,"+ + indices.map(function(i) { + return "a"+i + }).join(",")+","+ + indices.map(function(i) { + return "c"+i + }).join(",")+",b)}"); + + //view.step(): + code.push("proto.step=function "+className+"_step("+args.join(",")+"){var "+ + indices.map(function(i) { + return "a"+i+"=this.shape["+i+"]" + }).join(",")+","+ + indices.map(function(i) { + return "b"+i+"=this.stride["+i+"]" + }).join(",")+",c=this.offset,d=0,ceil=Math.ceil"); + for(var i=0; i=0){c=(c+this.stride["+i+"]*i"+i+")|0}else{a.push(this.shape["+i+"]);b.push(this.stride["+i+"])}"); + } + code.push("var ctor=CTOR_LIST[a.length+1];return ctor(this.data,a,b,c)}"); + + //Add return statement + code.push("return function construct_"+className+"(data,shape,stride,offset){return new "+className+"(data,"+ + indices.map(function(i) { + return "shape["+i+"]" + }).join(",")+","+ + indices.map(function(i) { + return "stride["+i+"]" + }).join(",")+",offset)}"); + + //Compile procedure + var procedure = new Function("CTOR_LIST", "ORDER", code.join("\n")); + return procedure(CACHED_CONSTRUCTORS[dtype], order) +} + +function arrayDType(data) { + if(index$2(data)) { + return "buffer" + } + if(hasTypedArrays) { + switch(Object.prototype.toString.call(data)) { + case "[object Float64Array]": + return "float64" + case "[object Float32Array]": + return "float32" + case "[object Int8Array]": + return "int8" + case "[object Int16Array]": + return "int16" + case "[object Int32Array]": + return "int32" + case "[object Uint8Array]": + return "uint8" + case "[object Uint16Array]": + return "uint16" + case "[object Uint32Array]": + return "uint32" + case "[object Uint8ClampedArray]": + return "uint8_clamped" + } + } + if(Array.isArray(data)) { + return "array" + } + return "generic" +} + +var CACHED_CONSTRUCTORS = { + "float32":[], + "float64":[], + "int8":[], + "int16":[], + "int32":[], + "uint8":[], + "uint16":[], + "uint32":[], + "array":[], + "uint8_clamped":[], + "buffer":[], + "generic":[] +};function wrappedNDArrayCtor(data, shape, stride, offset) { + if(data === undefined) { + var ctor = CACHED_CONSTRUCTORS.array[0]; + return ctor([]) + } else if(typeof data === "number") { + data = [data]; + } + if(shape === undefined) { + shape = [ data.length ]; + } + var d = shape.length; + if(stride === undefined) { + stride = new Array(d); + 
for(var i=d-1, sz=1; i>=0; --i) { + stride[i] = sz; + sz *= shape[i]; + } + } + if(offset === undefined) { + offset = 0; + for(var i=0; i0 + , code = [] + , vars = [] + , idx=0, pidx=0, i, j; + for(i=0; i 0) { + code.push("var " + vars.join(",")); + } + //Scan loop + for(i=dimension-1; i>=0; --i) { // Start at largest stride and work your way inwards + idx = order[i]; + code.push(["for(i",i,"=0;i",i," 0) { + code.push(["index[",pidx,"]-=s",pidx].join("")); + } + code.push(["++index[",idx,"]"].join("")); + } + code.push("}"); + } + return code.join("\n") +} + +// Generate "outer" loops that loop over blocks of data, applying "inner" loops to the blocks by manipulating the local variables in such a way that the inner loop only "sees" the current block. +// TODO: If this is used, then the previous declaration (done by generateCwiseOp) of s* is essentially unnecessary. +// I believe the s* are not used elsewhere (in particular, I don't think they're used in the pre/post parts and "shape" is defined independently), so it would be possible to make defining the s* dependent on what loop method is being used. +function outerFill(matched, order, proc, body) { + var dimension = order.length + , nargs = proc.arrayArgs.length + , blockSize = proc.blockSize + , has_index = proc.indexArgs.length > 0 + , code = []; + for(var i=0; i0;){"].join("")); // Iterate back to front + code.push(["if(j",i,"<",blockSize,"){"].join("")); // Either decrease j by blockSize (s = blockSize), or set it to zero (after setting s = j). + code.push(["s",order[i],"=j",i].join("")); + code.push(["j",i,"=0"].join("")); + code.push(["}else{s",order[i],"=",blockSize].join("")); + code.push(["j",i,"-=",blockSize,"}"].join("")); + if(has_index) { + code.push(["index[",order[i],"]=j",i].join("")); + } + } + for(var i=0; i 0) { + allEqual = allEqual && summary[i] === summary[i-1]; + } + } + if(allEqual) { + return summary[0] + } + return summary.join("") +} + +//Generates a cwise operator +function generateCWiseOp(proc, typesig) { + + //Compute dimension + // Arrays get put first in typesig, and there are two entries per array (dtype and order), so this gets the number of dimensions in the first array arg. + var dimension = (typesig[1].length - Math.abs(proc.arrayBlockIndices[0]))|0; + var orders = new Array(proc.arrayArgs.length); + var dtypes = new Array(proc.arrayArgs.length); + for(var i=0; i 0) { + vars.push("shape=SS.slice(0)"); // Makes the shape over which we iterate available to the user defined functions (so you can use width/height for example) + } + if(proc.indexArgs.length > 0) { + // Prepare an array to keep track of the (logical) indices, initialized to dimension zeroes. + var zeros = new Array(dimension); + for(var i=0; i 0) { + code.push("var " + vars.join(",")); + } + for(var i=0; i 3) { + code.push(processBlock(proc.pre, proc, dtypes)); + } + + //Process body + var body = processBlock(proc.body, proc, dtypes); + var matched = countMatches(loopOrders); + if(matched < dimension) { + code.push(outerFill(matched, loopOrders[0], proc, body)); // TODO: Rather than passing loopOrders[0], it might be interesting to look at passing an order that represents the majority of the arguments for example. 
+ } else { + code.push(innerFill(loopOrders[0], proc, body)); + } + + //Inline epilog + if(proc.post.body.length > 3) { + code.push(processBlock(proc.post, proc, dtypes)); + } + + if(proc.debug) { + console.log("-----Generated cwise routine for ", typesig, ":\n" + code.join("\n") + "\n----------"); + } + + var loopName = [(proc.funcName||"unnamed"), "_cwise_loop_", orders[0].join("s"),"m",matched,typeSummary(dtypes)].join(""); + var f = new Function(["function ",loopName,"(", arglist.join(","),"){", code.join("\n"),"} return ", loopName].join("")); + return f() +} +var compile = generateCWiseOp; + +// The function below is called when constructing a cwise function object, and does the following: +// A function object is constructed which accepts as argument a compilation function and returns another function. +// It is this other function that is eventually returned by createThunk, and this function is the one that actually +// checks whether a certain pattern of arguments has already been used before and compiles new loops as needed. +// The compilation passed to the first function object is used for compiling new functions. +// Once this function object is created, it is called with compile as argument, where the first argument of compile +// is bound to "proc" (essentially containing a preprocessed version of the user arguments to cwise). +// So createThunk roughly works like this: +// function createThunk(proc) { +// var thunk = function(compileBound) { +// var CACHED = {} +// return function(arrays and scalars) { +// if (dtype and order of arrays in CACHED) { +// var func = CACHED[dtype and order of arrays] +// } else { +// var func = CACHED[dtype and order of arrays] = compileBound(dtype and order of arrays) +// } +// return func(arrays and scalars) +// } +// } +// return thunk(compile.bind1(proc)) +// } + + + +function createThunk(proc) { + var code = ["'use strict'", "var CACHED={}"]; + var vars = []; + var thunkName = proc.funcName + "_cwise_thunk"; + + //Build thunk + code.push(["return function ", thunkName, "(", proc.shimArgs.join(","), "){"].join("")); + var typesig = []; + var string_typesig = []; + var proc_args = [["array",proc.arrayArgs[0],".shape.slice(", // Slice shape so that we only retain the shape over which we iterate (which gets passed to the cwise operator as SS). 
+ Math.max(0,proc.arrayBlockIndices[0]),proc.arrayBlockIndices[0]<0?(","+proc.arrayBlockIndices[0]+")"):")"].join("")]; + var shapeLengthConditions = [], shapeConditions = []; + // Process array arguments + for(var i=0; i0) { // Gather conditions to check for shape equality (ignoring block indices) + shapeLengthConditions.push("array" + proc.arrayArgs[0] + ".shape.length===array" + j + ".shape.length+" + (Math.abs(proc.arrayBlockIndices[0])-Math.abs(proc.arrayBlockIndices[i]))); + shapeConditions.push("array" + proc.arrayArgs[0] + ".shape[shapeIndex+" + Math.max(0,proc.arrayBlockIndices[0]) + "]===array" + j + ".shape[shapeIndex+" + Math.max(0,proc.arrayBlockIndices[i]) + "]"); + } + } + // Check for shape equality + if (proc.arrayArgs.length > 1) { + code.push("if (!(" + shapeLengthConditions.join(" && ") + ")) throw new Error('cwise: Arrays do not all have the same dimensionality!')"); + code.push("for(var shapeIndex=array" + proc.arrayArgs[0] + ".shape.length-" + Math.abs(proc.arrayBlockIndices[0]) + "; shapeIndex-->0;) {"); + code.push("if (!(" + shapeConditions.join(" && ") + ")) throw new Error('cwise: Arrays do not all have the same shape!')"); + code.push("}"); + } + // Process scalar arguments + for(var i=0; i0) { + throw new Error("cwise: pre() block may not reference array args") + } + if(i < proc.post.args.length && proc.post.args[i].count>0) { + throw new Error("cwise: post() block may not reference array args") + } + } else if(arg_type === "scalar") { + proc.scalarArgs.push(i); + proc.shimArgs.push("scalar" + i); + } else if(arg_type === "index") { + proc.indexArgs.push(i); + if(i < proc.pre.args.length && proc.pre.args[i].count > 0) { + throw new Error("cwise: pre() block may not reference array index") + } + if(i < proc.body.args.length && proc.body.args[i].lvalue) { + throw new Error("cwise: body() block may not write to array index") + } + if(i < proc.post.args.length && proc.post.args[i].count > 0) { + throw new Error("cwise: post() block may not reference array index") + } + } else if(arg_type === "shape") { + proc.shapeArgs.push(i); + if(i < proc.pre.args.length && proc.pre.args[i].lvalue) { + throw new Error("cwise: pre() block may not write to array shape") + } + if(i < proc.body.args.length && proc.body.args[i].lvalue) { + throw new Error("cwise: body() block may not write to array shape") + } + if(i < proc.post.args.length && proc.post.args[i].lvalue) { + throw new Error("cwise: post() block may not write to array shape") + } + } else if(typeof arg_type === "object" && arg_type.offset) { + proc.argTypes[i] = "offset"; + proc.offsetArgs.push({ array: arg_type.array, offset:arg_type.offset }); + proc.offsetArgIndex.push(i); + } else { + throw new Error("cwise: Unknown argument type " + proc_args[i]) + } + } + + //Make sure at least one array argument was specified + if(proc.arrayArgs.length <= 0) { + throw new Error("cwise: No array arguments specified") + } + + //Make sure arguments are correct + if(proc.pre.args.length > proc_args.length) { + throw new Error("cwise: Too many arguments in pre() block") + } + if(proc.body.args.length > proc_args.length) { + throw new Error("cwise: Too many arguments in body() block") + } + if(proc.post.args.length > proc_args.length) { + throw new Error("cwise: Too many arguments in post() block") + } + + //Check debug flag + proc.debug = !!user_args.printCode || !!user_args.debug; + + //Retrieve name + proc.funcName = user_args.funcName || "cwise"; + + //Read in block size + proc.blockSize = user_args.blockSize || 64; + + return 
thunk(proc) +} + +var compiler = compileCwise; + +var ndarrayOps = createCommonjsModule(function (module, exports) { +"use strict"; + + + +var EmptyProc = { + body: "", + args: [], + thisVars: [], + localVars: [] +}; + +function fixup(x) { + if(!x) { + return EmptyProc + } + for(var i=0; i>", + rrshift: ">>>" +};(function(){ + for(var id in assign_ops) { + var op = assign_ops[id]; + exports[id] = makeOp({ + args: ["array","array","array"], + body: {args:["a","b","c"], + body: "a=b"+op+"c"}, + funcName: id + }); + exports[id+"eq"] = makeOp({ + args: ["array","array"], + body: {args:["a","b"], + body:"a"+op+"=b"}, + rvalue: true, + funcName: id+"eq" + }); + exports[id+"s"] = makeOp({ + args: ["array", "array", "scalar"], + body: {args:["a","b","s"], + body:"a=b"+op+"s"}, + funcName: id+"s" + }); + exports[id+"seq"] = makeOp({ + args: ["array","scalar"], + body: {args:["a","s"], + body:"a"+op+"=s"}, + rvalue: true, + funcName: id+"seq" + }); + } +})(); + +var unary_ops = { + not: "!", + bnot: "~", + neg: "-", + recip: "1.0/" +};(function(){ + for(var id in unary_ops) { + var op = unary_ops[id]; + exports[id] = makeOp({ + args: ["array", "array"], + body: {args:["a","b"], + body:"a="+op+"b"}, + funcName: id + }); + exports[id+"eq"] = makeOp({ + args: ["array"], + body: {args:["a"], + body:"a="+op+"a"}, + rvalue: true, + count: 2, + funcName: id+"eq" + }); + } +})(); + +var binary_ops = { + and: "&&", + or: "||", + eq: "===", + neq: "!==", + lt: "<", + gt: ">", + leq: "<=", + geq: ">=" +};(function() { + for(var id in binary_ops) { + var op = binary_ops[id]; + exports[id] = makeOp({ + args: ["array","array","array"], + body: {args:["a", "b", "c"], + body:"a=b"+op+"c"}, + funcName: id + }); + exports[id+"s"] = makeOp({ + args: ["array","array","scalar"], + body: {args:["a", "b", "s"], + body:"a=b"+op+"s"}, + funcName: id+"s" + }); + exports[id+"eq"] = makeOp({ + args: ["array", "array"], + body: {args:["a", "b"], + body:"a=a"+op+"b"}, + rvalue:true, + count:2, + funcName: id+"eq" + }); + exports[id+"seq"] = makeOp({ + args: ["array", "scalar"], + body: {args:["a","s"], + body:"a=a"+op+"s"}, + rvalue:true, + count:2, + funcName: id+"seq" + }); + } +})(); + +var math_unary = [ + "abs", + "acos", + "asin", + "atan", + "ceil", + "cos", + "exp", + "floor", + "log", + "round", + "sin", + "sqrt", + "tan" +];(function() { + for(var i=0; ithis_s){this_s=-a}else if(a>this_s){this_s=a}", localVars: [], thisVars: ["this_s"]}, + post: {args:[], localVars:[], thisVars:["this_s"], body:"return this_s"}, + funcName: "norminf" +}); + +exports.norm1 = compiler({ + args:["array"], + pre: {args:[], localVars:[], thisVars:["this_s"], body:"this_s=0"}, + body: {args:[{name:"a", lvalue:false, rvalue:true, count:3}], body: "this_s+=a<0?-a:a", localVars: [], thisVars: ["this_s"]}, + post: {args:[], localVars:[], thisVars:["this_s"], body:"return this_s"}, + funcName: "norm1" +}); + +exports.sup = compiler({ + args: [ "array" ], + pre: + { body: "this_h=-Infinity", + args: [], + thisVars: [ "this_h" ], + localVars: [] }, + body: + { body: "if(_inline_1_arg0_>this_h)this_h=_inline_1_arg0_", + args: [{"name":"_inline_1_arg0_","lvalue":false,"rvalue":true,"count":2} ], + thisVars: [ "this_h" ], + localVars: [] }, + post: + { body: "return this_h", + args: [], + thisVars: [ "this_h" ], + localVars: [] } + }); + +exports.inf = compiler({ + args: [ "array" ], + pre: + { body: "this_h=Infinity", + args: [], + thisVars: [ "this_h" ], + localVars: [] }, + body: + { body: "if(_inline_1_arg0_this_v){this_v=_inline_1_arg1_;for(var 
_inline_1_k=0;_inline_1_k<_inline_1_arg0_.length;++_inline_1_k){this_i[_inline_1_k]=_inline_1_arg0_[_inline_1_k]}}}", + args:[ + {name:"_inline_1_arg0_",lvalue:false,rvalue:true,count:2}, + {name:"_inline_1_arg1_",lvalue:false,rvalue:true,count:2}], + thisVars:["this_i","this_v"], + localVars:["_inline_1_k"]}, + post:{ + body:"{return this_i}", + args:[], + thisVars:["this_i"], + localVars:[]} +}); + +exports.random = makeOp({ + args: ["array"], + pre: {args:[], body:"this_f=Math.random", thisVars:["this_f"]}, + body: {args: ["a"], body:"a=this_f()", thisVars:["this_f"]}, + funcName: "random" +}); + +exports.assign = makeOp({ + args:["array", "array"], + body: {args:["a", "b"], body:"a=b"}, + funcName: "assign" }); + +exports.assigns = makeOp({ + args:["array", "scalar"], + body: {args:["a", "b"], body:"a=b"}, + funcName: "assigns" }); + + +exports.equals = compiler({ + args:["array", "array"], + pre: EmptyProc, + body: {args:[{name:"x", lvalue:false, rvalue:true, count:1}, + {name:"y", lvalue:false, rvalue:true, count:1}], + body: "if(x!==y){return false}", + localVars: [], + thisVars: []}, + post: {args:[], localVars:[], thisVars:[], body:"return true"}, + funcName: "equals" +}); +}); + +/** + * Bit twiddling hacks for JavaScript. + * + * Author: Mikola Lysenko + * + * Ported from Stanford bit twiddling hack library: + * http://graphics.stanford.edu/~seander/bithacks.html + */ + +//Number of bits in an integer +var INT_BITS = 32; + +//Constants +var INT_BITS_1 = INT_BITS; +var INT_MAX = 0x7fffffff; +var INT_MIN = -1<<(INT_BITS-1); + +//Returns -1, 0, +1 depending on sign of x +var sign = function(v) { + return (v > 0) - (v < 0); +}; + +//Computes absolute value of integer +var abs = function(v) { + var mask = v >> (INT_BITS-1); + return (v ^ mask) - mask; +}; + +//Computes minimum of integers x and y +var min = function(x, y) { + return y ^ ((x ^ y) & -(x < y)); +}; + +//Computes maximum of integers x and y +var max = function(x, y) { + return x ^ ((x ^ y) & -(x < y)); +}; + +//Checks if a number is a power of two +var isPow2 = function(v) { + return !(v & (v-1)) && (!!v); +}; + +//Computes log base 2 of v +var log2 = function(v) { + var r, shift; + r = (v > 0xFFFF) << 4; v >>>= r; + shift = (v > 0xFF ) << 3; v >>>= shift; r |= shift; + shift = (v > 0xF ) << 2; v >>>= shift; r |= shift; + shift = (v > 0x3 ) << 1; v >>>= shift; r |= shift; + return r | (v >> 1); +}; + +//Computes log base 10 of v +var log10 = function(v) { + return (v >= 1000000000) ? 9 : (v >= 100000000) ? 8 : (v >= 10000000) ? 7 : + (v >= 1000000) ? 6 : (v >= 100000) ? 5 : (v >= 10000) ? 4 : + (v >= 1000) ? 3 : (v >= 100) ? 2 : (v >= 10) ? 
1 : 0; +}; + +//Counts number of bits +var popCount = function(v) { + v = v - ((v >>> 1) & 0x55555555); + v = (v & 0x33333333) + ((v >>> 2) & 0x33333333); + return ((v + (v >>> 4) & 0xF0F0F0F) * 0x1010101) >>> 24; +}; + +//Counts number of trailing zeros +function countTrailingZeros(v) { + var c = 32; + v &= -v; + if (v) c--; + if (v & 0x0000FFFF) c -= 16; + if (v & 0x00FF00FF) c -= 8; + if (v & 0x0F0F0F0F) c -= 4; + if (v & 0x33333333) c -= 2; + if (v & 0x55555555) c -= 1; + return c; +} +var countTrailingZeros_1 = countTrailingZeros; + +//Rounds to next power of 2 +var nextPow2 = function(v) { + v += v === 0; + --v; + v |= v >>> 1; + v |= v >>> 2; + v |= v >>> 4; + v |= v >>> 8; + v |= v >>> 16; + return v + 1; +}; + +//Rounds down to previous power of 2 +var prevPow2 = function(v) { + v |= v >>> 1; + v |= v >>> 2; + v |= v >>> 4; + v |= v >>> 8; + v |= v >>> 16; + return v - (v>>>1); +}; + +//Computes parity of word +var parity = function(v) { + v ^= v >>> 16; + v ^= v >>> 8; + v ^= v >>> 4; + v &= 0xf; + return (0x6996 >>> v) & 1; +}; + +var REVERSE_TABLE = new Array(256); + +(function(tab) { + for(var i=0; i<256; ++i) { + var v = i, r = i, s = 7; + for (v >>>= 1; v; v >>>= 1) { + r <<= 1; + r |= v & 1; + --s; + } + tab[i] = (r << s) & 0xff; + } +})(REVERSE_TABLE); + +//Reverse bits in a 32 bit word +var reverse = function(v) { + return (REVERSE_TABLE[ v & 0xff] << 24) | + (REVERSE_TABLE[(v >>> 8) & 0xff] << 16) | + (REVERSE_TABLE[(v >>> 16) & 0xff] << 8) | + REVERSE_TABLE[(v >>> 24) & 0xff]; +}; + +//Interleave bits of 2 coordinates with 16 bits. Useful for fast quadtree codes +var interleave2 = function(x, y) { + x &= 0xFFFF; + x = (x | (x << 8)) & 0x00FF00FF; + x = (x | (x << 4)) & 0x0F0F0F0F; + x = (x | (x << 2)) & 0x33333333; + x = (x | (x << 1)) & 0x55555555; + + y &= 0xFFFF; + y = (y | (y << 8)) & 0x00FF00FF; + y = (y | (y << 4)) & 0x0F0F0F0F; + y = (y | (y << 2)) & 0x33333333; + y = (y | (y << 1)) & 0x55555555; + + return x | (y << 1); +}; + +//Extracts the nth interleaved component +var deinterleave2 = function(v, n) { + v = (v >>> n) & 0x55555555; + v = (v | (v >>> 1)) & 0x33333333; + v = (v | (v >>> 2)) & 0x0F0F0F0F; + v = (v | (v >>> 4)) & 0x00FF00FF; + v = (v | (v >>> 16)) & 0x000FFFF; + return (v << 16) >> 16; +}; + + +//Interleave bits of 3 coordinates, each with 10 bits. 
Useful for fast octree codes +var interleave3 = function(x, y, z) { + x &= 0x3FF; + x = (x | (x<<16)) & 4278190335; + x = (x | (x<<8)) & 251719695; + x = (x | (x<<4)) & 3272356035; + x = (x | (x<<2)) & 1227133513; + + y &= 0x3FF; + y = (y | (y<<16)) & 4278190335; + y = (y | (y<<8)) & 251719695; + y = (y | (y<<4)) & 3272356035; + y = (y | (y<<2)) & 1227133513; + x |= (y << 1); + + z &= 0x3FF; + z = (z | (z<<16)) & 4278190335; + z = (z | (z<<8)) & 251719695; + z = (z | (z<<4)) & 3272356035; + z = (z | (z<<2)) & 1227133513; + + return x | (z << 2); +}; + +//Extracts nth interleaved component of a 3-tuple +var deinterleave3 = function(v, n) { + v = (v >>> n) & 1227133513; + v = (v | (v>>>2)) & 3272356035; + v = (v | (v>>>4)) & 251719695; + v = (v | (v>>>8)) & 4278190335; + v = (v | (v>>>16)) & 0x3FF; + return (v<<22)>>22; +}; + +//Computes next combination in colexicographic order (this is mistakenly called nextPermutation on the bit twiddling hacks page) +var nextCombination = function(v) { + var t = v | (v - 1); + return (t + 1) | (((~t & -~t) - 1) >>> (countTrailingZeros(v) + 1)); +}; + +var twiddle = { + INT_BITS: INT_BITS_1, + INT_MAX: INT_MAX, + INT_MIN: INT_MIN, + sign: sign, + abs: abs, + min: min, + max: max, + isPow2: isPow2, + log2: log2, + log10: log10, + popCount: popCount, + countTrailingZeros: countTrailingZeros_1, + nextPow2: nextPow2, + prevPow2: prevPow2, + parity: parity, + reverse: reverse, + interleave2: interleave2, + deinterleave2: deinterleave2, + interleave3: interleave3, + deinterleave3: deinterleave3, + nextCombination: nextCombination +}; + +function dupe_array(count, value, i) { + var c = count[i]|0; + if(c <= 0) { + return [] + } + var result = new Array(c), j; + if(i === count.length-1) { + for(j=0; j 0) { + return dupe_number(count|0, value) + } + break + case "object": + if(typeof (count.length) === "number") { + return dupe_array(count, value, 0) + } + break + } + return [] +} + +var dup = dupe; + +var pool = createCommonjsModule(function (module, exports) { +'use strict'; + + + + +//Legacy pool support +if(!commonjsGlobal.__TYPEDARRAY_POOL) { + commonjsGlobal.__TYPEDARRAY_POOL = { + UINT8 : dup([32, 0]) + , UINT16 : dup([32, 0]) + , UINT32 : dup([32, 0]) + , INT8 : dup([32, 0]) + , INT16 : dup([32, 0]) + , INT32 : dup([32, 0]) + , FLOAT : dup([32, 0]) + , DOUBLE : dup([32, 0]) + , DATA : dup([32, 0]) + , UINT8C : dup([32, 0]) + , BUFFER : dup([32, 0]) + }; +} + +var hasUint8C = (typeof Uint8ClampedArray) !== 'undefined'; +var POOL = commonjsGlobal.__TYPEDARRAY_POOL; + +//Upgrade pool +if(!POOL.UINT8C) { + POOL.UINT8C = dup([32, 0]); +} +if(!POOL.BUFFER) { + POOL.BUFFER = dup([32, 0]); +} + +//New technique: Only allocate from ArrayBufferView and Buffer +var DATA = POOL.DATA + , BUFFER = POOL.BUFFER; + +exports.free = function free(array) { + if(isBuffer(array)) { + BUFFER[twiddle.log2(array.length)].push(array); + } else { + if(Object.prototype.toString.call(array) !== '[object ArrayBuffer]') { + array = array.buffer; + } + if(!array) { + return + } + var n = array.length || array.byteLength; + var log_n = twiddle.log2(n)|0; + DATA[log_n].push(array); + } +}; + +function freeArrayBuffer(buffer) { + if(!buffer) { + return + } + var n = buffer.length || buffer.byteLength; + var log_n = twiddle.log2(n); + DATA[log_n].push(buffer); +} + +function freeTypedArray(array) { + freeArrayBuffer(array.buffer); +} + +exports.freeUint8 = +exports.freeUint16 = +exports.freeUint32 = +exports.freeInt8 = +exports.freeInt16 = +exports.freeInt32 = +exports.freeFloat32 = 
+exports.freeFloat = +exports.freeFloat64 = +exports.freeDouble = +exports.freeUint8Clamped = +exports.freeDataView = freeTypedArray; + +exports.freeArrayBuffer = freeArrayBuffer; + +exports.freeBuffer = function freeBuffer(array) { + BUFFER[twiddle.log2(array.length)].push(array); +}; + +exports.malloc = function malloc(n, dtype) { + if(dtype === undefined || dtype === 'arraybuffer') { + return mallocArrayBuffer(n) + } else { + switch(dtype) { + case 'uint8': + return mallocUint8(n) + case 'uint16': + return mallocUint16(n) + case 'uint32': + return mallocUint32(n) + case 'int8': + return mallocInt8(n) + case 'int16': + return mallocInt16(n) + case 'int32': + return mallocInt32(n) + case 'float': + case 'float32': + return mallocFloat(n) + case 'double': + case 'float64': + return mallocDouble(n) + case 'uint8_clamped': + return mallocUint8Clamped(n) + case 'buffer': + return mallocBuffer(n) + case 'data': + case 'dataview': + return mallocDataView(n) + + default: + return null + } + } + return null +}; + +function mallocArrayBuffer(n) { + var n = twiddle.nextPow2(n); + var log_n = twiddle.log2(n); + var d = DATA[log_n]; + if(d.length > 0) { + return d.pop() + } + return new ArrayBuffer(n) +} +exports.mallocArrayBuffer = mallocArrayBuffer; + +function mallocUint8(n) { + return new Uint8Array(mallocArrayBuffer(n), 0, n) +} +exports.mallocUint8 = mallocUint8; + +function mallocUint16(n) { + return new Uint16Array(mallocArrayBuffer(2*n), 0, n) +} +exports.mallocUint16 = mallocUint16; + +function mallocUint32(n) { + return new Uint32Array(mallocArrayBuffer(4*n), 0, n) +} +exports.mallocUint32 = mallocUint32; + +function mallocInt8(n) { + return new Int8Array(mallocArrayBuffer(n), 0, n) +} +exports.mallocInt8 = mallocInt8; + +function mallocInt16(n) { + return new Int16Array(mallocArrayBuffer(2*n), 0, n) +} +exports.mallocInt16 = mallocInt16; + +function mallocInt32(n) { + return new Int32Array(mallocArrayBuffer(4*n), 0, n) +} +exports.mallocInt32 = mallocInt32; + +function mallocFloat(n) { + return new Float32Array(mallocArrayBuffer(4*n), 0, n) +} +exports.mallocFloat32 = exports.mallocFloat = mallocFloat; + +function mallocDouble(n) { + return new Float64Array(mallocArrayBuffer(8*n), 0, n) +} +exports.mallocFloat64 = exports.mallocDouble = mallocDouble; + +function mallocUint8Clamped(n) { + if(hasUint8C) { + return new Uint8ClampedArray(mallocArrayBuffer(n), 0, n) + } else { + return mallocUint8(n) + } +} +exports.mallocUint8Clamped = mallocUint8Clamped; + +function mallocDataView(n) { + return new DataView(mallocArrayBuffer(n), 0, n) +} +exports.mallocDataView = mallocDataView; + +function mallocBuffer(n) { + n = twiddle.nextPow2(n); + var log_n = twiddle.log2(n); + var cache = BUFFER[log_n]; + if(cache.length > 0) { + return cache.pop() + } + return new Buffer(n) +} +exports.mallocBuffer = mallocBuffer; + +exports.clearCache = function clearCache() { + for(var i=0; i<32; ++i) { + POOL.UINT8[i].length = 0; + POOL.UINT16[i].length = 0; + POOL.UINT32[i].length = 0; + POOL.INT8[i].length = 0; + POOL.INT16[i].length = 0; + POOL.INT32[i].length = 0; + POOL.FLOAT[i].length = 0; + POOL.DOUBLE[i].length = 0; + POOL.UINT8C[i].length = 0; + DATA[i].length = 0; + BUFFER[i].length = 0; + } +}; +}); + +function fft$1(dir, nrows, ncols, buffer, x_ptr, y_ptr, scratch_ptr) { + dir |= 0; + nrows |= 0; + ncols |= 0; + x_ptr |= 0; + y_ptr |= 0; + if(twiddle.isPow2(ncols)) { + fftRadix2(dir, nrows, ncols, buffer, x_ptr, y_ptr); + } else { + fftBluestein(dir, nrows, ncols, buffer, x_ptr, y_ptr, scratch_ptr); + 
} +} +var fftMatrix = fft$1; + +function scratchMemory(n) { + if(twiddle.isPow2(n)) { + return 0 + } + return 2 * n + 4 * twiddle.nextPow2(2*n + 1) +} +var scratchMemory_1 = scratchMemory; + + +//Radix 2 FFT Adapted from Paul Bourke's C Implementation +function fftRadix2(dir, nrows, ncols, buffer, x_ptr, y_ptr) { + dir |= 0; + nrows |= 0; + ncols |= 0; + x_ptr |= 0; + y_ptr |= 0; + var nn,m,i,i1,j,k,i2,l,l1,l2; + var c1,c2,t,t1,t2,u1,u2,z,row,a,b,c,d,k1,k2,k3; + + // Calculate the number of points + nn = ncols; + m = twiddle.log2(nn); + + for(row=0; row> 1; + j = 0; + for(i=0;i>= 1; + } + j += k; + } + + // Compute the FFT + c1 = -1.0; + c2 = 0.0; + l2 = 1; + for(l=0;l=0; --i) { + stride[i] = size; + size *= shape[i]; + pad = Math.max(pad, fftMatrix.scratchMemory(shape[i])); + if(x.shape[i] !== y.shape[i]) { + throw new Error('Shape mismatch, real and imaginary arrays must have same size') + } + } + var buf_size = 4 * size + pad; + var buffer; + if( x.dtype === 'array' || + x.dtype === 'float64' || + x.dtype === 'custom' ) { + buffer = pool.mallocDouble(buf_size); + } else { + buffer = pool.mallocFloat(buf_size); + } + var x1 = ndarray(buffer, shape.slice(0), stride, 0) + , y1 = ndarray(buffer, shape.slice(0), stride.slice(0), size) + , x2 = ndarray(buffer, shape.slice(0), stride.slice(0), 2*size) + , y2 = ndarray(buffer, shape.slice(0), stride.slice(0), 3*size) + , tmp, n, s1, s2 + , scratch_ptr = 4 * size; + + //Copy into x1/y1 + ndarrayOps.assign(x1, x); + ndarrayOps.assign(y1, y); + + for(i=d-1; i>=0; --i) { + fftMatrix(dir, size/shape[i], shape[i], buffer, x1.offset, y1.offset, scratch_ptr); + if(i === 0) { + break + } + + //Compute new stride for x2/y2 + n = 1; + s1 = x2.stride; + s2 = y2.stride; + for(j=i-1; j=0; --j) { + s2[j] = s1[j] = n; + n *= shape[j]; + } + + //Transpose + ndarrayOps.assign(x2, x1); + ndarrayOps.assign(y2, y1); + + //Swap buffers + tmp = x1; + x1 = x2; + x2 = tmp; + tmp = y1; + y1 = y2; + y2 = tmp; + } + + //Copy result back into x + ndarrayOps.assign(x, x1); + ndarrayOps.assign(y, y1); + + pool.free(buffer); +} + +var fft = ndfft; + +const DIRECTIONS = { + 'FORWARD': 1, + 'INVERSE': -1, +}; + +class BaseFourierSignalFilter extends Filter { + constructor(direction) { + super(); + this.direction = direction; + if (DIRECTIONS[this.direction] === undefined) { + throw new Error(`${this.direction} is not a valid fourier transform direction. Please try one of: ${Object.keys(DIRECTIONS)}`); + } + this.addInputValidator(0, Signal1D); + } + _run() { + if( ! 
this.hasValidInput()){ + console.warn("A filter of type BaseFourierSignalFilter requires 1 input of Signal1D."); + return; + } + const inputSignal = this._getInput(0); + const length = inputSignal.getMetadata('length'); + const real = ndarray(inputSignal.clone().getData(), [length]); + const img = ndarray(inputSignal.hollowClone().getData(), [length]); + this.setMetadata('direction', this.direction); + + fft(DIRECTIONS[this.direction], real, img); + this._output[0] = new Signal1D(); + this._output[0].setData(real.data); + this._output[1] = new Signal1D(); + this._output[1].setData(img.data); + } +} + +class ForwardFourierSignalFilter extends BaseFourierSignalFilter { + constructor() { + super('FORWARD'); + } +} + +class InverseFourerSignalFilter extends BaseFourierSignalFilter { + constructor() { + super('INVERSE'); + } +} + +/* +* Author Jonathan Lurie - http://me.jonahanlurie.fr +* License MIT +* Link https://github.com/jonathanlurie/pixpipejs +* Lab MCIN - Montreal Neurological Institute +*/ + +/** +* A filter of type ForEachPixelImageFilter can perform a operation on evey pixel +* of an Image2D with a simple interface. For this purpose, a per-pixel-callback +* must be specified using method +* .on( "pixel" , function( coord, color ){ ... }) +* where coord is of form {x, y} and color is of form [r, g, b, a] (with possibly) +* a different number of components per pixel. +* This callback must return, or null (original color not modified), +* or a array of color (same dimension as the one in arguments). +* +* **Usage** +* - [examples/forEachPixel.html](../examples/forEachPixel.html) +* +* @example +* var forEachPixelFilter = new pixpipe.ForEachPixelImageFilter(); +* forEachPixelFilter.on( "pixel", function(position, color){ +* +* return [ +* color[1], // red (takes the values from green) +* color[0], // green (takes the values from red) +* color[2] * 0.5, // blue get 50% darker +* 255 // alpha, at max +* ] +* +* } +* ); +* +*/ +class ForEachPixelImageFilter extends ImageToImageFilter { + + constructor(){ + super(); + this.addInputValidator(0, Image2D); + } + + + /** + * Run the filter + */ + _run(){ + if( ! this.hasValidInput() ) + return; + + var inputImage2D = this._getInput(); + var firstPixel = 0; + var lastPixel = inputImage2D.getWidth() * inputImage2D.getHeight(); + var increment = 1; + + var bufferCopy = inputImage2D.getDataCopy(); + + this._forEachPixelOfSuch(bufferCopy, firstPixel, lastPixel, increment ); + + // 1 - init the output + var outputImg = this._addOutput( Image2D ); + + // 2 - tune the output + outputImg.setData( + bufferCopy, + inputImage2D.getWidth(), + inputImage2D.getHeight(), + inputImage2D.getComponentsPerPixel() + ); + + } + + + /** + * [PRIVATE] + * generic function for painting row, colum or whole + * @param {Number} firstPixel - Index of the first pixel in 1D array + * @param {Number} lastPixel - Index of the last pixel in 1D array + * @param {Number} increment - jump gap from a pixel to another (in a 1D style) + */ + _forEachPixelOfSuch(buffer, firstPixel, lastPixel, increment ){ + // abort if no callback per pixel + //if( ! ("pixel" in this._events)){ + if( ! 
( this.hasEvent("pixel"))){ + console.warn("No function to apply per pixel was specified."); + return; + } + + var inputImage2D = this._getInput(); + var inputBuffer = inputImage2D.getData(); + var componentPerPixel = inputImage2D.getComponentsPerPixel(); + + var currentColor = null; + + for(var p=firstPixel; p Time: [\" + fromRecord + \" , \" + toRecord + \"] is \" + t + \" millisec.\");\n }\n\n return t;\n }else{\n console.warn(\"The two given record name must exist in the time record table.\");\n return -1;\n }\n }\n\n\n /**\n * Defines a callback. By defautl, no callback is called.\n */\n on(eventId, callback){\n this._events[ eventId ] = callback;\n }\n\n\n /**\n * Call an event with arguments.\n * Inside the callback, the \"this\" object will be the filter.\n * @param {String} eventName - name of the event to trigger\n * @param {Object} any other param can follow\n */\n triggerEvent( eventName /* any other arguments to follow */ ){\n var returnValue = null;\n \n if(this.hasEvent(eventName)){\n if( arguments.length > 1 ){\n \n // a-la-mano slicing argument array to comply with V8 JS engine optimization...\n var argToSend = [];\n for(var i=1; i 0 && options.height > 0){\n this.setMetadata(\"width\", options.width);\n this.setMetadata(\"height\", options.height);\n\n if(\"color\" in options){\n this.setMetadata(\"ncpp\", options.color.length );\n }\n\n this._data = new Float32Array( options.width * options.height * this.getMetadata(\"ncpp\") );\n var ncpp = this.getMetadata(\"ncpp\");\n\n // init with the given color\n if(\"color\" in options){\n var color = options.color;\n for(var i=0; i=0 && position.x < this._metadata.width &&\n \"y\" in position && position.y >=0 && position.y < this._metadata.height )\n {\n\n if(color.length == ncpp){\n var pos1D = this.get1dIndexFrom2dPosition( position );\n\n if(ncpp == 1){\n this._data[ pos1D ] = color[0];\n }else{\n pos1D *= ncpp;\n for(var i=0; i=0 && position.x < this._metadata.width &&\n \"y\" in position && position.y >=0 && position.y < this._metadata.height )\n {\n //var ncpp = this.getMetadata(\"ncpp\");\n var ncpp = this._metadata.ncpp;\n var color = null;\n var pos1D = this.get1dIndexFrom2dPosition( position );\n\n // \n if(ncpp == 1){\n color = [this._data[pos1D]];\n }else{\n pos1D *= ncpp;\n color = this._data.slice(pos1D, pos1D + ncpp);\n }\n \n return color;\n\n }else{\n console.warn(\"The requested position is outside the image.\");\n return null;\n }\n }\n\n\n /**\n * Get the width of the image\n * @return {Number} the width of the Image2D\n */\n getWidth(){\n return this._metadata.width;\n }\n\n\n /**\n * Get the height of the image\n * @return {Number} the height of the Image2D\n */\n getHeight(){\n return this._metadata.height;\n }\n\n\n /**\n * Get the number of components per pixel\n * @return {Number} the number of components per pixel\n */\n getComponentsPerPixel(){\n return this._metadata.ncpp;\n }\n\n \n /**\n * Alias to getComponentsPerPixel. 
Return the number of components per pixel.\n * @return {Number} ncpp\n */\n getNcpp(){\n return this.getComponentsPerPixel();\n }\n\n\n /**\n * Get the internal image data (pointer)\n * @return {TypedArray} the original data (most likely a Float32Array), dont mess up with this one.\n * in case of doubt, use getDataCopy()\n */\n getData(){\n return this._data; // return the actual array, editable!\n }\n\n\n /**\n * Get a copy of the data\n * @return {TypedArray} a deep copy of the data (most likely a Float32Array)\n */\n getDataCopy(){\n return new this._data.constructor( this._data );\n }\n\n\n /**\n * No matter the original type of the internal data, scale it into a [0, 255] uInt8Array\n * @return {Uint8Array} scaled data\n */\n getDataAsUInt8Array(){\n if(! this._data){\n console.warn(\"No data, cannot make a copy of it.\");\n return;\n }\n\n var min = this.getMin();\n var max = this.getMax();\n\n var uintData = new Uint8Array(this._data.length);\n\n for(var i=0; i= 0 && pos.x < this._metadata.width &&\n pos.y >= 0 && pos.y < this._metadata.height\n )\n }\n \n /**\n * Sample the color along a segment\n * @param {Object} posFrom - starting position of type {x: Number, y: Number}\n * @param {Object} posFrom - ending position of type {x: Number, y: Number}\n * @return {Object} array of Array like that: {\n positions: [\n {x: x0, y: y0},\n {x: x1, y: y1},\n {x: x2, y: y2},\n ...\n ],\n labels: [\n \"(x0, y0)\", \"(x1, y1)\", \"(x2, y2)\", ...\n ],\n colors: [\n [r0, r1, r2 ...],\n [g0, g1, g2 ...],\n [b0, b1, b2 ...]\n ]\n }\n return null if posFrom or posTo is outside\n */\n getSegmentSample( posFrom, posTo ){\n // both position must be inside the image\n if( !this.isInside(posFrom) || !this.isInside(posTo) )\n return null;\n \n var dx = posTo.x - posFrom.x;\n var dy = posTo.y - posFrom.y;\n var euclidianDistance = Math.sqrt( Math.pow(dx , 2) + Math.pow(dy , 2) );\n var numberOfSamples = Math.floor( euclidianDistance + 1 );\n \n // we want to sample every unit distance along the segment\n var stepX = dx / euclidianDistance;\n var stepY = dy / euclidianDistance;\n \n var ncpp = this._metadata.ncpp;\n var positions = new Array(numberOfSamples).fill(0);\n var colors = new Array(ncpp).fill(0);\n var labels = new Array(numberOfSamples).fill(0);\n \n // creating empty arrays for colors\n for(var c=0; c 0 && options.ySize > 0 && options.zSize > 0 ){\n xspace.space_length = options.xSize;\n yspace.space_length = options.ySize;\n zspace.space_length = options.zSize;\n\n yspace.offset = xspace.space_length;\n zspace.offset = xspace.space_length * yspace.space_length;\n\n this._data = new Float32Array( options.xSize * options.ySize * options.zSize * this.getMetadata(\"ncpp\") );\n this._data.fill(0);\n\n this._scanDataRange();\n this._finishHeader();\n }\n }\n }\n\n\n /**\n * Hardcode the datatype\n */\n static TYPE(){\n return \"IMAGE3D\";\n }\n\n\n /**\n * @return {Image3D} a deep copy instance of this Image3D\n */\n clone(){\n var cpImg = new Image3D();\n\n cpImg.setData(\n this._data,\n this.getMetadata(\"xspace\").space_length,\n this.getMetadata(\"yspace\").space_length,\n this.getMetadata(\"zspace\").space_length,\n {\n ncpp: this.getMetadata(\"ncpp\"),\n order: this.getMetadata(\"order\").slice(),\n deepCopy: true,\n }\n );\n\n cpImg.copyMetadataFrom( this );\n\n return cpImg;\n }\n\n\n /**\n * Set the data to this Image3D.\n * @param {Float32Array} array - 1D array of raw data stored as RGBARGBA...\n * @param {Number} xSize - length along x dimension of the Image3D\n * @param {Number} ySize - 
length along y dimension of the Image3D\n * @param {Number} zSize - length along z dimension of the Image3D\n * @param {Number} ncpp - number of components per pixel (default: 4)\n * @param {Boolean} deepCopy - if true, a copy of the data is given, if false we jsut give the pointer\n * @param {Object} options, among them:\n * - ncpp {Number} number of components per pixel. Default = 1\n * - order {Array} dimensionality order. Default = [\"zspace\", \"yspace\", \"xspace\"]\n * - deepCopy {Boolean} copy the whole array if true, or just the pointer if false. Default = false\n *\n */\n setData( array, xSize, ySize, zSize, options){\n var ncpp = 1;\n\n // number of components per pixel\n if(options && \"ncpp\" in options){\n ncpp = options.ncpp;\n }\n\n if( array.length != xSize*ySize*zSize*ncpp){\n console.warn(\"The array size does not match the width and height. Cannot init the Image3D.\");\n return;\n }\n\n // number of components per pixel\n if(options && \"ncpp\" in options){\n this.setMetadata(\"ncpp\", options.ncpp);\n }\n\n // dimensionality order\n if(options && \"order\" in options){\n this.setMetadata(\"order\", options.order);\n }\n\n // deep of shallow copy\n if(options && \"deepCopy\" in options && options.deepCopy){\n this._data = new array.constructor( array );\n }else{\n this._data = array;\n }\n\n var xspace = this.getMetadata(\"xspace\");\n var yspace = this.getMetadata(\"yspace\");\n var zspace = this.getMetadata(\"zspace\");\n\n xspace.space_length = xSize;\n yspace.space_length = ySize;\n zspace.space_length = zSize;\n\n yspace.offset = xspace.space_length;\n zspace.offset = xspace.space_length * yspace.space_length;\n\n this._scanDataRange();\n this._finishHeader();\n }\n\n\n /**\n * [PRIVATE]\n * Creates common fields all headers must contain.\n */\n _finishHeader() {\n var xspace = this.getMetadata(\"xspace\");\n var yspace = this.getMetadata(\"yspace\");\n var zspace = this.getMetadata(\"zspace\");\n\n xspace.name = \"xspace\";\n yspace.name = \"yspace\";\n zspace.name = \"zspace\";\n\n xspace.width_space = JSON.parse( JSON.stringify( yspace ) );//yspace;\n xspace.width = yspace.space_length;\n xspace.height_space = JSON.parse( JSON.stringify( zspace ) );//zspace;\n xspace.height = zspace.space_length;\n\n yspace.width_space = JSON.parse( JSON.stringify( xspace ) );//xspace;\n yspace.width = xspace.space_length;\n yspace.height_space = JSON.parse( JSON.stringify( zspace ) );//zspace;\n yspace.height = zspace.space_length;\n\n zspace.width_space = JSON.parse( JSON.stringify( xspace ) );//xspace;\n zspace.width = xspace.space_length;\n zspace.height_space = JSON.parse( JSON.stringify( yspace ) );//yspace;\n zspace.height = yspace.space_length;\n }\n\n\n /**\n * [PRIVATE]\n * Look for min and max on the dataset and add them to the header metadata\n */\n _scanDataRange(){\n var min = +Infinity;\n var max = -Infinity;\n\n for(var i=0; i 0;\n var y_positive = height_space.step > 0;\n var z_positive = axis_space.step > 0;\n\n // iterator for the result slice.\n var i = 0;\n var intensity = 0;\n var intensitySum = 0;\n var min = Infinity;\n var max = -Infinity;\n\n var maxOfVolume = this.getMetadata(\"voxel_max\");\n\n z = z_positive ? slice_num : axis_space.space_length - slice_num - 1;\n if (z >= 0 && z < axis_space.space_length) {\n tz_offset = time_offset + z * axis_space_offset;\n\n for (row = height - 1; row >= 0; row--) {\n y = y_positive ? 
row : height - row - 1;\n tzy_offset = tz_offset + y * height_space_offset;\n\n for (col = 0; col < width; col++) {\n x = x_positive ? col : width - col - 1;\n tzyx_offset = tzy_offset + x * width_space_offset;\n\n intensity = this._data[tzyx_offset];\n\n min = Math.min(min, intensity);\n max = Math.max(max, intensity);\n intensitySum += intensity;\n\n slice_data[i++] = intensity;\n }\n }\n }\n\n var outputImage = new Image2D();\n outputImage.setData( slice_data, width, height, 1);\n outputImage.setMetadata(\"min\", min);\n outputImage.setMetadata(\"max\", max);\n outputImage.setMetadata(\"avg\", intensitySum / (i-1) );\n return outputImage;\n\n }\n\n\n /**\n * Get the intensity of a given voxel, addressed by dimensionality order.\n * In case of doubt, use getIntensity_xyz instead.\n * @param {Number} i - Position within the biggest dimensionality order\n * @param {Number} j - Position within the in-the-middle dimensionality order\n * @param {Number} k - Position within the smallest dimensionality order\n */\n getIntensity_ijk(i, j, k, time = 0) {\n var order = this.getMetadata(\"order\");\n\n if (i < 0 || i >= this.getMetadata( order[0] ).space_length ||\n j < 0 || j >= this.getMetadata( order[1] ).space_length ||\n k < 0 || k >= this.getMetadata( order[2] ).space_length)\n {\n console.warn(\"getIntensity_ijk position is out of range.\");\n return 0;\n }\n\n //var time_offset = this.hasMetadata( \"time\" ) ? time * this.getMetadata( \"time\" ).offset : 0;\n var time_offset = this._metadata.time.offset * time;\n\n var xyzt_offset = (\n i * this.getMetadata( order[0] ).offset +\n j * this.getMetadata( order[1] ).offset +\n k * this.getMetadata( order[2] ).offset +\n time_offset);\n\n return this._data[xyzt_offset];\n }\n\n\n /**\n * Get the intensity of a given voxel, addressed by dimension names.\n * @param {Number} x - position within xspace\n * @param {Number} y - position within yspace\n * @param {Number} z - position within zspace\n * @param {Number} time - position in time (optional)\n */\n getIntensity_xyz(x, y, z, time = 0) {\n\n if (x < 0 || x >= this._metadata.xspace.space_length ||\n y < 0 || y >= this._metadata.yspace.space_length ||\n z < 0 || z >= this._metadata.zspace.space_length)\n {\n console.warn(\"getIntensity_xyz position is out of range.\");\n return 0;\n }\n\n //var time_offset = this.hasMetadata( \"time\" ) ? time * this.getMetadata( \"time\" ).offset : 0;\n var time_offset = this._metadata.time.offset * time;\n \n var xyzt_offset = (\n x * this._metadata.xspace.offset +\n y * this._metadata.yspace.offset +\n z * this._metadata.zspace.offset +\n time_offset);\n\n return this._data[xyzt_offset];\n }\n\n \n /**\n * Get the number of samples over time\n */\n getTimeLength(){\n return ( this.hasMetadata(\"time\") ? 
this.getMetadata(\"time\").space_length : 1 );\n }\n\n\n /**\n * Tells if a given point is inside or outside the image\n * @param {Object} pos - position like {x: Number, y: Number, z: Number}\n * @return {Boolean} true for inside, false for outside\n */\n isInside( pos ){\n return !(pos.x < 0 || pos.x >= this._metadata.xspace.space_length ||\n pos.y < 0 || pos.y >= this._metadata.yspace.space_length ||\n pos.z < 0 || pos.z >= this._metadata.zspace.space_length)\n }\n \n\n /**\n * Sample the color along a segment\n * @param {Object} posFrom - starting position of type {x: Number, y: Number, z: Number}\n * @param {Object} posFrom - ending position of type {x: Number, y: Number, z: Number}\n * @return {Object} array of Array like that: {\n positions: [\n {x: x0, y: y0, z: z0},\n {x: x1, y: y1, z: z1},\n {x: x2, y: y2, z: z2},\n ...\n ],\n labels: [\n \"(x0, y0, z0)\", \"(x1, y1, z1)\", \"(x2, y2, z2)\", ...\n ],\n colors: [\n [r0, r1, r2 ...],\n [g0, g1, g2 ...],\n [b0, b1, b2 ...]\n ]\n }\n return null if posFrom or posTo is outside\n */\n getSegmentSample( posFrom, posTo, time = 0 ){\n // both position must be inside the image\n if( !this.isInside(posFrom) || !this.isInside(posTo) )\n return null;\n \n var dx = posTo.x - posFrom.x;\n var dy = posTo.y - posFrom.y;\n var dz = posTo.z - posFrom.z;\n var euclidianDistance = Math.sqrt( Math.pow(dx , 2) + Math.pow(dy , 2) + Math.pow(dz , 2) );\n var numberOfSamples = Math.floor( euclidianDistance + 1 );\n \n // we want to sample every unit distance along the segment\n var stepX = dx / euclidianDistance;\n var stepY = dy / euclidianDistance;\n var stepZ = dz / euclidianDistance;\n \n var ncpp = this._metadata.ncpp;\n var positions = new Array(numberOfSamples).fill(0);\n var colors = new Array(ncpp).fill(0);\n var labels = new Array(numberOfSamples).fill(0);\n \n // creating empty arrays for colors\n for(var c=0; c lo_offset) {\n var tmp = byte_data[d + hi_offset];\n byte_data[d + hi_offset] = byte_data[d + lo_offset];\n byte_data[d + lo_offset] = tmp;\n hi_offset--;\n lo_offset++;\n }\n }\n }\n\n\n /**\n * Initialize a MniVolume with the data and the header.\n * @param {Array} data - TypedArray containing the data\n */\n setData( data, header ){\n var that = this;\n this._data = data;\n\n this.setMetadata( \"position\", {} );\n this.setMetadata( \"current_time\", 0 );\n\n // copying header into metadata\n var headerKeys = Object.keys(header);\n headerKeys.forEach( function(key){\n that.setMetadata( key, header[key] );\n })\n\n // find min/max\n this._scanDataRange();\n\n // set W2v matrix\n this._saveOriginAndTransform();\n\n // adding some fields to metadata header\n this._finishHeader()\n\n console.log(this._metadata);\n }\n\n\n\n\n\n /**\n * [PRIVATE}\n * Calculate the world to voxel transform and save it, so we\n * can access it efficiently. 
The transform is:\n * cxx / stepx | cxy / stepx | cxz / stepx | (-o.x * cxx - o.y * cxy - o.z * cxz) / stepx\n * cyx / stepy | cyy / stepy | cyz / stepy | (-o.x * cyx - o.y * cyy - o.z * cyz) / stepy\n * czx / stepz | czy / stepz | czz / stepz | (-o.x * czx - o.y * czy - o.z * czz) / stepz\n * 0 | 0 | 0 | 1\n *\n * Origin equation taken from (http://www.bic.mni.mcgill.ca/software/minc/minc2_format/node4.html)\n */\n _saveOriginAndTransform() {\n\n var xspace = this.getMetadata(\"xspace\");\n var yspace = this.getMetadata(\"yspace\");\n var zspace = this.getMetadata(\"zspace\");\n\n var startx = xspace.start;\n var starty = yspace.start;\n var startz = zspace.start;\n var cx = xspace.direction_cosines;\n var cy = yspace.direction_cosines;\n var cz = zspace.direction_cosines;\n var stepx = xspace.step;\n var stepy = yspace.step;\n var stepz = zspace.step;\n\n // voxel_origin\n var o = {\n x: startx * cx[0] + starty * cy[0] + startz * cz[0],\n y: startx * cx[1] + starty * cy[1] + startz * cz[1],\n z: startx * cx[2] + starty * cy[2] + startz * cz[2]\n };\n\n this.setMetadata(\"voxel_origin\", o);\n\n var tx = (-o.x * cx[0] - o.y * cx[1] - o.z * cx[2]) / stepx;\n var ty = (-o.x * cy[0] - o.y * cy[1] - o.z * cy[2]) / stepy;\n var tz = (-o.x * cz[0] - o.y * cz[1] - o.z * cz[2]) / stepz;\n\n var w2v = [\n [cx[0] / stepx, cx[1] / stepx, cx[2] / stepx, tx],\n [cy[0] / stepy, cy[1] / stepy, cy[2] / stepy, ty],\n [cz[0] / stepz, cz[1] / stepz, cz[2] / stepz, tz]\n ];\n\n this.setMetadata(\"w2v\", w2v);\n }\n\n\n} /* END of class Image3D */\n\nexport { MniVolume }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\nimport { PixpipeContainer } from './PixpipeContainer.js';\n\n\n/**\n* A LineString is a vectorial reprensation of a line or polyline, open or closed.\n* When closed, it can be considered as a polygon. \n* By default, a LineString is 2 dimensional but the dimension can be changed when\n* using the `.setData(...)` method or before any point addition with `.setNod()`.\n* To close a LineString, use `.setMetadata(\"closed\", true);`, this will not add\n* any point but will flag this LineString as \"closed\".\n*\n*/\nclass LineString extends PixpipeContainer {\n \n constructor() {\n super();\n this.setMetadata(\"closed\", false);\n this.setMetadata(\"defaultNod\", 2);\n this.setMetadata(\"nod\", 2);\n \n this._type = LineString.TYPE();\n \n // local record for saving the last point\n this._lastPoint = null;\n }\n \n \n /**\n * Hardcode the datatype\n */\n static TYPE(){\n return \"LINESTRING\";\n }\n \n \n /**\n * Set/replace the point data.\n * @param {points} points - 1D array containing coord [x, y, x, y, x, y, ...]\n * @param {Number} nod - Number of Dimensions, default = 2\n * @param {Boolean} deepCopy - pointer copy if false, deep copy if true.\n */\n setData(points, nod=-1, deepCopy=false){\n if( nod != -1){\n this.setMetadata(\"nod\", nod);\n }\n \n if(points.length % this.getMetadata(\"nod\") != 0 ){\n console.warn(\"The number of points is not compatible with the number of dimensions (nod).\");\n return;\n }\n \n if(deepCopy){\n this._data = new points.constructor( points );\n }else{\n this._data = points;\n }\n \n this._setLastPoint();\n }\n \n \n /**\n * Define the number of dimensions. 
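// --- Illustrative sketch, not part of the original source ---
// Applying the 3x4 "w2v" matrix computed by _saveOriginAndTransform above to
// a world coordinate [x, y, z, 1] yields voxel indices. The helper name is an
// assumption made for this sketch.
function demoWorldToVoxel(w2v, x, y, z) {
  return {
    i: x * w2v[0][0] + y * w2v[0][1] + z * w2v[0][2] + w2v[0][3],
    j: x * w2v[1][0] + y * w2v[1][1] + z * w2v[1][2] + w2v[1][3],
    k: x * w2v[2][0] + y * w2v[2][1] + z * w2v[2][2] + w2v[2][3]
  };
}
// e.g. var v = demoWorldToVoxel(volume.getMetadata("w2v"), 10.5, -3.2, 40.0);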
This can be done only when this LineString\n * is still empty.\n * @param {Number} nod - Number of dimensions\n */\n setNod( nod ){\n if(!this._data || !this._data.length){\n console.warn(\"The number of dimension can be set only when this LineString is empty.\");\n return;\n }\n \n this.setMetadata(\"nod\", nod);\n }\n \n \n /**\n * Get the internal image data (pointer)\n * @return {Array} the original data, dont mess up with this one.\n * in case of doubt, use getDataCopy()\n */\n getData(){\n return this._data; // return the actual array, editable!\n }\n\n\n /**\n * Get a copy of the data\n * @return {Array} a deep copy of the data\n */\n getDataCopy(){\n return new this._data.constructor( this._data );\n }\n \n \n /**\n * Get the number of points in this linestring\n * @return {Number} nb of points\n */\n getNumberOfPoints(){\n if(!this._data){\n return 0;\n }\n \n return this._data.length / this.getMetadata(\"nod\");\n }\n \n \n /**\n * Get a point of this LineString\n * @return {Array} a point, being [x, y] if 2D or [x, y, z] if 3D\n */\n getPoint( index ){\n if(index >=0 && index < getNumberOfPoints){\n var nod = this._metadata.nod;\n return this._data.slice(index*nod, index*nod + nod);\n }else{\n console.warn(\"Index of point is out of range.\");\n return null;\n }\n }\n \n \n /**\n * Considere this LineString as closed, making it a polygon\n */\n close(){\n this.setMetadata(\"closed\", true);\n }\n \n \n /**\n * Considere this LineString as open\n */\n open(){\n this.setMetadata(\"closed\", false);\n }\n \n \n /**\n * Add a point at the end of the LineString. Keeps the polygon open.\n * @param {Array} position - [x, y] if 2D or [x, y, z] if 3D\n */\n addPoint( position ){\n if( position.length != this._metadata.nod ){\n console.warn(\"Cannot add the point becase it has a diferent number of dimensions.\");\n return;\n }\n \n if( !this._data ){\n this._data = new Array();\n }\n \n for(var i=0; i 1) {\n for (var i = 1; i < arguments.length; i++) {\n args[i - 1] = arguments[i];\n }\n }\n queue.push(new Item(fun, args));\n if (queue.length === 1 && !draining) {\n runTimeout(drainQueue);\n }\n}\n// v8 likes predictible objects\nfunction Item(fun, array) {\n this.fun = fun;\n this.array = array;\n}\nItem.prototype.run = function () {\n this.fun.apply(null, this.array);\n};\nexport var title = 'browser';\nexport var platform = 'browser';\nexport var browser = true;\nexport var env = {};\nexport var argv = [];\nexport var version = ''; // empty string to avoid regexp issues\nexport var versions = {};\nexport var release = {};\nexport var config = {};\n\nfunction noop() {}\n\nexport var on = noop;\nexport var addListener = noop;\nexport var once = noop;\nexport var off = noop;\nexport var removeListener = noop;\nexport var removeAllListeners = noop;\nexport var emit = noop;\n\nexport function binding(name) {\n throw new Error('process.binding is not supported');\n}\n\nexport function cwd () { return '/' }\nexport function chdir (dir) {\n throw new Error('process.chdir is not supported');\n};\nexport function umask() { return 0; }\n\n// from https://github.com/kumavis/browser-process-hrtime/blob/master/index.js\nvar performance = global.performance || {}\nvar performanceNow =\n performance.now ||\n performance.mozNow ||\n performance.msNow ||\n performance.oNow ||\n performance.webkitNow ||\n function(){ return (new Date()).getTime() }\n\n// generate timestamp or delta\n// see http://nodejs.org/api/process.html#process_process_hrtime\nexport function hrtime(previousTimestamp){\n var 
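// --- Illustrative sketch, not part of the original source ---
// Building a small closed 2D LineString with the methods defined earlier in
// this chunk (addPoint, close, getNumberOfPoints).
var demoLine = new LineString();      // defaults to 2 coordinates per point
demoLine.addPoint([0, 0]);
demoLine.addPoint([10, 0]);
demoLine.addPoint([10, 5]);
demoLine.close();                     // flags it as a polygon, adds no point
console.log(demoLine.getNumberOfPoints()); // 3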
clocktime = performanceNow.call(performance)*1e-3\n var seconds = Math.floor(clocktime)\n var nanoseconds = Math.floor((clocktime%1)*1e9)\n if (previousTimestamp) {\n seconds = seconds - previousTimestamp[0]\n nanoseconds = nanoseconds - previousTimestamp[1]\n if (nanoseconds<0) {\n seconds--\n nanoseconds += 1e9\n }\n }\n return [seconds,nanoseconds]\n}\n\nvar startTime = new Date();\nexport function uptime() {\n var currentTime = new Date();\n var dif = currentTime - startTime;\n return dif / 1000;\n}\n\nexport default {\n nextTick: nextTick,\n title: title,\n browser: browser,\n env: env,\n argv: argv,\n version: version,\n versions: versions,\n on: on,\n addListener: addListener,\n once: once,\n off: off,\n removeListener: removeListener,\n removeAllListeners: removeAllListeners,\n emit: emit,\n binding: binding,\n cwd: cwd,\n chdir: chdir,\n umask: umask,\n hrtime: hrtime,\n platform: platform,\n release: release,\n config: config,\n uptime: uptime\n};\n","export default {};\n","\nvar lookup = []\nvar revLookup = []\nvar Arr = typeof Uint8Array !== 'undefined' ? Uint8Array : Array\nvar inited = false;\nfunction init () {\n inited = true;\n var code = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'\n for (var i = 0, len = code.length; i < len; ++i) {\n lookup[i] = code[i]\n revLookup[code.charCodeAt(i)] = i\n }\n\n revLookup['-'.charCodeAt(0)] = 62\n revLookup['_'.charCodeAt(0)] = 63\n}\n\nexport function toByteArray (b64) {\n if (!inited) {\n init();\n }\n var i, j, l, tmp, placeHolders, arr\n var len = b64.length\n\n if (len % 4 > 0) {\n throw new Error('Invalid string. Length must be a multiple of 4')\n }\n\n // the number of equal signs (place holders)\n // if there are two placeholders, than the two characters before it\n // represent one byte\n // if there is only one, then the three characters before it represent 2 bytes\n // this is just a cheap hack to not do indexOf twice\n placeHolders = b64[len - 2] === '=' ? 2 : b64[len - 1] === '=' ? 1 : 0\n\n // base64 is 4/3 + up to two characters of the original data\n arr = new Arr(len * 3 / 4 - placeHolders)\n\n // if there are placeholders, only get up to the last complete 4 chars\n l = placeHolders > 0 ? 
len - 4 : len\n\n var L = 0\n\n for (i = 0, j = 0; i < l; i += 4, j += 3) {\n tmp = (revLookup[b64.charCodeAt(i)] << 18) | (revLookup[b64.charCodeAt(i + 1)] << 12) | (revLookup[b64.charCodeAt(i + 2)] << 6) | revLookup[b64.charCodeAt(i + 3)]\n arr[L++] = (tmp >> 16) & 0xFF\n arr[L++] = (tmp >> 8) & 0xFF\n arr[L++] = tmp & 0xFF\n }\n\n if (placeHolders === 2) {\n tmp = (revLookup[b64.charCodeAt(i)] << 2) | (revLookup[b64.charCodeAt(i + 1)] >> 4)\n arr[L++] = tmp & 0xFF\n } else if (placeHolders === 1) {\n tmp = (revLookup[b64.charCodeAt(i)] << 10) | (revLookup[b64.charCodeAt(i + 1)] << 4) | (revLookup[b64.charCodeAt(i + 2)] >> 2)\n arr[L++] = (tmp >> 8) & 0xFF\n arr[L++] = tmp & 0xFF\n }\n\n return arr\n}\n\nfunction tripletToBase64 (num) {\n return lookup[num >> 18 & 0x3F] + lookup[num >> 12 & 0x3F] + lookup[num >> 6 & 0x3F] + lookup[num & 0x3F]\n}\n\nfunction encodeChunk (uint8, start, end) {\n var tmp\n var output = []\n for (var i = start; i < end; i += 3) {\n tmp = (uint8[i] << 16) + (uint8[i + 1] << 8) + (uint8[i + 2])\n output.push(tripletToBase64(tmp))\n }\n return output.join('')\n}\n\nexport function fromByteArray (uint8) {\n if (!inited) {\n init();\n }\n var tmp\n var len = uint8.length\n var extraBytes = len % 3 // if we have 1 byte left, pad 2 bytes\n var output = ''\n var parts = []\n var maxChunkLength = 16383 // must be multiple of 3\n\n // go through the array every three bytes, we'll deal with trailing stuff later\n for (var i = 0, len2 = len - extraBytes; i < len2; i += maxChunkLength) {\n parts.push(encodeChunk(uint8, i, (i + maxChunkLength) > len2 ? len2 : (i + maxChunkLength)))\n }\n\n // pad the end with zeros, but make sure to not forget the extra bytes\n if (extraBytes === 1) {\n tmp = uint8[len - 1]\n output += lookup[tmp >> 2]\n output += lookup[(tmp << 4) & 0x3F]\n output += '=='\n } else if (extraBytes === 2) {\n tmp = (uint8[len - 2] << 8) + (uint8[len - 1])\n output += lookup[tmp >> 10]\n output += lookup[(tmp >> 4) & 0x3F]\n output += lookup[(tmp << 2) & 0x3F]\n output += '='\n }\n\n parts.push(output)\n\n return parts.join('')\n}\n","\nexport function read (buffer, offset, isLE, mLen, nBytes) {\n var e, m\n var eLen = nBytes * 8 - mLen - 1\n var eMax = (1 << eLen) - 1\n var eBias = eMax >> 1\n var nBits = -7\n var i = isLE ? (nBytes - 1) : 0\n var d = isLE ? -1 : 1\n var s = buffer[offset + i]\n\n i += d\n\n e = s & ((1 << (-nBits)) - 1)\n s >>= (-nBits)\n nBits += eLen\n for (; nBits > 0; e = e * 256 + buffer[offset + i], i += d, nBits -= 8) {}\n\n m = e & ((1 << (-nBits)) - 1)\n e >>= (-nBits)\n nBits += mLen\n for (; nBits > 0; m = m * 256 + buffer[offset + i], i += d, nBits -= 8) {}\n\n if (e === 0) {\n e = 1 - eBias\n } else if (e === eMax) {\n return m ? NaN : ((s ? -1 : 1) * Infinity)\n } else {\n m = m + Math.pow(2, mLen)\n e = e - eBias\n }\n return (s ? -1 : 1) * m * Math.pow(2, e - mLen)\n}\n\nexport function write (buffer, value, offset, isLE, mLen, nBytes) {\n var e, m, c\n var eLen = nBytes * 8 - mLen - 1\n var eMax = (1 << eLen) - 1\n var eBias = eMax >> 1\n var rt = (mLen === 23 ? Math.pow(2, -24) - Math.pow(2, -77) : 0)\n var i = isLE ? 0 : (nBytes - 1)\n var d = isLE ? 1 : -1\n var s = value < 0 || (value === 0 && 1 / value < 0) ? 1 : 0\n\n value = Math.abs(value)\n\n if (isNaN(value) || value === Infinity) {\n m = isNaN(value) ? 
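// --- Illustrative sketch, not part of the original source ---
// The read() helper above is what Buffer#readFloatLE delegates to further
// down in this bundle, with 23 mantissa bits over 4 bytes. The little-endian
// bytes of the 32-bit float 1.0 are [0x00, 0x00, 0x80, 0x3f]:
var demoFloatBytes = [0x00, 0x00, 0x80, 0x3f];
console.log(read(demoFloatBytes, 0, true, 23, 4)); // 1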
1 : 0\n e = eMax\n } else {\n e = Math.floor(Math.log(value) / Math.LN2)\n if (value * (c = Math.pow(2, -e)) < 1) {\n e--\n c *= 2\n }\n if (e + eBias >= 1) {\n value += rt / c\n } else {\n value += rt * Math.pow(2, 1 - eBias)\n }\n if (value * c >= 2) {\n e++\n c /= 2\n }\n\n if (e + eBias >= eMax) {\n m = 0\n e = eMax\n } else if (e + eBias >= 1) {\n m = (value * c - 1) * Math.pow(2, mLen)\n e = e + eBias\n } else {\n m = value * Math.pow(2, eBias - 1) * Math.pow(2, mLen)\n e = 0\n }\n }\n\n for (; mLen >= 8; buffer[offset + i] = m & 0xff, i += d, m /= 256, mLen -= 8) {}\n\n e = (e << mLen) | m\n eLen += mLen\n for (; eLen > 0; buffer[offset + i] = e & 0xff, i += d, e /= 256, eLen -= 8) {}\n\n buffer[offset + i - d] |= s * 128\n}\n","var toString = {}.toString;\n\nexport default Array.isArray || function (arr) {\n return toString.call(arr) == '[object Array]';\n};\n","/*!\n * The buffer module from node.js, for the browser.\n *\n * @author Feross Aboukhadijeh \n * @license MIT\n */\n/* eslint-disable no-proto */\n\n\nimport * as base64 from './base64'\nimport * as ieee754 from './ieee754'\nimport isArray from './isArray'\n\nexport var INSPECT_MAX_BYTES = 50\n\n/**\n * If `Buffer.TYPED_ARRAY_SUPPORT`:\n * === true Use Uint8Array implementation (fastest)\n * === false Use Object implementation (most compatible, even IE6)\n *\n * Browsers that support typed arrays are IE 10+, Firefox 4+, Chrome 7+, Safari 5.1+,\n * Opera 11.6+, iOS 4.2+.\n *\n * Due to various browser bugs, sometimes the Object implementation will be used even\n * when the browser supports typed arrays.\n *\n * Note:\n *\n * - Firefox 4-29 lacks support for adding new properties to `Uint8Array` instances,\n * See: https://bugzilla.mozilla.org/show_bug.cgi?id=695438.\n *\n * - Chrome 9-10 is missing the `TypedArray.prototype.subarray` function.\n *\n * - IE10 has a broken `TypedArray.prototype.subarray` function which returns arrays of\n * incorrect length in some situations.\n\n * We detect these buggy browsers and set `Buffer.TYPED_ARRAY_SUPPORT` to `false` so they\n * get the Object implementation, which is slower but behaves correctly.\n */\nBuffer.TYPED_ARRAY_SUPPORT = global.TYPED_ARRAY_SUPPORT !== undefined\n ? global.TYPED_ARRAY_SUPPORT\n : true\n\n/*\n * Export kMaxLength after typed array support is determined.\n */\nvar _kMaxLength = kMaxLength()\nexport {_kMaxLength as kMaxLength};\nfunction typedArraySupport () {\n return true;\n // rollup issues\n // try {\n // var arr = new Uint8Array(1)\n // arr.__proto__ = {\n // __proto__: Uint8Array.prototype,\n // foo: function () { return 42 }\n // }\n // return arr.foo() === 42 && // typed array instances can be augmented\n // typeof arr.subarray === 'function' && // chrome 9-10 lack `subarray`\n // arr.subarray(1, 1).byteLength === 0 // ie10 has broken `subarray`\n // } catch (e) {\n // return false\n // }\n}\n\nfunction kMaxLength () {\n return Buffer.TYPED_ARRAY_SUPPORT\n ? 
0x7fffffff\n : 0x3fffffff\n}\n\nfunction createBuffer (that, length) {\n if (kMaxLength() < length) {\n throw new RangeError('Invalid typed array length')\n }\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n // Return an augmented `Uint8Array` instance, for best performance\n that = new Uint8Array(length)\n that.__proto__ = Buffer.prototype\n } else {\n // Fallback: Return an object instance of the Buffer class\n if (that === null) {\n that = new Buffer(length)\n }\n that.length = length\n }\n\n return that\n}\n\n/**\n * The Buffer constructor returns instances of `Uint8Array` that have their\n * prototype changed to `Buffer.prototype`. Furthermore, `Buffer` is a subclass of\n * `Uint8Array`, so the returned instances will have all the node `Buffer` methods\n * and the `Uint8Array` methods. Square bracket notation works as expected -- it\n * returns a single octet.\n *\n * The `Uint8Array` prototype remains unmodified.\n */\n\nexport function Buffer (arg, encodingOrOffset, length) {\n if (!Buffer.TYPED_ARRAY_SUPPORT && !(this instanceof Buffer)) {\n return new Buffer(arg, encodingOrOffset, length)\n }\n\n // Common case.\n if (typeof arg === 'number') {\n if (typeof encodingOrOffset === 'string') {\n throw new Error(\n 'If encoding is specified then the first argument must be a string'\n )\n }\n return allocUnsafe(this, arg)\n }\n return from(this, arg, encodingOrOffset, length)\n}\n\nBuffer.poolSize = 8192 // not used by this implementation\n\n// TODO: Legacy, not needed anymore. Remove in next major version.\nBuffer._augment = function (arr) {\n arr.__proto__ = Buffer.prototype\n return arr\n}\n\nfunction from (that, value, encodingOrOffset, length) {\n if (typeof value === 'number') {\n throw new TypeError('\"value\" argument must not be a number')\n }\n\n if (typeof ArrayBuffer !== 'undefined' && value instanceof ArrayBuffer) {\n return fromArrayBuffer(that, value, encodingOrOffset, length)\n }\n\n if (typeof value === 'string') {\n return fromString(that, value, encodingOrOffset)\n }\n\n return fromObject(that, value)\n}\n\n/**\n * Functionally equivalent to Buffer(arg, encoding) but throws a TypeError\n * if value is a number.\n * Buffer.from(str[, encoding])\n * Buffer.from(array)\n * Buffer.from(buffer)\n * Buffer.from(arrayBuffer[, byteOffset[, length]])\n **/\nBuffer.from = function (value, encodingOrOffset, length) {\n return from(null, value, encodingOrOffset, length)\n}\n\nif (Buffer.TYPED_ARRAY_SUPPORT) {\n Buffer.prototype.__proto__ = Uint8Array.prototype\n Buffer.__proto__ = Uint8Array\n if (typeof Symbol !== 'undefined' && Symbol.species &&\n Buffer[Symbol.species] === Buffer) {\n // Fix subarray() in ES2016. See: https://github.com/feross/buffer/pull/97\n // Object.defineProperty(Buffer, Symbol.species, {\n // value: null,\n // configurable: true\n // })\n }\n}\n\nfunction assertSize (size) {\n if (typeof size !== 'number') {\n throw new TypeError('\"size\" argument must be a number')\n } else if (size < 0) {\n throw new RangeError('\"size\" argument must not be negative')\n }\n}\n\nfunction alloc (that, size, fill, encoding) {\n assertSize(size)\n if (size <= 0) {\n return createBuffer(that, size)\n }\n if (fill !== undefined) {\n // Only pay attention to encoding if it's a string. This\n // prevents accidentally sending in a number that would\n // be interpretted as a start offset.\n return typeof encoding === 'string'\n ? 
createBuffer(that, size).fill(fill, encoding)\n : createBuffer(that, size).fill(fill)\n }\n return createBuffer(that, size)\n}\n\n/**\n * Creates a new filled Buffer instance.\n * alloc(size[, fill[, encoding]])\n **/\nBuffer.alloc = function (size, fill, encoding) {\n return alloc(null, size, fill, encoding)\n}\n\nfunction allocUnsafe (that, size) {\n assertSize(size)\n that = createBuffer(that, size < 0 ? 0 : checked(size) | 0)\n if (!Buffer.TYPED_ARRAY_SUPPORT) {\n for (var i = 0; i < size; ++i) {\n that[i] = 0\n }\n }\n return that\n}\n\n/**\n * Equivalent to Buffer(num), by default creates a non-zero-filled Buffer instance.\n * */\nBuffer.allocUnsafe = function (size) {\n return allocUnsafe(null, size)\n}\n/**\n * Equivalent to SlowBuffer(num), by default creates a non-zero-filled Buffer instance.\n */\nBuffer.allocUnsafeSlow = function (size) {\n return allocUnsafe(null, size)\n}\n\nfunction fromString (that, string, encoding) {\n if (typeof encoding !== 'string' || encoding === '') {\n encoding = 'utf8'\n }\n\n if (!Buffer.isEncoding(encoding)) {\n throw new TypeError('\"encoding\" must be a valid string encoding')\n }\n\n var length = byteLength(string, encoding) | 0\n that = createBuffer(that, length)\n\n var actual = that.write(string, encoding)\n\n if (actual !== length) {\n // Writing a hex string, for example, that contains invalid characters will\n // cause everything after the first invalid character to be ignored. (e.g.\n // 'abxxcd' will be treated as 'ab')\n that = that.slice(0, actual)\n }\n\n return that\n}\n\nfunction fromArrayLike (that, array) {\n var length = array.length < 0 ? 0 : checked(array.length) | 0\n that = createBuffer(that, length)\n for (var i = 0; i < length; i += 1) {\n that[i] = array[i] & 255\n }\n return that\n}\n\nfunction fromArrayBuffer (that, array, byteOffset, length) {\n array.byteLength // this throws if `array` is not a valid ArrayBuffer\n\n if (byteOffset < 0 || array.byteLength < byteOffset) {\n throw new RangeError('\\'offset\\' is out of bounds')\n }\n\n if (array.byteLength < byteOffset + (length || 0)) {\n throw new RangeError('\\'length\\' is out of bounds')\n }\n\n if (byteOffset === undefined && length === undefined) {\n array = new Uint8Array(array)\n } else if (length === undefined) {\n array = new Uint8Array(array, byteOffset)\n } else {\n array = new Uint8Array(array, byteOffset, length)\n }\n\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n // Return an augmented `Uint8Array` instance, for best performance\n that = array\n that.__proto__ = Buffer.prototype\n } else {\n // Fallback: Return an object instance of the Buffer class\n that = fromArrayLike(that, array)\n }\n return that\n}\n\nfunction fromObject (that, obj) {\n if (internalIsBuffer(obj)) {\n var len = checked(obj.length) | 0\n that = createBuffer(that, len)\n\n if (that.length === 0) {\n return that\n }\n\n obj.copy(that, 0, 0, len)\n return that\n }\n\n if (obj) {\n if ((typeof ArrayBuffer !== 'undefined' &&\n obj.buffer instanceof ArrayBuffer) || 'length' in obj) {\n if (typeof obj.length !== 'number' || isnan(obj.length)) {\n return createBuffer(that, 0)\n }\n return fromArrayLike(that, obj)\n }\n\n if (obj.type === 'Buffer' && isArray(obj.data)) {\n return fromArrayLike(that, obj.data)\n }\n }\n\n throw new TypeError('First argument must be a string, Buffer, ArrayBuffer, Array, or array-like object.')\n}\n\nfunction checked (length) {\n // Note: cannot use `length < kMaxLength()` here because that fails when\n // length is NaN (which is otherwise coerced to zero.)\n if 
(length >= kMaxLength()) {\n throw new RangeError('Attempt to allocate Buffer larger than maximum ' +\n 'size: 0x' + kMaxLength().toString(16) + ' bytes')\n }\n return length | 0\n}\n\nexport function SlowBuffer (length) {\n if (+length != length) { // eslint-disable-line eqeqeq\n length = 0\n }\n return Buffer.alloc(+length)\n}\nBuffer.isBuffer = isBuffer;\nfunction internalIsBuffer (b) {\n return !!(b != null && b._isBuffer)\n}\n\nBuffer.compare = function compare (a, b) {\n if (!internalIsBuffer(a) || !internalIsBuffer(b)) {\n throw new TypeError('Arguments must be Buffers')\n }\n\n if (a === b) return 0\n\n var x = a.length\n var y = b.length\n\n for (var i = 0, len = Math.min(x, y); i < len; ++i) {\n if (a[i] !== b[i]) {\n x = a[i]\n y = b[i]\n break\n }\n }\n\n if (x < y) return -1\n if (y < x) return 1\n return 0\n}\n\nBuffer.isEncoding = function isEncoding (encoding) {\n switch (String(encoding).toLowerCase()) {\n case 'hex':\n case 'utf8':\n case 'utf-8':\n case 'ascii':\n case 'latin1':\n case 'binary':\n case 'base64':\n case 'ucs2':\n case 'ucs-2':\n case 'utf16le':\n case 'utf-16le':\n return true\n default:\n return false\n }\n}\n\nBuffer.concat = function concat (list, length) {\n if (!isArray(list)) {\n throw new TypeError('\"list\" argument must be an Array of Buffers')\n }\n\n if (list.length === 0) {\n return Buffer.alloc(0)\n }\n\n var i\n if (length === undefined) {\n length = 0\n for (i = 0; i < list.length; ++i) {\n length += list[i].length\n }\n }\n\n var buffer = Buffer.allocUnsafe(length)\n var pos = 0\n for (i = 0; i < list.length; ++i) {\n var buf = list[i]\n if (!internalIsBuffer(buf)) {\n throw new TypeError('\"list\" argument must be an Array of Buffers')\n }\n buf.copy(buffer, pos)\n pos += buf.length\n }\n return buffer\n}\n\nfunction byteLength (string, encoding) {\n if (internalIsBuffer(string)) {\n return string.length\n }\n if (typeof ArrayBuffer !== 'undefined' && typeof ArrayBuffer.isView === 'function' &&\n (ArrayBuffer.isView(string) || string instanceof ArrayBuffer)) {\n return string.byteLength\n }\n if (typeof string !== 'string') {\n string = '' + string\n }\n\n var len = string.length\n if (len === 0) return 0\n\n // Use a for loop to avoid recursion\n var loweredCase = false\n for (;;) {\n switch (encoding) {\n case 'ascii':\n case 'latin1':\n case 'binary':\n return len\n case 'utf8':\n case 'utf-8':\n case undefined:\n return utf8ToBytes(string).length\n case 'ucs2':\n case 'ucs-2':\n case 'utf16le':\n case 'utf-16le':\n return len * 2\n case 'hex':\n return len >>> 1\n case 'base64':\n return base64ToBytes(string).length\n default:\n if (loweredCase) return utf8ToBytes(string).length // assume utf8\n encoding = ('' + encoding).toLowerCase()\n loweredCase = true\n }\n }\n}\nBuffer.byteLength = byteLength\n\nfunction slowToString (encoding, start, end) {\n var loweredCase = false\n\n // No need to verify that \"this.length <= MAX_UINT32\" since it's a read-only\n // property of a typed array.\n\n // This behaves neither like String nor Uint8Array in that we set start/end\n // to their upper/lower bounds if the value passed is out of range.\n // undefined is handled specially as per ECMA-262 6th Edition,\n // Section 13.3.3.7 Runtime Semantics: KeyedBindingInitialization.\n if (start === undefined || start < 0) {\n start = 0\n }\n // Return early if start > this.length. 
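// --- Illustrative sketch, not part of the original source ---
// byteLength() above maps each encoding to a byte count without allocating
// a Buffer:
console.log(Buffer.byteLength('abc', 'ascii'));   // 3 (one byte per char)
console.log(Buffer.byteLength('abc', 'ucs2'));    // 6 (two bytes per char)
console.log(Buffer.byteLength('ffaa', 'hex'));    // 2 (len >>> 1)
console.log(Buffer.byteLength('\u20ac', 'utf8')); // 3 (multi-byte UTF-8)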
Done here to prevent potential uint32\n // coercion fail below.\n if (start > this.length) {\n return ''\n }\n\n if (end === undefined || end > this.length) {\n end = this.length\n }\n\n if (end <= 0) {\n return ''\n }\n\n // Force coersion to uint32. This will also coerce falsey/NaN values to 0.\n end >>>= 0\n start >>>= 0\n\n if (end <= start) {\n return ''\n }\n\n if (!encoding) encoding = 'utf8'\n\n while (true) {\n switch (encoding) {\n case 'hex':\n return hexSlice(this, start, end)\n\n case 'utf8':\n case 'utf-8':\n return utf8Slice(this, start, end)\n\n case 'ascii':\n return asciiSlice(this, start, end)\n\n case 'latin1':\n case 'binary':\n return latin1Slice(this, start, end)\n\n case 'base64':\n return base64Slice(this, start, end)\n\n case 'ucs2':\n case 'ucs-2':\n case 'utf16le':\n case 'utf-16le':\n return utf16leSlice(this, start, end)\n\n default:\n if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding)\n encoding = (encoding + '').toLowerCase()\n loweredCase = true\n }\n }\n}\n\n// The property is used by `Buffer.isBuffer` and `is-buffer` (in Safari 5-7) to detect\n// Buffer instances.\nBuffer.prototype._isBuffer = true\n\nfunction swap (b, n, m) {\n var i = b[n]\n b[n] = b[m]\n b[m] = i\n}\n\nBuffer.prototype.swap16 = function swap16 () {\n var len = this.length\n if (len % 2 !== 0) {\n throw new RangeError('Buffer size must be a multiple of 16-bits')\n }\n for (var i = 0; i < len; i += 2) {\n swap(this, i, i + 1)\n }\n return this\n}\n\nBuffer.prototype.swap32 = function swap32 () {\n var len = this.length\n if (len % 4 !== 0) {\n throw new RangeError('Buffer size must be a multiple of 32-bits')\n }\n for (var i = 0; i < len; i += 4) {\n swap(this, i, i + 3)\n swap(this, i + 1, i + 2)\n }\n return this\n}\n\nBuffer.prototype.swap64 = function swap64 () {\n var len = this.length\n if (len % 8 !== 0) {\n throw new RangeError('Buffer size must be a multiple of 64-bits')\n }\n for (var i = 0; i < len; i += 8) {\n swap(this, i, i + 7)\n swap(this, i + 1, i + 6)\n swap(this, i + 2, i + 5)\n swap(this, i + 3, i + 4)\n }\n return this\n}\n\nBuffer.prototype.toString = function toString () {\n var length = this.length | 0\n if (length === 0) return ''\n if (arguments.length === 0) return utf8Slice(this, 0, length)\n return slowToString.apply(this, arguments)\n}\n\nBuffer.prototype.equals = function equals (b) {\n if (!internalIsBuffer(b)) throw new TypeError('Argument must be a Buffer')\n if (this === b) return true\n return Buffer.compare(this, b) === 0\n}\n\nBuffer.prototype.inspect = function inspect () {\n var str = ''\n var max = INSPECT_MAX_BYTES\n if (this.length > 0) {\n str = this.toString('hex', 0, max).match(/.{2}/g).join(' ')\n if (this.length > max) str += ' ... '\n }\n return ''\n}\n\nBuffer.prototype.compare = function compare (target, start, end, thisStart, thisEnd) {\n if (!internalIsBuffer(target)) {\n throw new TypeError('Argument must be a Buffer')\n }\n\n if (start === undefined) {\n start = 0\n }\n if (end === undefined) {\n end = target ? 
target.length : 0\n }\n if (thisStart === undefined) {\n thisStart = 0\n }\n if (thisEnd === undefined) {\n thisEnd = this.length\n }\n\n if (start < 0 || end > target.length || thisStart < 0 || thisEnd > this.length) {\n throw new RangeError('out of range index')\n }\n\n if (thisStart >= thisEnd && start >= end) {\n return 0\n }\n if (thisStart >= thisEnd) {\n return -1\n }\n if (start >= end) {\n return 1\n }\n\n start >>>= 0\n end >>>= 0\n thisStart >>>= 0\n thisEnd >>>= 0\n\n if (this === target) return 0\n\n var x = thisEnd - thisStart\n var y = end - start\n var len = Math.min(x, y)\n\n var thisCopy = this.slice(thisStart, thisEnd)\n var targetCopy = target.slice(start, end)\n\n for (var i = 0; i < len; ++i) {\n if (thisCopy[i] !== targetCopy[i]) {\n x = thisCopy[i]\n y = targetCopy[i]\n break\n }\n }\n\n if (x < y) return -1\n if (y < x) return 1\n return 0\n}\n\n// Finds either the first index of `val` in `buffer` at offset >= `byteOffset`,\n// OR the last index of `val` in `buffer` at offset <= `byteOffset`.\n//\n// Arguments:\n// - buffer - a Buffer to search\n// - val - a string, Buffer, or number\n// - byteOffset - an index into `buffer`; will be clamped to an int32\n// - encoding - an optional encoding, relevant is val is a string\n// - dir - true for indexOf, false for lastIndexOf\nfunction bidirectionalIndexOf (buffer, val, byteOffset, encoding, dir) {\n // Empty buffer means no match\n if (buffer.length === 0) return -1\n\n // Normalize byteOffset\n if (typeof byteOffset === 'string') {\n encoding = byteOffset\n byteOffset = 0\n } else if (byteOffset > 0x7fffffff) {\n byteOffset = 0x7fffffff\n } else if (byteOffset < -0x80000000) {\n byteOffset = -0x80000000\n }\n byteOffset = +byteOffset // Coerce to Number.\n if (isNaN(byteOffset)) {\n // byteOffset: it it's undefined, null, NaN, \"foo\", etc, search whole buffer\n byteOffset = dir ? 
0 : (buffer.length - 1)\n }\n\n // Normalize byteOffset: negative offsets start from the end of the buffer\n if (byteOffset < 0) byteOffset = buffer.length + byteOffset\n if (byteOffset >= buffer.length) {\n if (dir) return -1\n else byteOffset = buffer.length - 1\n } else if (byteOffset < 0) {\n if (dir) byteOffset = 0\n else return -1\n }\n\n // Normalize val\n if (typeof val === 'string') {\n val = Buffer.from(val, encoding)\n }\n\n // Finally, search either indexOf (if dir is true) or lastIndexOf\n if (internalIsBuffer(val)) {\n // Special case: looking for empty string/buffer always fails\n if (val.length === 0) {\n return -1\n }\n return arrayIndexOf(buffer, val, byteOffset, encoding, dir)\n } else if (typeof val === 'number') {\n val = val & 0xFF // Search for a byte value [0-255]\n if (Buffer.TYPED_ARRAY_SUPPORT &&\n typeof Uint8Array.prototype.indexOf === 'function') {\n if (dir) {\n return Uint8Array.prototype.indexOf.call(buffer, val, byteOffset)\n } else {\n return Uint8Array.prototype.lastIndexOf.call(buffer, val, byteOffset)\n }\n }\n return arrayIndexOf(buffer, [ val ], byteOffset, encoding, dir)\n }\n\n throw new TypeError('val must be string, number or Buffer')\n}\n\nfunction arrayIndexOf (arr, val, byteOffset, encoding, dir) {\n var indexSize = 1\n var arrLength = arr.length\n var valLength = val.length\n\n if (encoding !== undefined) {\n encoding = String(encoding).toLowerCase()\n if (encoding === 'ucs2' || encoding === 'ucs-2' ||\n encoding === 'utf16le' || encoding === 'utf-16le') {\n if (arr.length < 2 || val.length < 2) {\n return -1\n }\n indexSize = 2\n arrLength /= 2\n valLength /= 2\n byteOffset /= 2\n }\n }\n\n function read (buf, i) {\n if (indexSize === 1) {\n return buf[i]\n } else {\n return buf.readUInt16BE(i * indexSize)\n }\n }\n\n var i\n if (dir) {\n var foundIndex = -1\n for (i = byteOffset; i < arrLength; i++) {\n if (read(arr, i) === read(val, foundIndex === -1 ? 
0 : i - foundIndex)) {\n if (foundIndex === -1) foundIndex = i\n if (i - foundIndex + 1 === valLength) return foundIndex * indexSize\n } else {\n if (foundIndex !== -1) i -= i - foundIndex\n foundIndex = -1\n }\n }\n } else {\n if (byteOffset + valLength > arrLength) byteOffset = arrLength - valLength\n for (i = byteOffset; i >= 0; i--) {\n var found = true\n for (var j = 0; j < valLength; j++) {\n if (read(arr, i + j) !== read(val, j)) {\n found = false\n break\n }\n }\n if (found) return i\n }\n }\n\n return -1\n}\n\nBuffer.prototype.includes = function includes (val, byteOffset, encoding) {\n return this.indexOf(val, byteOffset, encoding) !== -1\n}\n\nBuffer.prototype.indexOf = function indexOf (val, byteOffset, encoding) {\n return bidirectionalIndexOf(this, val, byteOffset, encoding, true)\n}\n\nBuffer.prototype.lastIndexOf = function lastIndexOf (val, byteOffset, encoding) {\n return bidirectionalIndexOf(this, val, byteOffset, encoding, false)\n}\n\nfunction hexWrite (buf, string, offset, length) {\n offset = Number(offset) || 0\n var remaining = buf.length - offset\n if (!length) {\n length = remaining\n } else {\n length = Number(length)\n if (length > remaining) {\n length = remaining\n }\n }\n\n // must be an even number of digits\n var strLen = string.length\n if (strLen % 2 !== 0) throw new TypeError('Invalid hex string')\n\n if (length > strLen / 2) {\n length = strLen / 2\n }\n for (var i = 0; i < length; ++i) {\n var parsed = parseInt(string.substr(i * 2, 2), 16)\n if (isNaN(parsed)) return i\n buf[offset + i] = parsed\n }\n return i\n}\n\nfunction utf8Write (buf, string, offset, length) {\n return blitBuffer(utf8ToBytes(string, buf.length - offset), buf, offset, length)\n}\n\nfunction asciiWrite (buf, string, offset, length) {\n return blitBuffer(asciiToBytes(string), buf, offset, length)\n}\n\nfunction latin1Write (buf, string, offset, length) {\n return asciiWrite(buf, string, offset, length)\n}\n\nfunction base64Write (buf, string, offset, length) {\n return blitBuffer(base64ToBytes(string), buf, offset, length)\n}\n\nfunction ucs2Write (buf, string, offset, length) {\n return blitBuffer(utf16leToBytes(string, buf.length - offset), buf, offset, length)\n}\n\nBuffer.prototype.write = function write (string, offset, length, encoding) {\n // Buffer#write(string)\n if (offset === undefined) {\n encoding = 'utf8'\n length = this.length\n offset = 0\n // Buffer#write(string, encoding)\n } else if (length === undefined && typeof offset === 'string') {\n encoding = offset\n length = this.length\n offset = 0\n // Buffer#write(string, offset[, length][, encoding])\n } else if (isFinite(offset)) {\n offset = offset | 0\n if (isFinite(length)) {\n length = length | 0\n if (encoding === undefined) encoding = 'utf8'\n } else {\n encoding = length\n length = undefined\n }\n // legacy write(string, encoding, offset, length) - remove in v0.13\n } else {\n throw new Error(\n 'Buffer.write(string, encoding, offset[, length]) is no longer supported'\n )\n }\n\n var remaining = this.length - offset\n if (length === undefined || length > remaining) length = remaining\n\n if ((string.length > 0 && (length < 0 || offset < 0)) || offset > this.length) {\n throw new RangeError('Attempt to write outside buffer bounds')\n }\n\n if (!encoding) encoding = 'utf8'\n\n var loweredCase = false\n for (;;) {\n switch (encoding) {\n case 'hex':\n return hexWrite(this, string, offset, length)\n\n case 'utf8':\n case 'utf-8':\n return utf8Write(this, string, offset, length)\n\n case 'ascii':\n return 
asciiWrite(this, string, offset, length)\n\n case 'latin1':\n case 'binary':\n return latin1Write(this, string, offset, length)\n\n case 'base64':\n // Warning: maxLength not taken into account in base64Write\n return base64Write(this, string, offset, length)\n\n case 'ucs2':\n case 'ucs-2':\n case 'utf16le':\n case 'utf-16le':\n return ucs2Write(this, string, offset, length)\n\n default:\n if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding)\n encoding = ('' + encoding).toLowerCase()\n loweredCase = true\n }\n }\n}\n\nBuffer.prototype.toJSON = function toJSON () {\n return {\n type: 'Buffer',\n data: Array.prototype.slice.call(this._arr || this, 0)\n }\n}\n\nfunction base64Slice (buf, start, end) {\n if (start === 0 && end === buf.length) {\n return base64.fromByteArray(buf)\n } else {\n return base64.fromByteArray(buf.slice(start, end))\n }\n}\n\nfunction utf8Slice (buf, start, end) {\n end = Math.min(buf.length, end)\n var res = []\n\n var i = start\n while (i < end) {\n var firstByte = buf[i]\n var codePoint = null\n var bytesPerSequence = (firstByte > 0xEF) ? 4\n : (firstByte > 0xDF) ? 3\n : (firstByte > 0xBF) ? 2\n : 1\n\n if (i + bytesPerSequence <= end) {\n var secondByte, thirdByte, fourthByte, tempCodePoint\n\n switch (bytesPerSequence) {\n case 1:\n if (firstByte < 0x80) {\n codePoint = firstByte\n }\n break\n case 2:\n secondByte = buf[i + 1]\n if ((secondByte & 0xC0) === 0x80) {\n tempCodePoint = (firstByte & 0x1F) << 0x6 | (secondByte & 0x3F)\n if (tempCodePoint > 0x7F) {\n codePoint = tempCodePoint\n }\n }\n break\n case 3:\n secondByte = buf[i + 1]\n thirdByte = buf[i + 2]\n if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80) {\n tempCodePoint = (firstByte & 0xF) << 0xC | (secondByte & 0x3F) << 0x6 | (thirdByte & 0x3F)\n if (tempCodePoint > 0x7FF && (tempCodePoint < 0xD800 || tempCodePoint > 0xDFFF)) {\n codePoint = tempCodePoint\n }\n }\n break\n case 4:\n secondByte = buf[i + 1]\n thirdByte = buf[i + 2]\n fourthByte = buf[i + 3]\n if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80 && (fourthByte & 0xC0) === 0x80) {\n tempCodePoint = (firstByte & 0xF) << 0x12 | (secondByte & 0x3F) << 0xC | (thirdByte & 0x3F) << 0x6 | (fourthByte & 0x3F)\n if (tempCodePoint > 0xFFFF && tempCodePoint < 0x110000) {\n codePoint = tempCodePoint\n }\n }\n }\n }\n\n if (codePoint === null) {\n // we did not generate a valid codePoint so insert a\n // replacement char (U+FFFD) and advance only 1 byte\n codePoint = 0xFFFD\n bytesPerSequence = 1\n } else if (codePoint > 0xFFFF) {\n // encode to utf16 (surrogate pair dance)\n codePoint -= 0x10000\n res.push(codePoint >>> 10 & 0x3FF | 0xD800)\n codePoint = 0xDC00 | codePoint & 0x3FF\n }\n\n res.push(codePoint)\n i += bytesPerSequence\n }\n\n return decodeCodePointsArray(res)\n}\n\n// Based on http://stackoverflow.com/a/22747272/680742, the browser with\n// the lowest limit is Chrome, with 0x10000 args.\n// We go 1 magnitude less, for safety\nvar MAX_ARGUMENTS_LENGTH = 0x1000\n\nfunction decodeCodePointsArray (codePoints) {\n var len = codePoints.length\n if (len <= MAX_ARGUMENTS_LENGTH) {\n return String.fromCharCode.apply(String, codePoints) // avoid extra slice()\n }\n\n // Decode in chunks to avoid \"call stack size exceeded\".\n var res = ''\n var i = 0\n while (i < len) {\n res += String.fromCharCode.apply(\n String,\n codePoints.slice(i, i += MAX_ARGUMENTS_LENGTH)\n )\n }\n return res\n}\n\nfunction asciiSlice (buf, start, end) {\n var ret = ''\n end = Math.min(buf.length, end)\n\n for (var i = start; i 
< end; ++i) {\n ret += String.fromCharCode(buf[i] & 0x7F)\n }\n return ret\n}\n\nfunction latin1Slice (buf, start, end) {\n var ret = ''\n end = Math.min(buf.length, end)\n\n for (var i = start; i < end; ++i) {\n ret += String.fromCharCode(buf[i])\n }\n return ret\n}\n\nfunction hexSlice (buf, start, end) {\n var len = buf.length\n\n if (!start || start < 0) start = 0\n if (!end || end < 0 || end > len) end = len\n\n var out = ''\n for (var i = start; i < end; ++i) {\n out += toHex(buf[i])\n }\n return out\n}\n\nfunction utf16leSlice (buf, start, end) {\n var bytes = buf.slice(start, end)\n var res = ''\n for (var i = 0; i < bytes.length; i += 2) {\n res += String.fromCharCode(bytes[i] + bytes[i + 1] * 256)\n }\n return res\n}\n\nBuffer.prototype.slice = function slice (start, end) {\n var len = this.length\n start = ~~start\n end = end === undefined ? len : ~~end\n\n if (start < 0) {\n start += len\n if (start < 0) start = 0\n } else if (start > len) {\n start = len\n }\n\n if (end < 0) {\n end += len\n if (end < 0) end = 0\n } else if (end > len) {\n end = len\n }\n\n if (end < start) end = start\n\n var newBuf\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n newBuf = this.subarray(start, end)\n newBuf.__proto__ = Buffer.prototype\n } else {\n var sliceLen = end - start\n newBuf = new Buffer(sliceLen, undefined)\n for (var i = 0; i < sliceLen; ++i) {\n newBuf[i] = this[i + start]\n }\n }\n\n return newBuf\n}\n\n/*\n * Need to make sure that buffer isn't trying to write out of bounds.\n */\nfunction checkOffset (offset, ext, length) {\n if ((offset % 1) !== 0 || offset < 0) throw new RangeError('offset is not uint')\n if (offset + ext > length) throw new RangeError('Trying to access beyond buffer length')\n}\n\nBuffer.prototype.readUIntLE = function readUIntLE (offset, byteLength, noAssert) {\n offset = offset | 0\n byteLength = byteLength | 0\n if (!noAssert) checkOffset(offset, byteLength, this.length)\n\n var val = this[offset]\n var mul = 1\n var i = 0\n while (++i < byteLength && (mul *= 0x100)) {\n val += this[offset + i] * mul\n }\n\n return val\n}\n\nBuffer.prototype.readUIntBE = function readUIntBE (offset, byteLength, noAssert) {\n offset = offset | 0\n byteLength = byteLength | 0\n if (!noAssert) {\n checkOffset(offset, byteLength, this.length)\n }\n\n var val = this[offset + --byteLength]\n var mul = 1\n while (byteLength > 0 && (mul *= 0x100)) {\n val += this[offset + --byteLength] * mul\n }\n\n return val\n}\n\nBuffer.prototype.readUInt8 = function readUInt8 (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 1, this.length)\n return this[offset]\n}\n\nBuffer.prototype.readUInt16LE = function readUInt16LE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 2, this.length)\n return this[offset] | (this[offset + 1] << 8)\n}\n\nBuffer.prototype.readUInt16BE = function readUInt16BE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 2, this.length)\n return (this[offset] << 8) | this[offset + 1]\n}\n\nBuffer.prototype.readUInt32LE = function readUInt32LE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 4, this.length)\n\n return ((this[offset]) |\n (this[offset + 1] << 8) |\n (this[offset + 2] << 16)) +\n (this[offset + 3] * 0x1000000)\n}\n\nBuffer.prototype.readUInt32BE = function readUInt32BE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 4, this.length)\n\n return (this[offset] * 0x1000000) +\n ((this[offset + 1] << 16) |\n (this[offset + 2] << 8) |\n this[offset + 3])\n}\n\nBuffer.prototype.readIntLE = function readIntLE (offset, byteLength, 
noAssert) {\n offset = offset | 0\n byteLength = byteLength | 0\n if (!noAssert) checkOffset(offset, byteLength, this.length)\n\n var val = this[offset]\n var mul = 1\n var i = 0\n while (++i < byteLength && (mul *= 0x100)) {\n val += this[offset + i] * mul\n }\n mul *= 0x80\n\n if (val >= mul) val -= Math.pow(2, 8 * byteLength)\n\n return val\n}\n\nBuffer.prototype.readIntBE = function readIntBE (offset, byteLength, noAssert) {\n offset = offset | 0\n byteLength = byteLength | 0\n if (!noAssert) checkOffset(offset, byteLength, this.length)\n\n var i = byteLength\n var mul = 1\n var val = this[offset + --i]\n while (i > 0 && (mul *= 0x100)) {\n val += this[offset + --i] * mul\n }\n mul *= 0x80\n\n if (val >= mul) val -= Math.pow(2, 8 * byteLength)\n\n return val\n}\n\nBuffer.prototype.readInt8 = function readInt8 (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 1, this.length)\n if (!(this[offset] & 0x80)) return (this[offset])\n return ((0xff - this[offset] + 1) * -1)\n}\n\nBuffer.prototype.readInt16LE = function readInt16LE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 2, this.length)\n var val = this[offset] | (this[offset + 1] << 8)\n return (val & 0x8000) ? val | 0xFFFF0000 : val\n}\n\nBuffer.prototype.readInt16BE = function readInt16BE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 2, this.length)\n var val = this[offset + 1] | (this[offset] << 8)\n return (val & 0x8000) ? val | 0xFFFF0000 : val\n}\n\nBuffer.prototype.readInt32LE = function readInt32LE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 4, this.length)\n\n return (this[offset]) |\n (this[offset + 1] << 8) |\n (this[offset + 2] << 16) |\n (this[offset + 3] << 24)\n}\n\nBuffer.prototype.readInt32BE = function readInt32BE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 4, this.length)\n\n return (this[offset] << 24) |\n (this[offset + 1] << 16) |\n (this[offset + 2] << 8) |\n (this[offset + 3])\n}\n\nBuffer.prototype.readFloatLE = function readFloatLE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 4, this.length)\n return ieee754.read(this, offset, true, 23, 4)\n}\n\nBuffer.prototype.readFloatBE = function readFloatBE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 4, this.length)\n return ieee754.read(this, offset, false, 23, 4)\n}\n\nBuffer.prototype.readDoubleLE = function readDoubleLE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 8, this.length)\n return ieee754.read(this, offset, true, 52, 8)\n}\n\nBuffer.prototype.readDoubleBE = function readDoubleBE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 8, this.length)\n return ieee754.read(this, offset, false, 52, 8)\n}\n\nfunction checkInt (buf, value, offset, ext, max, min) {\n if (!internalIsBuffer(buf)) throw new TypeError('\"buffer\" argument must be a Buffer instance')\n if (value > max || value < min) throw new RangeError('\"value\" argument is out of bounds')\n if (offset + ext > buf.length) throw new RangeError('Index out of range')\n}\n\nBuffer.prototype.writeUIntLE = function writeUIntLE (value, offset, byteLength, noAssert) {\n value = +value\n offset = offset | 0\n byteLength = byteLength | 0\n if (!noAssert) {\n var maxBytes = Math.pow(2, 8 * byteLength) - 1\n checkInt(this, value, offset, byteLength, maxBytes, 0)\n }\n\n var mul = 1\n var i = 0\n this[offset] = value & 0xFF\n while (++i < byteLength && (mul *= 0x100)) {\n this[offset + i] = (value / mul) & 0xFF\n }\n\n return offset + byteLength\n}\n\nBuffer.prototype.writeUIntBE = function writeUIntBE (value, 
offset, byteLength, noAssert) {\n value = +value\n offset = offset | 0\n byteLength = byteLength | 0\n if (!noAssert) {\n var maxBytes = Math.pow(2, 8 * byteLength) - 1\n checkInt(this, value, offset, byteLength, maxBytes, 0)\n }\n\n var i = byteLength - 1\n var mul = 1\n this[offset + i] = value & 0xFF\n while (--i >= 0 && (mul *= 0x100)) {\n this[offset + i] = (value / mul) & 0xFF\n }\n\n return offset + byteLength\n}\n\nBuffer.prototype.writeUInt8 = function writeUInt8 (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 1, 0xff, 0)\n if (!Buffer.TYPED_ARRAY_SUPPORT) value = Math.floor(value)\n this[offset] = (value & 0xff)\n return offset + 1\n}\n\nfunction objectWriteUInt16 (buf, value, offset, littleEndian) {\n if (value < 0) value = 0xffff + value + 1\n for (var i = 0, j = Math.min(buf.length - offset, 2); i < j; ++i) {\n buf[offset + i] = (value & (0xff << (8 * (littleEndian ? i : 1 - i)))) >>>\n (littleEndian ? i : 1 - i) * 8\n }\n}\n\nBuffer.prototype.writeUInt16LE = function writeUInt16LE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0)\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset] = (value & 0xff)\n this[offset + 1] = (value >>> 8)\n } else {\n objectWriteUInt16(this, value, offset, true)\n }\n return offset + 2\n}\n\nBuffer.prototype.writeUInt16BE = function writeUInt16BE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0)\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset] = (value >>> 8)\n this[offset + 1] = (value & 0xff)\n } else {\n objectWriteUInt16(this, value, offset, false)\n }\n return offset + 2\n}\n\nfunction objectWriteUInt32 (buf, value, offset, littleEndian) {\n if (value < 0) value = 0xffffffff + value + 1\n for (var i = 0, j = Math.min(buf.length - offset, 4); i < j; ++i) {\n buf[offset + i] = (value >>> (littleEndian ? 
i : 3 - i) * 8) & 0xff\n }\n}\n\nBuffer.prototype.writeUInt32LE = function writeUInt32LE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0)\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset + 3] = (value >>> 24)\n this[offset + 2] = (value >>> 16)\n this[offset + 1] = (value >>> 8)\n this[offset] = (value & 0xff)\n } else {\n objectWriteUInt32(this, value, offset, true)\n }\n return offset + 4\n}\n\nBuffer.prototype.writeUInt32BE = function writeUInt32BE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0)\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset] = (value >>> 24)\n this[offset + 1] = (value >>> 16)\n this[offset + 2] = (value >>> 8)\n this[offset + 3] = (value & 0xff)\n } else {\n objectWriteUInt32(this, value, offset, false)\n }\n return offset + 4\n}\n\nBuffer.prototype.writeIntLE = function writeIntLE (value, offset, byteLength, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) {\n var limit = Math.pow(2, 8 * byteLength - 1)\n\n checkInt(this, value, offset, byteLength, limit - 1, -limit)\n }\n\n var i = 0\n var mul = 1\n var sub = 0\n this[offset] = value & 0xFF\n while (++i < byteLength && (mul *= 0x100)) {\n if (value < 0 && sub === 0 && this[offset + i - 1] !== 0) {\n sub = 1\n }\n this[offset + i] = ((value / mul) >> 0) - sub & 0xFF\n }\n\n return offset + byteLength\n}\n\nBuffer.prototype.writeIntBE = function writeIntBE (value, offset, byteLength, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) {\n var limit = Math.pow(2, 8 * byteLength - 1)\n\n checkInt(this, value, offset, byteLength, limit - 1, -limit)\n }\n\n var i = byteLength - 1\n var mul = 1\n var sub = 0\n this[offset + i] = value & 0xFF\n while (--i >= 0 && (mul *= 0x100)) {\n if (value < 0 && sub === 0 && this[offset + i + 1] !== 0) {\n sub = 1\n }\n this[offset + i] = ((value / mul) >> 0) - sub & 0xFF\n }\n\n return offset + byteLength\n}\n\nBuffer.prototype.writeInt8 = function writeInt8 (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 1, 0x7f, -0x80)\n if (!Buffer.TYPED_ARRAY_SUPPORT) value = Math.floor(value)\n if (value < 0) value = 0xff + value + 1\n this[offset] = (value & 0xff)\n return offset + 1\n}\n\nBuffer.prototype.writeInt16LE = function writeInt16LE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000)\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset] = (value & 0xff)\n this[offset + 1] = (value >>> 8)\n } else {\n objectWriteUInt16(this, value, offset, true)\n }\n return offset + 2\n}\n\nBuffer.prototype.writeInt16BE = function writeInt16BE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000)\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset] = (value >>> 8)\n this[offset + 1] = (value & 0xff)\n } else {\n objectWriteUInt16(this, value, offset, false)\n }\n return offset + 2\n}\n\nBuffer.prototype.writeInt32LE = function writeInt32LE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000)\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset] = (value & 0xff)\n this[offset + 1] = (value >>> 8)\n this[offset + 2] = (value >>> 16)\n this[offset + 3] = (value >>> 24)\n } else {\n 
objectWriteUInt32(this, value, offset, true)\n }\n return offset + 4\n}\n\nBuffer.prototype.writeInt32BE = function writeInt32BE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000)\n if (value < 0) value = 0xffffffff + value + 1\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset] = (value >>> 24)\n this[offset + 1] = (value >>> 16)\n this[offset + 2] = (value >>> 8)\n this[offset + 3] = (value & 0xff)\n } else {\n objectWriteUInt32(this, value, offset, false)\n }\n return offset + 4\n}\n\nfunction checkIEEE754 (buf, value, offset, ext, max, min) {\n if (offset + ext > buf.length) throw new RangeError('Index out of range')\n if (offset < 0) throw new RangeError('Index out of range')\n}\n\nfunction writeFloat (buf, value, offset, littleEndian, noAssert) {\n if (!noAssert) {\n checkIEEE754(buf, value, offset, 4, 3.4028234663852886e+38, -3.4028234663852886e+38)\n }\n ieee754.write(buf, value, offset, littleEndian, 23, 4)\n return offset + 4\n}\n\nBuffer.prototype.writeFloatLE = function writeFloatLE (value, offset, noAssert) {\n return writeFloat(this, value, offset, true, noAssert)\n}\n\nBuffer.prototype.writeFloatBE = function writeFloatBE (value, offset, noAssert) {\n return writeFloat(this, value, offset, false, noAssert)\n}\n\nfunction writeDouble (buf, value, offset, littleEndian, noAssert) {\n if (!noAssert) {\n checkIEEE754(buf, value, offset, 8, 1.7976931348623157E+308, -1.7976931348623157E+308)\n }\n ieee754.write(buf, value, offset, littleEndian, 52, 8)\n return offset + 8\n}\n\nBuffer.prototype.writeDoubleLE = function writeDoubleLE (value, offset, noAssert) {\n return writeDouble(this, value, offset, true, noAssert)\n}\n\nBuffer.prototype.writeDoubleBE = function writeDoubleBE (value, offset, noAssert) {\n return writeDouble(this, value, offset, false, noAssert)\n}\n\n// copy(targetBuffer, targetStart=0, sourceStart=0, sourceEnd=buffer.length)\nBuffer.prototype.copy = function copy (target, targetStart, start, end) {\n if (!start) start = 0\n if (!end && end !== 0) end = this.length\n if (targetStart >= target.length) targetStart = target.length\n if (!targetStart) targetStart = 0\n if (end > 0 && end < start) end = start\n\n // Copy 0 bytes; we're done\n if (end === start) return 0\n if (target.length === 0 || this.length === 0) return 0\n\n // Fatal error conditions\n if (targetStart < 0) {\n throw new RangeError('targetStart out of bounds')\n }\n if (start < 0 || start >= this.length) throw new RangeError('sourceStart out of bounds')\n if (end < 0) throw new RangeError('sourceEnd out of bounds')\n\n // Are we oob?\n if (end > this.length) end = this.length\n if (target.length - targetStart < end - start) {\n end = target.length - targetStart + start\n }\n\n var len = end - start\n var i\n\n if (this === target && start < targetStart && targetStart < end) {\n // descending copy from end\n for (i = len - 1; i >= 0; --i) {\n target[i + targetStart] = this[i + start]\n }\n } else if (len < 1000 || !Buffer.TYPED_ARRAY_SUPPORT) {\n // ascending copy from start\n for (i = 0; i < len; ++i) {\n target[i + targetStart] = this[i + start]\n }\n } else {\n Uint8Array.prototype.set.call(\n target,\n this.subarray(start, start + len),\n targetStart\n )\n }\n\n return len\n}\n\n// Usage:\n// buffer.fill(number[, offset[, end]])\n// buffer.fill(buffer[, offset[, end]])\n// buffer.fill(string[, offset[, end]][, encoding])\nBuffer.prototype.fill = function fill (val, start, end, encoding) {\n // Handle string 
cases:\n if (typeof val === 'string') {\n if (typeof start === 'string') {\n encoding = start\n start = 0\n end = this.length\n } else if (typeof end === 'string') {\n encoding = end\n end = this.length\n }\n if (val.length === 1) {\n var code = val.charCodeAt(0)\n if (code < 256) {\n val = code\n }\n }\n if (encoding !== undefined && typeof encoding !== 'string') {\n throw new TypeError('encoding must be a string')\n }\n if (typeof encoding === 'string' && !Buffer.isEncoding(encoding)) {\n throw new TypeError('Unknown encoding: ' + encoding)\n }\n } else if (typeof val === 'number') {\n val = val & 255\n }\n\n // Invalid ranges are not set to a default, so can range check early.\n if (start < 0 || this.length < start || this.length < end) {\n throw new RangeError('Out of range index')\n }\n\n if (end <= start) {\n return this\n }\n\n start = start >>> 0\n end = end === undefined ? this.length : end >>> 0\n\n if (!val) val = 0\n\n var i\n if (typeof val === 'number') {\n for (i = start; i < end; ++i) {\n this[i] = val\n }\n } else {\n var bytes = internalIsBuffer(val)\n ? val\n : utf8ToBytes(new Buffer(val, encoding).toString())\n var len = bytes.length\n for (i = 0; i < end - start; ++i) {\n this[i + start] = bytes[i % len]\n }\n }\n\n return this\n}\n\n// HELPER FUNCTIONS\n// ================\n\nvar INVALID_BASE64_RE = /[^+\\/0-9A-Za-z-_]/g\n\nfunction base64clean (str) {\n // Node strips out invalid characters like \\n and \\t from the string, base64-js does not\n str = stringtrim(str).replace(INVALID_BASE64_RE, '')\n // Node converts strings with length < 2 to ''\n if (str.length < 2) return ''\n // Node allows for non-padded base64 strings (missing trailing ===), base64-js does not\n while (str.length % 4 !== 0) {\n str = str + '='\n }\n return str\n}\n\nfunction stringtrim (str) {\n if (str.trim) return str.trim()\n return str.replace(/^\\s+|\\s+$/g, '')\n}\n\nfunction toHex (n) {\n if (n < 16) return '0' + n.toString(16)\n return n.toString(16)\n}\n\nfunction utf8ToBytes (string, units) {\n units = units || Infinity\n var codePoint\n var length = string.length\n var leadSurrogate = null\n var bytes = []\n\n for (var i = 0; i < length; ++i) {\n codePoint = string.charCodeAt(i)\n\n // is surrogate component\n if (codePoint > 0xD7FF && codePoint < 0xE000) {\n // last char was a lead\n if (!leadSurrogate) {\n // no lead yet\n if (codePoint > 0xDBFF) {\n // unexpected trail\n if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)\n continue\n } else if (i + 1 === length) {\n // unpaired lead\n if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)\n continue\n }\n\n // valid lead\n leadSurrogate = codePoint\n\n continue\n }\n\n // 2 leads in a row\n if (codePoint < 0xDC00) {\n if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)\n leadSurrogate = codePoint\n continue\n }\n\n // valid surrogate pair\n codePoint = (leadSurrogate - 0xD800 << 10 | codePoint - 0xDC00) + 0x10000\n } else if (leadSurrogate) {\n // valid bmp char, but last char was a lead\n if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)\n }\n\n leadSurrogate = null\n\n // encode utf8\n if (codePoint < 0x80) {\n if ((units -= 1) < 0) break\n bytes.push(codePoint)\n } else if (codePoint < 0x800) {\n if ((units -= 2) < 0) break\n bytes.push(\n codePoint >> 0x6 | 0xC0,\n codePoint & 0x3F | 0x80\n )\n } else if (codePoint < 0x10000) {\n if ((units -= 3) < 0) break\n bytes.push(\n codePoint >> 0xC | 0xE0,\n codePoint >> 0x6 & 0x3F | 0x80,\n codePoint & 0x3F | 0x80\n )\n } else if (codePoint < 0x110000) {\n if ((units -= 4) < 0) break\n 
bytes.push(\n codePoint >> 0x12 | 0xF0,\n codePoint >> 0xC & 0x3F | 0x80,\n codePoint >> 0x6 & 0x3F | 0x80,\n codePoint & 0x3F | 0x80\n )\n } else {\n throw new Error('Invalid code point')\n }\n }\n\n return bytes\n}\n\nfunction asciiToBytes (str) {\n var byteArray = []\n for (var i = 0; i < str.length; ++i) {\n // Node's code seems to be doing this and not & 0x7F..\n byteArray.push(str.charCodeAt(i) & 0xFF)\n }\n return byteArray\n}\n\nfunction utf16leToBytes (str, units) {\n var c, hi, lo\n var byteArray = []\n for (var i = 0; i < str.length; ++i) {\n if ((units -= 2) < 0) break\n\n c = str.charCodeAt(i)\n hi = c >> 8\n lo = c % 256\n byteArray.push(lo)\n byteArray.push(hi)\n }\n\n return byteArray\n}\n\n\nfunction base64ToBytes (str) {\n return base64.toByteArray(base64clean(str))\n}\n\nfunction blitBuffer (src, dst, offset, length) {\n for (var i = 0; i < length; ++i) {\n if ((i + offset >= dst.length) || (i >= src.length)) break\n dst[i + offset] = src[i]\n }\n return i\n}\n\nfunction isnan (val) {\n return val !== val // eslint-disable-line no-self-compare\n}\n\n\n// the following is from is-buffer, also by Feross Aboukhadijeh and with same lisence\n// The _isBuffer check is for Safari 5-7 support, because it's missing\n// Object.prototype.constructor. Remove this eventually\nexport function isBuffer(obj) {\n return obj != null && (!!obj._isBuffer || isFastBuffer(obj) || isSlowBuffer(obj))\n}\n\nfunction isFastBuffer (obj) {\n return !!obj.constructor && typeof obj.constructor.isBuffer === 'function' && obj.constructor.isBuffer(obj)\n}\n\n// For Node v0.10 support. Remove this eventually.\nfunction isSlowBuffer (obj) {\n return typeof obj.readFloatLE === 'function' && typeof obj.slice === 'function' && isFastBuffer(obj.slice(0, 0))\n}\n","/**\r\n * [js-md5]{@link https://github.com/emn178/js-md5}\r\n *\r\n * @namespace md5\r\n * @version 0.4.2\r\n * @author Chen, Yi-Cyuan [emn178@gmail.com]\r\n * @copyright Chen, Yi-Cyuan 2014-2017\r\n * @license MIT\r\n */\r\n(function () {\r\n 'use strict';\r\n\r\n var root = typeof window === 'object' ? 
window : {};\r\n var NODE_JS = !root.JS_MD5_NO_NODE_JS && typeof process === 'object' && process.versions && process.versions.node;\r\n if (NODE_JS) {\r\n root = global;\r\n }\r\n var COMMON_JS = !root.JS_MD5_NO_COMMON_JS && typeof module === 'object' && module.exports;\r\n var AMD = typeof define === 'function' && define.amd;\r\n var ARRAY_BUFFER = !root.JS_MD5_NO_ARRAY_BUFFER && typeof ArrayBuffer !== 'undefined';\r\n var HEX_CHARS = '0123456789abcdef'.split('');\r\n var EXTRA = [128, 32768, 8388608, -2147483648];\r\n var SHIFT = [0, 8, 16, 24];\r\n var OUTPUT_TYPES = ['hex', 'array', 'digest', 'buffer', 'arrayBuffer'];\r\n\r\n var blocks = [], buffer8;\r\n if (ARRAY_BUFFER) {\r\n var buffer = new ArrayBuffer(68);\r\n buffer8 = new Uint8Array(buffer);\r\n blocks = new Uint32Array(buffer);\r\n }\r\n\r\n /**\r\n * @method hex\r\n * @memberof md5\r\n * @description Output hash as hex string\r\n * @param {String|Array|Uint8Array|ArrayBuffer} message message to hash\r\n * @returns {String} Hex string\r\n * @example\r\n * md5.hex('The quick brown fox jumps over the lazy dog');\r\n * // equal to\r\n * md5('The quick brown fox jumps over the lazy dog');\r\n */\r\n /**\r\n * @method digest\r\n * @memberof md5\r\n * @description Output hash as bytes array\r\n * @param {String|Array|Uint8Array|ArrayBuffer} message message to hash\r\n * @returns {Array} Bytes array\r\n * @example\r\n * md5.digest('The quick brown fox jumps over the lazy dog');\r\n */\r\n /**\r\n * @method array\r\n * @memberof md5\r\n * @description Output hash as bytes array\r\n * @param {String|Array|Uint8Array|ArrayBuffer} message message to hash\r\n * @returns {Array} Bytes array\r\n * @example\r\n * md5.array('The quick brown fox jumps over the lazy dog');\r\n */\r\n /**\r\n * @method arrayBuffer\r\n * @memberof md5\r\n * @description Output hash as ArrayBuffer\r\n * @param {String|Array|Uint8Array|ArrayBuffer} message message to hash\r\n * @returns {ArrayBuffer} ArrayBuffer\r\n * @example\r\n * md5.arrayBuffer('The quick brown fox jumps over the lazy dog');\r\n */\r\n /**\r\n * @method buffer\r\n * @deprecated This maybe confuse with Buffer in node.js. 
Please use arrayBuffer instead.\r\n * @memberof md5\r\n * @description Output hash as ArrayBuffer\r\n * @param {String|Array|Uint8Array|ArrayBuffer} message message to hash\r\n * @returns {ArrayBuffer} ArrayBuffer\r\n * @example\r\n * md5.buffer('The quick brown fox jumps over the lazy dog');\r\n */\r\n var createOutputMethod = function (outputType) {\r\n return function (message) {\r\n return new Md5(true).update(message)[outputType]();\r\n };\r\n };\r\n\r\n /**\r\n * @method create\r\n * @memberof md5\r\n * @description Create Md5 object\r\n * @returns {Md5} Md5 object.\r\n * @example\r\n * var hash = md5.create();\r\n */\r\n /**\r\n * @method update\r\n * @memberof md5\r\n * @description Create and update Md5 object\r\n * @param {String|Array|Uint8Array|ArrayBuffer} message message to hash\r\n * @returns {Md5} Md5 object.\r\n * @example\r\n * var hash = md5.update('The quick brown fox jumps over the lazy dog');\r\n * // equal to\r\n * var hash = md5.create();\r\n * hash.update('The quick brown fox jumps over the lazy dog');\r\n */\r\n var createMethod = function () {\r\n var method = createOutputMethod('hex');\r\n if (NODE_JS) {\r\n method = nodeWrap(method);\r\n }\r\n method.create = function () {\r\n return new Md5();\r\n };\r\n method.update = function (message) {\r\n return method.create().update(message);\r\n };\r\n for (var i = 0; i < OUTPUT_TYPES.length; ++i) {\r\n var type = OUTPUT_TYPES[i];\r\n method[type] = createOutputMethod(type);\r\n }\r\n return method;\r\n };\r\n\r\n var nodeWrap = function (method) {\r\n var crypto = require('crypto');\r\n var Buffer = require('buffer').Buffer;\r\n var nodeMethod = function (message) {\r\n if (typeof message === 'string') {\r\n return crypto.createHash('md5').update(message, 'utf8').digest('hex');\r\n } else if (message.constructor === ArrayBuffer) {\r\n message = new Uint8Array(message);\r\n } else if (message.length === undefined) {\r\n return method(message);\r\n }\r\n return crypto.createHash('md5').update(new Buffer(message)).digest('hex');\r\n };\r\n return nodeMethod;\r\n };\r\n\r\n /**\r\n * Md5 class\r\n * @class Md5\r\n * @description This is internal class.\r\n * @see {@link md5.create}\r\n */\r\n function Md5(sharedMemory) {\r\n if (sharedMemory) {\r\n blocks[0] = blocks[16] = blocks[1] = blocks[2] = blocks[3] =\r\n blocks[4] = blocks[5] = blocks[6] = blocks[7] =\r\n blocks[8] = blocks[9] = blocks[10] = blocks[11] =\r\n blocks[12] = blocks[13] = blocks[14] = blocks[15] = 0;\r\n this.blocks = blocks;\r\n this.buffer8 = buffer8;\r\n } else {\r\n if (ARRAY_BUFFER) {\r\n var buffer = new ArrayBuffer(68);\r\n this.buffer8 = new Uint8Array(buffer);\r\n this.blocks = new Uint32Array(buffer);\r\n } else {\r\n this.blocks = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];\r\n }\r\n }\r\n this.h0 = this.h1 = this.h2 = this.h3 = this.start = this.bytes = 0;\r\n this.finalized = this.hashed = false;\r\n this.first = true;\r\n }\r\n\r\n /**\r\n * @method update\r\n * @memberof Md5\r\n * @instance\r\n * @description Update hash\r\n * @param {String|Array|Uint8Array|ArrayBuffer} message message to hash\r\n * @returns {Md5} Md5 object.\r\n * @see {@link md5.update}\r\n */\r\n Md5.prototype.update = function (message) {\r\n if (this.finalized) {\r\n return;\r\n }\r\n var notString = typeof(message) != 'string';\r\n if (notString && message.constructor == root.ArrayBuffer) {\r\n message = new Uint8Array(message);\r\n }\r\n var code, index = 0, i, length = message.length || 0, blocks = this.blocks;\r\n var buffer8 = this.buffer8;\r\n\r\n 
while (index < length) {\r\n if (this.hashed) {\r\n this.hashed = false;\r\n blocks[0] = blocks[16];\r\n blocks[16] = blocks[1] = blocks[2] = blocks[3] =\r\n blocks[4] = blocks[5] = blocks[6] = blocks[7] =\r\n blocks[8] = blocks[9] = blocks[10] = blocks[11] =\r\n blocks[12] = blocks[13] = blocks[14] = blocks[15] = 0;\r\n }\r\n\r\n if (notString) {\r\n if (ARRAY_BUFFER) {\r\n for (i = this.start; index < length && i < 64; ++index) {\r\n buffer8[i++] = message[index];\r\n }\r\n } else {\r\n for (i = this.start; index < length && i < 64; ++index) {\r\n blocks[i >> 2] |= message[index] << SHIFT[i++ & 3];\r\n }\r\n }\r\n } else {\r\n if (ARRAY_BUFFER) {\r\n for (i = this.start; index < length && i < 64; ++index) {\r\n code = message.charCodeAt(index);\r\n if (code < 0x80) {\r\n buffer8[i++] = code;\r\n } else if (code < 0x800) {\r\n buffer8[i++] = 0xc0 | (code >> 6);\r\n buffer8[i++] = 0x80 | (code & 0x3f);\r\n } else if (code < 0xd800 || code >= 0xe000) {\r\n buffer8[i++] = 0xe0 | (code >> 12);\r\n buffer8[i++] = 0x80 | ((code >> 6) & 0x3f);\r\n buffer8[i++] = 0x80 | (code & 0x3f);\r\n } else {\r\n code = 0x10000 + (((code & 0x3ff) << 10) | (message.charCodeAt(++index) & 0x3ff));\r\n buffer8[i++] = 0xf0 | (code >> 18);\r\n buffer8[i++] = 0x80 | ((code >> 12) & 0x3f);\r\n buffer8[i++] = 0x80 | ((code >> 6) & 0x3f);\r\n buffer8[i++] = 0x80 | (code & 0x3f);\r\n }\r\n }\r\n } else {\r\n for (i = this.start; index < length && i < 64; ++index) {\r\n code = message.charCodeAt(index);\r\n if (code < 0x80) {\r\n blocks[i >> 2] |= code << SHIFT[i++ & 3];\r\n } else if (code < 0x800) {\r\n blocks[i >> 2] |= (0xc0 | (code >> 6)) << SHIFT[i++ & 3];\r\n blocks[i >> 2] |= (0x80 | (code & 0x3f)) << SHIFT[i++ & 3];\r\n } else if (code < 0xd800 || code >= 0xe000) {\r\n blocks[i >> 2] |= (0xe0 | (code >> 12)) << SHIFT[i++ & 3];\r\n blocks[i >> 2] |= (0x80 | ((code >> 6) & 0x3f)) << SHIFT[i++ & 3];\r\n blocks[i >> 2] |= (0x80 | (code & 0x3f)) << SHIFT[i++ & 3];\r\n } else {\r\n code = 0x10000 + (((code & 0x3ff) << 10) | (message.charCodeAt(++index) & 0x3ff));\r\n blocks[i >> 2] |= (0xf0 | (code >> 18)) << SHIFT[i++ & 3];\r\n blocks[i >> 2] |= (0x80 | ((code >> 12) & 0x3f)) << SHIFT[i++ & 3];\r\n blocks[i >> 2] |= (0x80 | ((code >> 6) & 0x3f)) << SHIFT[i++ & 3];\r\n blocks[i >> 2] |= (0x80 | (code & 0x3f)) << SHIFT[i++ & 3];\r\n }\r\n }\r\n }\r\n }\r\n this.lastByteIndex = i;\r\n this.bytes += i - this.start;\r\n if (i >= 64) {\r\n this.start = i - 64;\r\n this.hash();\r\n this.hashed = true;\r\n } else {\r\n this.start = i;\r\n }\r\n }\r\n return this;\r\n };\r\n\r\n Md5.prototype.finalize = function () {\r\n if (this.finalized) {\r\n return;\r\n }\r\n this.finalized = true;\r\n var blocks = this.blocks, i = this.lastByteIndex;\r\n blocks[i >> 2] |= EXTRA[i & 3];\r\n if (i >= 56) {\r\n if (!this.hashed) {\r\n this.hash();\r\n }\r\n blocks[0] = blocks[16];\r\n blocks[16] = blocks[1] = blocks[2] = blocks[3] =\r\n blocks[4] = blocks[5] = blocks[6] = blocks[7] =\r\n blocks[8] = blocks[9] = blocks[10] = blocks[11] =\r\n blocks[12] = blocks[13] = blocks[14] = blocks[15] = 0;\r\n }\r\n blocks[14] = this.bytes << 3;\r\n this.hash();\r\n };\r\n\r\n Md5.prototype.hash = function () {\r\n var a, b, c, d, bc, da, blocks = this.blocks;\r\n\r\n if (this.first) {\r\n a = blocks[0] - 680876937;\r\n a = (a << 7 | a >>> 25) - 271733879 << 0;\r\n d = (-1732584194 ^ a & 2004318071) + blocks[1] - 117830708;\r\n d = (d << 12 | d >>> 20) + a << 0;\r\n c = (-271733879 ^ (d & (a ^ -271733879))) + blocks[2] - 1126478375;\r\n c = (c << 17 
| c >>> 15) + d << 0;\r\n b = (a ^ (c & (d ^ a))) + blocks[3] - 1316259209;\r\n b = (b << 22 | b >>> 10) + c << 0;\r\n } else {\r\n a = this.h0;\r\n b = this.h1;\r\n c = this.h2;\r\n d = this.h3;\r\n a += (d ^ (b & (c ^ d))) + blocks[0] - 680876936;\r\n a = (a << 7 | a >>> 25) + b << 0;\r\n d += (c ^ (a & (b ^ c))) + blocks[1] - 389564586;\r\n d = (d << 12 | d >>> 20) + a << 0;\r\n c += (b ^ (d & (a ^ b))) + blocks[2] + 606105819;\r\n c = (c << 17 | c >>> 15) + d << 0;\r\n b += (a ^ (c & (d ^ a))) + blocks[3] - 1044525330;\r\n b = (b << 22 | b >>> 10) + c << 0;\r\n }\r\n\r\n a += (d ^ (b & (c ^ d))) + blocks[4] - 176418897;\r\n a = (a << 7 | a >>> 25) + b << 0;\r\n d += (c ^ (a & (b ^ c))) + blocks[5] + 1200080426;\r\n d = (d << 12 | d >>> 20) + a << 0;\r\n c += (b ^ (d & (a ^ b))) + blocks[6] - 1473231341;\r\n c = (c << 17 | c >>> 15) + d << 0;\r\n b += (a ^ (c & (d ^ a))) + blocks[7] - 45705983;\r\n b = (b << 22 | b >>> 10) + c << 0;\r\n a += (d ^ (b & (c ^ d))) + blocks[8] + 1770035416;\r\n a = (a << 7 | a >>> 25) + b << 0;\r\n d += (c ^ (a & (b ^ c))) + blocks[9] - 1958414417;\r\n d = (d << 12 | d >>> 20) + a << 0;\r\n c += (b ^ (d & (a ^ b))) + blocks[10] - 42063;\r\n c = (c << 17 | c >>> 15) + d << 0;\r\n b += (a ^ (c & (d ^ a))) + blocks[11] - 1990404162;\r\n b = (b << 22 | b >>> 10) + c << 0;\r\n a += (d ^ (b & (c ^ d))) + blocks[12] + 1804603682;\r\n a = (a << 7 | a >>> 25) + b << 0;\r\n d += (c ^ (a & (b ^ c))) + blocks[13] - 40341101;\r\n d = (d << 12 | d >>> 20) + a << 0;\r\n c += (b ^ (d & (a ^ b))) + blocks[14] - 1502002290;\r\n c = (c << 17 | c >>> 15) + d << 0;\r\n b += (a ^ (c & (d ^ a))) + blocks[15] + 1236535329;\r\n b = (b << 22 | b >>> 10) + c << 0;\r\n a += (c ^ (d & (b ^ c))) + blocks[1] - 165796510;\r\n a = (a << 5 | a >>> 27) + b << 0;\r\n d += (b ^ (c & (a ^ b))) + blocks[6] - 1069501632;\r\n d = (d << 9 | d >>> 23) + a << 0;\r\n c += (a ^ (b & (d ^ a))) + blocks[11] + 643717713;\r\n c = (c << 14 | c >>> 18) + d << 0;\r\n b += (d ^ (a & (c ^ d))) + blocks[0] - 373897302;\r\n b = (b << 20 | b >>> 12) + c << 0;\r\n a += (c ^ (d & (b ^ c))) + blocks[5] - 701558691;\r\n a = (a << 5 | a >>> 27) + b << 0;\r\n d += (b ^ (c & (a ^ b))) + blocks[10] + 38016083;\r\n d = (d << 9 | d >>> 23) + a << 0;\r\n c += (a ^ (b & (d ^ a))) + blocks[15] - 660478335;\r\n c = (c << 14 | c >>> 18) + d << 0;\r\n b += (d ^ (a & (c ^ d))) + blocks[4] - 405537848;\r\n b = (b << 20 | b >>> 12) + c << 0;\r\n a += (c ^ (d & (b ^ c))) + blocks[9] + 568446438;\r\n a = (a << 5 | a >>> 27) + b << 0;\r\n d += (b ^ (c & (a ^ b))) + blocks[14] - 1019803690;\r\n d = (d << 9 | d >>> 23) + a << 0;\r\n c += (a ^ (b & (d ^ a))) + blocks[3] - 187363961;\r\n c = (c << 14 | c >>> 18) + d << 0;\r\n b += (d ^ (a & (c ^ d))) + blocks[8] + 1163531501;\r\n b = (b << 20 | b >>> 12) + c << 0;\r\n a += (c ^ (d & (b ^ c))) + blocks[13] - 1444681467;\r\n a = (a << 5 | a >>> 27) + b << 0;\r\n d += (b ^ (c & (a ^ b))) + blocks[2] - 51403784;\r\n d = (d << 9 | d >>> 23) + a << 0;\r\n c += (a ^ (b & (d ^ a))) + blocks[7] + 1735328473;\r\n c = (c << 14 | c >>> 18) + d << 0;\r\n b += (d ^ (a & (c ^ d))) + blocks[12] - 1926607734;\r\n b = (b << 20 | b >>> 12) + c << 0;\r\n bc = b ^ c;\r\n a += (bc ^ d) + blocks[5] - 378558;\r\n a = (a << 4 | a >>> 28) + b << 0;\r\n d += (bc ^ a) + blocks[8] - 2022574463;\r\n d = (d << 11 | d >>> 21) + a << 0;\r\n da = d ^ a;\r\n c += (da ^ b) + blocks[11] + 1839030562;\r\n c = (c << 16 | c >>> 16) + d << 0;\r\n b += (da ^ c) + blocks[14] - 35309556;\r\n b = (b << 23 | b >>> 9) + c << 0;\r\n bc 
= b ^ c;\r\n a += (bc ^ d) + blocks[1] - 1530992060;\r\n a = (a << 4 | a >>> 28) + b << 0;\r\n d += (bc ^ a) + blocks[4] + 1272893353;\r\n d = (d << 11 | d >>> 21) + a << 0;\r\n da = d ^ a;\r\n c += (da ^ b) + blocks[7] - 155497632;\r\n c = (c << 16 | c >>> 16) + d << 0;\r\n b += (da ^ c) + blocks[10] - 1094730640;\r\n b = (b << 23 | b >>> 9) + c << 0;\r\n bc = b ^ c;\r\n a += (bc ^ d) + blocks[13] + 681279174;\r\n a = (a << 4 | a >>> 28) + b << 0;\r\n d += (bc ^ a) + blocks[0] - 358537222;\r\n d = (d << 11 | d >>> 21) + a << 0;\r\n da = d ^ a;\r\n c += (da ^ b) + blocks[3] - 722521979;\r\n c = (c << 16 | c >>> 16) + d << 0;\r\n b += (da ^ c) + blocks[6] + 76029189;\r\n b = (b << 23 | b >>> 9) + c << 0;\r\n bc = b ^ c;\r\n a += (bc ^ d) + blocks[9] - 640364487;\r\n a = (a << 4 | a >>> 28) + b << 0;\r\n d += (bc ^ a) + blocks[12] - 421815835;\r\n d = (d << 11 | d >>> 21) + a << 0;\r\n da = d ^ a;\r\n c += (da ^ b) + blocks[15] + 530742520;\r\n c = (c << 16 | c >>> 16) + d << 0;\r\n b += (da ^ c) + blocks[2] - 995338651;\r\n b = (b << 23 | b >>> 9) + c << 0;\r\n a += (c ^ (b | ~d)) + blocks[0] - 198630844;\r\n a = (a << 6 | a >>> 26) + b << 0;\r\n d += (b ^ (a | ~c)) + blocks[7] + 1126891415;\r\n d = (d << 10 | d >>> 22) + a << 0;\r\n c += (a ^ (d | ~b)) + blocks[14] - 1416354905;\r\n c = (c << 15 | c >>> 17) + d << 0;\r\n b += (d ^ (c | ~a)) + blocks[5] - 57434055;\r\n b = (b << 21 | b >>> 11) + c << 0;\r\n a += (c ^ (b | ~d)) + blocks[12] + 1700485571;\r\n a = (a << 6 | a >>> 26) + b << 0;\r\n d += (b ^ (a | ~c)) + blocks[3] - 1894986606;\r\n d = (d << 10 | d >>> 22) + a << 0;\r\n c += (a ^ (d | ~b)) + blocks[10] - 1051523;\r\n c = (c << 15 | c >>> 17) + d << 0;\r\n b += (d ^ (c | ~a)) + blocks[1] - 2054922799;\r\n b = (b << 21 | b >>> 11) + c << 0;\r\n a += (c ^ (b | ~d)) + blocks[8] + 1873313359;\r\n a = (a << 6 | a >>> 26) + b << 0;\r\n d += (b ^ (a | ~c)) + blocks[15] - 30611744;\r\n d = (d << 10 | d >>> 22) + a << 0;\r\n c += (a ^ (d | ~b)) + blocks[6] - 1560198380;\r\n c = (c << 15 | c >>> 17) + d << 0;\r\n b += (d ^ (c | ~a)) + blocks[13] + 1309151649;\r\n b = (b << 21 | b >>> 11) + c << 0;\r\n a += (c ^ (b | ~d)) + blocks[4] - 145523070;\r\n a = (a << 6 | a >>> 26) + b << 0;\r\n d += (b ^ (a | ~c)) + blocks[11] - 1120210379;\r\n d = (d << 10 | d >>> 22) + a << 0;\r\n c += (a ^ (d | ~b)) + blocks[2] + 718787259;\r\n c = (c << 15 | c >>> 17) + d << 0;\r\n b += (d ^ (c | ~a)) + blocks[9] - 343485551;\r\n b = (b << 21 | b >>> 11) + c << 0;\r\n\r\n if (this.first) {\r\n this.h0 = a + 1732584193 << 0;\r\n this.h1 = b - 271733879 << 0;\r\n this.h2 = c - 1732584194 << 0;\r\n this.h3 = d + 271733878 << 0;\r\n this.first = false;\r\n } else {\r\n this.h0 = this.h0 + a << 0;\r\n this.h1 = this.h1 + b << 0;\r\n this.h2 = this.h2 + c << 0;\r\n this.h3 = this.h3 + d << 0;\r\n }\r\n };\r\n\r\n /**\r\n * @method hex\r\n * @memberof Md5\r\n * @instance\r\n * @description Output hash as hex string\r\n * @returns {String} Hex string\r\n * @see {@link md5.hex}\r\n * @example\r\n * hash.hex();\r\n */\r\n Md5.prototype.hex = function () {\r\n this.finalize();\r\n\r\n var h0 = this.h0, h1 = this.h1, h2 = this.h2, h3 = this.h3;\r\n\r\n return HEX_CHARS[(h0 >> 4) & 0x0F] + HEX_CHARS[h0 & 0x0F] +\r\n HEX_CHARS[(h0 >> 12) & 0x0F] + HEX_CHARS[(h0 >> 8) & 0x0F] +\r\n HEX_CHARS[(h0 >> 20) & 0x0F] + HEX_CHARS[(h0 >> 16) & 0x0F] +\r\n HEX_CHARS[(h0 >> 28) & 0x0F] + HEX_CHARS[(h0 >> 24) & 0x0F] +\r\n HEX_CHARS[(h1 >> 4) & 0x0F] + HEX_CHARS[h1 & 0x0F] +\r\n HEX_CHARS[(h1 >> 12) & 0x0F] + HEX_CHARS[(h1 >> 8) & 0x0F] 
+\r\n HEX_CHARS[(h1 >> 20) & 0x0F] + HEX_CHARS[(h1 >> 16) & 0x0F] +\r\n HEX_CHARS[(h1 >> 28) & 0x0F] + HEX_CHARS[(h1 >> 24) & 0x0F] +\r\n HEX_CHARS[(h2 >> 4) & 0x0F] + HEX_CHARS[h2 & 0x0F] +\r\n HEX_CHARS[(h2 >> 12) & 0x0F] + HEX_CHARS[(h2 >> 8) & 0x0F] +\r\n HEX_CHARS[(h2 >> 20) & 0x0F] + HEX_CHARS[(h2 >> 16) & 0x0F] +\r\n HEX_CHARS[(h2 >> 28) & 0x0F] + HEX_CHARS[(h2 >> 24) & 0x0F] +\r\n HEX_CHARS[(h3 >> 4) & 0x0F] + HEX_CHARS[h3 & 0x0F] +\r\n HEX_CHARS[(h3 >> 12) & 0x0F] + HEX_CHARS[(h3 >> 8) & 0x0F] +\r\n HEX_CHARS[(h3 >> 20) & 0x0F] + HEX_CHARS[(h3 >> 16) & 0x0F] +\r\n HEX_CHARS[(h3 >> 28) & 0x0F] + HEX_CHARS[(h3 >> 24) & 0x0F];\r\n };\r\n\r\n /**\r\n * @method toString\r\n * @memberof Md5\r\n * @instance\r\n * @description Output hash as hex string\r\n * @returns {String} Hex string\r\n * @see {@link md5.hex}\r\n * @example\r\n * hash.toString();\r\n */\r\n Md5.prototype.toString = Md5.prototype.hex;\r\n\r\n /**\r\n * @method digest\r\n * @memberof Md5\r\n * @instance\r\n * @description Output hash as bytes array\r\n * @returns {Array} Bytes array\r\n * @see {@link md5.digest}\r\n * @example\r\n * hash.digest();\r\n */\r\n Md5.prototype.digest = function () {\r\n this.finalize();\r\n\r\n var h0 = this.h0, h1 = this.h1, h2 = this.h2, h3 = this.h3;\r\n return [\r\n h0 & 0xFF, (h0 >> 8) & 0xFF, (h0 >> 16) & 0xFF, (h0 >> 24) & 0xFF,\r\n h1 & 0xFF, (h1 >> 8) & 0xFF, (h1 >> 16) & 0xFF, (h1 >> 24) & 0xFF,\r\n h2 & 0xFF, (h2 >> 8) & 0xFF, (h2 >> 16) & 0xFF, (h2 >> 24) & 0xFF,\r\n h3 & 0xFF, (h3 >> 8) & 0xFF, (h3 >> 16) & 0xFF, (h3 >> 24) & 0xFF\r\n ];\r\n };\r\n\r\n /**\r\n * @method array\r\n * @memberof Md5\r\n * @instance\r\n * @description Output hash as bytes array\r\n * @returns {Array} Bytes array\r\n * @see {@link md5.array}\r\n * @example\r\n * hash.array();\r\n */\r\n Md5.prototype.array = Md5.prototype.digest;\r\n\r\n /**\r\n * @method arrayBuffer\r\n * @memberof Md5\r\n * @instance\r\n * @description Output hash as ArrayBuffer\r\n * @returns {ArrayBuffer} ArrayBuffer\r\n * @see {@link md5.arrayBuffer}\r\n * @example\r\n * hash.arrayBuffer();\r\n */\r\n Md5.prototype.arrayBuffer = function () {\r\n this.finalize();\r\n\r\n var buffer = new ArrayBuffer(16);\r\n var blocks = new Uint32Array(buffer);\r\n blocks[0] = this.h0;\r\n blocks[1] = this.h1;\r\n blocks[2] = this.h2;\r\n blocks[3] = this.h3;\r\n return buffer;\r\n };\r\n\r\n /**\r\n * @method buffer\r\n * @deprecated This maybe confuse with Buffer in node.js. 
Please use arrayBuffer instead.\r\n * @memberof Md5\r\n * @instance\r\n * @description Output hash as ArrayBuffer\r\n * @returns {ArrayBuffer} ArrayBuffer\r\n * @see {@link md5.buffer}\r\n * @example\r\n * hash.buffer();\r\n */\r\n Md5.prototype.buffer = Md5.prototype.arrayBuffer;\r\n\r\n var exports = createMethod();\r\n\r\n if (COMMON_JS) {\r\n module.exports = exports;\r\n } else {\r\n /**\r\n * @method md5\b\r\n * @description Md5 hash function, export to global in browsers.\r\n * @param {String|Array|Uint8Array|ArrayBuffer} message message to hash\r\n * @returns {String} md5 hashes\r\n * @example\r\n * md5(''); // d41d8cd98f00b204e9800998ecf8427e\r\n * md5('The quick brown fox jumps over the lazy dog'); // 9e107d9d372bb6826bd81d3542a419d6\r\n * md5('The quick brown fox jumps over the lazy dog.'); // e4d909c290d0fb1ca068ffaddf22cbd0\r\n *\r\n * // It also supports UTF-8 encoding\r\n * md5('中文'); // a7bac2239fcdcb3a067903d8077c4a07\r\n *\r\n * // It also supports byte `Array`, `Uint8Array`, `ArrayBuffer`\r\n * md5([]); // d41d8cd98f00b204e9800998ecf8427e\r\n * md5(new Uint8Array([])); // d41d8cd98f00b204e9800998ecf8427e\r\n */\r\n root.md5 = exports;\r\n if (AMD) {\r\n define(function () {\r\n return exports;\r\n });\r\n }\r\n }\r\n})();\r\n","'use strict';\n\n\nvar TYPED_OK = (typeof Uint8Array !== 'undefined') &&\n (typeof Uint16Array !== 'undefined') &&\n (typeof Int32Array !== 'undefined');\n\n\nexports.assign = function (obj /*from1, from2, from3, ...*/) {\n var sources = Array.prototype.slice.call(arguments, 1);\n while (sources.length) {\n var source = sources.shift();\n if (!source) { continue; }\n\n if (typeof source !== 'object') {\n throw new TypeError(source + 'must be non-object');\n }\n\n for (var p in source) {\n if (source.hasOwnProperty(p)) {\n obj[p] = source[p];\n }\n }\n }\n\n return obj;\n};\n\n\n// reduce buffer size, avoiding mem copy\nexports.shrinkBuf = function (buf, size) {\n if (buf.length === size) { return buf; }\n if (buf.subarray) { return buf.subarray(0, size); }\n buf.length = size;\n return buf;\n};\n\n\nvar fnTyped = {\n arraySet: function (dest, src, src_offs, len, dest_offs) {\n if (src.subarray && dest.subarray) {\n dest.set(src.subarray(src_offs, src_offs + len), dest_offs);\n return;\n }\n // Fallback to ordinary array\n for (var i = 0; i < len; i++) {\n dest[dest_offs + i] = src[src_offs + i];\n }\n },\n // Join array of chunks to single array.\n flattenChunks: function (chunks) {\n var i, l, len, pos, chunk, result;\n\n // calculate data length\n len = 0;\n for (i = 0, l = chunks.length; i < l; i++) {\n len += chunks[i].length;\n }\n\n // join chunks\n result = new Uint8Array(len);\n pos = 0;\n for (i = 0, l = chunks.length; i < l; i++) {\n chunk = chunks[i];\n result.set(chunk, pos);\n pos += chunk.length;\n }\n\n return result;\n }\n};\n\nvar fnUntyped = {\n arraySet: function (dest, src, src_offs, len, dest_offs) {\n for (var i = 0; i < len; i++) {\n dest[dest_offs + i] = src[src_offs + i];\n }\n },\n // Join array of chunks to single array.\n flattenChunks: function (chunks) {\n return [].concat.apply([], chunks);\n }\n};\n\n\n// Enable/Disable typed arrays use, for testing\n//\nexports.setTyped = function (on) {\n if (on) {\n exports.Buf8 = Uint8Array;\n exports.Buf16 = Uint16Array;\n exports.Buf32 = Int32Array;\n exports.assign(exports, fnTyped);\n } else {\n exports.Buf8 = Array;\n exports.Buf16 = Array;\n exports.Buf32 = Array;\n exports.assign(exports, fnUntyped);\n }\n};\n\nexports.setTyped(TYPED_OK);\n","'use strict';\n\n\nvar utils 
= require('../utils/common');\n\n/* Public constants ==========================================================*/\n/* ===========================================================================*/\n\n\n//var Z_FILTERED = 1;\n//var Z_HUFFMAN_ONLY = 2;\n//var Z_RLE = 3;\nvar Z_FIXED = 4;\n//var Z_DEFAULT_STRATEGY = 0;\n\n/* Possible values of the data_type field (though see inflate()) */\nvar Z_BINARY = 0;\nvar Z_TEXT = 1;\n//var Z_ASCII = 1; // = Z_TEXT\nvar Z_UNKNOWN = 2;\n\n/*============================================================================*/\n\n\nfunction zero(buf) { var len = buf.length; while (--len >= 0) { buf[len] = 0; } }\n\n// From zutil.h\n\nvar STORED_BLOCK = 0;\nvar STATIC_TREES = 1;\nvar DYN_TREES = 2;\n/* The three kinds of block type */\n\nvar MIN_MATCH = 3;\nvar MAX_MATCH = 258;\n/* The minimum and maximum match lengths */\n\n// From deflate.h\n/* ===========================================================================\n * Internal compression state.\n */\n\nvar LENGTH_CODES = 29;\n/* number of length codes, not counting the special END_BLOCK code */\n\nvar LITERALS = 256;\n/* number of literal bytes 0..255 */\n\nvar L_CODES = LITERALS + 1 + LENGTH_CODES;\n/* number of Literal or Length codes, including the END_BLOCK code */\n\nvar D_CODES = 30;\n/* number of distance codes */\n\nvar BL_CODES = 19;\n/* number of codes used to transfer the bit lengths */\n\nvar HEAP_SIZE = 2 * L_CODES + 1;\n/* maximum heap size */\n\nvar MAX_BITS = 15;\n/* All codes must not exceed MAX_BITS bits */\n\nvar Buf_size = 16;\n/* size of bit buffer in bi_buf */\n\n\n/* ===========================================================================\n * Constants\n */\n\nvar MAX_BL_BITS = 7;\n/* Bit length codes must not exceed MAX_BL_BITS bits */\n\nvar END_BLOCK = 256;\n/* end of block literal code */\n\nvar REP_3_6 = 16;\n/* repeat previous bit length 3-6 times (2 bits of repeat count) */\n\nvar REPZ_3_10 = 17;\n/* repeat a zero length 3-10 times (3 bits of repeat count) */\n\nvar REPZ_11_138 = 18;\n/* repeat a zero length 11-138 times (7 bits of repeat count) */\n\n/* eslint-disable comma-spacing,array-bracket-spacing */\nvar extra_lbits = /* extra bits for each length code */\n [0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0];\n\nvar extra_dbits = /* extra bits for each distance code */\n [0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13];\n\nvar extra_blbits = /* extra bits for each bit length code */\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7];\n\nvar bl_order =\n [16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];\n/* eslint-enable comma-spacing,array-bracket-spacing */\n\n/* The lengths of the bit length codes are sent in order of decreasing\n * probability, to avoid transmitting the lengths for unused bit length codes.\n */\n\n/* ===========================================================================\n * Local data. These are initialized only once.\n */\n\n// We pre-fill arrays with 0 to avoid uninitialized gaps\n\nvar DIST_CODE_LEN = 512; /* see definition of array dist_code below */\n\n// !!!! Use flat array insdead of structure, Freq = i*2, Len = i*2+1\nvar static_ltree = new Array((L_CODES + 2) * 2);\nzero(static_ltree);\n/* The static literal tree. Since the bit lengths are imposed, there is no\n * need for the L_CODES extra codes used during heap construction. 
However\n * The codes 286 and 287 are needed to build a canonical tree (see _tr_init\n * below).\n */\n\nvar static_dtree = new Array(D_CODES * 2);\nzero(static_dtree);\n/* The static distance tree. (Actually a trivial tree since all codes use\n * 5 bits.)\n */\n\nvar _dist_code = new Array(DIST_CODE_LEN);\nzero(_dist_code);\n/* Distance codes. The first 256 values correspond to the distances\n * 3 .. 258, the last 256 values correspond to the top 8 bits of\n * the 15 bit distances.\n */\n\nvar _length_code = new Array(MAX_MATCH - MIN_MATCH + 1);\nzero(_length_code);\n/* length code for each normalized match length (0 == MIN_MATCH) */\n\nvar base_length = new Array(LENGTH_CODES);\nzero(base_length);\n/* First normalized length for each code (0 = MIN_MATCH) */\n\nvar base_dist = new Array(D_CODES);\nzero(base_dist);\n/* First normalized distance for each code (0 = distance of 1) */\n\n\nfunction StaticTreeDesc(static_tree, extra_bits, extra_base, elems, max_length) {\n\n this.static_tree = static_tree; /* static tree or NULL */\n this.extra_bits = extra_bits; /* extra bits for each code or NULL */\n this.extra_base = extra_base; /* base index for extra_bits */\n this.elems = elems; /* max number of elements in the tree */\n this.max_length = max_length; /* max bit length for the codes */\n\n // show if `static_tree` has data or dummy - needed for monomorphic objects\n this.has_stree = static_tree && static_tree.length;\n}\n\n\nvar static_l_desc;\nvar static_d_desc;\nvar static_bl_desc;\n\n\nfunction TreeDesc(dyn_tree, stat_desc) {\n this.dyn_tree = dyn_tree; /* the dynamic tree */\n this.max_code = 0; /* largest code with non zero frequency */\n this.stat_desc = stat_desc; /* the corresponding static tree */\n}\n\n\n\nfunction d_code(dist) {\n return dist < 256 ? 
_dist_code[dist] : _dist_code[256 + (dist >>> 7)];\n}\n\n\n/* ===========================================================================\n * Output a short LSB first on the stream.\n * IN assertion: there is enough room in pendingBuf.\n */\nfunction put_short(s, w) {\n// put_byte(s, (uch)((w) & 0xff));\n// put_byte(s, (uch)((ush)(w) >> 8));\n s.pending_buf[s.pending++] = (w) & 0xff;\n s.pending_buf[s.pending++] = (w >>> 8) & 0xff;\n}\n\n\n/* ===========================================================================\n * Send a value on a given number of bits.\n * IN assertion: length <= 16 and value fits in length bits.\n */\nfunction send_bits(s, value, length) {\n if (s.bi_valid > (Buf_size - length)) {\n s.bi_buf |= (value << s.bi_valid) & 0xffff;\n put_short(s, s.bi_buf);\n s.bi_buf = value >> (Buf_size - s.bi_valid);\n s.bi_valid += length - Buf_size;\n } else {\n s.bi_buf |= (value << s.bi_valid) & 0xffff;\n s.bi_valid += length;\n }\n}\n\n\nfunction send_code(s, c, tree) {\n send_bits(s, tree[c * 2]/*.Code*/, tree[c * 2 + 1]/*.Len*/);\n}\n\n\n/* ===========================================================================\n * Reverse the first len bits of a code, using straightforward code (a faster\n * method would use a table)\n * IN assertion: 1 <= len <= 15\n */\nfunction bi_reverse(code, len) {\n var res = 0;\n do {\n res |= code & 1;\n code >>>= 1;\n res <<= 1;\n } while (--len > 0);\n return res >>> 1;\n}\n\n\n/* ===========================================================================\n * Flush the bit buffer, keeping at most 7 bits in it.\n */\nfunction bi_flush(s) {\n if (s.bi_valid === 16) {\n put_short(s, s.bi_buf);\n s.bi_buf = 0;\n s.bi_valid = 0;\n\n } else if (s.bi_valid >= 8) {\n s.pending_buf[s.pending++] = s.bi_buf & 0xff;\n s.bi_buf >>= 8;\n s.bi_valid -= 8;\n }\n}\n\n\n/* ===========================================================================\n * Compute the optimal bit lengths for a tree and update the total bit length\n * for the current block.\n * IN assertion: the fields freq and dad are set, heap[heap_max] and\n * above are the tree nodes sorted by increasing frequency.\n * OUT assertions: the field len is set to the optimal bit length, the\n * array bl_count contains the frequencies for each bit length.\n * The length opt_len is updated; static_len is also updated if stree is\n * not null.\n */\nfunction gen_bitlen(s, desc)\n// deflate_state *s;\n// tree_desc *desc; /* the tree descriptor */\n{\n var tree = desc.dyn_tree;\n var max_code = desc.max_code;\n var stree = desc.stat_desc.static_tree;\n var has_stree = desc.stat_desc.has_stree;\n var extra = desc.stat_desc.extra_bits;\n var base = desc.stat_desc.extra_base;\n var max_length = desc.stat_desc.max_length;\n var h; /* heap index */\n var n, m; /* iterate over the tree elements */\n var bits; /* bit length */\n var xbits; /* extra bits */\n var f; /* frequency */\n var overflow = 0; /* number of elements with bit length too large */\n\n for (bits = 0; bits <= MAX_BITS; bits++) {\n s.bl_count[bits] = 0;\n }\n\n /* In a first pass, compute the optimal bit lengths (which may\n * overflow in the case of the bit length tree).\n */\n tree[s.heap[s.heap_max] * 2 + 1]/*.Len*/ = 0; /* root of the heap */\n\n for (h = s.heap_max + 1; h < HEAP_SIZE; h++) {\n n = s.heap[h];\n bits = tree[tree[n * 2 + 1]/*.Dad*/ * 2 + 1]/*.Len*/ + 1;\n if (bits > max_length) {\n bits = max_length;\n overflow++;\n }\n tree[n * 2 + 1]/*.Len*/ = bits;\n /* We overwrite tree[n].Dad which is no longer needed */\n\n if (n > 
max_code) { continue; } /* not a leaf node */\n\n s.bl_count[bits]++;\n xbits = 0;\n if (n >= base) {\n xbits = extra[n - base];\n }\n f = tree[n * 2]/*.Freq*/;\n s.opt_len += f * (bits + xbits);\n if (has_stree) {\n s.static_len += f * (stree[n * 2 + 1]/*.Len*/ + xbits);\n }\n }\n if (overflow === 0) { return; }\n\n // Trace((stderr,\"\\nbit length overflow\\n\"));\n /* This happens for example on obj2 and pic of the Calgary corpus */\n\n /* Find the first bit length which could increase: */\n do {\n bits = max_length - 1;\n while (s.bl_count[bits] === 0) { bits--; }\n s.bl_count[bits]--; /* move one leaf down the tree */\n s.bl_count[bits + 1] += 2; /* move one overflow item as its brother */\n s.bl_count[max_length]--;\n /* The brother of the overflow item also moves one step up,\n * but this does not affect bl_count[max_length]\n */\n overflow -= 2;\n } while (overflow > 0);\n\n /* Now recompute all bit lengths, scanning in increasing frequency.\n * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all\n * lengths instead of fixing only the wrong ones. This idea is taken\n * from 'ar' written by Haruhiko Okumura.)\n */\n for (bits = max_length; bits !== 0; bits--) {\n n = s.bl_count[bits];\n while (n !== 0) {\n m = s.heap[--h];\n if (m > max_code) { continue; }\n if (tree[m * 2 + 1]/*.Len*/ !== bits) {\n // Trace((stderr,\"code %d bits %d->%d\\n\", m, tree[m].Len, bits));\n s.opt_len += (bits - tree[m * 2 + 1]/*.Len*/) * tree[m * 2]/*.Freq*/;\n tree[m * 2 + 1]/*.Len*/ = bits;\n }\n n--;\n }\n }\n}\n\n\n/* ===========================================================================\n * Generate the codes for a given tree and bit counts (which need not be\n * optimal).\n * IN assertion: the array bl_count contains the bit length statistics for\n * the given tree and the field len is set for all tree elements.\n * OUT assertion: the field code is set for all tree elements of non\n * zero code length.\n */\nfunction gen_codes(tree, max_code, bl_count)\n// ct_data *tree; /* the tree to decorate */\n// int max_code; /* largest code with non zero frequency */\n// ushf *bl_count; /* number of codes at each bit length */\n{\n var next_code = new Array(MAX_BITS + 1); /* next code value for each bit length */\n var code = 0; /* running code value */\n var bits; /* bit index */\n var n; /* code index */\n\n /* The distribution counts are first used to generate the code values\n * without bit reversal.\n */\n for (bits = 1; bits <= MAX_BITS; bits++) {\n next_code[bits] = code = (code + bl_count[bits - 1]) << 1;\n }\n /* Check that the bit counts in bl_count are consistent. 
The last code\n * must be all ones.\n */\n //Assert (code + bl_count[MAX_BITS]-1 == (1< length code (0..28) */\n length = 0;\n for (code = 0; code < LENGTH_CODES - 1; code++) {\n base_length[code] = length;\n for (n = 0; n < (1 << extra_lbits[code]); n++) {\n _length_code[length++] = code;\n }\n }\n //Assert (length == 256, \"tr_static_init: length != 256\");\n /* Note that the length 255 (match length 258) can be represented\n * in two different ways: code 284 + 5 bits or code 285, so we\n * overwrite length_code[255] to use the best encoding:\n */\n _length_code[length - 1] = code;\n\n /* Initialize the mapping dist (0..32K) -> dist code (0..29) */\n dist = 0;\n for (code = 0; code < 16; code++) {\n base_dist[code] = dist;\n for (n = 0; n < (1 << extra_dbits[code]); n++) {\n _dist_code[dist++] = code;\n }\n }\n //Assert (dist == 256, \"tr_static_init: dist != 256\");\n dist >>= 7; /* from now on, all distances are divided by 128 */\n for (; code < D_CODES; code++) {\n base_dist[code] = dist << 7;\n for (n = 0; n < (1 << (extra_dbits[code] - 7)); n++) {\n _dist_code[256 + dist++] = code;\n }\n }\n //Assert (dist == 256, \"tr_static_init: 256+dist != 512\");\n\n /* Construct the codes of the static literal tree */\n for (bits = 0; bits <= MAX_BITS; bits++) {\n bl_count[bits] = 0;\n }\n\n n = 0;\n while (n <= 143) {\n static_ltree[n * 2 + 1]/*.Len*/ = 8;\n n++;\n bl_count[8]++;\n }\n while (n <= 255) {\n static_ltree[n * 2 + 1]/*.Len*/ = 9;\n n++;\n bl_count[9]++;\n }\n while (n <= 279) {\n static_ltree[n * 2 + 1]/*.Len*/ = 7;\n n++;\n bl_count[7]++;\n }\n while (n <= 287) {\n static_ltree[n * 2 + 1]/*.Len*/ = 8;\n n++;\n bl_count[8]++;\n }\n /* Codes 286 and 287 do not exist, but we must include them in the\n * tree construction to get a canonical Huffman tree (longest code\n * all ones)\n */\n gen_codes(static_ltree, L_CODES + 1, bl_count);\n\n /* The static distance tree is trivial: */\n for (n = 0; n < D_CODES; n++) {\n static_dtree[n * 2 + 1]/*.Len*/ = 5;\n static_dtree[n * 2]/*.Code*/ = bi_reverse(n, 5);\n }\n\n // Now data ready and we can init static trees\n static_l_desc = new StaticTreeDesc(static_ltree, extra_lbits, LITERALS + 1, L_CODES, MAX_BITS);\n static_d_desc = new StaticTreeDesc(static_dtree, extra_dbits, 0, D_CODES, MAX_BITS);\n static_bl_desc = new StaticTreeDesc(new Array(0), extra_blbits, 0, BL_CODES, MAX_BL_BITS);\n\n //static_init_done = true;\n}\n\n\n/* ===========================================================================\n * Initialize a new block.\n */\nfunction init_block(s) {\n var n; /* iterates over tree elements */\n\n /* Initialize the trees. 
*/\n for (n = 0; n < L_CODES; n++) { s.dyn_ltree[n * 2]/*.Freq*/ = 0; }\n for (n = 0; n < D_CODES; n++) { s.dyn_dtree[n * 2]/*.Freq*/ = 0; }\n for (n = 0; n < BL_CODES; n++) { s.bl_tree[n * 2]/*.Freq*/ = 0; }\n\n s.dyn_ltree[END_BLOCK * 2]/*.Freq*/ = 1;\n s.opt_len = s.static_len = 0;\n s.last_lit = s.matches = 0;\n}\n\n\n/* ===========================================================================\n * Flush the bit buffer and align the output on a byte boundary\n */\nfunction bi_windup(s)\n{\n if (s.bi_valid > 8) {\n put_short(s, s.bi_buf);\n } else if (s.bi_valid > 0) {\n //put_byte(s, (Byte)s->bi_buf);\n s.pending_buf[s.pending++] = s.bi_buf;\n }\n s.bi_buf = 0;\n s.bi_valid = 0;\n}\n\n/* ===========================================================================\n * Copy a stored block, storing first the length and its\n * one's complement if requested.\n */\nfunction copy_block(s, buf, len, header)\n//DeflateState *s;\n//charf *buf; /* the input data */\n//unsigned len; /* its length */\n//int header; /* true if block header must be written */\n{\n bi_windup(s); /* align on byte boundary */\n\n if (header) {\n put_short(s, len);\n put_short(s, ~len);\n }\n// while (len--) {\n// put_byte(s, *buf++);\n// }\n utils.arraySet(s.pending_buf, s.window, buf, len, s.pending);\n s.pending += len;\n}\n\n/* ===========================================================================\n * Compares to subtrees, using the tree depth as tie breaker when\n * the subtrees have equal frequency. This minimizes the worst case length.\n */\nfunction smaller(tree, n, m, depth) {\n var _n2 = n * 2;\n var _m2 = m * 2;\n return (tree[_n2]/*.Freq*/ < tree[_m2]/*.Freq*/ ||\n (tree[_n2]/*.Freq*/ === tree[_m2]/*.Freq*/ && depth[n] <= depth[m]));\n}\n\n/* ===========================================================================\n * Restore the heap property by moving down the tree starting at node k,\n * exchanging a node with the smallest of its two sons if necessary, stopping\n * when the heap property is re-established (each father smaller than its\n * two sons).\n */\nfunction pqdownheap(s, tree, k)\n// deflate_state *s;\n// ct_data *tree; /* the tree to restore */\n// int k; /* node to move down */\n{\n var v = s.heap[k];\n var j = k << 1; /* left son of k */\n while (j <= s.heap_len) {\n /* Set j to the smallest of the two sons: */\n if (j < s.heap_len &&\n smaller(tree, s.heap[j + 1], s.heap[j], s.depth)) {\n j++;\n }\n /* Exit if v is smaller than both sons */\n if (smaller(tree, v, s.heap[j], s.depth)) { break; }\n\n /* Exchange v with the smallest son */\n s.heap[k] = s.heap[j];\n k = j;\n\n /* And continue down the tree, setting j to the left son of k */\n j <<= 1;\n }\n s.heap[k] = v;\n}\n\n\n// inlined manually\n// var SMALLEST = 1;\n\n/* ===========================================================================\n * Send the block data compressed using the given Huffman trees\n */\nfunction compress_block(s, ltree, dtree)\n// deflate_state *s;\n// const ct_data *ltree; /* literal tree */\n// const ct_data *dtree; /* distance tree */\n{\n var dist; /* distance of matched string */\n var lc; /* match length or unmatched char (if dist == 0) */\n var lx = 0; /* running index in l_buf */\n var code; /* the code to send */\n var extra; /* number of extra bits to send */\n\n if (s.last_lit !== 0) {\n do {\n dist = (s.pending_buf[s.d_buf + lx * 2] << 8) | (s.pending_buf[s.d_buf + lx * 2 + 1]);\n lc = s.pending_buf[s.l_buf + lx];\n lx++;\n\n if (dist === 0) {\n send_code(s, lc, ltree); /* send a literal byte 
*/\n //Tracecv(isgraph(lc), (stderr,\" '%c' \", lc));\n } else {\n /* Here, lc is the match length - MIN_MATCH */\n code = _length_code[lc];\n send_code(s, code + LITERALS + 1, ltree); /* send the length code */\n extra = extra_lbits[code];\n if (extra !== 0) {\n lc -= base_length[code];\n send_bits(s, lc, extra); /* send the extra length bits */\n }\n dist--; /* dist is now the match distance - 1 */\n code = d_code(dist);\n //Assert (code < D_CODES, \"bad d_code\");\n\n send_code(s, code, dtree); /* send the distance code */\n extra = extra_dbits[code];\n if (extra !== 0) {\n dist -= base_dist[code];\n send_bits(s, dist, extra); /* send the extra distance bits */\n }\n } /* literal or match pair ? */\n\n /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */\n //Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx,\n // \"pendingBuf overflow\");\n\n } while (lx < s.last_lit);\n }\n\n send_code(s, END_BLOCK, ltree);\n}\n\n\n/* ===========================================================================\n * Construct one Huffman tree and assigns the code bit strings and lengths.\n * Update the total bit length for the current block.\n * IN assertion: the field freq is set for all tree elements.\n * OUT assertions: the fields len and code are set to the optimal bit length\n * and corresponding code. The length opt_len is updated; static_len is\n * also updated if stree is not null. The field max_code is set.\n */\nfunction build_tree(s, desc)\n// deflate_state *s;\n// tree_desc *desc; /* the tree descriptor */\n{\n var tree = desc.dyn_tree;\n var stree = desc.stat_desc.static_tree;\n var has_stree = desc.stat_desc.has_stree;\n var elems = desc.stat_desc.elems;\n var n, m; /* iterate over heap elements */\n var max_code = -1; /* largest code with non zero frequency */\n var node; /* new node being created */\n\n /* Construct the initial heap, with least frequent element in\n * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].\n * heap[0] is not used.\n */\n s.heap_len = 0;\n s.heap_max = HEAP_SIZE;\n\n for (n = 0; n < elems; n++) {\n if (tree[n * 2]/*.Freq*/ !== 0) {\n s.heap[++s.heap_len] = max_code = n;\n s.depth[n] = 0;\n\n } else {\n tree[n * 2 + 1]/*.Len*/ = 0;\n }\n }\n\n /* The pkzip format requires that at least one distance code exists,\n * and that at least one bit should be sent even if there is only one\n * possible code. So to avoid special checks later on we force at least\n * two codes of non zero frequency.\n */\n while (s.heap_len < 2) {\n node = s.heap[++s.heap_len] = (max_code < 2 ? ++max_code : 0);\n tree[node * 2]/*.Freq*/ = 1;\n s.depth[node] = 0;\n s.opt_len--;\n\n if (has_stree) {\n s.static_len -= stree[node * 2 + 1]/*.Len*/;\n }\n /* node is 0 or 1 so it does not have extra bits */\n }\n desc.max_code = max_code;\n\n /* The elements heap[heap_len/2+1 .. 
heap_len] are leaves of the tree,\n * establish sub-heaps of increasing lengths:\n */\n for (n = (s.heap_len >> 1/*int /2*/); n >= 1; n--) { pqdownheap(s, tree, n); }\n\n /* Construct the Huffman tree by repeatedly combining the least two\n * frequent nodes.\n */\n node = elems; /* next internal node of the tree */\n do {\n //pqremove(s, tree, n); /* n = node of least frequency */\n /*** pqremove ***/\n n = s.heap[1/*SMALLEST*/];\n s.heap[1/*SMALLEST*/] = s.heap[s.heap_len--];\n pqdownheap(s, tree, 1/*SMALLEST*/);\n /***/\n\n m = s.heap[1/*SMALLEST*/]; /* m = node of next least frequency */\n\n s.heap[--s.heap_max] = n; /* keep the nodes sorted by frequency */\n s.heap[--s.heap_max] = m;\n\n /* Create a new node father of n and m */\n tree[node * 2]/*.Freq*/ = tree[n * 2]/*.Freq*/ + tree[m * 2]/*.Freq*/;\n s.depth[node] = (s.depth[n] >= s.depth[m] ? s.depth[n] : s.depth[m]) + 1;\n tree[n * 2 + 1]/*.Dad*/ = tree[m * 2 + 1]/*.Dad*/ = node;\n\n /* and insert the new node in the heap */\n s.heap[1/*SMALLEST*/] = node++;\n pqdownheap(s, tree, 1/*SMALLEST*/);\n\n } while (s.heap_len >= 2);\n\n s.heap[--s.heap_max] = s.heap[1/*SMALLEST*/];\n\n /* At this point, the fields freq and dad are set. We can now\n * generate the bit lengths.\n */\n gen_bitlen(s, desc);\n\n /* The field len is now set, we can generate the bit codes */\n gen_codes(tree, max_code, s.bl_count);\n}\n\n\n/* ===========================================================================\n * Scan a literal or distance tree to determine the frequencies of the codes\n * in the bit length tree.\n */\nfunction scan_tree(s, tree, max_code)\n// deflate_state *s;\n// ct_data *tree; /* the tree to be scanned */\n// int max_code; /* and its largest code of non zero frequency */\n{\n var n; /* iterates over all tree elements */\n var prevlen = -1; /* last emitted length */\n var curlen; /* length of current code */\n\n var nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */\n\n var count = 0; /* repeat count of the current code */\n var max_count = 7; /* max repeat count */\n var min_count = 4; /* min repeat count */\n\n if (nextlen === 0) {\n max_count = 138;\n min_count = 3;\n }\n tree[(max_code + 1) * 2 + 1]/*.Len*/ = 0xffff; /* guard */\n\n for (n = 0; n <= max_code; n++) {\n curlen = nextlen;\n nextlen = tree[(n + 1) * 2 + 1]/*.Len*/;\n\n if (++count < max_count && curlen === nextlen) {\n continue;\n\n } else if (count < min_count) {\n s.bl_tree[curlen * 2]/*.Freq*/ += count;\n\n } else if (curlen !== 0) {\n\n if (curlen !== prevlen) { s.bl_tree[curlen * 2]/*.Freq*/++; }\n s.bl_tree[REP_3_6 * 2]/*.Freq*/++;\n\n } else if (count <= 10) {\n s.bl_tree[REPZ_3_10 * 2]/*.Freq*/++;\n\n } else {\n s.bl_tree[REPZ_11_138 * 2]/*.Freq*/++;\n }\n\n count = 0;\n prevlen = curlen;\n\n if (nextlen === 0) {\n max_count = 138;\n min_count = 3;\n\n } else if (curlen === nextlen) {\n max_count = 6;\n min_count = 3;\n\n } else {\n max_count = 7;\n min_count = 4;\n }\n }\n}\n\n\n/* ===========================================================================\n * Send a literal or distance tree in compressed form, using the codes in\n * bl_tree.\n */\nfunction send_tree(s, tree, max_code)\n// deflate_state *s;\n// ct_data *tree; /* the tree to be scanned */\n// int max_code; /* and its largest code of non zero frequency */\n{\n var n; /* iterates over all tree elements */\n var prevlen = -1; /* last emitted length */\n var curlen; /* length of current code */\n\n var nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */\n\n var count = 0; /* 
repeat count of the current code */\n var max_count = 7; /* max repeat count */\n var min_count = 4; /* min repeat count */\n\n /* tree[max_code+1].Len = -1; */ /* guard already set */\n if (nextlen === 0) {\n max_count = 138;\n min_count = 3;\n }\n\n for (n = 0; n <= max_code; n++) {\n curlen = nextlen;\n nextlen = tree[(n + 1) * 2 + 1]/*.Len*/;\n\n if (++count < max_count && curlen === nextlen) {\n continue;\n\n } else if (count < min_count) {\n do { send_code(s, curlen, s.bl_tree); } while (--count !== 0);\n\n } else if (curlen !== 0) {\n if (curlen !== prevlen) {\n send_code(s, curlen, s.bl_tree);\n count--;\n }\n //Assert(count >= 3 && count <= 6, \" 3_6?\");\n send_code(s, REP_3_6, s.bl_tree);\n send_bits(s, count - 3, 2);\n\n } else if (count <= 10) {\n send_code(s, REPZ_3_10, s.bl_tree);\n send_bits(s, count - 3, 3);\n\n } else {\n send_code(s, REPZ_11_138, s.bl_tree);\n send_bits(s, count - 11, 7);\n }\n\n count = 0;\n prevlen = curlen;\n if (nextlen === 0) {\n max_count = 138;\n min_count = 3;\n\n } else if (curlen === nextlen) {\n max_count = 6;\n min_count = 3;\n\n } else {\n max_count = 7;\n min_count = 4;\n }\n }\n}\n\n\n/* ===========================================================================\n * Construct the Huffman tree for the bit lengths and return the index in\n * bl_order of the last bit length code to send.\n */\nfunction build_bl_tree(s) {\n var max_blindex; /* index of last bit length code of non zero freq */\n\n /* Determine the bit length frequencies for literal and distance trees */\n scan_tree(s, s.dyn_ltree, s.l_desc.max_code);\n scan_tree(s, s.dyn_dtree, s.d_desc.max_code);\n\n /* Build the bit length tree: */\n build_tree(s, s.bl_desc);\n /* opt_len now includes the length of the tree representations, except\n * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.\n */\n\n /* Determine the number of bit length codes to send. The pkzip format\n * requires that at least 4 bit length codes be sent. 
(appnote.txt says\n * 3 but the actual value used is 4.)\n */\n for (max_blindex = BL_CODES - 1; max_blindex >= 3; max_blindex--) {\n if (s.bl_tree[bl_order[max_blindex] * 2 + 1]/*.Len*/ !== 0) {\n break;\n }\n }\n /* Update opt_len to include the bit length tree and counts */\n s.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4;\n //Tracev((stderr, \"\\ndyn trees: dyn %ld, stat %ld\",\n // s->opt_len, s->static_len));\n\n return max_blindex;\n}\n\n\n/* ===========================================================================\n * Send the header for a block using dynamic Huffman trees: the counts, the\n * lengths of the bit length codes, the literal tree and the distance tree.\n * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.\n */\nfunction send_all_trees(s, lcodes, dcodes, blcodes)\n// deflate_state *s;\n// int lcodes, dcodes, blcodes; /* number of codes for each tree */\n{\n var rank; /* index in bl_order */\n\n //Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, \"not enough codes\");\n //Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,\n // \"too many codes\");\n //Tracev((stderr, \"\\nbl counts: \"));\n send_bits(s, lcodes - 257, 5); /* not +255 as stated in appnote.txt */\n send_bits(s, dcodes - 1, 5);\n send_bits(s, blcodes - 4, 4); /* not -3 as stated in appnote.txt */\n for (rank = 0; rank < blcodes; rank++) {\n //Tracev((stderr, \"\\nbl code %2d \", bl_order[rank]));\n send_bits(s, s.bl_tree[bl_order[rank] * 2 + 1]/*.Len*/, 3);\n }\n //Tracev((stderr, \"\\nbl tree: sent %ld\", s->bits_sent));\n\n send_tree(s, s.dyn_ltree, lcodes - 1); /* literal tree */\n //Tracev((stderr, \"\\nlit tree: sent %ld\", s->bits_sent));\n\n send_tree(s, s.dyn_dtree, dcodes - 1); /* distance tree */\n //Tracev((stderr, \"\\ndist tree: sent %ld\", s->bits_sent));\n}\n\n\n/* ===========================================================================\n * Check if the data type is TEXT or BINARY, using the following algorithm:\n * - TEXT if the two conditions below are satisfied:\n * a) There are no non-portable control characters belonging to the\n * \"black list\" (0..6, 14..25, 28..31).\n * b) There is at least one printable character belonging to the\n * \"white list\" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255).\n * - BINARY otherwise.\n * - The following partially-portable control characters form a\n * \"gray list\" that is ignored in this detection algorithm:\n * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}).\n * IN assertion: the fields Freq of dyn_ltree are set.\n */\nfunction detect_data_type(s) {\n /* black_mask is the bit mask of black-listed bytes\n * set bits 0..6, 14..25, and 28..31\n * 0xf3ffc07f = binary 11110011111111111100000001111111\n */\n var black_mask = 0xf3ffc07f;\n var n;\n\n /* Check for non-textual (\"black-listed\") bytes. */\n for (n = 0; n <= 31; n++, black_mask >>>= 1) {\n if ((black_mask & 1) && (s.dyn_ltree[n * 2]/*.Freq*/ !== 0)) {\n return Z_BINARY;\n }\n }\n\n /* Check for textual (\"white-listed\") bytes. 
*/\n if (s.dyn_ltree[9 * 2]/*.Freq*/ !== 0 || s.dyn_ltree[10 * 2]/*.Freq*/ !== 0 ||\n s.dyn_ltree[13 * 2]/*.Freq*/ !== 0) {\n return Z_TEXT;\n }\n for (n = 32; n < LITERALS; n++) {\n if (s.dyn_ltree[n * 2]/*.Freq*/ !== 0) {\n return Z_TEXT;\n }\n }\n\n /* There are no \"black-listed\" or \"white-listed\" bytes:\n * this stream either is empty or has tolerated (\"gray-listed\") bytes only.\n */\n return Z_BINARY;\n}\n\n\nvar static_init_done = false;\n\n/* ===========================================================================\n * Initialize the tree data structures for a new zlib stream.\n */\nfunction _tr_init(s)\n{\n\n if (!static_init_done) {\n tr_static_init();\n static_init_done = true;\n }\n\n s.l_desc = new TreeDesc(s.dyn_ltree, static_l_desc);\n s.d_desc = new TreeDesc(s.dyn_dtree, static_d_desc);\n s.bl_desc = new TreeDesc(s.bl_tree, static_bl_desc);\n\n s.bi_buf = 0;\n s.bi_valid = 0;\n\n /* Initialize the first block of the first file: */\n init_block(s);\n}\n\n\n/* ===========================================================================\n * Send a stored block\n */\nfunction _tr_stored_block(s, buf, stored_len, last)\n//DeflateState *s;\n//charf *buf; /* input block */\n//ulg stored_len; /* length of input block */\n//int last; /* one if this is the last block for a file */\n{\n send_bits(s, (STORED_BLOCK << 1) + (last ? 1 : 0), 3); /* send block type */\n copy_block(s, buf, stored_len, true); /* with header */\n}\n\n\n/* ===========================================================================\n * Send one empty static block to give enough lookahead for inflate.\n * This takes 10 bits, of which 7 may remain in the bit buffer.\n */\nfunction _tr_align(s) {\n send_bits(s, STATIC_TREES << 1, 3);\n send_code(s, END_BLOCK, static_ltree);\n bi_flush(s);\n}\n\n\n/* ===========================================================================\n * Determine the best encoding for the current block: dynamic trees, static\n * trees or store, and output the encoded block to the zip file.\n */\nfunction _tr_flush_block(s, buf, stored_len, last)\n//DeflateState *s;\n//charf *buf; /* input block, or NULL if too old */\n//ulg stored_len; /* length of input block */\n//int last; /* one if this is the last block for a file */\n{\n var opt_lenb, static_lenb; /* opt_len and static_len in bytes */\n var max_blindex = 0; /* index of last bit length code of non zero freq */\n\n /* Build the Huffman trees unless a stored block is forced */\n if (s.level > 0) {\n\n /* Check if the file is binary or text */\n if (s.strm.data_type === Z_UNKNOWN) {\n s.strm.data_type = detect_data_type(s);\n }\n\n /* Construct the literal and distance trees */\n build_tree(s, s.l_desc);\n // Tracev((stderr, \"\\nlit data: dyn %ld, stat %ld\", s->opt_len,\n // s->static_len));\n\n build_tree(s, s.d_desc);\n // Tracev((stderr, \"\\ndist data: dyn %ld, stat %ld\", s->opt_len,\n // s->static_len));\n /* At this point, opt_len and static_len are the total bit lengths of\n * the compressed block data, excluding the tree representations.\n */\n\n /* Build the bit length tree for the above two trees, and get the index\n * in bl_order of the last bit length code to send.\n */\n max_blindex = build_bl_tree(s);\n\n /* Determine the best encoding. Compute the block lengths in bytes. 
*/\n opt_lenb = (s.opt_len + 3 + 7) >>> 3;\n static_lenb = (s.static_len + 3 + 7) >>> 3;\n\n // Tracev((stderr, \"\\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u \",\n // opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,\n // s->last_lit));\n\n if (static_lenb <= opt_lenb) { opt_lenb = static_lenb; }\n\n } else {\n // Assert(buf != (char*)0, \"lost buf\");\n opt_lenb = static_lenb = stored_len + 5; /* force a stored block */\n }\n\n if ((stored_len + 4 <= opt_lenb) && (buf !== -1)) {\n /* 4: two words for the lengths */\n\n /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.\n * Otherwise we can't have processed more than WSIZE input bytes since\n * the last block flush, because compression would have been\n * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to\n * transform a block into a stored block.\n */\n _tr_stored_block(s, buf, stored_len, last);\n\n } else if (s.strategy === Z_FIXED || static_lenb === opt_lenb) {\n\n send_bits(s, (STATIC_TREES << 1) + (last ? 1 : 0), 3);\n compress_block(s, static_ltree, static_dtree);\n\n } else {\n send_bits(s, (DYN_TREES << 1) + (last ? 1 : 0), 3);\n send_all_trees(s, s.l_desc.max_code + 1, s.d_desc.max_code + 1, max_blindex + 1);\n compress_block(s, s.dyn_ltree, s.dyn_dtree);\n }\n // Assert (s->compressed_len == s->bits_sent, \"bad compressed size\");\n /* The above check is made mod 2^32, for files larger than 512 MB\n * and uLong implemented on 32 bits.\n */\n init_block(s);\n\n if (last) {\n bi_windup(s);\n }\n // Tracev((stderr,\"\\ncomprlen %lu(%lu) \", s->compressed_len>>3,\n // s->compressed_len-7*last));\n}\n\n/* ===========================================================================\n * Save the match info and tally the frequency counts. Return true if\n * the current block must be flushed.\n */\nfunction _tr_tally(s, dist, lc)\n// deflate_state *s;\n// unsigned dist; /* distance of matched string */\n// unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */\n{\n //var out_length, in_length, dcode;\n\n s.pending_buf[s.d_buf + s.last_lit * 2] = (dist >>> 8) & 0xff;\n s.pending_buf[s.d_buf + s.last_lit * 2 + 1] = dist & 0xff;\n\n s.pending_buf[s.l_buf + s.last_lit] = lc & 0xff;\n s.last_lit++;\n\n if (dist === 0) {\n /* lc is the unmatched char */\n s.dyn_ltree[lc * 2]/*.Freq*/++;\n } else {\n s.matches++;\n /* Here, lc is the match length - MIN_MATCH */\n dist--; /* dist = match distance - 1 */\n //Assert((ush)dist < (ush)MAX_DIST(s) &&\n // (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&\n // (ush)d_code(dist) < (ush)D_CODES, \"_tr_tally: bad match\");\n\n s.dyn_ltree[(_length_code[lc] + LITERALS + 1) * 2]/*.Freq*/++;\n s.dyn_dtree[d_code(dist) * 2]/*.Freq*/++;\n }\n\n// (!) 
This block is disabled in zlib defailts,\n// don't enable it for binary compatibility\n\n//#ifdef TRUNCATE_BLOCK\n// /* Try to guess if it is profitable to stop the current block here */\n// if ((s.last_lit & 0x1fff) === 0 && s.level > 2) {\n// /* Compute an upper bound for the compressed length */\n// out_length = s.last_lit*8;\n// in_length = s.strstart - s.block_start;\n//\n// for (dcode = 0; dcode < D_CODES; dcode++) {\n// out_length += s.dyn_dtree[dcode*2]/*.Freq*/ * (5 + extra_dbits[dcode]);\n// }\n// out_length >>>= 3;\n// //Tracev((stderr,\"\\nlast_lit %u, in %ld, out ~%ld(%ld%%) \",\n// // s->last_lit, in_length, out_length,\n// // 100L - out_length*100L/in_length));\n// if (s.matches < (s.last_lit>>1)/*int /2*/ && out_length < (in_length>>1)/*int /2*/) {\n// return true;\n// }\n// }\n//#endif\n\n return (s.last_lit === s.lit_bufsize - 1);\n /* We avoid equality with lit_bufsize because of wraparound at 64K\n * on 16 bit machines and because stored blocks are restricted to\n * 64K-1 bytes.\n */\n}\n\nexports._tr_init = _tr_init;\nexports._tr_stored_block = _tr_stored_block;\nexports._tr_flush_block = _tr_flush_block;\nexports._tr_tally = _tr_tally;\nexports._tr_align = _tr_align;\n","'use strict';\n\n// Note: adler32 takes 12% for level 0 and 2% for level 6.\n// It doesn't worth to make additional optimizationa as in original.\n// Small size is preferable.\n\nfunction adler32(adler, buf, len, pos) {\n var s1 = (adler & 0xffff) |0,\n s2 = ((adler >>> 16) & 0xffff) |0,\n n = 0;\n\n while (len !== 0) {\n // Set limit ~ twice less than 5552, to keep\n // s2 in 31-bits, because we force signed ints.\n // in other case %= will fail.\n n = len > 2000 ? 2000 : len;\n len -= n;\n\n do {\n s1 = (s1 + buf[pos++]) |0;\n s2 = (s2 + s1) |0;\n } while (--n);\n\n s1 %= 65521;\n s2 %= 65521;\n }\n\n return (s1 | (s2 << 16)) |0;\n}\n\n\nmodule.exports = adler32;\n","'use strict';\n\n// Note: we can't get significant speed boost here.\n// So write code to minimize size - no pregenerated tables\n// and array tools dependencies.\n\n\n// Use ordinary array, since untyped makes no boost here\nfunction makeTable() {\n var c, table = [];\n\n for (var n = 0; n < 256; n++) {\n c = n;\n for (var k = 0; k < 8; k++) {\n c = ((c & 1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1));\n }\n table[n] = c;\n }\n\n return table;\n}\n\n// Create table on load. Just 255 signed longs. 
Not a problem.\nvar crcTable = makeTable();\n\n\nfunction crc32(crc, buf, len, pos) {\n var t = crcTable,\n end = pos + len;\n\n crc ^= -1;\n\n for (var i = pos; i < end; i++) {\n crc = (crc >>> 8) ^ t[(crc ^ buf[i]) & 0xFF];\n }\n\n return (crc ^ (-1)); // >>> 0;\n}\n\n\nmodule.exports = crc32;\n","'use strict';\n\nmodule.exports = {\n 2: 'need dictionary', /* Z_NEED_DICT 2 */\n 1: 'stream end', /* Z_STREAM_END 1 */\n 0: '', /* Z_OK 0 */\n '-1': 'file error', /* Z_ERRNO (-1) */\n '-2': 'stream error', /* Z_STREAM_ERROR (-2) */\n '-3': 'data error', /* Z_DATA_ERROR (-3) */\n '-4': 'insufficient memory', /* Z_MEM_ERROR (-4) */\n '-5': 'buffer error', /* Z_BUF_ERROR (-5) */\n '-6': 'incompatible version' /* Z_VERSION_ERROR (-6) */\n};\n","'use strict';\n\nvar utils = require('../utils/common');\nvar trees = require('./trees');\nvar adler32 = require('./adler32');\nvar crc32 = require('./crc32');\nvar msg = require('./messages');\n\n/* Public constants ==========================================================*/\n/* ===========================================================================*/\n\n\n/* Allowed flush values; see deflate() and inflate() below for details */\nvar Z_NO_FLUSH = 0;\nvar Z_PARTIAL_FLUSH = 1;\n//var Z_SYNC_FLUSH = 2;\nvar Z_FULL_FLUSH = 3;\nvar Z_FINISH = 4;\nvar Z_BLOCK = 5;\n//var Z_TREES = 6;\n\n\n/* Return codes for the compression/decompression functions. Negative values\n * are errors, positive values are used for special but normal events.\n */\nvar Z_OK = 0;\nvar Z_STREAM_END = 1;\n//var Z_NEED_DICT = 2;\n//var Z_ERRNO = -1;\nvar Z_STREAM_ERROR = -2;\nvar Z_DATA_ERROR = -3;\n//var Z_MEM_ERROR = -4;\nvar Z_BUF_ERROR = -5;\n//var Z_VERSION_ERROR = -6;\n\n\n/* compression levels */\n//var Z_NO_COMPRESSION = 0;\n//var Z_BEST_SPEED = 1;\n//var Z_BEST_COMPRESSION = 9;\nvar Z_DEFAULT_COMPRESSION = -1;\n\n\nvar Z_FILTERED = 1;\nvar Z_HUFFMAN_ONLY = 2;\nvar Z_RLE = 3;\nvar Z_FIXED = 4;\nvar Z_DEFAULT_STRATEGY = 0;\n\n/* Possible values of the data_type field (though see inflate()) */\n//var Z_BINARY = 0;\n//var Z_TEXT = 1;\n//var Z_ASCII = 1; // = Z_TEXT\nvar Z_UNKNOWN = 2;\n\n\n/* The deflate compression method */\nvar Z_DEFLATED = 8;\n\n/*============================================================================*/\n\n\nvar MAX_MEM_LEVEL = 9;\n/* Maximum value for memLevel in deflateInit2 */\nvar MAX_WBITS = 15;\n/* 32K LZ77 window */\nvar DEF_MEM_LEVEL = 8;\n\n\nvar LENGTH_CODES = 29;\n/* number of length codes, not counting the special END_BLOCK code */\nvar LITERALS = 256;\n/* number of literal bytes 0..255 */\nvar L_CODES = LITERALS + 1 + LENGTH_CODES;\n/* number of Literal or Length codes, including the END_BLOCK code */\nvar D_CODES = 30;\n/* number of distance codes */\nvar BL_CODES = 19;\n/* number of codes used to transfer the bit lengths */\nvar HEAP_SIZE = 2 * L_CODES + 1;\n/* maximum heap size */\nvar MAX_BITS = 15;\n/* All codes must not exceed MAX_BITS bits */\n\nvar MIN_MATCH = 3;\nvar MAX_MATCH = 258;\nvar MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1);\n\nvar PRESET_DICT = 0x20;\n\nvar INIT_STATE = 42;\nvar EXTRA_STATE = 69;\nvar NAME_STATE = 73;\nvar COMMENT_STATE = 91;\nvar HCRC_STATE = 103;\nvar BUSY_STATE = 113;\nvar FINISH_STATE = 666;\n\nvar BS_NEED_MORE = 1; /* block not completed, need more input or more output */\nvar BS_BLOCK_DONE = 2; /* block flush performed */\nvar BS_FINISH_STARTED = 3; /* finish started, need only more output at next deflate */\nvar BS_FINISH_DONE = 4; /* finish done, accept no more input or output */\n\nvar OS_CODE = 
0x03; // Unix :) . Don't detect, use this default.\n\nfunction err(strm, errorCode) {\n strm.msg = msg[errorCode];\n return errorCode;\n}\n\nfunction rank(f) {\n return ((f) << 1) - ((f) > 4 ? 9 : 0);\n}\n\nfunction zero(buf) { var len = buf.length; while (--len >= 0) { buf[len] = 0; } }\n\n\n/* =========================================================================\n * Flush as much pending output as possible. All deflate() output goes\n * through this function so some applications may wish to modify it\n * to avoid allocating a large strm->output buffer and copying into it.\n * (See also read_buf()).\n */\nfunction flush_pending(strm) {\n var s = strm.state;\n\n //_tr_flush_bits(s);\n var len = s.pending;\n if (len > strm.avail_out) {\n len = strm.avail_out;\n }\n if (len === 0) { return; }\n\n utils.arraySet(strm.output, s.pending_buf, s.pending_out, len, strm.next_out);\n strm.next_out += len;\n s.pending_out += len;\n strm.total_out += len;\n strm.avail_out -= len;\n s.pending -= len;\n if (s.pending === 0) {\n s.pending_out = 0;\n }\n}\n\n\nfunction flush_block_only(s, last) {\n trees._tr_flush_block(s, (s.block_start >= 0 ? s.block_start : -1), s.strstart - s.block_start, last);\n s.block_start = s.strstart;\n flush_pending(s.strm);\n}\n\n\nfunction put_byte(s, b) {\n s.pending_buf[s.pending++] = b;\n}\n\n\n/* =========================================================================\n * Put a short in the pending buffer. The 16-bit value is put in MSB order.\n * IN assertion: the stream state is correct and there is enough room in\n * pending_buf.\n */\nfunction putShortMSB(s, b) {\n// put_byte(s, (Byte)(b >> 8));\n// put_byte(s, (Byte)(b & 0xff));\n s.pending_buf[s.pending++] = (b >>> 8) & 0xff;\n s.pending_buf[s.pending++] = b & 0xff;\n}\n\n\n/* ===========================================================================\n * Read a new buffer from the current input stream, update the adler32\n * and total number of bytes read. All deflate() input goes through\n * this function so some applications may wish to modify it to avoid\n * allocating a large strm->input buffer and copying from it.\n * (See also flush_pending()).\n */\nfunction read_buf(strm, buf, start, size) {\n var len = strm.avail_in;\n\n if (len > size) { len = size; }\n if (len === 0) { return 0; }\n\n strm.avail_in -= len;\n\n // zmemcpy(buf, strm->next_in, len);\n utils.arraySet(buf, strm.input, strm.next_in, len, start);\n if (strm.state.wrap === 1) {\n strm.adler = adler32(strm.adler, buf, len, start);\n }\n\n else if (strm.state.wrap === 2) {\n strm.adler = crc32(strm.adler, buf, len, start);\n }\n\n strm.next_in += len;\n strm.total_in += len;\n\n return len;\n}\n\n\n/* ===========================================================================\n * Set match_start to the longest match starting at the given string and\n * return its length. 
Matches shorter or equal to prev_length are discarded,\n * in which case the result is equal to prev_length and match_start is\n * garbage.\n * IN assertions: cur_match is the head of the hash chain for the current\n * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1\n * OUT assertion: the match length is not greater than s->lookahead.\n */\nfunction longest_match(s, cur_match) {\n var chain_length = s.max_chain_length; /* max hash chain length */\n var scan = s.strstart; /* current string */\n var match; /* matched string */\n var len; /* length of current match */\n var best_len = s.prev_length; /* best match length so far */\n var nice_match = s.nice_match; /* stop if match long enough */\n var limit = (s.strstart > (s.w_size - MIN_LOOKAHEAD)) ?\n s.strstart - (s.w_size - MIN_LOOKAHEAD) : 0/*NIL*/;\n\n var _win = s.window; // shortcut\n\n var wmask = s.w_mask;\n var prev = s.prev;\n\n /* Stop when cur_match becomes <= limit. To simplify the code,\n * we prevent matches with the string of window index 0.\n */\n\n var strend = s.strstart + MAX_MATCH;\n var scan_end1 = _win[scan + best_len - 1];\n var scan_end = _win[scan + best_len];\n\n /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.\n * It is easy to get rid of this optimization if necessary.\n */\n // Assert(s->hash_bits >= 8 && MAX_MATCH == 258, \"Code too clever\");\n\n /* Do not waste too much time if we already have a good match: */\n if (s.prev_length >= s.good_match) {\n chain_length >>= 2;\n }\n /* Do not look for matches beyond the end of the input. This is necessary\n * to make deflate deterministic.\n */\n if (nice_match > s.lookahead) { nice_match = s.lookahead; }\n\n // Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, \"need lookahead\");\n\n do {\n // Assert(cur_match < s->strstart, \"no future\");\n match = cur_match;\n\n /* Skip to next match if the match length cannot increase\n * or if the match length is less than 2. Note that the checks below\n * for insufficient lookahead only occur occasionally for performance\n * reasons. Therefore uninitialized memory will be accessed, and\n * conditional jumps will be made that depend on those values.\n * However the length of the match is limited to the lookahead, so\n * the output of deflate is not affected by the uninitialized values.\n */\n\n if (_win[match + best_len] !== scan_end ||\n _win[match + best_len - 1] !== scan_end1 ||\n _win[match] !== _win[scan] ||\n _win[++match] !== _win[scan + 1]) {\n continue;\n }\n\n /* The check at best_len-1 can be removed because it will be made\n * again later. 
(This heuristic is not always a win.)\n * It is not necessary to compare scan[2] and match[2] since they\n * are always equal when the other bytes match, given that\n * the hash keys are equal and that HASH_BITS >= 8.\n */\n scan += 2;\n match++;\n // Assert(*scan == *match, \"match[2]?\");\n\n /* We check for insufficient lookahead only every 8th comparison;\n * the 256th check will be made at strstart+258.\n */\n do {\n /*jshint noempty:false*/\n } while (_win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&\n _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&\n _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&\n _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&\n scan < strend);\n\n // Assert(scan <= s->window+(unsigned)(s->window_size-1), \"wild scan\");\n\n len = MAX_MATCH - (strend - scan);\n scan = strend - MAX_MATCH;\n\n if (len > best_len) {\n s.match_start = cur_match;\n best_len = len;\n if (len >= nice_match) {\n break;\n }\n scan_end1 = _win[scan + best_len - 1];\n scan_end = _win[scan + best_len];\n }\n } while ((cur_match = prev[cur_match & wmask]) > limit && --chain_length !== 0);\n\n if (best_len <= s.lookahead) {\n return best_len;\n }\n return s.lookahead;\n}\n\n\n/* ===========================================================================\n * Fill the window when the lookahead becomes insufficient.\n * Updates strstart and lookahead.\n *\n * IN assertion: lookahead < MIN_LOOKAHEAD\n * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD\n * At least one byte has been read, or avail_in == 0; reads are\n * performed for at least two bytes (required for the zip translate_eol\n * option -- not supported here).\n */\nfunction fill_window(s) {\n var _w_size = s.w_size;\n var p, n, m, more, str;\n\n //Assert(s->lookahead < MIN_LOOKAHEAD, \"already enough lookahead\");\n\n do {\n more = s.window_size - s.lookahead - s.strstart;\n\n // JS ints have 32 bit, block below not needed\n /* Deal with !@#$% 64K limit: */\n //if (sizeof(int) <= 2) {\n // if (more == 0 && s->strstart == 0 && s->lookahead == 0) {\n // more = wsize;\n //\n // } else if (more == (unsigned)(-1)) {\n // /* Very unlikely, but possible on 16 bit machine if\n // * strstart == 0 && lookahead == 1 (input done a byte at time)\n // */\n // more--;\n // }\n //}\n\n\n /* If the window is almost full and there is insufficient lookahead,\n * move the upper half to the lower one to make room in the upper half.\n */\n if (s.strstart >= _w_size + (_w_size - MIN_LOOKAHEAD)) {\n\n utils.arraySet(s.window, s.window, _w_size, _w_size, 0);\n s.match_start -= _w_size;\n s.strstart -= _w_size;\n /* we now have strstart >= MAX_DIST */\n s.block_start -= _w_size;\n\n /* Slide the hash table (could be avoided with 32 bit values\n at the expense of memory usage). We slide even when level == 0\n to keep the hash table consistent if we switch back to level > 0\n later. (Using level 0 permanently is not an optimal usage of\n zlib, so we don't care about this pathological case.)\n */\n\n n = s.hash_size;\n p = n;\n do {\n m = s.head[--p];\n s.head[p] = (m >= _w_size ? m - _w_size : 0);\n } while (--n);\n\n n = _w_size;\n p = n;\n do {\n m = s.prev[--p];\n s.prev[p] = (m >= _w_size ? 
m - _w_size : 0);\n /* If n is not on any hash chain, prev[n] is garbage but\n * its value will never be used.\n */\n } while (--n);\n\n more += _w_size;\n }\n if (s.strm.avail_in === 0) {\n break;\n }\n\n /* If there was no sliding:\n * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&\n * more == window_size - lookahead - strstart\n * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)\n * => more >= window_size - 2*WSIZE + 2\n * In the BIG_MEM or MMAP case (not yet supported),\n * window_size == input_size + MIN_LOOKAHEAD &&\n * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.\n * Otherwise, window_size == 2*WSIZE so more >= 2.\n * If there was sliding, more >= WSIZE. So in all cases, more >= 2.\n */\n //Assert(more >= 2, \"more < 2\");\n n = read_buf(s.strm, s.window, s.strstart + s.lookahead, more);\n s.lookahead += n;\n\n /* Initialize the hash value now that we have some input: */\n if (s.lookahead + s.insert >= MIN_MATCH) {\n str = s.strstart - s.insert;\n s.ins_h = s.window[str];\n\n /* UPDATE_HASH(s, s->ins_h, s->window[str + 1]); */\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + 1]) & s.hash_mask;\n//#if MIN_MATCH != 3\n// Call update_hash() MIN_MATCH-3 more times\n//#endif\n while (s.insert) {\n /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + MIN_MATCH - 1]) & s.hash_mask;\n\n s.prev[str & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = str;\n str++;\n s.insert--;\n if (s.lookahead + s.insert < MIN_MATCH) {\n break;\n }\n }\n }\n /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,\n * but this is not important since only literal bytes will be emitted.\n */\n\n } while (s.lookahead < MIN_LOOKAHEAD && s.strm.avail_in !== 0);\n\n /* If the WIN_INIT bytes after the end of the current data have never been\n * written, then zero those bytes in order to avoid memory check reports of\n * the use of uninitialized (or uninitialised as Julian writes) bytes by\n * the longest match routines. Update the high water mark for the next\n * time through here. 
WIN_INIT is set to MAX_MATCH since the longest match\n * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.\n */\n// if (s.high_water < s.window_size) {\n// var curr = s.strstart + s.lookahead;\n// var init = 0;\n//\n// if (s.high_water < curr) {\n// /* Previous high water mark below current data -- zero WIN_INIT\n// * bytes or up to end of window, whichever is less.\n// */\n// init = s.window_size - curr;\n// if (init > WIN_INIT)\n// init = WIN_INIT;\n// zmemzero(s->window + curr, (unsigned)init);\n// s->high_water = curr + init;\n// }\n// else if (s->high_water < (ulg)curr + WIN_INIT) {\n// /* High water mark at or above current data, but below current data\n// * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up\n// * to end of window, whichever is less.\n// */\n// init = (ulg)curr + WIN_INIT - s->high_water;\n// if (init > s->window_size - s->high_water)\n// init = s->window_size - s->high_water;\n// zmemzero(s->window + s->high_water, (unsigned)init);\n// s->high_water += init;\n// }\n// }\n//\n// Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,\n// \"not enough room for search\");\n}\n\n/* ===========================================================================\n * Copy without compression as much as possible from the input stream, return\n * the current block state.\n * This function does not insert new strings in the dictionary since\n * uncompressible data is probably not useful. This function is used\n * only for the level=0 compression option.\n * NOTE: this function should be optimized to avoid extra copying from\n * window to pending_buf.\n */\nfunction deflate_stored(s, flush) {\n /* Stored blocks are limited to 0xffff bytes, pending_buf is limited\n * to pending_buf_size, and each stored block has a 5 byte header:\n */\n var max_block_size = 0xffff;\n\n if (max_block_size > s.pending_buf_size - 5) {\n max_block_size = s.pending_buf_size - 5;\n }\n\n /* Copy as much as possible from input to output: */\n for (;;) {\n /* Fill the window as much as possible: */\n if (s.lookahead <= 1) {\n\n //Assert(s->strstart < s->w_size+MAX_DIST(s) ||\n // s->block_start >= (long)s->w_size, \"slide too late\");\n// if (!(s.strstart < s.w_size + (s.w_size - MIN_LOOKAHEAD) ||\n// s.block_start >= s.w_size)) {\n// throw new Error(\"slide too late\");\n// }\n\n fill_window(s);\n if (s.lookahead === 0 && flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n\n if (s.lookahead === 0) {\n break;\n }\n /* flush the current block */\n }\n //Assert(s->block_start >= 0L, \"block gone\");\n// if (s.block_start < 0) throw new Error(\"block gone\");\n\n s.strstart += s.lookahead;\n s.lookahead = 0;\n\n /* Emit a stored block if pending_buf will be full: */\n var max_start = s.block_start + max_block_size;\n\n if (s.strstart === 0 || s.strstart >= max_start) {\n /* strstart == 0 is possible when wraparound on 16-bit machine */\n s.lookahead = s.strstart - max_start;\n s.strstart = max_start;\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n\n\n }\n /* Flush if we may have to slide, otherwise block_start may become\n * negative and the data will be gone:\n */\n if (s.strstart - s.block_start >= (s.w_size - MIN_LOOKAHEAD)) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n }\n\n s.insert = 0;\n\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out 
=== 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n\n if (s.strstart > s.block_start) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n\n return BS_NEED_MORE;\n}\n\n/* ===========================================================================\n * Compress as much as possible from the input stream, return the current\n * block state.\n * This function does not perform lazy evaluation of matches and inserts\n * new strings in the dictionary only for unmatched strings or for short\n * matches. It is used only for the fast compression options.\n */\nfunction deflate_fast(s, flush) {\n var hash_head; /* head of the hash chain */\n var bflush; /* set if current block must be flushed */\n\n for (;;) {\n /* Make sure that we always have enough lookahead, except\n * at the end of the input file. We need MAX_MATCH bytes\n * for the next match, plus MIN_MATCH bytes to insert the\n * string following the next match.\n */\n if (s.lookahead < MIN_LOOKAHEAD) {\n fill_window(s);\n if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n if (s.lookahead === 0) {\n break; /* flush the current block */\n }\n }\n\n /* Insert the string window[strstart .. strstart+2] in the\n * dictionary, and set hash_head to the head of the hash chain:\n */\n hash_head = 0/*NIL*/;\n if (s.lookahead >= MIN_MATCH) {\n /*** INSERT_STRING(s, s.strstart, hash_head); ***/\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;\n hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = s.strstart;\n /***/\n }\n\n /* Find the longest match, discarding those <= prev_length.\n * At this point we have always match_length < MIN_MATCH\n */\n if (hash_head !== 0/*NIL*/ && ((s.strstart - hash_head) <= (s.w_size - MIN_LOOKAHEAD))) {\n /* To simplify the code, we prevent matches with the string\n * of window index 0 (in particular we have to avoid a match\n * of the string with itself at the start of the input file).\n */\n s.match_length = longest_match(s, hash_head);\n /* longest_match() sets match_start */\n }\n if (s.match_length >= MIN_MATCH) {\n // check_match(s, s.strstart, s.match_start, s.match_length); // for debug only\n\n /*** _tr_tally_dist(s, s.strstart - s.match_start,\n s.match_length - MIN_MATCH, bflush); ***/\n bflush = trees._tr_tally(s, s.strstart - s.match_start, s.match_length - MIN_MATCH);\n\n s.lookahead -= s.match_length;\n\n /* Insert new strings in the hash table only if the match length\n * is not too large. 
This saves time but degrades compression.\n */\n if (s.match_length <= s.max_lazy_match/*max_insert_length*/ && s.lookahead >= MIN_MATCH) {\n s.match_length--; /* string at strstart already in table */\n do {\n s.strstart++;\n /*** INSERT_STRING(s, s.strstart, hash_head); ***/\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;\n hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = s.strstart;\n /***/\n /* strstart never exceeds WSIZE-MAX_MATCH, so there are\n * always MIN_MATCH bytes ahead.\n */\n } while (--s.match_length !== 0);\n s.strstart++;\n } else\n {\n s.strstart += s.match_length;\n s.match_length = 0;\n s.ins_h = s.window[s.strstart];\n /* UPDATE_HASH(s, s.ins_h, s.window[s.strstart+1]); */\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + 1]) & s.hash_mask;\n\n//#if MIN_MATCH != 3\n// Call UPDATE_HASH() MIN_MATCH-3 more times\n//#endif\n /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not\n * matter since it will be recomputed at next deflate call.\n */\n }\n } else {\n /* No match, output a literal byte */\n //Tracevv((stderr,\"%c\", s.window[s.strstart]));\n /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/\n bflush = trees._tr_tally(s, 0, s.window[s.strstart]);\n\n s.lookahead--;\n s.strstart++;\n }\n if (bflush) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n }\n s.insert = ((s.strstart < (MIN_MATCH - 1)) ? s.strstart : MIN_MATCH - 1);\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out === 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n if (s.last_lit) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n return BS_BLOCK_DONE;\n}\n\n/* ===========================================================================\n * Same as above, but achieves better compression. We use a lazy\n * evaluation for matches: a match is finally adopted only if there is\n * no better match at the next window position.\n */\nfunction deflate_slow(s, flush) {\n var hash_head; /* head of hash chain */\n var bflush; /* set if current block must be flushed */\n\n var max_insert;\n\n /* Process the input block. */\n for (;;) {\n /* Make sure that we always have enough lookahead, except\n * at the end of the input file. We need MAX_MATCH bytes\n * for the next match, plus MIN_MATCH bytes to insert the\n * string following the next match.\n */\n if (s.lookahead < MIN_LOOKAHEAD) {\n fill_window(s);\n if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n if (s.lookahead === 0) { break; } /* flush the current block */\n }\n\n /* Insert the string window[strstart .. 
strstart+2] in the\n * dictionary, and set hash_head to the head of the hash chain:\n */\n hash_head = 0/*NIL*/;\n if (s.lookahead >= MIN_MATCH) {\n /*** INSERT_STRING(s, s.strstart, hash_head); ***/\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;\n hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = s.strstart;\n /***/\n }\n\n /* Find the longest match, discarding those <= prev_length.\n */\n s.prev_length = s.match_length;\n s.prev_match = s.match_start;\n s.match_length = MIN_MATCH - 1;\n\n if (hash_head !== 0/*NIL*/ && s.prev_length < s.max_lazy_match &&\n s.strstart - hash_head <= (s.w_size - MIN_LOOKAHEAD)/*MAX_DIST(s)*/) {\n /* To simplify the code, we prevent matches with the string\n * of window index 0 (in particular we have to avoid a match\n * of the string with itself at the start of the input file).\n */\n s.match_length = longest_match(s, hash_head);\n /* longest_match() sets match_start */\n\n if (s.match_length <= 5 &&\n (s.strategy === Z_FILTERED || (s.match_length === MIN_MATCH && s.strstart - s.match_start > 4096/*TOO_FAR*/))) {\n\n /* If prev_match is also MIN_MATCH, match_start is garbage\n * but we will ignore the current match anyway.\n */\n s.match_length = MIN_MATCH - 1;\n }\n }\n /* If there was a match at the previous step and the current\n * match is not better, output the previous match:\n */\n if (s.prev_length >= MIN_MATCH && s.match_length <= s.prev_length) {\n max_insert = s.strstart + s.lookahead - MIN_MATCH;\n /* Do not insert strings in hash table beyond this. */\n\n //check_match(s, s.strstart-1, s.prev_match, s.prev_length);\n\n /***_tr_tally_dist(s, s.strstart - 1 - s.prev_match,\n s.prev_length - MIN_MATCH, bflush);***/\n bflush = trees._tr_tally(s, s.strstart - 1 - s.prev_match, s.prev_length - MIN_MATCH);\n /* Insert in hash table all strings up to the end of the match.\n * strstart-1 and strstart are already inserted. If there is not\n * enough lookahead, the last two strings are not inserted in\n * the hash table.\n */\n s.lookahead -= s.prev_length - 1;\n s.prev_length -= 2;\n do {\n if (++s.strstart <= max_insert) {\n /*** INSERT_STRING(s, s.strstart, hash_head); ***/\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;\n hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = s.strstart;\n /***/\n }\n } while (--s.prev_length !== 0);\n s.match_available = 0;\n s.match_length = MIN_MATCH - 1;\n s.strstart++;\n\n if (bflush) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n\n } else if (s.match_available) {\n /* If there was no match at the previous position, output a\n * single literal. 
If there was a match but the current match\n * is longer, truncate the previous match to a single literal.\n */\n //Tracevv((stderr,\"%c\", s->window[s->strstart-1]));\n /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/\n bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]);\n\n if (bflush) {\n /*** FLUSH_BLOCK_ONLY(s, 0) ***/\n flush_block_only(s, false);\n /***/\n }\n s.strstart++;\n s.lookahead--;\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n } else {\n /* There is no previous match to compare with, wait for\n * the next step to decide.\n */\n s.match_available = 1;\n s.strstart++;\n s.lookahead--;\n }\n }\n //Assert (flush != Z_NO_FLUSH, \"no flush?\");\n if (s.match_available) {\n //Tracevv((stderr,\"%c\", s->window[s->strstart-1]));\n /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/\n bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]);\n\n s.match_available = 0;\n }\n s.insert = s.strstart < MIN_MATCH - 1 ? s.strstart : MIN_MATCH - 1;\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out === 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n if (s.last_lit) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n\n return BS_BLOCK_DONE;\n}\n\n\n/* ===========================================================================\n * For Z_RLE, simply look for runs of bytes, generate matches only of distance\n * one. Do not maintain a hash table. (It will be regenerated if this run of\n * deflate switches away from Z_RLE.)\n */\nfunction deflate_rle(s, flush) {\n var bflush; /* set if current block must be flushed */\n var prev; /* byte at distance one to match */\n var scan, strend; /* scan goes up to strend for length of run */\n\n var _win = s.window;\n\n for (;;) {\n /* Make sure that we always have enough lookahead, except\n * at the end of the input file. 
We need MAX_MATCH bytes\n * for the longest run, plus one for the unrolled loop.\n */\n if (s.lookahead <= MAX_MATCH) {\n fill_window(s);\n if (s.lookahead <= MAX_MATCH && flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n if (s.lookahead === 0) { break; } /* flush the current block */\n }\n\n /* See how many times the previous byte repeats */\n s.match_length = 0;\n if (s.lookahead >= MIN_MATCH && s.strstart > 0) {\n scan = s.strstart - 1;\n prev = _win[scan];\n if (prev === _win[++scan] && prev === _win[++scan] && prev === _win[++scan]) {\n strend = s.strstart + MAX_MATCH;\n do {\n /*jshint noempty:false*/\n } while (prev === _win[++scan] && prev === _win[++scan] &&\n prev === _win[++scan] && prev === _win[++scan] &&\n prev === _win[++scan] && prev === _win[++scan] &&\n prev === _win[++scan] && prev === _win[++scan] &&\n scan < strend);\n s.match_length = MAX_MATCH - (strend - scan);\n if (s.match_length > s.lookahead) {\n s.match_length = s.lookahead;\n }\n }\n //Assert(scan <= s->window+(uInt)(s->window_size-1), \"wild scan\");\n }\n\n /* Emit match if have run of MIN_MATCH or longer, else emit literal */\n if (s.match_length >= MIN_MATCH) {\n //check_match(s, s.strstart, s.strstart - 1, s.match_length);\n\n /*** _tr_tally_dist(s, 1, s.match_length - MIN_MATCH, bflush); ***/\n bflush = trees._tr_tally(s, 1, s.match_length - MIN_MATCH);\n\n s.lookahead -= s.match_length;\n s.strstart += s.match_length;\n s.match_length = 0;\n } else {\n /* No match, output a literal byte */\n //Tracevv((stderr,\"%c\", s->window[s->strstart]));\n /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/\n bflush = trees._tr_tally(s, 0, s.window[s.strstart]);\n\n s.lookahead--;\n s.strstart++;\n }\n if (bflush) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n }\n s.insert = 0;\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out === 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n if (s.last_lit) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n return BS_BLOCK_DONE;\n}\n\n/* ===========================================================================\n * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table.\n * (It will be regenerated if this run of deflate switches away from Huffman.)\n */\nfunction deflate_huff(s, flush) {\n var bflush; /* set if current block must be flushed */\n\n for (;;) {\n /* Make sure that we have a literal to write. 
*/\n if (s.lookahead === 0) {\n fill_window(s);\n if (s.lookahead === 0) {\n if (flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n break; /* flush the current block */\n }\n }\n\n /* Output a literal byte */\n s.match_length = 0;\n //Tracevv((stderr,\"%c\", s->window[s->strstart]));\n /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/\n bflush = trees._tr_tally(s, 0, s.window[s.strstart]);\n s.lookahead--;\n s.strstart++;\n if (bflush) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n }\n s.insert = 0;\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out === 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n if (s.last_lit) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n return BS_BLOCK_DONE;\n}\n\n/* Values for max_lazy_match, good_match and max_chain_length, depending on\n * the desired pack level (0..9). The values given below have been tuned to\n * exclude worst case performance for pathological files. Better values may be\n * found for specific files.\n */\nfunction Config(good_length, max_lazy, nice_length, max_chain, func) {\n this.good_length = good_length;\n this.max_lazy = max_lazy;\n this.nice_length = nice_length;\n this.max_chain = max_chain;\n this.func = func;\n}\n\nvar configuration_table;\n\nconfiguration_table = [\n /* good lazy nice chain */\n new Config(0, 0, 0, 0, deflate_stored), /* 0 store only */\n new Config(4, 4, 8, 4, deflate_fast), /* 1 max speed, no lazy matches */\n new Config(4, 5, 16, 8, deflate_fast), /* 2 */\n new Config(4, 6, 32, 32, deflate_fast), /* 3 */\n\n new Config(4, 4, 16, 16, deflate_slow), /* 4 lazy matches */\n new Config(8, 16, 32, 32, deflate_slow), /* 5 */\n new Config(8, 16, 128, 128, deflate_slow), /* 6 */\n new Config(8, 32, 128, 256, deflate_slow), /* 7 */\n new Config(32, 128, 258, 1024, deflate_slow), /* 8 */\n new Config(32, 258, 258, 4096, deflate_slow) /* 9 max compression */\n];\n\n\n/* ===========================================================================\n * Initialize the \"longest match\" routines for a new zlib stream\n */\nfunction lm_init(s) {\n s.window_size = 2 * s.w_size;\n\n /*** CLEAR_HASH(s); ***/\n zero(s.head); // Fill with NIL (= 0);\n\n /* Set the default configuration parameters:\n */\n s.max_lazy_match = configuration_table[s.level].max_lazy;\n s.good_match = configuration_table[s.level].good_length;\n s.nice_match = configuration_table[s.level].nice_length;\n s.max_chain_length = configuration_table[s.level].max_chain;\n\n s.strstart = 0;\n s.block_start = 0;\n s.lookahead = 0;\n s.insert = 0;\n s.match_length = s.prev_length = MIN_MATCH - 1;\n s.match_available = 0;\n s.ins_h = 0;\n}\n\n\nfunction DeflateState() {\n this.strm = null; /* pointer back to this zlib stream */\n this.status = 0; /* as the name implies */\n this.pending_buf = null; /* output still pending */\n this.pending_buf_size = 0; /* size of pending_buf */\n this.pending_out = 0; /* next pending byte to output to the stream */\n this.pending = 0; /* nb of bytes in the pending buffer */\n this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */\n this.gzhead = null; /* gzip header information to write */\n this.gzindex = 0; /* where in extra, name, or comment */\n this.method = Z_DEFLATED; /* can only be DEFLATED */\n this.last_flush = -1; /* value of flush param for 
previous deflate call */\n\n this.w_size = 0; /* LZ77 window size (32K by default) */\n this.w_bits = 0; /* log2(w_size) (8..16) */\n this.w_mask = 0; /* w_size - 1 */\n\n this.window = null;\n /* Sliding window. Input bytes are read into the second half of the window,\n * and move to the first half later to keep a dictionary of at least wSize\n * bytes. With this organization, matches are limited to a distance of\n * wSize-MAX_MATCH bytes, but this ensures that IO is always\n * performed with a length multiple of the block size.\n */\n\n this.window_size = 0;\n /* Actual size of window: 2*wSize, except when the user input buffer\n * is directly used as sliding window.\n */\n\n this.prev = null;\n /* Link to older string with same hash index. To limit the size of this\n * array to 64K, this link is maintained only for the last 32K strings.\n * An index in this array is thus a window index modulo 32K.\n */\n\n this.head = null; /* Heads of the hash chains or NIL. */\n\n this.ins_h = 0; /* hash index of string to be inserted */\n this.hash_size = 0; /* number of elements in hash table */\n this.hash_bits = 0; /* log2(hash_size) */\n this.hash_mask = 0; /* hash_size-1 */\n\n this.hash_shift = 0;\n /* Number of bits by which ins_h must be shifted at each input\n * step. It must be such that after MIN_MATCH steps, the oldest\n * byte no longer takes part in the hash key, that is:\n * hash_shift * MIN_MATCH >= hash_bits\n */\n\n this.block_start = 0;\n /* Window position at the beginning of the current output block. Gets\n * negative when the window is moved backwards.\n */\n\n this.match_length = 0; /* length of best match */\n this.prev_match = 0; /* previous match */\n this.match_available = 0; /* set if previous match exists */\n this.strstart = 0; /* start of string to insert */\n this.match_start = 0; /* start of matching string */\n this.lookahead = 0; /* number of valid bytes ahead in window */\n\n this.prev_length = 0;\n /* Length of the best match at previous step. Matches not greater than this\n * are discarded. This is used in the lazy match evaluation.\n */\n\n this.max_chain_length = 0;\n /* To speed up deflation, hash chains are never searched beyond this\n * length. A higher limit improves compression ratio but degrades the\n * speed.\n */\n\n this.max_lazy_match = 0;\n /* Attempt to find a better match only when the current match is strictly\n * smaller than this value. This mechanism is used only for compression\n * levels >= 4.\n */\n // That's alias to max_lazy_match, don't use directly\n //this.max_insert_length = 0;\n /* Insert new strings in the hash table only if the match length is not\n * greater than this length. 
This saves time but degrades compression.\n * max_insert_length is used only for compression levels <= 3.\n */\n\n this.level = 0; /* compression level (1..9) */\n this.strategy = 0; /* favor or force Huffman coding*/\n\n this.good_match = 0;\n /* Use a faster search when the previous match is longer than this */\n\n this.nice_match = 0; /* Stop searching when current match exceeds this */\n\n /* used by trees.c: */\n\n /* Didn't use ct_data typedef below to suppress compiler warning */\n\n // struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */\n // struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */\n // struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */\n\n // Use flat array of DOUBLE size, with interleaved fata,\n // because JS does not support effective\n this.dyn_ltree = new utils.Buf16(HEAP_SIZE * 2);\n this.dyn_dtree = new utils.Buf16((2 * D_CODES + 1) * 2);\n this.bl_tree = new utils.Buf16((2 * BL_CODES + 1) * 2);\n zero(this.dyn_ltree);\n zero(this.dyn_dtree);\n zero(this.bl_tree);\n\n this.l_desc = null; /* desc. for literal tree */\n this.d_desc = null; /* desc. for distance tree */\n this.bl_desc = null; /* desc. for bit length tree */\n\n //ush bl_count[MAX_BITS+1];\n this.bl_count = new utils.Buf16(MAX_BITS + 1);\n /* number of codes at each bit length for an optimal tree */\n\n //int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */\n this.heap = new utils.Buf16(2 * L_CODES + 1); /* heap used to build the Huffman trees */\n zero(this.heap);\n\n this.heap_len = 0; /* number of elements in the heap */\n this.heap_max = 0; /* element of largest frequency */\n /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.\n * The same heap array is used to build all trees.\n */\n\n this.depth = new utils.Buf16(2 * L_CODES + 1); //uch depth[2*L_CODES+1];\n zero(this.depth);\n /* Depth of each subtree used as tie breaker for trees of equal frequency\n */\n\n this.l_buf = 0; /* buffer index for literals or lengths */\n\n this.lit_bufsize = 0;\n /* Size of match buffer for literals/lengths. There are 4 reasons for\n * limiting lit_bufsize to 64K:\n * - frequencies can be kept in 16 bit counters\n * - if compression is not successful for the first block, all input\n * data is still in the window so we can still emit a stored block even\n * when input comes from standard input. (This can also be done for\n * all blocks if lit_bufsize is not greater than 32K.)\n * - if compression is not successful for a file smaller than 64K, we can\n * even emit a stored file instead of a stored block (saving 5 bytes).\n * This is applicable only for zip (not gzip or zlib).\n * - creating new Huffman trees less frequently may not provide fast\n * adaptation to changes in the input data statistics. (Take for\n * example a binary file with poorly compressible code followed by\n * a highly compressible string table.) Smaller buffer sizes give\n * fast adaptation but have of course the overhead of transmitting\n * trees more frequently.\n * - I can't count above 4\n */\n\n this.last_lit = 0; /* running index in l_buf */\n\n this.d_buf = 0;\n /* Buffer index for distances. To simplify the code, d_buf and l_buf have\n * the same number of elements. 
To use different lengths, an extra flag\n * array would be necessary.\n */\n\n this.opt_len = 0; /* bit length of current block with optimal trees */\n this.static_len = 0; /* bit length of current block with static trees */\n this.matches = 0; /* number of string matches in current block */\n this.insert = 0; /* bytes at end of window left to insert */\n\n\n this.bi_buf = 0;\n /* Output buffer. bits are inserted starting at the bottom (least\n * significant bits).\n */\n this.bi_valid = 0;\n /* Number of valid bits in bi_buf. All bits above the last valid bit\n * are always zero.\n */\n\n // Used for window memory init. We safely ignore it for JS. That makes\n // sense only for pointers and memory check tools.\n //this.high_water = 0;\n /* High water mark offset in window for initialized bytes -- bytes above\n * this are set to zero in order to avoid memory check warnings when\n * longest match routines access bytes past the input. This is then\n * updated to the new high water mark.\n */\n}\n\n\nfunction deflateResetKeep(strm) {\n var s;\n\n if (!strm || !strm.state) {\n return err(strm, Z_STREAM_ERROR);\n }\n\n strm.total_in = strm.total_out = 0;\n strm.data_type = Z_UNKNOWN;\n\n s = strm.state;\n s.pending = 0;\n s.pending_out = 0;\n\n if (s.wrap < 0) {\n s.wrap = -s.wrap;\n /* was made negative by deflate(..., Z_FINISH); */\n }\n s.status = (s.wrap ? INIT_STATE : BUSY_STATE);\n strm.adler = (s.wrap === 2) ?\n 0 // crc32(0, Z_NULL, 0)\n :\n 1; // adler32(0, Z_NULL, 0)\n s.last_flush = Z_NO_FLUSH;\n trees._tr_init(s);\n return Z_OK;\n}\n\n\nfunction deflateReset(strm) {\n var ret = deflateResetKeep(strm);\n if (ret === Z_OK) {\n lm_init(strm.state);\n }\n return ret;\n}\n\n\nfunction deflateSetHeader(strm, head) {\n if (!strm || !strm.state) { return Z_STREAM_ERROR; }\n if (strm.state.wrap !== 2) { return Z_STREAM_ERROR; }\n strm.state.gzhead = head;\n return Z_OK;\n}\n\n\nfunction deflateInit2(strm, level, method, windowBits, memLevel, strategy) {\n if (!strm) { // === Z_NULL\n return Z_STREAM_ERROR;\n }\n var wrap = 1;\n\n if (level === Z_DEFAULT_COMPRESSION) {\n level = 6;\n }\n\n if (windowBits < 0) { /* suppress zlib wrapper */\n wrap = 0;\n windowBits = -windowBits;\n }\n\n else if (windowBits > 15) {\n wrap = 2; /* write gzip wrapper instead */\n windowBits -= 16;\n }\n\n\n if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method !== Z_DEFLATED ||\n windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||\n strategy < 0 || strategy > Z_FIXED) {\n return err(strm, Z_STREAM_ERROR);\n }\n\n\n if (windowBits === 8) {\n windowBits = 9;\n }\n /* until 256-byte window bug fixed */\n\n var s = new DeflateState();\n\n strm.state = s;\n s.strm = strm;\n\n s.wrap = wrap;\n s.gzhead = null;\n s.w_bits = windowBits;\n s.w_size = 1 << s.w_bits;\n s.w_mask = s.w_size - 1;\n\n s.hash_bits = memLevel + 7;\n s.hash_size = 1 << s.hash_bits;\n s.hash_mask = s.hash_size - 1;\n s.hash_shift = ~~((s.hash_bits + MIN_MATCH - 1) / MIN_MATCH);\n\n s.window = new utils.Buf8(s.w_size * 2);\n s.head = new utils.Buf16(s.hash_size);\n s.prev = new utils.Buf16(s.w_size);\n\n // Don't need mem init magic for JS.\n //s.high_water = 0; /* nothing written to s->window yet */\n\n s.lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */\n\n s.pending_buf_size = s.lit_bufsize * 4;\n\n //overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);\n //s->pending_buf = (uchf *) overlay;\n s.pending_buf = new utils.Buf8(s.pending_buf_size);\n\n // It is offset from `s.pending_buf` (size is `s.lit_bufsize 
* 2`)\n //s->d_buf = overlay + s->lit_bufsize/sizeof(ush);\n s.d_buf = 1 * s.lit_bufsize;\n\n //s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;\n s.l_buf = (1 + 2) * s.lit_bufsize;\n\n s.level = level;\n s.strategy = strategy;\n s.method = method;\n\n return deflateReset(strm);\n}\n\nfunction deflateInit(strm, level) {\n return deflateInit2(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);\n}\n\n\nfunction deflate(strm, flush) {\n var old_flush, s;\n var beg, val; // for gzip header write only\n\n if (!strm || !strm.state ||\n flush > Z_BLOCK || flush < 0) {\n return strm ? err(strm, Z_STREAM_ERROR) : Z_STREAM_ERROR;\n }\n\n s = strm.state;\n\n if (!strm.output ||\n (!strm.input && strm.avail_in !== 0) ||\n (s.status === FINISH_STATE && flush !== Z_FINISH)) {\n return err(strm, (strm.avail_out === 0) ? Z_BUF_ERROR : Z_STREAM_ERROR);\n }\n\n s.strm = strm; /* just in case */\n old_flush = s.last_flush;\n s.last_flush = flush;\n\n /* Write the header */\n if (s.status === INIT_STATE) {\n\n if (s.wrap === 2) { // GZIP header\n strm.adler = 0; //crc32(0L, Z_NULL, 0);\n put_byte(s, 31);\n put_byte(s, 139);\n put_byte(s, 8);\n if (!s.gzhead) { // s->gzhead == Z_NULL\n put_byte(s, 0);\n put_byte(s, 0);\n put_byte(s, 0);\n put_byte(s, 0);\n put_byte(s, 0);\n put_byte(s, s.level === 9 ? 2 :\n (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ?\n 4 : 0));\n put_byte(s, OS_CODE);\n s.status = BUSY_STATE;\n }\n else {\n put_byte(s, (s.gzhead.text ? 1 : 0) +\n (s.gzhead.hcrc ? 2 : 0) +\n (!s.gzhead.extra ? 0 : 4) +\n (!s.gzhead.name ? 0 : 8) +\n (!s.gzhead.comment ? 0 : 16)\n );\n put_byte(s, s.gzhead.time & 0xff);\n put_byte(s, (s.gzhead.time >> 8) & 0xff);\n put_byte(s, (s.gzhead.time >> 16) & 0xff);\n put_byte(s, (s.gzhead.time >> 24) & 0xff);\n put_byte(s, s.level === 9 ? 
2 :\n (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ?\n 4 : 0));\n put_byte(s, s.gzhead.os & 0xff);\n if (s.gzhead.extra && s.gzhead.extra.length) {\n put_byte(s, s.gzhead.extra.length & 0xff);\n put_byte(s, (s.gzhead.extra.length >> 8) & 0xff);\n }\n if (s.gzhead.hcrc) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending, 0);\n }\n s.gzindex = 0;\n s.status = EXTRA_STATE;\n }\n }\n else // DEFLATE header\n {\n var header = (Z_DEFLATED + ((s.w_bits - 8) << 4)) << 8;\n var level_flags = -1;\n\n if (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2) {\n level_flags = 0;\n } else if (s.level < 6) {\n level_flags = 1;\n } else if (s.level === 6) {\n level_flags = 2;\n } else {\n level_flags = 3;\n }\n header |= (level_flags << 6);\n if (s.strstart !== 0) { header |= PRESET_DICT; }\n header += 31 - (header % 31);\n\n s.status = BUSY_STATE;\n putShortMSB(s, header);\n\n /* Save the adler32 of the preset dictionary: */\n if (s.strstart !== 0) {\n putShortMSB(s, strm.adler >>> 16);\n putShortMSB(s, strm.adler & 0xffff);\n }\n strm.adler = 1; // adler32(0L, Z_NULL, 0);\n }\n }\n\n//#ifdef GZIP\n if (s.status === EXTRA_STATE) {\n if (s.gzhead.extra/* != Z_NULL*/) {\n beg = s.pending; /* start of bytes to update crc */\n\n while (s.gzindex < (s.gzhead.extra.length & 0xffff)) {\n if (s.pending === s.pending_buf_size) {\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n flush_pending(strm);\n beg = s.pending;\n if (s.pending === s.pending_buf_size) {\n break;\n }\n }\n put_byte(s, s.gzhead.extra[s.gzindex] & 0xff);\n s.gzindex++;\n }\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n if (s.gzindex === s.gzhead.extra.length) {\n s.gzindex = 0;\n s.status = NAME_STATE;\n }\n }\n else {\n s.status = NAME_STATE;\n }\n }\n if (s.status === NAME_STATE) {\n if (s.gzhead.name/* != Z_NULL*/) {\n beg = s.pending; /* start of bytes to update crc */\n //int val;\n\n do {\n if (s.pending === s.pending_buf_size) {\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n flush_pending(strm);\n beg = s.pending;\n if (s.pending === s.pending_buf_size) {\n val = 1;\n break;\n }\n }\n // JS specific: little magic to add zero terminator to end of string\n if (s.gzindex < s.gzhead.name.length) {\n val = s.gzhead.name.charCodeAt(s.gzindex++) & 0xff;\n } else {\n val = 0;\n }\n put_byte(s, val);\n } while (val !== 0);\n\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n if (val === 0) {\n s.gzindex = 0;\n s.status = COMMENT_STATE;\n }\n }\n else {\n s.status = COMMENT_STATE;\n }\n }\n if (s.status === COMMENT_STATE) {\n if (s.gzhead.comment/* != Z_NULL*/) {\n beg = s.pending; /* start of bytes to update crc */\n //int val;\n\n do {\n if (s.pending === s.pending_buf_size) {\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n flush_pending(strm);\n beg = s.pending;\n if (s.pending === s.pending_buf_size) {\n val = 1;\n break;\n }\n }\n // JS specific: little magic to add zero terminator to end of string\n if (s.gzindex < s.gzhead.comment.length) {\n val = s.gzhead.comment.charCodeAt(s.gzindex++) & 0xff;\n } else {\n val = 0;\n }\n put_byte(s, val);\n } while (val !== 0);\n\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n if (val 
=== 0) {\n s.status = HCRC_STATE;\n }\n }\n else {\n s.status = HCRC_STATE;\n }\n }\n if (s.status === HCRC_STATE) {\n if (s.gzhead.hcrc) {\n if (s.pending + 2 > s.pending_buf_size) {\n flush_pending(strm);\n }\n if (s.pending + 2 <= s.pending_buf_size) {\n put_byte(s, strm.adler & 0xff);\n put_byte(s, (strm.adler >> 8) & 0xff);\n strm.adler = 0; //crc32(0L, Z_NULL, 0);\n s.status = BUSY_STATE;\n }\n }\n else {\n s.status = BUSY_STATE;\n }\n }\n//#endif\n\n /* Flush as much pending output as possible */\n if (s.pending !== 0) {\n flush_pending(strm);\n if (strm.avail_out === 0) {\n /* Since avail_out is 0, deflate will be called again with\n * more output space, but possibly with both pending and\n * avail_in equal to zero. There won't be anything to do,\n * but this is not an error situation so make sure we\n * return OK instead of BUF_ERROR at next call of deflate:\n */\n s.last_flush = -1;\n return Z_OK;\n }\n\n /* Make sure there is something to do and avoid duplicate consecutive\n * flushes. For repeated and useless calls with Z_FINISH, we keep\n * returning Z_STREAM_END instead of Z_BUF_ERROR.\n */\n } else if (strm.avail_in === 0 && rank(flush) <= rank(old_flush) &&\n flush !== Z_FINISH) {\n return err(strm, Z_BUF_ERROR);\n }\n\n /* User must not provide more input after the first FINISH: */\n if (s.status === FINISH_STATE && strm.avail_in !== 0) {\n return err(strm, Z_BUF_ERROR);\n }\n\n /* Start a new block or continue the current one.\n */\n if (strm.avail_in !== 0 || s.lookahead !== 0 ||\n (flush !== Z_NO_FLUSH && s.status !== FINISH_STATE)) {\n var bstate = (s.strategy === Z_HUFFMAN_ONLY) ? deflate_huff(s, flush) :\n (s.strategy === Z_RLE ? deflate_rle(s, flush) :\n configuration_table[s.level].func(s, flush));\n\n if (bstate === BS_FINISH_STARTED || bstate === BS_FINISH_DONE) {\n s.status = FINISH_STATE;\n }\n if (bstate === BS_NEED_MORE || bstate === BS_FINISH_STARTED) {\n if (strm.avail_out === 0) {\n s.last_flush = -1;\n /* avoid BUF_ERROR next call, see above */\n }\n return Z_OK;\n /* If flush != Z_NO_FLUSH && avail_out == 0, the next call\n * of deflate should use the same flush parameter to make sure\n * that the flush is complete. So we don't have to output an\n * empty block here, this will be done at next call. 
This also\n * ensures that for a very small output buffer, we emit at most\n * one empty block.\n */\n }\n if (bstate === BS_BLOCK_DONE) {\n if (flush === Z_PARTIAL_FLUSH) {\n trees._tr_align(s);\n }\n else if (flush !== Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */\n\n trees._tr_stored_block(s, 0, 0, false);\n /* For a full flush, this empty block will be recognized\n * as a special marker by inflate_sync().\n */\n if (flush === Z_FULL_FLUSH) {\n /*** CLEAR_HASH(s); ***/ /* forget history */\n zero(s.head); // Fill with NIL (= 0);\n\n if (s.lookahead === 0) {\n s.strstart = 0;\n s.block_start = 0;\n s.insert = 0;\n }\n }\n }\n flush_pending(strm);\n if (strm.avail_out === 0) {\n s.last_flush = -1; /* avoid BUF_ERROR at next call, see above */\n return Z_OK;\n }\n }\n }\n //Assert(strm->avail_out > 0, \"bug2\");\n //if (strm.avail_out <= 0) { throw new Error(\"bug2\");}\n\n if (flush !== Z_FINISH) { return Z_OK; }\n if (s.wrap <= 0) { return Z_STREAM_END; }\n\n /* Write the trailer */\n if (s.wrap === 2) {\n put_byte(s, strm.adler & 0xff);\n put_byte(s, (strm.adler >> 8) & 0xff);\n put_byte(s, (strm.adler >> 16) & 0xff);\n put_byte(s, (strm.adler >> 24) & 0xff);\n put_byte(s, strm.total_in & 0xff);\n put_byte(s, (strm.total_in >> 8) & 0xff);\n put_byte(s, (strm.total_in >> 16) & 0xff);\n put_byte(s, (strm.total_in >> 24) & 0xff);\n }\n else\n {\n putShortMSB(s, strm.adler >>> 16);\n putShortMSB(s, strm.adler & 0xffff);\n }\n\n flush_pending(strm);\n /* If avail_out is zero, the application will call deflate again\n * to flush the rest.\n */\n if (s.wrap > 0) { s.wrap = -s.wrap; }\n /* write the trailer only once! */\n return s.pending !== 0 ? Z_OK : Z_STREAM_END;\n}\n\nfunction deflateEnd(strm) {\n var status;\n\n if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) {\n return Z_STREAM_ERROR;\n }\n\n status = strm.state.status;\n if (status !== INIT_STATE &&\n status !== EXTRA_STATE &&\n status !== NAME_STATE &&\n status !== COMMENT_STATE &&\n status !== HCRC_STATE &&\n status !== BUSY_STATE &&\n status !== FINISH_STATE\n ) {\n return err(strm, Z_STREAM_ERROR);\n }\n\n strm.state = null;\n\n return status === BUSY_STATE ? 
err(strm, Z_DATA_ERROR) : Z_OK;\n}\n\n\n/* =========================================================================\n * Initializes the compression dictionary from the given byte\n * sequence without producing any compressed output.\n */\nfunction deflateSetDictionary(strm, dictionary) {\n var dictLength = dictionary.length;\n\n var s;\n var str, n;\n var wrap;\n var avail;\n var next;\n var input;\n var tmpDict;\n\n if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) {\n return Z_STREAM_ERROR;\n }\n\n s = strm.state;\n wrap = s.wrap;\n\n if (wrap === 2 || (wrap === 1 && s.status !== INIT_STATE) || s.lookahead) {\n return Z_STREAM_ERROR;\n }\n\n /* when using zlib wrappers, compute Adler-32 for provided dictionary */\n if (wrap === 1) {\n /* adler32(strm->adler, dictionary, dictLength); */\n strm.adler = adler32(strm.adler, dictionary, dictLength, 0);\n }\n\n s.wrap = 0; /* avoid computing Adler-32 in read_buf */\n\n /* if dictionary would fill window, just replace the history */\n if (dictLength >= s.w_size) {\n if (wrap === 0) { /* already empty otherwise */\n /*** CLEAR_HASH(s); ***/\n zero(s.head); // Fill with NIL (= 0);\n s.strstart = 0;\n s.block_start = 0;\n s.insert = 0;\n }\n /* use the tail */\n // dictionary = dictionary.slice(dictLength - s.w_size);\n tmpDict = new utils.Buf8(s.w_size);\n utils.arraySet(tmpDict, dictionary, dictLength - s.w_size, s.w_size, 0);\n dictionary = tmpDict;\n dictLength = s.w_size;\n }\n /* insert dictionary into window and hash */\n avail = strm.avail_in;\n next = strm.next_in;\n input = strm.input;\n strm.avail_in = dictLength;\n strm.next_in = 0;\n strm.input = dictionary;\n fill_window(s);\n while (s.lookahead >= MIN_MATCH) {\n str = s.strstart;\n n = s.lookahead - (MIN_MATCH - 1);\n do {\n /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + MIN_MATCH - 1]) & s.hash_mask;\n\n s.prev[str & s.w_mask] = s.head[s.ins_h];\n\n s.head[s.ins_h] = str;\n str++;\n } while (--n);\n s.strstart = str;\n s.lookahead = MIN_MATCH - 1;\n fill_window(s);\n }\n s.strstart += s.lookahead;\n s.block_start = s.strstart;\n s.insert = s.lookahead;\n s.lookahead = 0;\n s.match_length = s.prev_length = MIN_MATCH - 1;\n s.match_available = 0;\n strm.next_in = next;\n strm.input = input;\n strm.avail_in = avail;\n s.wrap = wrap;\n return Z_OK;\n}\n\n\nexports.deflateInit = deflateInit;\nexports.deflateInit2 = deflateInit2;\nexports.deflateReset = deflateReset;\nexports.deflateResetKeep = deflateResetKeep;\nexports.deflateSetHeader = deflateSetHeader;\nexports.deflate = deflate;\nexports.deflateEnd = deflateEnd;\nexports.deflateSetDictionary = deflateSetDictionary;\nexports.deflateInfo = 'pako deflate (from Nodeca project)';\n\n/* Not implemented\nexports.deflateBound = deflateBound;\nexports.deflateCopy = deflateCopy;\nexports.deflateParams = deflateParams;\nexports.deflatePending = deflatePending;\nexports.deflatePrime = deflatePrime;\nexports.deflateTune = deflateTune;\n*/\n","// String encode/decode helpers\n'use strict';\n\n\nvar utils = require('./common');\n\n\n// Quick check if we can use fast array to bin string conversion\n//\n// - apply(Array) can fail on Android 2.2\n// - apply(Uint8Array) can fail on iOS 5.1 Safary\n//\nvar STR_APPLY_OK = true;\nvar STR_APPLY_UIA_OK = true;\n\ntry { String.fromCharCode.apply(null, [ 0 ]); } catch (__) { STR_APPLY_OK = false; }\ntry { String.fromCharCode.apply(null, new Uint8Array(1)); } catch (__) { STR_APPLY_UIA_OK = false; }\n\n\n// Table with utf8 lengths 
(calculated by first byte of sequence)\n// Note, that 5 & 6-byte values and some 4-byte values can not be represented in JS,\n// because max possible codepoint is 0x10ffff\nvar _utf8len = new utils.Buf8(256);\nfor (var q = 0; q < 256; q++) {\n _utf8len[q] = (q >= 252 ? 6 : q >= 248 ? 5 : q >= 240 ? 4 : q >= 224 ? 3 : q >= 192 ? 2 : 1);\n}\n_utf8len[254] = _utf8len[254] = 1; // Invalid sequence start\n\n\n// convert string to array (typed, when possible)\nexports.string2buf = function (str) {\n var buf, c, c2, m_pos, i, str_len = str.length, buf_len = 0;\n\n // count binary size\n for (m_pos = 0; m_pos < str_len; m_pos++) {\n c = str.charCodeAt(m_pos);\n if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) {\n c2 = str.charCodeAt(m_pos + 1);\n if ((c2 & 0xfc00) === 0xdc00) {\n c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);\n m_pos++;\n }\n }\n buf_len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 3 : 4;\n }\n\n // allocate buffer\n buf = new utils.Buf8(buf_len);\n\n // convert\n for (i = 0, m_pos = 0; i < buf_len; m_pos++) {\n c = str.charCodeAt(m_pos);\n if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) {\n c2 = str.charCodeAt(m_pos + 1);\n if ((c2 & 0xfc00) === 0xdc00) {\n c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);\n m_pos++;\n }\n }\n if (c < 0x80) {\n /* one byte */\n buf[i++] = c;\n } else if (c < 0x800) {\n /* two bytes */\n buf[i++] = 0xC0 | (c >>> 6);\n buf[i++] = 0x80 | (c & 0x3f);\n } else if (c < 0x10000) {\n /* three bytes */\n buf[i++] = 0xE0 | (c >>> 12);\n buf[i++] = 0x80 | (c >>> 6 & 0x3f);\n buf[i++] = 0x80 | (c & 0x3f);\n } else {\n /* four bytes */\n buf[i++] = 0xf0 | (c >>> 18);\n buf[i++] = 0x80 | (c >>> 12 & 0x3f);\n buf[i++] = 0x80 | (c >>> 6 & 0x3f);\n buf[i++] = 0x80 | (c & 0x3f);\n }\n }\n\n return buf;\n};\n\n// Helper (used in 2 places)\nfunction buf2binstring(buf, len) {\n // use fallback for big arrays to avoid stack overflow\n if (len < 65537) {\n if ((buf.subarray && STR_APPLY_UIA_OK) || (!buf.subarray && STR_APPLY_OK)) {\n return String.fromCharCode.apply(null, utils.shrinkBuf(buf, len));\n }\n }\n\n var result = '';\n for (var i = 0; i < len; i++) {\n result += String.fromCharCode(buf[i]);\n }\n return result;\n}\n\n\n// Convert byte array to binary string\nexports.buf2binstring = function (buf) {\n return buf2binstring(buf, buf.length);\n};\n\n\n// Convert binary string (typed, when possible)\nexports.binstring2buf = function (str) {\n var buf = new utils.Buf8(str.length);\n for (var i = 0, len = buf.length; i < len; i++) {\n buf[i] = str.charCodeAt(i);\n }\n return buf;\n};\n\n\n// convert array to string\nexports.buf2string = function (buf, max) {\n var i, out, c, c_len;\n var len = max || buf.length;\n\n // Reserve max possible length (2 words per char)\n // NB: by unknown reasons, Array is significantly faster for\n // String.fromCharCode.apply than Uint16Array.\n var utf16buf = new Array(len * 2);\n\n for (out = 0, i = 0; i < len;) {\n c = buf[i++];\n // quick process ascii\n if (c < 0x80) { utf16buf[out++] = c; continue; }\n\n c_len = _utf8len[c];\n // skip 5 & 6 byte codes\n if (c_len > 4) { utf16buf[out++] = 0xfffd; i += c_len - 1; continue; }\n\n // apply mask on first byte\n c &= c_len === 2 ? 0x1f : c_len === 3 ? 
0x0f : 0x07;\n // join the rest\n while (c_len > 1 && i < len) {\n c = (c << 6) | (buf[i++] & 0x3f);\n c_len--;\n }\n\n // terminated by end of string?\n if (c_len > 1) { utf16buf[out++] = 0xfffd; continue; }\n\n if (c < 0x10000) {\n utf16buf[out++] = c;\n } else {\n c -= 0x10000;\n utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff);\n utf16buf[out++] = 0xdc00 | (c & 0x3ff);\n }\n }\n\n return buf2binstring(utf16buf, out);\n};\n\n\n// Calculate max possible position in utf8 buffer,\n// that will not break sequence. If that's not possible\n// - (very small limits) return max size as is.\n//\n// buf[] - utf8 bytes array\n// max - length limit (mandatory);\nexports.utf8border = function (buf, max) {\n var pos;\n\n max = max || buf.length;\n if (max > buf.length) { max = buf.length; }\n\n // go back from last position, until start of sequence found\n pos = max - 1;\n while (pos >= 0 && (buf[pos] & 0xC0) === 0x80) { pos--; }\n\n // Fuckup - very small and broken sequence,\n // return max, because we should return something anyway.\n if (pos < 0) { return max; }\n\n // If we came to start of buffer - that means vuffer is too small,\n // return max too.\n if (pos === 0) { return max; }\n\n return (pos + _utf8len[buf[pos]] > max) ? pos : max;\n};\n","'use strict';\n\n\nfunction ZStream() {\n /* next input byte */\n this.input = null; // JS specific, because we have no pointers\n this.next_in = 0;\n /* number of bytes available at input */\n this.avail_in = 0;\n /* total number of input bytes read so far */\n this.total_in = 0;\n /* next output byte should be put there */\n this.output = null; // JS specific, because we have no pointers\n this.next_out = 0;\n /* remaining free space at output */\n this.avail_out = 0;\n /* total number of bytes output so far */\n this.total_out = 0;\n /* last error message, NULL if no error */\n this.msg = ''/*Z_NULL*/;\n /* not visible by applications */\n this.state = null;\n /* best guess about the data type: binary or text */\n this.data_type = 2/*Z_UNKNOWN*/;\n /* adler32 value of the uncompressed data */\n this.adler = 0;\n}\n\nmodule.exports = ZStream;\n","'use strict';\n\n\nvar zlib_deflate = require('./zlib/deflate');\nvar utils = require('./utils/common');\nvar strings = require('./utils/strings');\nvar msg = require('./zlib/messages');\nvar ZStream = require('./zlib/zstream');\n\nvar toString = Object.prototype.toString;\n\n/* Public constants ==========================================================*/\n/* ===========================================================================*/\n\nvar Z_NO_FLUSH = 0;\nvar Z_FINISH = 4;\n\nvar Z_OK = 0;\nvar Z_STREAM_END = 1;\nvar Z_SYNC_FLUSH = 2;\n\nvar Z_DEFAULT_COMPRESSION = -1;\n\nvar Z_DEFAULT_STRATEGY = 0;\n\nvar Z_DEFLATED = 8;\n\n/* ===========================================================================*/\n\n\n/**\n * class Deflate\n *\n * Generic JS-style wrapper for zlib calls. If you don't need\n * streaming behaviour - use more simple functions: [[deflate]],\n * [[deflateRaw]] and [[gzip]].\n **/\n\n/* internal\n * Deflate.chunks -> Array\n *\n * Chunks of output data, if [[Deflate#onData]] not overriden.\n **/\n\n/**\n * Deflate.result -> Uint8Array|Array\n *\n * Compressed result, generated by default [[Deflate#onData]]\n * and [[Deflate#onEnd]] handlers. 
Filled after you push last chunk\n * (call [[Deflate#push]] with `Z_FINISH` / `true` param) or if you\n * push a chunk with explicit flush (call [[Deflate#push]] with\n * `Z_SYNC_FLUSH` param).\n **/\n\n/**\n * Deflate.err -> Number\n *\n * Error code after deflate finished. 0 (Z_OK) on success.\n * You will not need it in real life, because deflate errors\n * are possible only on wrong options or bad `onData` / `onEnd`\n * custom handlers.\n **/\n\n/**\n * Deflate.msg -> String\n *\n * Error message, if [[Deflate.err]] != 0\n **/\n\n\n/**\n * new Deflate(options)\n * - options (Object): zlib deflate options.\n *\n * Creates new deflator instance with specified params. Throws exception\n * on bad params. Supported options:\n *\n * - `level`\n * - `windowBits`\n * - `memLevel`\n * - `strategy`\n * - `dictionary`\n *\n * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)\n * for more information on these.\n *\n * Additional options, for internal needs:\n *\n * - `chunkSize` - size of generated data chunks (16K by default)\n * - `raw` (Boolean) - do raw deflate\n * - `gzip` (Boolean) - create gzip wrapper\n * - `to` (String) - if equal to 'string', then result will be \"binary string\"\n * (each char code [0..255])\n * - `header` (Object) - custom header for gzip\n * - `text` (Boolean) - true if compressed data believed to be text\n * - `time` (Number) - modification time, unix timestamp\n * - `os` (Number) - operation system code\n * - `extra` (Array) - array of bytes with extra data (max 65536)\n * - `name` (String) - file name (binary string)\n * - `comment` (String) - comment (binary string)\n * - `hcrc` (Boolean) - true if header crc should be added\n *\n * ##### Example:\n *\n * ```javascript\n * var pako = require('pako')\n * , chunk1 = Uint8Array([1,2,3,4,5,6,7,8,9])\n * , chunk2 = Uint8Array([10,11,12,13,14,15,16,17,18,19]);\n *\n * var deflate = new pako.Deflate({ level: 3});\n *\n * deflate.push(chunk1, false);\n * deflate.push(chunk2, true); // true -> last chunk\n *\n * if (deflate.err) { throw new Error(deflate.err); }\n *\n * console.log(deflate.result);\n * ```\n **/\nfunction Deflate(options) {\n if (!(this instanceof Deflate)) return new Deflate(options);\n\n this.options = utils.assign({\n level: Z_DEFAULT_COMPRESSION,\n method: Z_DEFLATED,\n chunkSize: 16384,\n windowBits: 15,\n memLevel: 8,\n strategy: Z_DEFAULT_STRATEGY,\n to: ''\n }, options || {});\n\n var opt = this.options;\n\n if (opt.raw && (opt.windowBits > 0)) {\n opt.windowBits = -opt.windowBits;\n }\n\n else if (opt.gzip && (opt.windowBits > 0) && (opt.windowBits < 16)) {\n opt.windowBits += 16;\n }\n\n this.err = 0; // error code, if happens (0 = Z_OK)\n this.msg = ''; // error message\n this.ended = false; // used to avoid multiple onEnd() calls\n this.chunks = []; // chunks of compressed data\n\n this.strm = new ZStream();\n this.strm.avail_out = 0;\n\n var status = zlib_deflate.deflateInit2(\n this.strm,\n opt.level,\n opt.method,\n opt.windowBits,\n opt.memLevel,\n opt.strategy\n );\n\n if (status !== Z_OK) {\n throw new Error(msg[status]);\n }\n\n if (opt.header) {\n zlib_deflate.deflateSetHeader(this.strm, opt.header);\n }\n\n if (opt.dictionary) {\n var dict;\n // Convert data if needed\n if (typeof opt.dictionary === 'string') {\n // If we need to compress text, change encoding to utf8.\n dict = strings.string2buf(opt.dictionary);\n } else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') {\n dict = new Uint8Array(opt.dictionary);\n } else {\n dict = 
opt.dictionary;\n }\n\n status = zlib_deflate.deflateSetDictionary(this.strm, dict);\n\n if (status !== Z_OK) {\n throw new Error(msg[status]);\n }\n\n this._dict_set = true;\n }\n}\n\n/**\n * Deflate#push(data[, mode]) -> Boolean\n * - data (Uint8Array|Array|ArrayBuffer|String): input data. Strings will be\n * converted to utf8 byte sequence.\n * - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes.\n * See constants. Skipped or `false` means Z_NO_FLUSH, `true` meansh Z_FINISH.\n *\n * Sends input data to deflate pipe, generating [[Deflate#onData]] calls with\n * new compressed chunks. Returns `true` on success. The last data block must have\n * mode Z_FINISH (or `true`). That will flush internal pending buffers and call\n * [[Deflate#onEnd]]. For interim explicit flushes (without ending the stream) you\n * can use mode Z_SYNC_FLUSH, keeping the compression context.\n *\n * On fail call [[Deflate#onEnd]] with error code and return false.\n *\n * We strongly recommend to use `Uint8Array` on input for best speed (output\n * array format is detected automatically). Also, don't skip last param and always\n * use the same type in your code (boolean or number). That will improve JS speed.\n *\n * For regular `Array`-s make sure all elements are [0..255].\n *\n * ##### Example\n *\n * ```javascript\n * push(chunk, false); // push one of data chunks\n * ...\n * push(chunk, true); // push last chunk\n * ```\n **/\nDeflate.prototype.push = function (data, mode) {\n var strm = this.strm;\n var chunkSize = this.options.chunkSize;\n var status, _mode;\n\n if (this.ended) { return false; }\n\n _mode = (mode === ~~mode) ? mode : ((mode === true) ? Z_FINISH : Z_NO_FLUSH);\n\n // Convert data if needed\n if (typeof data === 'string') {\n // If we need to compress text, change encoding to utf8.\n strm.input = strings.string2buf(data);\n } else if (toString.call(data) === '[object ArrayBuffer]') {\n strm.input = new Uint8Array(data);\n } else {\n strm.input = data;\n }\n\n strm.next_in = 0;\n strm.avail_in = strm.input.length;\n\n do {\n if (strm.avail_out === 0) {\n strm.output = new utils.Buf8(chunkSize);\n strm.next_out = 0;\n strm.avail_out = chunkSize;\n }\n status = zlib_deflate.deflate(strm, _mode); /* no bad return value */\n\n if (status !== Z_STREAM_END && status !== Z_OK) {\n this.onEnd(status);\n this.ended = true;\n return false;\n }\n if (strm.avail_out === 0 || (strm.avail_in === 0 && (_mode === Z_FINISH || _mode === Z_SYNC_FLUSH))) {\n if (this.options.to === 'string') {\n this.onData(strings.buf2binstring(utils.shrinkBuf(strm.output, strm.next_out)));\n } else {\n this.onData(utils.shrinkBuf(strm.output, strm.next_out));\n }\n }\n } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== Z_STREAM_END);\n\n // Finalize on the last chunk.\n if (_mode === Z_FINISH) {\n status = zlib_deflate.deflateEnd(this.strm);\n this.onEnd(status);\n this.ended = true;\n return status === Z_OK;\n }\n\n // callback interim results if Z_SYNC_FLUSH.\n if (_mode === Z_SYNC_FLUSH) {\n this.onEnd(Z_OK);\n strm.avail_out = 0;\n return true;\n }\n\n return true;\n};\n\n\n/**\n * Deflate#onData(chunk) -> Void\n * - chunk (Uint8Array|Array|String): ouput data. Type of array depends\n * on js engine support. When string output requested, each chunk\n * will be string.\n *\n * By default, stores data blocks in `chunks[]` property and glue\n * those in `onEnd`. 
Override this handler, if you need another behaviour.\n **/\nDeflate.prototype.onData = function (chunk) {\n this.chunks.push(chunk);\n};\n\n\n/**\n * Deflate#onEnd(status) -> Void\n * - status (Number): deflate status. 0 (Z_OK) on success,\n * other if not.\n *\n * Called once after you tell deflate that the input stream is\n * complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH)\n * or if an error happened. By default - join collected chunks,\n * free memory and fill `results` / `err` properties.\n **/\nDeflate.prototype.onEnd = function (status) {\n // On success - join\n if (status === Z_OK) {\n if (this.options.to === 'string') {\n this.result = this.chunks.join('');\n } else {\n this.result = utils.flattenChunks(this.chunks);\n }\n }\n this.chunks = [];\n this.err = status;\n this.msg = this.strm.msg;\n};\n\n\n/**\n * deflate(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to compress.\n * - options (Object): zlib deflate options.\n *\n * Compress `data` with deflate algorithm and `options`.\n *\n * Supported options are:\n *\n * - level\n * - windowBits\n * - memLevel\n * - strategy\n * - dictionary\n *\n * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)\n * for more information on these.\n *\n * Sugar (options):\n *\n * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify\n * negative windowBits implicitly.\n * - `to` (String) - if equal to 'string', then result will be \"binary string\"\n * (each char code [0..255])\n *\n * ##### Example:\n *\n * ```javascript\n * var pako = require('pako')\n * , data = Uint8Array([1,2,3,4,5,6,7,8,9]);\n *\n * console.log(pako.deflate(data));\n * ```\n **/\nfunction deflate(input, options) {\n var deflator = new Deflate(options);\n\n deflator.push(input, true);\n\n // That will never happens, if you don't cheat with options :)\n if (deflator.err) { throw deflator.msg || msg[deflator.err]; }\n\n return deflator.result;\n}\n\n\n/**\n * deflateRaw(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to compress.\n * - options (Object): zlib deflate options.\n *\n * The same as [[deflate]], but creates raw data, without wrapper\n * (header and adler32 crc).\n **/\nfunction deflateRaw(input, options) {\n options = options || {};\n options.raw = true;\n return deflate(input, options);\n}\n\n\n/**\n * gzip(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to compress.\n * - options (Object): zlib deflate options.\n *\n * The same as [[deflate]], but create gzip wrapper instead of\n * deflate one.\n **/\nfunction gzip(input, options) {\n options = options || {};\n options.gzip = true;\n return deflate(input, options);\n}\n\n\nexports.Deflate = Deflate;\nexports.deflate = deflate;\nexports.deflateRaw = deflateRaw;\nexports.gzip = gzip;\n","'use strict';\n\n// See state defs from inflate.js\nvar BAD = 30; /* got a data error -- remain here until reset */\nvar TYPE = 12; /* i: waiting for type bits, including last-flag bit */\n\n/*\n Decode literal, length, and distance codes and write out the resulting\n literal and match bytes until either not enough input or output is\n available, an end-of-block is encountered, or a data error is encountered.\n When large enough input and output buffers are supplied to inflate(), for\n example, a 16K input buffer and a 64K output buffer, more than 95% of the\n inflate execution time is spent in this routine.\n\n Entry assumptions:\n\n 
state.mode === LEN\n strm.avail_in >= 6\n strm.avail_out >= 258\n start >= strm.avail_out\n state.bits < 8\n\n On return, state.mode is one of:\n\n LEN -- ran out of enough output space or enough available input\n TYPE -- reached end of block code, inflate() to interpret next block\n BAD -- error in block data\n\n Notes:\n\n - The maximum input bits used by a length/distance pair is 15 bits for the\n length code, 5 bits for the length extra, 15 bits for the distance code,\n and 13 bits for the distance extra. This totals 48 bits, or six bytes.\n Therefore if strm.avail_in >= 6, then there is enough input to avoid\n checking for available input while decoding.\n\n - The maximum bytes that a single length/distance pair can output is 258\n bytes, which is the maximum length that can be coded. inflate_fast()\n requires strm.avail_out >= 258 for each loop to avoid checking for\n output space.\n */\nmodule.exports = function inflate_fast(strm, start) {\n var state;\n var _in; /* local strm.input */\n var last; /* have enough input while in < last */\n var _out; /* local strm.output */\n var beg; /* inflate()'s initial strm.output */\n var end; /* while out < end, enough space available */\n//#ifdef INFLATE_STRICT\n var dmax; /* maximum distance from zlib header */\n//#endif\n var wsize; /* window size or zero if not using window */\n var whave; /* valid bytes in the window */\n var wnext; /* window write index */\n // Use `s_window` instead `window`, avoid conflict with instrumentation tools\n var s_window; /* allocated sliding window, if wsize != 0 */\n var hold; /* local strm.hold */\n var bits; /* local strm.bits */\n var lcode; /* local strm.lencode */\n var dcode; /* local strm.distcode */\n var lmask; /* mask for first level of length codes */\n var dmask; /* mask for first level of distance codes */\n var here; /* retrieved table entry */\n var op; /* code bits, operation, extra bits, or */\n /* window position, window bytes to copy */\n var len; /* match length, unused bytes */\n var dist; /* match distance */\n var from; /* where to copy match from */\n var from_source;\n\n\n var input, output; // JS specific, because we have no pointers\n\n /* copy state to local variables */\n state = strm.state;\n //here = state.here;\n _in = strm.next_in;\n input = strm.input;\n last = _in + (strm.avail_in - 5);\n _out = strm.next_out;\n output = strm.output;\n beg = _out - (start - strm.avail_out);\n end = _out + (strm.avail_out - 257);\n//#ifdef INFLATE_STRICT\n dmax = state.dmax;\n//#endif\n wsize = state.wsize;\n whave = state.whave;\n wnext = state.wnext;\n s_window = state.window;\n hold = state.hold;\n bits = state.bits;\n lcode = state.lencode;\n dcode = state.distcode;\n lmask = (1 << state.lenbits) - 1;\n dmask = (1 << state.distbits) - 1;\n\n\n /* decode literals and length/distances until end-of-block or not enough\n input data or output space */\n\n top:\n do {\n if (bits < 15) {\n hold += input[_in++] << bits;\n bits += 8;\n hold += input[_in++] << bits;\n bits += 8;\n }\n\n here = lcode[hold & lmask];\n\n dolen:\n for (;;) { // Goto emulation\n op = here >>> 24/*here.bits*/;\n hold >>>= op;\n bits -= op;\n op = (here >>> 16) & 0xff/*here.op*/;\n if (op === 0) { /* literal */\n //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?\n // \"inflate: literal '%c'\\n\" :\n // \"inflate: literal 0x%02x\\n\", here.val));\n output[_out++] = here & 0xffff/*here.val*/;\n }\n else if (op & 16) { /* length base */\n len = here & 0xffff/*here.val*/;\n op &= 15; /* number of extra bits */\n if 
(op) {\n if (bits < op) {\n hold += input[_in++] << bits;\n bits += 8;\n }\n len += hold & ((1 << op) - 1);\n hold >>>= op;\n bits -= op;\n }\n //Tracevv((stderr, \"inflate: length %u\\n\", len));\n if (bits < 15) {\n hold += input[_in++] << bits;\n bits += 8;\n hold += input[_in++] << bits;\n bits += 8;\n }\n here = dcode[hold & dmask];\n\n dodist:\n for (;;) { // goto emulation\n op = here >>> 24/*here.bits*/;\n hold >>>= op;\n bits -= op;\n op = (here >>> 16) & 0xff/*here.op*/;\n\n if (op & 16) { /* distance base */\n dist = here & 0xffff/*here.val*/;\n op &= 15; /* number of extra bits */\n if (bits < op) {\n hold += input[_in++] << bits;\n bits += 8;\n if (bits < op) {\n hold += input[_in++] << bits;\n bits += 8;\n }\n }\n dist += hold & ((1 << op) - 1);\n//#ifdef INFLATE_STRICT\n if (dist > dmax) {\n strm.msg = 'invalid distance too far back';\n state.mode = BAD;\n break top;\n }\n//#endif\n hold >>>= op;\n bits -= op;\n //Tracevv((stderr, \"inflate: distance %u\\n\", dist));\n op = _out - beg; /* max distance in output */\n if (dist > op) { /* see if copy from window */\n op = dist - op; /* distance back in window */\n if (op > whave) {\n if (state.sane) {\n strm.msg = 'invalid distance too far back';\n state.mode = BAD;\n break top;\n }\n\n// (!) This block is disabled in zlib defailts,\n// don't enable it for binary compatibility\n//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR\n// if (len <= op - whave) {\n// do {\n// output[_out++] = 0;\n// } while (--len);\n// continue top;\n// }\n// len -= op - whave;\n// do {\n// output[_out++] = 0;\n// } while (--op > whave);\n// if (op === 0) {\n// from = _out - dist;\n// do {\n// output[_out++] = output[from++];\n// } while (--len);\n// continue top;\n// }\n//#endif\n }\n from = 0; // window index\n from_source = s_window;\n if (wnext === 0) { /* very common case */\n from += wsize - op;\n if (op < len) { /* some from window */\n len -= op;\n do {\n output[_out++] = s_window[from++];\n } while (--op);\n from = _out - dist; /* rest from output */\n from_source = output;\n }\n }\n else if (wnext < op) { /* wrap around window */\n from += wsize + wnext - op;\n op -= wnext;\n if (op < len) { /* some from end of window */\n len -= op;\n do {\n output[_out++] = s_window[from++];\n } while (--op);\n from = 0;\n if (wnext < len) { /* some from start of window */\n op = wnext;\n len -= op;\n do {\n output[_out++] = s_window[from++];\n } while (--op);\n from = _out - dist; /* rest from output */\n from_source = output;\n }\n }\n }\n else { /* contiguous in window */\n from += wnext - op;\n if (op < len) { /* some from window */\n len -= op;\n do {\n output[_out++] = s_window[from++];\n } while (--op);\n from = _out - dist; /* rest from output */\n from_source = output;\n }\n }\n while (len > 2) {\n output[_out++] = from_source[from++];\n output[_out++] = from_source[from++];\n output[_out++] = from_source[from++];\n len -= 3;\n }\n if (len) {\n output[_out++] = from_source[from++];\n if (len > 1) {\n output[_out++] = from_source[from++];\n }\n }\n }\n else {\n from = _out - dist; /* copy direct from output */\n do { /* minimum length is three */\n output[_out++] = output[from++];\n output[_out++] = output[from++];\n output[_out++] = output[from++];\n len -= 3;\n } while (len > 2);\n if (len) {\n output[_out++] = output[from++];\n if (len > 1) {\n output[_out++] = output[from++];\n }\n }\n }\n }\n else if ((op & 64) === 0) { /* 2nd level distance code */\n here = dcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))];\n continue dodist;\n 
}\n else {\n strm.msg = 'invalid distance code';\n state.mode = BAD;\n break top;\n }\n\n break; // need to emulate goto via \"continue\"\n }\n }\n else if ((op & 64) === 0) { /* 2nd level length code */\n here = lcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))];\n continue dolen;\n }\n else if (op & 32) { /* end-of-block */\n //Tracevv((stderr, \"inflate: end of block\\n\"));\n state.mode = TYPE;\n break top;\n }\n else {\n strm.msg = 'invalid literal/length code';\n state.mode = BAD;\n break top;\n }\n\n break; // need to emulate goto via \"continue\"\n }\n } while (_in < last && _out < end);\n\n /* return unused bytes (on entry, bits < 8, so in won't go too far back) */\n len = bits >> 3;\n _in -= len;\n bits -= len << 3;\n hold &= (1 << bits) - 1;\n\n /* update state and return */\n strm.next_in = _in;\n strm.next_out = _out;\n strm.avail_in = (_in < last ? 5 + (last - _in) : 5 - (_in - last));\n strm.avail_out = (_out < end ? 257 + (end - _out) : 257 - (_out - end));\n state.hold = hold;\n state.bits = bits;\n return;\n};\n","'use strict';\n\n\nvar utils = require('../utils/common');\n\nvar MAXBITS = 15;\nvar ENOUGH_LENS = 852;\nvar ENOUGH_DISTS = 592;\n//var ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS);\n\nvar CODES = 0;\nvar LENS = 1;\nvar DISTS = 2;\n\nvar lbase = [ /* Length codes 257..285 base */\n 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,\n 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0\n];\n\nvar lext = [ /* Length codes 257..285 extra */\n 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,\n 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 72, 78\n];\n\nvar dbase = [ /* Distance codes 0..29 base */\n 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,\n 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,\n 8193, 12289, 16385, 24577, 0, 0\n];\n\nvar dext = [ /* Distance codes 0..29 extra */\n 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22,\n 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,\n 28, 28, 29, 29, 64, 64\n];\n\nmodule.exports = function inflate_table(type, lens, lens_index, codes, table, table_index, work, opts)\n{\n var bits = opts.bits;\n //here = opts.here; /* table entry for duplication */\n\n var len = 0; /* a code's length in bits */\n var sym = 0; /* index of code symbols */\n var min = 0, max = 0; /* minimum and maximum code lengths */\n var root = 0; /* number of index bits for root table */\n var curr = 0; /* number of index bits for current table */\n var drop = 0; /* code bits to drop for sub-table */\n var left = 0; /* number of prefix codes available */\n var used = 0; /* code entries in table used */\n var huff = 0; /* Huffman code */\n var incr; /* for incrementing code, index */\n var fill; /* index for replicating entries */\n var low; /* low bits for current root entry */\n var mask; /* mask for low root bits */\n var next; /* next available space in table */\n var base = null; /* base value table to use */\n var base_index = 0;\n// var shoextra; /* extra bits table to use */\n var end; /* use base and extra for symbol > end */\n var count = new utils.Buf16(MAXBITS + 1); //[MAXBITS+1]; /* number of codes of each length */\n var offs = new utils.Buf16(MAXBITS + 1); //[MAXBITS+1]; /* offsets in table for each length */\n var extra = null;\n var extra_index = 0;\n\n var here_bits, here_op, here_val;\n\n /*\n Process a set of code lengths to create a canonical Huffman code. The\n code lengths are lens[0..codes-1]. Each length corresponds to the\n symbols 0..codes-1. 
The Huffman code is generated by first sorting the\n symbols by length from short to long, and retaining the symbol order\n for codes with equal lengths. Then the code starts with all zero bits\n for the first code of the shortest length, and the codes are integer\n increments for the same length, and zeros are appended as the length\n increases. For the deflate format, these bits are stored backwards\n from their more natural integer increment ordering, and so when the\n decoding tables are built in the large loop below, the integer codes\n are incremented backwards.\n\n This routine assumes, but does not check, that all of the entries in\n lens[] are in the range 0..MAXBITS. The caller must assure this.\n 1..MAXBITS is interpreted as that code length. zero means that that\n symbol does not occur in this code.\n\n The codes are sorted by computing a count of codes for each length,\n creating from that a table of starting indices for each length in the\n sorted table, and then entering the symbols in order in the sorted\n table. The sorted table is work[], with that space being provided by\n the caller.\n\n The length counts are used for other purposes as well, i.e. finding\n the minimum and maximum length codes, determining if there are any\n codes at all, checking for a valid set of lengths, and looking ahead\n at length counts to determine sub-table sizes when building the\n decoding tables.\n */\n\n /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */\n for (len = 0; len <= MAXBITS; len++) {\n count[len] = 0;\n }\n for (sym = 0; sym < codes; sym++) {\n count[lens[lens_index + sym]]++;\n }\n\n /* bound code lengths, force root to be within code lengths */\n root = bits;\n for (max = MAXBITS; max >= 1; max--) {\n if (count[max] !== 0) { break; }\n }\n if (root > max) {\n root = max;\n }\n if (max === 0) { /* no symbols to code at all */\n //table.op[opts.table_index] = 64; //here.op = (var char)64; /* invalid code marker */\n //table.bits[opts.table_index] = 1; //here.bits = (var char)1;\n //table.val[opts.table_index++] = 0; //here.val = (var short)0;\n table[table_index++] = (1 << 24) | (64 << 16) | 0;\n\n\n //table.op[opts.table_index] = 64;\n //table.bits[opts.table_index] = 1;\n //table.val[opts.table_index++] = 0;\n table[table_index++] = (1 << 24) | (64 << 16) | 0;\n\n opts.bits = 1;\n return 0; /* no symbols, but wait for decoding to report error */\n }\n for (min = 1; min < max; min++) {\n if (count[min] !== 0) { break; }\n }\n if (root < min) {\n root = min;\n }\n\n /* check for an over-subscribed or incomplete set of lengths */\n left = 1;\n for (len = 1; len <= MAXBITS; len++) {\n left <<= 1;\n left -= count[len];\n if (left < 0) {\n return -1;\n } /* over-subscribed */\n }\n if (left > 0 && (type === CODES || max !== 1)) {\n return -1; /* incomplete set */\n }\n\n /* generate offsets into symbol table for each length for sorting */\n offs[1] = 0;\n for (len = 1; len < MAXBITS; len++) {\n offs[len + 1] = offs[len] + count[len];\n }\n\n /* sort symbols by length, by symbol order within each length */\n for (sym = 0; sym < codes; sym++) {\n if (lens[lens_index + sym] !== 0) {\n work[offs[lens[lens_index + sym]]++] = sym;\n }\n }\n\n /*\n Create and fill in decoding tables. In this loop, the table being\n filled is at next and has curr index bits. The code being used is huff\n with length len. That code is converted to an index by dropping drop\n bits off of the bottom. 
For codes where len is less than drop + curr,\n those top drop + curr - len bits are incremented through all values to\n fill the table with replicated entries.\n\n root is the number of index bits for the root table. When len exceeds\n root, sub-tables are created pointed to by the root entry with an index\n of the low root bits of huff. This is saved in low to check for when a\n new sub-table should be started. drop is zero when the root table is\n being filled, and drop is root when sub-tables are being filled.\n\n When a new sub-table is needed, it is necessary to look ahead in the\n code lengths to determine what size sub-table is needed. The length\n counts are used for this, and so count[] is decremented as codes are\n entered in the tables.\n\n used keeps track of how many table entries have been allocated from the\n provided *table space. It is checked for LENS and DIST tables against\n the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in\n the initial root table size constants. See the comments in inftrees.h\n for more information.\n\n sym increments through all symbols, and the loop terminates when\n all codes of length max, i.e. all codes, have been processed. This\n routine permits incomplete codes, so another loop after this one fills\n in the rest of the decoding tables with invalid code markers.\n */\n\n /* set up for code type */\n // poor man optimization - use if-else instead of switch,\n // to avoid deopts in old v8\n if (type === CODES) {\n base = extra = work; /* dummy value--not used */\n end = 19;\n\n } else if (type === LENS) {\n base = lbase;\n base_index -= 257;\n extra = lext;\n extra_index -= 257;\n end = 256;\n\n } else { /* DISTS */\n base = dbase;\n extra = dext;\n end = -1;\n }\n\n /* initialize opts for loop */\n huff = 0; /* starting code */\n sym = 0; /* starting code symbol */\n len = min; /* starting code length */\n next = table_index; /* current table to fill in */\n curr = root; /* current table index bits */\n drop = 0; /* current bits to drop from code for index */\n low = -1; /* trigger new sub-table when len > root */\n used = 1 << root; /* use root table entries */\n mask = used - 1; /* mask for comparing low */\n\n /* check available table space */\n if ((type === LENS && used > ENOUGH_LENS) ||\n (type === DISTS && used > ENOUGH_DISTS)) {\n return 1;\n }\n\n /* process all codes and make table entries */\n for (;;) {\n /* create table entry */\n here_bits = len - drop;\n if (work[sym] < end) {\n here_op = 0;\n here_val = work[sym];\n }\n else if (work[sym] > end) {\n here_op = extra[extra_index + work[sym]];\n here_val = base[base_index + work[sym]];\n }\n else {\n here_op = 32 + 64; /* end of block */\n here_val = 0;\n }\n\n /* replicate for those indices with low len bits equal to huff */\n incr = 1 << (len - drop);\n fill = 1 << curr;\n min = fill; /* save offset to next table */\n do {\n fill -= incr;\n table[next + (huff >> drop) + fill] = (here_bits << 24) | (here_op << 16) | here_val |0;\n } while (fill !== 0);\n\n /* backwards increment the len-bit code huff */\n incr = 1 << (len - 1);\n while (huff & incr) {\n incr >>= 1;\n }\n if (incr !== 0) {\n huff &= incr - 1;\n huff += incr;\n } else {\n huff = 0;\n }\n\n /* go to next symbol, update count, len */\n sym++;\n if (--count[len] === 0) {\n if (len === max) { break; }\n len = lens[lens_index + work[sym]];\n }\n\n /* create new sub-table if needed */\n if (len > root && (huff & mask) !== low) {\n /* if first time, transition to sub-tables */\n if (drop === 0) {\n 
drop = root;\n }\n\n /* increment past last table */\n next += min; /* here min is 1 << curr */\n\n /* determine length of next table */\n curr = len - drop;\n left = 1 << curr;\n while (curr + drop < max) {\n left -= count[curr + drop];\n if (left <= 0) { break; }\n curr++;\n left <<= 1;\n }\n\n /* check for enough space */\n used += 1 << curr;\n if ((type === LENS && used > ENOUGH_LENS) ||\n (type === DISTS && used > ENOUGH_DISTS)) {\n return 1;\n }\n\n /* point entry in root table to sub-table */\n low = huff & mask;\n /*table.op[low] = curr;\n table.bits[low] = root;\n table.val[low] = next - opts.table_index;*/\n table[low] = (root << 24) | (curr << 16) | (next - table_index) |0;\n }\n }\n\n /* fill in remaining table entry if code is incomplete (guaranteed to have\n at most one remaining entry, since if the code is incomplete, the\n maximum code length that was allowed to get this far is one bit) */\n if (huff !== 0) {\n //table.op[next + huff] = 64; /* invalid code marker */\n //table.bits[next + huff] = len - drop;\n //table.val[next + huff] = 0;\n table[next + huff] = ((len - drop) << 24) | (64 << 16) |0;\n }\n\n /* set return parameters */\n //opts.table_index += used;\n opts.bits = root;\n return 0;\n};\n","'use strict';\n\n\nvar utils = require('../utils/common');\nvar adler32 = require('./adler32');\nvar crc32 = require('./crc32');\nvar inflate_fast = require('./inffast');\nvar inflate_table = require('./inftrees');\n\nvar CODES = 0;\nvar LENS = 1;\nvar DISTS = 2;\n\n/* Public constants ==========================================================*/\n/* ===========================================================================*/\n\n\n/* Allowed flush values; see deflate() and inflate() below for details */\n//var Z_NO_FLUSH = 0;\n//var Z_PARTIAL_FLUSH = 1;\n//var Z_SYNC_FLUSH = 2;\n//var Z_FULL_FLUSH = 3;\nvar Z_FINISH = 4;\nvar Z_BLOCK = 5;\nvar Z_TREES = 6;\n\n\n/* Return codes for the compression/decompression functions. 
Negative values\n * are errors, positive values are used for special but normal events.\n */\nvar Z_OK = 0;\nvar Z_STREAM_END = 1;\nvar Z_NEED_DICT = 2;\n//var Z_ERRNO = -1;\nvar Z_STREAM_ERROR = -2;\nvar Z_DATA_ERROR = -3;\nvar Z_MEM_ERROR = -4;\nvar Z_BUF_ERROR = -5;\n//var Z_VERSION_ERROR = -6;\n\n/* The deflate compression method */\nvar Z_DEFLATED = 8;\n\n\n/* STATES ====================================================================*/\n/* ===========================================================================*/\n\n\nvar HEAD = 1; /* i: waiting for magic header */\nvar FLAGS = 2; /* i: waiting for method and flags (gzip) */\nvar TIME = 3; /* i: waiting for modification time (gzip) */\nvar OS = 4; /* i: waiting for extra flags and operating system (gzip) */\nvar EXLEN = 5; /* i: waiting for extra length (gzip) */\nvar EXTRA = 6; /* i: waiting for extra bytes (gzip) */\nvar NAME = 7; /* i: waiting for end of file name (gzip) */\nvar COMMENT = 8; /* i: waiting for end of comment (gzip) */\nvar HCRC = 9; /* i: waiting for header crc (gzip) */\nvar DICTID = 10; /* i: waiting for dictionary check value */\nvar DICT = 11; /* waiting for inflateSetDictionary() call */\nvar TYPE = 12; /* i: waiting for type bits, including last-flag bit */\nvar TYPEDO = 13; /* i: same, but skip check to exit inflate on new block */\nvar STORED = 14; /* i: waiting for stored size (length and complement) */\nvar COPY_ = 15; /* i/o: same as COPY below, but only first time in */\nvar COPY = 16; /* i/o: waiting for input or output to copy stored block */\nvar TABLE = 17; /* i: waiting for dynamic block table lengths */\nvar LENLENS = 18; /* i: waiting for code length code lengths */\nvar CODELENS = 19; /* i: waiting for length/lit and distance code lengths */\nvar LEN_ = 20; /* i: same as LEN below, but only first time in */\nvar LEN = 21; /* i: waiting for length/lit/eob code */\nvar LENEXT = 22; /* i: waiting for length extra bits */\nvar DIST = 23; /* i: waiting for distance code */\nvar DISTEXT = 24; /* i: waiting for distance extra bits */\nvar MATCH = 25; /* o: waiting for output space to copy string */\nvar LIT = 26; /* o: waiting for output space to write literal */\nvar CHECK = 27; /* i: waiting for 32-bit check value */\nvar LENGTH = 28; /* i: waiting for 32-bit length (gzip) */\nvar DONE = 29; /* finished check, done -- remain here until reset */\nvar BAD = 30; /* got a data error -- remain here until reset */\nvar MEM = 31; /* got an inflate() memory error -- remain here until reset */\nvar SYNC = 32; /* looking for synchronization bytes to restart inflate() */\n\n/* ===========================================================================*/\n\n\n\nvar ENOUGH_LENS = 852;\nvar ENOUGH_DISTS = 592;\n//var ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS);\n\nvar MAX_WBITS = 15;\n/* 32K LZ77 window */\nvar DEF_WBITS = MAX_WBITS;\n\n\nfunction zswap32(q) {\n return (((q >>> 24) & 0xff) +\n ((q >>> 8) & 0xff00) +\n ((q & 0xff00) << 8) +\n ((q & 0xff) << 24));\n}\n\n\nfunction InflateState() {\n this.mode = 0; /* current inflate mode */\n this.last = false; /* true if processing last block */\n this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */\n this.havedict = false; /* true if dictionary provided */\n this.flags = 0; /* gzip header method and flags (0 if zlib) */\n this.dmax = 0; /* zlib header max distance (INFLATE_STRICT) */\n this.check = 0; /* protected copy of check value */\n this.total = 0; /* protected copy of output count */\n // TODO: may be {}\n this.head = null; /* where to save gzip header 
information */\n\n /* sliding window */\n this.wbits = 0; /* log base 2 of requested window size */\n this.wsize = 0; /* window size or zero if not using window */\n this.whave = 0; /* valid bytes in the window */\n this.wnext = 0; /* window write index */\n this.window = null; /* allocated sliding window, if needed */\n\n /* bit accumulator */\n this.hold = 0; /* input bit accumulator */\n this.bits = 0; /* number of bits in \"in\" */\n\n /* for string and stored block copying */\n this.length = 0; /* literal or length of data to copy */\n this.offset = 0; /* distance back to copy string from */\n\n /* for table and code decoding */\n this.extra = 0; /* extra bits needed */\n\n /* fixed and dynamic code tables */\n this.lencode = null; /* starting table for length/literal codes */\n this.distcode = null; /* starting table for distance codes */\n this.lenbits = 0; /* index bits for lencode */\n this.distbits = 0; /* index bits for distcode */\n\n /* dynamic table building */\n this.ncode = 0; /* number of code length code lengths */\n this.nlen = 0; /* number of length code lengths */\n this.ndist = 0; /* number of distance code lengths */\n this.have = 0; /* number of code lengths in lens[] */\n this.next = null; /* next available space in codes[] */\n\n this.lens = new utils.Buf16(320); /* temporary storage for code lengths */\n this.work = new utils.Buf16(288); /* work area for code table building */\n\n /*\n because we don't have pointers in js, we use lencode and distcode directly\n as buffers so we don't need codes\n */\n //this.codes = new utils.Buf32(ENOUGH); /* space for code tables */\n this.lendyn = null; /* dynamic table for length/literal codes (JS specific) */\n this.distdyn = null; /* dynamic table for distance codes (JS specific) */\n this.sane = 0; /* if false, allow invalid distance too far */\n this.back = 0; /* bits back of last unprocessed length/lit */\n this.was = 0; /* initial length of match */\n}\n\nfunction inflateResetKeep(strm) {\n var state;\n\n if (!strm || !strm.state) { return Z_STREAM_ERROR; }\n state = strm.state;\n strm.total_in = strm.total_out = state.total = 0;\n strm.msg = ''; /*Z_NULL*/\n if (state.wrap) { /* to support ill-conceived Java test suite */\n strm.adler = state.wrap & 1;\n }\n state.mode = HEAD;\n state.last = 0;\n state.havedict = 0;\n state.dmax = 32768;\n state.head = null/*Z_NULL*/;\n state.hold = 0;\n state.bits = 0;\n //state.lencode = state.distcode = state.next = state.codes;\n state.lencode = state.lendyn = new utils.Buf32(ENOUGH_LENS);\n state.distcode = state.distdyn = new utils.Buf32(ENOUGH_DISTS);\n\n state.sane = 1;\n state.back = -1;\n //Tracev((stderr, \"inflate: reset\\n\"));\n return Z_OK;\n}\n\nfunction inflateReset(strm) {\n var state;\n\n if (!strm || !strm.state) { return Z_STREAM_ERROR; }\n state = strm.state;\n state.wsize = 0;\n state.whave = 0;\n state.wnext = 0;\n return inflateResetKeep(strm);\n\n}\n\nfunction inflateReset2(strm, windowBits) {\n var wrap;\n var state;\n\n /* get the state */\n if (!strm || !strm.state) { return Z_STREAM_ERROR; }\n state = strm.state;\n\n /* extract wrap request from windowBits parameter */\n if (windowBits < 0) {\n wrap = 0;\n windowBits = -windowBits;\n }\n else {\n wrap = (windowBits >> 4) + 1;\n if (windowBits < 48) {\n windowBits &= 15;\n }\n }\n\n /* set number of window bits, free window if different */\n if (windowBits && (windowBits < 8 || windowBits > 15)) {\n return Z_STREAM_ERROR;\n }\n if (state.window !== null && state.wbits !== windowBits) {\n state.window = 
null;\n }\n\n /* update state and reset the rest of it */\n state.wrap = wrap;\n state.wbits = windowBits;\n return inflateReset(strm);\n}\n\nfunction inflateInit2(strm, windowBits) {\n var ret;\n var state;\n\n if (!strm) { return Z_STREAM_ERROR; }\n //strm.msg = Z_NULL; /* in case we return an error */\n\n state = new InflateState();\n\n //if (state === Z_NULL) return Z_MEM_ERROR;\n //Tracev((stderr, \"inflate: allocated\\n\"));\n strm.state = state;\n state.window = null/*Z_NULL*/;\n ret = inflateReset2(strm, windowBits);\n if (ret !== Z_OK) {\n strm.state = null/*Z_NULL*/;\n }\n return ret;\n}\n\nfunction inflateInit(strm) {\n return inflateInit2(strm, DEF_WBITS);\n}\n\n\n/*\n Return state with length and distance decoding tables and index sizes set to\n fixed code decoding. Normally this returns fixed tables from inffixed.h.\n If BUILDFIXED is defined, then instead this routine builds the tables the\n first time it's called, and returns those tables the first time and\n thereafter. This reduces the size of the code by about 2K bytes, in\n exchange for a little execution time. However, BUILDFIXED should not be\n used for threaded applications, since the rewriting of the tables and virgin\n may not be thread-safe.\n */\nvar virgin = true;\n\nvar lenfix, distfix; // We have no pointers in JS, so keep tables separate\n\nfunction fixedtables(state) {\n /* build fixed huffman tables if first call (may not be thread safe) */\n if (virgin) {\n var sym;\n\n lenfix = new utils.Buf32(512);\n distfix = new utils.Buf32(32);\n\n /* literal/length table */\n sym = 0;\n while (sym < 144) { state.lens[sym++] = 8; }\n while (sym < 256) { state.lens[sym++] = 9; }\n while (sym < 280) { state.lens[sym++] = 7; }\n while (sym < 288) { state.lens[sym++] = 8; }\n\n inflate_table(LENS, state.lens, 0, 288, lenfix, 0, state.work, { bits: 9 });\n\n /* distance table */\n sym = 0;\n while (sym < 32) { state.lens[sym++] = 5; }\n\n inflate_table(DISTS, state.lens, 0, 32, distfix, 0, state.work, { bits: 5 });\n\n /* do this just once */\n virgin = false;\n }\n\n state.lencode = lenfix;\n state.lenbits = 9;\n state.distcode = distfix;\n state.distbits = 5;\n}\n\n\n/*\n Update the window with the last wsize (normally 32K) bytes written before\n returning. If window does not exist yet, create it. 
This is only called\n when a window is already in use, or when output has been written during this\n inflate call, but the end of the deflate stream has not been reached yet.\n It is also called to create a window for dictionary data when a dictionary\n is loaded.\n\n Providing output buffers larger than 32K to inflate() should provide a speed\n advantage, since only the last 32K of output is copied to the sliding window\n upon return from inflate(), and since all distances after the first 32K of\n output will fall in the output data, making match copies simpler and faster.\n The advantage may be dependent on the size of the processor's data caches.\n */\nfunction updatewindow(strm, src, end, copy) {\n var dist;\n var state = strm.state;\n\n /* if it hasn't been done already, allocate space for the window */\n if (state.window === null) {\n state.wsize = 1 << state.wbits;\n state.wnext = 0;\n state.whave = 0;\n\n state.window = new utils.Buf8(state.wsize);\n }\n\n /* copy state->wsize or less output bytes into the circular window */\n if (copy >= state.wsize) {\n utils.arraySet(state.window, src, end - state.wsize, state.wsize, 0);\n state.wnext = 0;\n state.whave = state.wsize;\n }\n else {\n dist = state.wsize - state.wnext;\n if (dist > copy) {\n dist = copy;\n }\n //zmemcpy(state->window + state->wnext, end - copy, dist);\n utils.arraySet(state.window, src, end - copy, dist, state.wnext);\n copy -= dist;\n if (copy) {\n //zmemcpy(state->window, end - copy, copy);\n utils.arraySet(state.window, src, end - copy, copy, 0);\n state.wnext = copy;\n state.whave = state.wsize;\n }\n else {\n state.wnext += dist;\n if (state.wnext === state.wsize) { state.wnext = 0; }\n if (state.whave < state.wsize) { state.whave += dist; }\n }\n }\n return 0;\n}\n\nfunction inflate(strm, flush) {\n var state;\n var input, output; // input/output buffers\n var next; /* next input INDEX */\n var put; /* next output INDEX */\n var have, left; /* available input and output */\n var hold; /* bit buffer */\n var bits; /* bits in bit buffer */\n var _in, _out; /* save starting available input and output */\n var copy; /* number of stored or match bytes to copy */\n var from; /* where to copy match bytes from */\n var from_source;\n var here = 0; /* current decoding table entry */\n var here_bits, here_op, here_val; // paked \"here\" denormalized (JS specific)\n //var last; /* parent table entry */\n var last_bits, last_op, last_val; // paked \"last\" denormalized (JS specific)\n var len; /* length to copy for repeats, bits to drop */\n var ret; /* return code */\n var hbuf = new utils.Buf8(4); /* buffer for gzip header crc calculation */\n var opts;\n\n var n; // temporary var for NEED_BITS\n\n var order = /* permutation of code lengths */\n [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ];\n\n\n if (!strm || !strm.state || !strm.output ||\n (!strm.input && strm.avail_in !== 0)) {\n return Z_STREAM_ERROR;\n }\n\n state = strm.state;\n if (state.mode === TYPE) { state.mode = TYPEDO; } /* skip check */\n\n\n //--- LOAD() ---\n put = strm.next_out;\n output = strm.output;\n left = strm.avail_out;\n next = strm.next_in;\n input = strm.input;\n have = strm.avail_in;\n hold = state.hold;\n bits = state.bits;\n //---\n\n _in = have;\n _out = left;\n ret = Z_OK;\n\n inf_leave: // goto emulation\n for (;;) {\n switch (state.mode) {\n case HEAD:\n if (state.wrap === 0) {\n state.mode = TYPEDO;\n break;\n }\n //=== NEEDBITS(16);\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold 
+= input[next++] << bits;\n bits += 8;\n }\n //===//\n if ((state.wrap & 2) && hold === 0x8b1f) { /* gzip header */\n state.check = 0/*crc32(0L, Z_NULL, 0)*/;\n //=== CRC2(state.check, hold);\n hbuf[0] = hold & 0xff;\n hbuf[1] = (hold >>> 8) & 0xff;\n state.check = crc32(state.check, hbuf, 2, 0);\n //===//\n\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = FLAGS;\n break;\n }\n state.flags = 0; /* expect zlib header */\n if (state.head) {\n state.head.done = false;\n }\n if (!(state.wrap & 1) || /* check if zlib header allowed */\n (((hold & 0xff)/*BITS(8)*/ << 8) + (hold >> 8)) % 31) {\n strm.msg = 'incorrect header check';\n state.mode = BAD;\n break;\n }\n if ((hold & 0x0f)/*BITS(4)*/ !== Z_DEFLATED) {\n strm.msg = 'unknown compression method';\n state.mode = BAD;\n break;\n }\n //--- DROPBITS(4) ---//\n hold >>>= 4;\n bits -= 4;\n //---//\n len = (hold & 0x0f)/*BITS(4)*/ + 8;\n if (state.wbits === 0) {\n state.wbits = len;\n }\n else if (len > state.wbits) {\n strm.msg = 'invalid window size';\n state.mode = BAD;\n break;\n }\n state.dmax = 1 << len;\n //Tracev((stderr, \"inflate: zlib header ok\\n\"));\n strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/;\n state.mode = hold & 0x200 ? DICTID : TYPE;\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n break;\n case FLAGS:\n //=== NEEDBITS(16); */\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.flags = hold;\n if ((state.flags & 0xff) !== Z_DEFLATED) {\n strm.msg = 'unknown compression method';\n state.mode = BAD;\n break;\n }\n if (state.flags & 0xe000) {\n strm.msg = 'unknown header flags set';\n state.mode = BAD;\n break;\n }\n if (state.head) {\n state.head.text = ((hold >> 8) & 1);\n }\n if (state.flags & 0x0200) {\n //=== CRC2(state.check, hold);\n hbuf[0] = hold & 0xff;\n hbuf[1] = (hold >>> 8) & 0xff;\n state.check = crc32(state.check, hbuf, 2, 0);\n //===//\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = TIME;\n /* falls through */\n case TIME:\n //=== NEEDBITS(32); */\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if (state.head) {\n state.head.time = hold;\n }\n if (state.flags & 0x0200) {\n //=== CRC4(state.check, hold)\n hbuf[0] = hold & 0xff;\n hbuf[1] = (hold >>> 8) & 0xff;\n hbuf[2] = (hold >>> 16) & 0xff;\n hbuf[3] = (hold >>> 24) & 0xff;\n state.check = crc32(state.check, hbuf, 4, 0);\n //===\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = OS;\n /* falls through */\n case OS:\n //=== NEEDBITS(16); */\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if (state.head) {\n state.head.xflags = (hold & 0xff);\n state.head.os = (hold >> 8);\n }\n if (state.flags & 0x0200) {\n //=== CRC2(state.check, hold);\n hbuf[0] = hold & 0xff;\n hbuf[1] = (hold >>> 8) & 0xff;\n state.check = crc32(state.check, hbuf, 2, 0);\n //===//\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = EXLEN;\n /* falls through */\n case EXLEN:\n if (state.flags & 0x0400) {\n //=== NEEDBITS(16); */\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.length = hold;\n if (state.head) {\n state.head.extra_len = hold;\n }\n if (state.flags & 0x0200) {\n //=== CRC2(state.check, hold);\n hbuf[0] = hold & 0xff;\n hbuf[1] = 
(hold >>> 8) & 0xff;\n state.check = crc32(state.check, hbuf, 2, 0);\n //===//\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n }\n else if (state.head) {\n state.head.extra = null/*Z_NULL*/;\n }\n state.mode = EXTRA;\n /* falls through */\n case EXTRA:\n if (state.flags & 0x0400) {\n copy = state.length;\n if (copy > have) { copy = have; }\n if (copy) {\n if (state.head) {\n len = state.head.extra_len - state.length;\n if (!state.head.extra) {\n // Use untyped array for more conveniend processing later\n state.head.extra = new Array(state.head.extra_len);\n }\n utils.arraySet(\n state.head.extra,\n input,\n next,\n // extra field is limited to 65536 bytes\n // - no need for additional size check\n copy,\n /*len + copy > state.head.extra_max - len ? state.head.extra_max : copy,*/\n len\n );\n //zmemcpy(state.head.extra + len, next,\n // len + copy > state.head.extra_max ?\n // state.head.extra_max - len : copy);\n }\n if (state.flags & 0x0200) {\n state.check = crc32(state.check, input, copy, next);\n }\n have -= copy;\n next += copy;\n state.length -= copy;\n }\n if (state.length) { break inf_leave; }\n }\n state.length = 0;\n state.mode = NAME;\n /* falls through */\n case NAME:\n if (state.flags & 0x0800) {\n if (have === 0) { break inf_leave; }\n copy = 0;\n do {\n // TODO: 2 or 1 bytes?\n len = input[next + copy++];\n /* use constant limit because in js we should not preallocate memory */\n if (state.head && len &&\n (state.length < 65536 /*state.head.name_max*/)) {\n state.head.name += String.fromCharCode(len);\n }\n } while (len && copy < have);\n\n if (state.flags & 0x0200) {\n state.check = crc32(state.check, input, copy, next);\n }\n have -= copy;\n next += copy;\n if (len) { break inf_leave; }\n }\n else if (state.head) {\n state.head.name = null;\n }\n state.length = 0;\n state.mode = COMMENT;\n /* falls through */\n case COMMENT:\n if (state.flags & 0x1000) {\n if (have === 0) { break inf_leave; }\n copy = 0;\n do {\n len = input[next + copy++];\n /* use constant limit because in js we should not preallocate memory */\n if (state.head && len &&\n (state.length < 65536 /*state.head.comm_max*/)) {\n state.head.comment += String.fromCharCode(len);\n }\n } while (len && copy < have);\n if (state.flags & 0x0200) {\n state.check = crc32(state.check, input, copy, next);\n }\n have -= copy;\n next += copy;\n if (len) { break inf_leave; }\n }\n else if (state.head) {\n state.head.comment = null;\n }\n state.mode = HCRC;\n /* falls through */\n case HCRC:\n if (state.flags & 0x0200) {\n //=== NEEDBITS(16); */\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if (hold !== (state.check & 0xffff)) {\n strm.msg = 'header crc mismatch';\n state.mode = BAD;\n break;\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n }\n if (state.head) {\n state.head.hcrc = ((state.flags >> 9) & 1);\n state.head.done = true;\n }\n strm.adler = state.check = 0;\n state.mode = TYPE;\n break;\n case DICTID:\n //=== NEEDBITS(32); */\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n strm.adler = state.check = zswap32(hold);\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = DICT;\n /* falls through */\n case DICT:\n if (state.havedict === 0) {\n //--- RESTORE() ---\n strm.next_out = put;\n strm.avail_out = left;\n strm.next_in = next;\n strm.avail_in = have;\n state.hold = hold;\n state.bits = bits;\n //---\n 
return Z_NEED_DICT;\n }\n strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/;\n state.mode = TYPE;\n /* falls through */\n case TYPE:\n if (flush === Z_BLOCK || flush === Z_TREES) { break inf_leave; }\n /* falls through */\n case TYPEDO:\n if (state.last) {\n //--- BYTEBITS() ---//\n hold >>>= bits & 7;\n bits -= bits & 7;\n //---//\n state.mode = CHECK;\n break;\n }\n //=== NEEDBITS(3); */\n while (bits < 3) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.last = (hold & 0x01)/*BITS(1)*/;\n //--- DROPBITS(1) ---//\n hold >>>= 1;\n bits -= 1;\n //---//\n\n switch ((hold & 0x03)/*BITS(2)*/) {\n case 0: /* stored block */\n //Tracev((stderr, \"inflate: stored block%s\\n\",\n // state.last ? \" (last)\" : \"\"));\n state.mode = STORED;\n break;\n case 1: /* fixed block */\n fixedtables(state);\n //Tracev((stderr, \"inflate: fixed codes block%s\\n\",\n // state.last ? \" (last)\" : \"\"));\n state.mode = LEN_; /* decode codes */\n if (flush === Z_TREES) {\n //--- DROPBITS(2) ---//\n hold >>>= 2;\n bits -= 2;\n //---//\n break inf_leave;\n }\n break;\n case 2: /* dynamic block */\n //Tracev((stderr, \"inflate: dynamic codes block%s\\n\",\n // state.last ? \" (last)\" : \"\"));\n state.mode = TABLE;\n break;\n case 3:\n strm.msg = 'invalid block type';\n state.mode = BAD;\n }\n //--- DROPBITS(2) ---//\n hold >>>= 2;\n bits -= 2;\n //---//\n break;\n case STORED:\n //--- BYTEBITS() ---// /* go to byte boundary */\n hold >>>= bits & 7;\n bits -= bits & 7;\n //---//\n //=== NEEDBITS(32); */\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if ((hold & 0xffff) !== ((hold >>> 16) ^ 0xffff)) {\n strm.msg = 'invalid stored block lengths';\n state.mode = BAD;\n break;\n }\n state.length = hold & 0xffff;\n //Tracev((stderr, \"inflate: stored length %u\\n\",\n // state.length));\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = COPY_;\n if (flush === Z_TREES) { break inf_leave; }\n /* falls through */\n case COPY_:\n state.mode = COPY;\n /* falls through */\n case COPY:\n copy = state.length;\n if (copy) {\n if (copy > have) { copy = have; }\n if (copy > left) { copy = left; }\n if (copy === 0) { break inf_leave; }\n //--- zmemcpy(put, next, copy); ---\n utils.arraySet(output, input, next, copy, put);\n //---//\n have -= copy;\n next += copy;\n left -= copy;\n put += copy;\n state.length -= copy;\n break;\n }\n //Tracev((stderr, \"inflate: stored end\\n\"));\n state.mode = TYPE;\n break;\n case TABLE:\n //=== NEEDBITS(14); */\n while (bits < 14) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.nlen = (hold & 0x1f)/*BITS(5)*/ + 257;\n //--- DROPBITS(5) ---//\n hold >>>= 5;\n bits -= 5;\n //---//\n state.ndist = (hold & 0x1f)/*BITS(5)*/ + 1;\n //--- DROPBITS(5) ---//\n hold >>>= 5;\n bits -= 5;\n //---//\n state.ncode = (hold & 0x0f)/*BITS(4)*/ + 4;\n //--- DROPBITS(4) ---//\n hold >>>= 4;\n bits -= 4;\n //---//\n//#ifndef PKZIP_BUG_WORKAROUND\n if (state.nlen > 286 || state.ndist > 30) {\n strm.msg = 'too many length or distance symbols';\n state.mode = BAD;\n break;\n }\n//#endif\n //Tracev((stderr, \"inflate: table sizes ok\\n\"));\n state.have = 0;\n state.mode = LENLENS;\n /* falls through */\n case LENLENS:\n while (state.have < state.ncode) {\n //=== NEEDBITS(3);\n while (bits < 3) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << 
bits;\n bits += 8;\n }\n //===//\n state.lens[order[state.have++]] = (hold & 0x07);//BITS(3);\n //--- DROPBITS(3) ---//\n hold >>>= 3;\n bits -= 3;\n //---//\n }\n while (state.have < 19) {\n state.lens[order[state.have++]] = 0;\n }\n // We have separate tables & no pointers. 2 commented lines below not needed.\n //state.next = state.codes;\n //state.lencode = state.next;\n // Switch to use dynamic table\n state.lencode = state.lendyn;\n state.lenbits = 7;\n\n opts = { bits: state.lenbits };\n ret = inflate_table(CODES, state.lens, 0, 19, state.lencode, 0, state.work, opts);\n state.lenbits = opts.bits;\n\n if (ret) {\n strm.msg = 'invalid code lengths set';\n state.mode = BAD;\n break;\n }\n //Tracev((stderr, \"inflate: code lengths ok\\n\"));\n state.have = 0;\n state.mode = CODELENS;\n /* falls through */\n case CODELENS:\n while (state.have < state.nlen + state.ndist) {\n for (;;) {\n here = state.lencode[hold & ((1 << state.lenbits) - 1)];/*BITS(state.lenbits)*/\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if ((here_bits) <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n if (here_val < 16) {\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n state.lens[state.have++] = here_val;\n }\n else {\n if (here_val === 16) {\n //=== NEEDBITS(here.bits + 2);\n n = here_bits + 2;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n if (state.have === 0) {\n strm.msg = 'invalid bit length repeat';\n state.mode = BAD;\n break;\n }\n len = state.lens[state.have - 1];\n copy = 3 + (hold & 0x03);//BITS(2);\n //--- DROPBITS(2) ---//\n hold >>>= 2;\n bits -= 2;\n //---//\n }\n else if (here_val === 17) {\n //=== NEEDBITS(here.bits + 3);\n n = here_bits + 3;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n len = 0;\n copy = 3 + (hold & 0x07);//BITS(3);\n //--- DROPBITS(3) ---//\n hold >>>= 3;\n bits -= 3;\n //---//\n }\n else {\n //=== NEEDBITS(here.bits + 7);\n n = here_bits + 7;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n len = 0;\n copy = 11 + (hold & 0x7f);//BITS(7);\n //--- DROPBITS(7) ---//\n hold >>>= 7;\n bits -= 7;\n //---//\n }\n if (state.have + copy > state.nlen + state.ndist) {\n strm.msg = 'invalid bit length repeat';\n state.mode = BAD;\n break;\n }\n while (copy--) {\n state.lens[state.have++] = len;\n }\n }\n }\n\n /* handle error breaks in while */\n if (state.mode === BAD) { break; }\n\n /* check for end-of-block code (better have one) */\n if (state.lens[256] === 0) {\n strm.msg = 'invalid code -- missing end-of-block';\n state.mode = BAD;\n break;\n }\n\n /* build code tables -- note: do not change the lenbits or distbits\n values here (9 and 6) without reading the comments in inftrees.h\n concerning the ENOUGH constants, which depend on those values */\n state.lenbits = 9;\n\n opts = { bits: state.lenbits };\n ret = inflate_table(LENS, state.lens, 0, state.nlen, state.lencode, 0, 
state.work, opts);\n // We have separate tables & no pointers. 2 commented lines below not needed.\n // state.next_index = opts.table_index;\n state.lenbits = opts.bits;\n // state.lencode = state.next;\n\n if (ret) {\n strm.msg = 'invalid literal/lengths set';\n state.mode = BAD;\n break;\n }\n\n state.distbits = 6;\n //state.distcode.copy(state.codes);\n // Switch to use dynamic table\n state.distcode = state.distdyn;\n opts = { bits: state.distbits };\n ret = inflate_table(DISTS, state.lens, state.nlen, state.ndist, state.distcode, 0, state.work, opts);\n // We have separate tables & no pointers. 2 commented lines below not needed.\n // state.next_index = opts.table_index;\n state.distbits = opts.bits;\n // state.distcode = state.next;\n\n if (ret) {\n strm.msg = 'invalid distances set';\n state.mode = BAD;\n break;\n }\n //Tracev((stderr, 'inflate: codes ok\\n'));\n state.mode = LEN_;\n if (flush === Z_TREES) { break inf_leave; }\n /* falls through */\n case LEN_:\n state.mode = LEN;\n /* falls through */\n case LEN:\n if (have >= 6 && left >= 258) {\n //--- RESTORE() ---\n strm.next_out = put;\n strm.avail_out = left;\n strm.next_in = next;\n strm.avail_in = have;\n state.hold = hold;\n state.bits = bits;\n //---\n inflate_fast(strm, _out);\n //--- LOAD() ---\n put = strm.next_out;\n output = strm.output;\n left = strm.avail_out;\n next = strm.next_in;\n input = strm.input;\n have = strm.avail_in;\n hold = state.hold;\n bits = state.bits;\n //---\n\n if (state.mode === TYPE) {\n state.back = -1;\n }\n break;\n }\n state.back = 0;\n for (;;) {\n here = state.lencode[hold & ((1 << state.lenbits) - 1)]; /*BITS(state.lenbits)*/\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if (here_bits <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n if (here_op && (here_op & 0xf0) === 0) {\n last_bits = here_bits;\n last_op = here_op;\n last_val = here_val;\n for (;;) {\n here = state.lencode[last_val +\n ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)];\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if ((last_bits + here_bits) <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n //--- DROPBITS(last.bits) ---//\n hold >>>= last_bits;\n bits -= last_bits;\n //---//\n state.back += last_bits;\n }\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n state.back += here_bits;\n state.length = here_val;\n if (here_op === 0) {\n //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?\n // \"inflate: literal '%c'\\n\" :\n // \"inflate: literal 0x%02x\\n\", here.val));\n state.mode = LIT;\n break;\n }\n if (here_op & 32) {\n //Tracevv((stderr, \"inflate: end of block\\n\"));\n state.back = -1;\n state.mode = TYPE;\n break;\n }\n if (here_op & 64) {\n strm.msg = 'invalid literal/length code';\n state.mode = BAD;\n break;\n }\n state.extra = here_op & 15;\n state.mode = LENEXT;\n /* falls through */\n case LENEXT:\n if (state.extra) {\n //=== NEEDBITS(state.extra);\n n = state.extra;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.length += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/;\n //--- DROPBITS(state.extra) ---//\n hold >>>= 
state.extra;\n bits -= state.extra;\n //---//\n state.back += state.extra;\n }\n //Tracevv((stderr, \"inflate: length %u\\n\", state.length));\n state.was = state.length;\n state.mode = DIST;\n /* falls through */\n case DIST:\n for (;;) {\n here = state.distcode[hold & ((1 << state.distbits) - 1)];/*BITS(state.distbits)*/\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if ((here_bits) <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n if ((here_op & 0xf0) === 0) {\n last_bits = here_bits;\n last_op = here_op;\n last_val = here_val;\n for (;;) {\n here = state.distcode[last_val +\n ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)];\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if ((last_bits + here_bits) <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n //--- DROPBITS(last.bits) ---//\n hold >>>= last_bits;\n bits -= last_bits;\n //---//\n state.back += last_bits;\n }\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n state.back += here_bits;\n if (here_op & 64) {\n strm.msg = 'invalid distance code';\n state.mode = BAD;\n break;\n }\n state.offset = here_val;\n state.extra = (here_op) & 15;\n state.mode = DISTEXT;\n /* falls through */\n case DISTEXT:\n if (state.extra) {\n //=== NEEDBITS(state.extra);\n n = state.extra;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.offset += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/;\n //--- DROPBITS(state.extra) ---//\n hold >>>= state.extra;\n bits -= state.extra;\n //---//\n state.back += state.extra;\n }\n//#ifdef INFLATE_STRICT\n if (state.offset > state.dmax) {\n strm.msg = 'invalid distance too far back';\n state.mode = BAD;\n break;\n }\n//#endif\n //Tracevv((stderr, \"inflate: distance %u\\n\", state.offset));\n state.mode = MATCH;\n /* falls through */\n case MATCH:\n if (left === 0) { break inf_leave; }\n copy = _out - left;\n if (state.offset > copy) { /* copy from window */\n copy = state.offset - copy;\n if (copy > state.whave) {\n if (state.sane) {\n strm.msg = 'invalid distance too far back';\n state.mode = BAD;\n break;\n }\n// (!) 
This block is disabled in zlib defailts,\n// don't enable it for binary compatibility\n//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR\n// Trace((stderr, \"inflate.c too far\\n\"));\n// copy -= state.whave;\n// if (copy > state.length) { copy = state.length; }\n// if (copy > left) { copy = left; }\n// left -= copy;\n// state.length -= copy;\n// do {\n// output[put++] = 0;\n// } while (--copy);\n// if (state.length === 0) { state.mode = LEN; }\n// break;\n//#endif\n }\n if (copy > state.wnext) {\n copy -= state.wnext;\n from = state.wsize - copy;\n }\n else {\n from = state.wnext - copy;\n }\n if (copy > state.length) { copy = state.length; }\n from_source = state.window;\n }\n else { /* copy from output */\n from_source = output;\n from = put - state.offset;\n copy = state.length;\n }\n if (copy > left) { copy = left; }\n left -= copy;\n state.length -= copy;\n do {\n output[put++] = from_source[from++];\n } while (--copy);\n if (state.length === 0) { state.mode = LEN; }\n break;\n case LIT:\n if (left === 0) { break inf_leave; }\n output[put++] = state.length;\n left--;\n state.mode = LEN;\n break;\n case CHECK:\n if (state.wrap) {\n //=== NEEDBITS(32);\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n // Use '|' insdead of '+' to make sure that result is signed\n hold |= input[next++] << bits;\n bits += 8;\n }\n //===//\n _out -= left;\n strm.total_out += _out;\n state.total += _out;\n if (_out) {\n strm.adler = state.check =\n /*UPDATE(state.check, put - _out, _out);*/\n (state.flags ? crc32(state.check, output, _out, put - _out) : adler32(state.check, output, _out, put - _out));\n\n }\n _out = left;\n // NB: crc32 stored as signed 32-bit int, zswap32 returns signed too\n if ((state.flags ? hold : zswap32(hold)) !== state.check) {\n strm.msg = 'incorrect data check';\n state.mode = BAD;\n break;\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n //Tracev((stderr, \"inflate: check matches trailer\\n\"));\n }\n state.mode = LENGTH;\n /* falls through */\n case LENGTH:\n if (state.wrap && state.flags) {\n //=== NEEDBITS(32);\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if (hold !== (state.total & 0xffffffff)) {\n strm.msg = 'incorrect length check';\n state.mode = BAD;\n break;\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n //Tracev((stderr, \"inflate: length matches trailer\\n\"));\n }\n state.mode = DONE;\n /* falls through */\n case DONE:\n ret = Z_STREAM_END;\n break inf_leave;\n case BAD:\n ret = Z_DATA_ERROR;\n break inf_leave;\n case MEM:\n return Z_MEM_ERROR;\n case SYNC:\n /* falls through */\n default:\n return Z_STREAM_ERROR;\n }\n }\n\n // inf_leave <- here is real place for \"goto inf_leave\", emulated via \"break inf_leave\"\n\n /*\n Return from inflate(), updating the total counts and the check value.\n If there was no progress during the inflate() call, return a buffer\n error. 
Call updatewindow() to create and/or update the window state.\n Note: a memory error from inflate() is non-recoverable.\n */\n\n //--- RESTORE() ---\n strm.next_out = put;\n strm.avail_out = left;\n strm.next_in = next;\n strm.avail_in = have;\n state.hold = hold;\n state.bits = bits;\n //---\n\n if (state.wsize || (_out !== strm.avail_out && state.mode < BAD &&\n (state.mode < CHECK || flush !== Z_FINISH))) {\n if (updatewindow(strm, strm.output, strm.next_out, _out - strm.avail_out)) {\n state.mode = MEM;\n return Z_MEM_ERROR;\n }\n }\n _in -= strm.avail_in;\n _out -= strm.avail_out;\n strm.total_in += _in;\n strm.total_out += _out;\n state.total += _out;\n if (state.wrap && _out) {\n strm.adler = state.check = /*UPDATE(state.check, strm.next_out - _out, _out);*/\n (state.flags ? crc32(state.check, output, _out, strm.next_out - _out) : adler32(state.check, output, _out, strm.next_out - _out));\n }\n strm.data_type = state.bits + (state.last ? 64 : 0) +\n (state.mode === TYPE ? 128 : 0) +\n (state.mode === LEN_ || state.mode === COPY_ ? 256 : 0);\n if (((_in === 0 && _out === 0) || flush === Z_FINISH) && ret === Z_OK) {\n ret = Z_BUF_ERROR;\n }\n return ret;\n}\n\nfunction inflateEnd(strm) {\n\n if (!strm || !strm.state /*|| strm->zfree == (free_func)0*/) {\n return Z_STREAM_ERROR;\n }\n\n var state = strm.state;\n if (state.window) {\n state.window = null;\n }\n strm.state = null;\n return Z_OK;\n}\n\nfunction inflateGetHeader(strm, head) {\n var state;\n\n /* check state */\n if (!strm || !strm.state) { return Z_STREAM_ERROR; }\n state = strm.state;\n if ((state.wrap & 2) === 0) { return Z_STREAM_ERROR; }\n\n /* save header structure */\n state.head = head;\n head.done = false;\n return Z_OK;\n}\n\nfunction inflateSetDictionary(strm, dictionary) {\n var dictLength = dictionary.length;\n\n var state;\n var dictid;\n var ret;\n\n /* check state */\n if (!strm /* == Z_NULL */ || !strm.state /* == Z_NULL */) { return Z_STREAM_ERROR; }\n state = strm.state;\n\n if (state.wrap !== 0 && state.mode !== DICT) {\n return Z_STREAM_ERROR;\n }\n\n /* check for correct dictionary identifier */\n if (state.mode === DICT) {\n dictid = 1; /* adler32(0, null, 0)*/\n /* dictid = adler32(dictid, dictionary, dictLength); */\n dictid = adler32(dictid, dictionary, dictLength, 0);\n if (dictid !== state.check) {\n return Z_DATA_ERROR;\n }\n }\n /* copy dictionary to window using updatewindow(), which will amend the\n existing dictionary if appropriate */\n ret = updatewindow(strm, dictionary, dictLength, dictLength);\n if (ret) {\n state.mode = MEM;\n return Z_MEM_ERROR;\n }\n state.havedict = 1;\n // Tracev((stderr, \"inflate: dictionary set\\n\"));\n return Z_OK;\n}\n\nexports.inflateReset = inflateReset;\nexports.inflateReset2 = inflateReset2;\nexports.inflateResetKeep = inflateResetKeep;\nexports.inflateInit = inflateInit;\nexports.inflateInit2 = inflateInit2;\nexports.inflate = inflate;\nexports.inflateEnd = inflateEnd;\nexports.inflateGetHeader = inflateGetHeader;\nexports.inflateSetDictionary = inflateSetDictionary;\nexports.inflateInfo = 'pako inflate (from Nodeca project)';\n\n/* Not implemented\nexports.inflateCopy = inflateCopy;\nexports.inflateGetDictionary = inflateGetDictionary;\nexports.inflateMark = inflateMark;\nexports.inflatePrime = inflatePrime;\nexports.inflateSync = inflateSync;\nexports.inflateSyncPoint = inflateSyncPoint;\nexports.inflateUndermine = inflateUndermine;\n*/\n","'use strict';\n\n\nmodule.exports = {\n\n /* Allowed flush values; see deflate() and inflate() below for details 
*/\n Z_NO_FLUSH: 0,\n Z_PARTIAL_FLUSH: 1,\n Z_SYNC_FLUSH: 2,\n Z_FULL_FLUSH: 3,\n Z_FINISH: 4,\n Z_BLOCK: 5,\n Z_TREES: 6,\n\n /* Return codes for the compression/decompression functions. Negative values\n * are errors, positive values are used for special but normal events.\n */\n Z_OK: 0,\n Z_STREAM_END: 1,\n Z_NEED_DICT: 2,\n Z_ERRNO: -1,\n Z_STREAM_ERROR: -2,\n Z_DATA_ERROR: -3,\n //Z_MEM_ERROR: -4,\n Z_BUF_ERROR: -5,\n //Z_VERSION_ERROR: -6,\n\n /* compression levels */\n Z_NO_COMPRESSION: 0,\n Z_BEST_SPEED: 1,\n Z_BEST_COMPRESSION: 9,\n Z_DEFAULT_COMPRESSION: -1,\n\n\n Z_FILTERED: 1,\n Z_HUFFMAN_ONLY: 2,\n Z_RLE: 3,\n Z_FIXED: 4,\n Z_DEFAULT_STRATEGY: 0,\n\n /* Possible values of the data_type field (though see inflate()) */\n Z_BINARY: 0,\n Z_TEXT: 1,\n //Z_ASCII: 1, // = Z_TEXT (deprecated)\n Z_UNKNOWN: 2,\n\n /* The deflate compression method */\n Z_DEFLATED: 8\n //Z_NULL: null // Use -1 or null inline, depending on var type\n};\n","'use strict';\n\n\nfunction GZheader() {\n /* true if compressed data believed to be text */\n this.text = 0;\n /* modification time */\n this.time = 0;\n /* extra flags (not used when writing a gzip file) */\n this.xflags = 0;\n /* operating system */\n this.os = 0;\n /* pointer to extra field or Z_NULL if none */\n this.extra = null;\n /* extra field length (valid if extra != Z_NULL) */\n this.extra_len = 0; // Actually, we don't need it in JS,\n // but leave for few code modifications\n\n //\n // Setup limits is not necessary because in js we should not preallocate memory\n // for inflate use constant limit in 65536 bytes\n //\n\n /* space at extra (only when reading header) */\n // this.extra_max = 0;\n /* pointer to zero-terminated file name or Z_NULL */\n this.name = '';\n /* space at name (only when reading header) */\n // this.name_max = 0;\n /* pointer to zero-terminated comment or Z_NULL */\n this.comment = '';\n /* space at comment (only when reading header) */\n // this.comm_max = 0;\n /* true if there was or will be a header crc */\n this.hcrc = 0;\n /* true when done reading gzip header (not used when writing a gzip file) */\n this.done = false;\n}\n\nmodule.exports = GZheader;\n","'use strict';\n\n\nvar zlib_inflate = require('./zlib/inflate');\nvar utils = require('./utils/common');\nvar strings = require('./utils/strings');\nvar c = require('./zlib/constants');\nvar msg = require('./zlib/messages');\nvar ZStream = require('./zlib/zstream');\nvar GZheader = require('./zlib/gzheader');\n\nvar toString = Object.prototype.toString;\n\n/**\n * class Inflate\n *\n * Generic JS-style wrapper for zlib calls. If you don't need\n * streaming behaviour - use more simple functions: [[inflate]]\n * and [[inflateRaw]].\n **/\n\n/* internal\n * inflate.chunks -> Array\n *\n * Chunks of output data, if [[Inflate#onData]] not overriden.\n **/\n\n/**\n * Inflate.result -> Uint8Array|Array|String\n *\n * Uncompressed result, generated by default [[Inflate#onData]]\n * and [[Inflate#onEnd]] handlers. Filled after you push last chunk\n * (call [[Inflate#push]] with `Z_FINISH` / `true` param) or if you\n * push a chunk with explicit flush (call [[Inflate#push]] with\n * `Z_SYNC_FLUSH` param).\n **/\n\n/**\n * Inflate.err -> Number\n *\n * Error code after inflate finished. 
0 (Z_OK) on success.\n * Should be checked if broken data possible.\n **/\n\n/**\n * Inflate.msg -> String\n *\n * Error message, if [[Inflate.err]] != 0\n **/\n\n\n/**\n * new Inflate(options)\n * - options (Object): zlib inflate options.\n *\n * Creates new inflator instance with specified params. Throws exception\n * on bad params. Supported options:\n *\n * - `windowBits`\n * - `dictionary`\n *\n * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)\n * for more information on these.\n *\n * Additional options, for internal needs:\n *\n * - `chunkSize` - size of generated data chunks (16K by default)\n * - `raw` (Boolean) - do raw inflate\n * - `to` (String) - if equal to 'string', then result will be converted\n * from utf8 to utf16 (javascript) string. When string output requested,\n * chunk length can differ from `chunkSize`, depending on content.\n *\n * By default, when no options set, autodetect deflate/gzip data format via\n * wrapper header.\n *\n * ##### Example:\n *\n * ```javascript\n * var pako = require('pako')\n * , chunk1 = Uint8Array([1,2,3,4,5,6,7,8,9])\n * , chunk2 = Uint8Array([10,11,12,13,14,15,16,17,18,19]);\n *\n * var inflate = new pako.Inflate({ level: 3});\n *\n * inflate.push(chunk1, false);\n * inflate.push(chunk2, true); // true -> last chunk\n *\n * if (inflate.err) { throw new Error(inflate.err); }\n *\n * console.log(inflate.result);\n * ```\n **/\nfunction Inflate(options) {\n if (!(this instanceof Inflate)) return new Inflate(options);\n\n this.options = utils.assign({\n chunkSize: 16384,\n windowBits: 0,\n to: ''\n }, options || {});\n\n var opt = this.options;\n\n // Force window size for `raw` data, if not set directly,\n // because we have no header for autodetect.\n if (opt.raw && (opt.windowBits >= 0) && (opt.windowBits < 16)) {\n opt.windowBits = -opt.windowBits;\n if (opt.windowBits === 0) { opt.windowBits = -15; }\n }\n\n // If `windowBits` not defined (and mode not raw) - set autodetect flag for gzip/deflate\n if ((opt.windowBits >= 0) && (opt.windowBits < 16) &&\n !(options && options.windowBits)) {\n opt.windowBits += 32;\n }\n\n // Gzip header has no info about windows size, we can do autodetect only\n // for deflate. So, if window size not set, force it to max when gzip possible\n if ((opt.windowBits > 15) && (opt.windowBits < 48)) {\n // bit 3 (16) -> gzipped data\n // bit 4 (32) -> autodetect gzip/deflate\n if ((opt.windowBits & 15) === 0) {\n opt.windowBits |= 15;\n }\n }\n\n this.err = 0; // error code, if happens (0 = Z_OK)\n this.msg = ''; // error message\n this.ended = false; // used to avoid multiple onEnd() calls\n this.chunks = []; // chunks of compressed data\n\n this.strm = new ZStream();\n this.strm.avail_out = 0;\n\n var status = zlib_inflate.inflateInit2(\n this.strm,\n opt.windowBits\n );\n\n if (status !== c.Z_OK) {\n throw new Error(msg[status]);\n }\n\n this.header = new GZheader();\n\n zlib_inflate.inflateGetHeader(this.strm, this.header);\n}\n\n/**\n * Inflate#push(data[, mode]) -> Boolean\n * - data (Uint8Array|Array|ArrayBuffer|String): input data\n * - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes.\n * See constants. Skipped or `false` means Z_NO_FLUSH, `true` meansh Z_FINISH.\n *\n * Sends input data to inflate pipe, generating [[Inflate#onData]] calls with\n * new output chunks. Returns `true` on success. The last data block must have\n * mode Z_FINISH (or `true`). That will flush internal pending buffers and call\n * [[Inflate#onEnd]]. 
For interim explicit flushes (without ending the stream) you\n * can use mode Z_SYNC_FLUSH, keeping the decompression context.\n *\n * On fail call [[Inflate#onEnd]] with error code and return false.\n *\n * We strongly recommend to use `Uint8Array` on input for best speed (output\n * format is detected automatically). Also, don't skip last param and always\n * use the same type in your code (boolean or number). That will improve JS speed.\n *\n * For regular `Array`-s make sure all elements are [0..255].\n *\n * ##### Example\n *\n * ```javascript\n * push(chunk, false); // push one of data chunks\n * ...\n * push(chunk, true); // push last chunk\n * ```\n **/\nInflate.prototype.push = function (data, mode) {\n var strm = this.strm;\n var chunkSize = this.options.chunkSize;\n var dictionary = this.options.dictionary;\n var status, _mode;\n var next_out_utf8, tail, utf8str;\n var dict;\n\n // Flag to properly process Z_BUF_ERROR on testing inflate call\n // when we check that all output data was flushed.\n var allowBufError = false;\n\n if (this.ended) { return false; }\n _mode = (mode === ~~mode) ? mode : ((mode === true) ? c.Z_FINISH : c.Z_NO_FLUSH);\n\n // Convert data if needed\n if (typeof data === 'string') {\n // Only binary strings can be decompressed on practice\n strm.input = strings.binstring2buf(data);\n } else if (toString.call(data) === '[object ArrayBuffer]') {\n strm.input = new Uint8Array(data);\n } else {\n strm.input = data;\n }\n\n strm.next_in = 0;\n strm.avail_in = strm.input.length;\n\n do {\n if (strm.avail_out === 0) {\n strm.output = new utils.Buf8(chunkSize);\n strm.next_out = 0;\n strm.avail_out = chunkSize;\n }\n\n status = zlib_inflate.inflate(strm, c.Z_NO_FLUSH); /* no bad return value */\n\n if (status === c.Z_NEED_DICT && dictionary) {\n // Convert data if needed\n if (typeof dictionary === 'string') {\n dict = strings.string2buf(dictionary);\n } else if (toString.call(dictionary) === '[object ArrayBuffer]') {\n dict = new Uint8Array(dictionary);\n } else {\n dict = dictionary;\n }\n\n status = zlib_inflate.inflateSetDictionary(this.strm, dict);\n\n }\n\n if (status === c.Z_BUF_ERROR && allowBufError === true) {\n status = c.Z_OK;\n allowBufError = false;\n }\n\n if (status !== c.Z_STREAM_END && status !== c.Z_OK) {\n this.onEnd(status);\n this.ended = true;\n return false;\n }\n\n if (strm.next_out) {\n if (strm.avail_out === 0 || status === c.Z_STREAM_END || (strm.avail_in === 0 && (_mode === c.Z_FINISH || _mode === c.Z_SYNC_FLUSH))) {\n\n if (this.options.to === 'string') {\n\n next_out_utf8 = strings.utf8border(strm.output, strm.next_out);\n\n tail = strm.next_out - next_out_utf8;\n utf8str = strings.buf2string(strm.output, next_out_utf8);\n\n // move tail\n strm.next_out = tail;\n strm.avail_out = chunkSize - tail;\n if (tail) { utils.arraySet(strm.output, strm.output, next_out_utf8, tail, 0); }\n\n this.onData(utf8str);\n\n } else {\n this.onData(utils.shrinkBuf(strm.output, strm.next_out));\n }\n }\n }\n\n // When no more input data, we should check that internal inflate buffers\n // are flushed. The only way to do it when avail_out = 0 - run one more\n // inflate pass. But if output data not exists, inflate return Z_BUF_ERROR.\n // Here we set flag to process this error properly.\n //\n // NOTE. 
Deflate does not return error in this case and does not needs such\n // logic.\n if (strm.avail_in === 0 && strm.avail_out === 0) {\n allowBufError = true;\n }\n\n } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== c.Z_STREAM_END);\n\n if (status === c.Z_STREAM_END) {\n _mode = c.Z_FINISH;\n }\n\n // Finalize on the last chunk.\n if (_mode === c.Z_FINISH) {\n status = zlib_inflate.inflateEnd(this.strm);\n this.onEnd(status);\n this.ended = true;\n return status === c.Z_OK;\n }\n\n // callback interim results if Z_SYNC_FLUSH.\n if (_mode === c.Z_SYNC_FLUSH) {\n this.onEnd(c.Z_OK);\n strm.avail_out = 0;\n return true;\n }\n\n return true;\n};\n\n\n/**\n * Inflate#onData(chunk) -> Void\n * - chunk (Uint8Array|Array|String): ouput data. Type of array depends\n * on js engine support. When string output requested, each chunk\n * will be string.\n *\n * By default, stores data blocks in `chunks[]` property and glue\n * those in `onEnd`. Override this handler, if you need another behaviour.\n **/\nInflate.prototype.onData = function (chunk) {\n this.chunks.push(chunk);\n};\n\n\n/**\n * Inflate#onEnd(status) -> Void\n * - status (Number): inflate status. 0 (Z_OK) on success,\n * other if not.\n *\n * Called either after you tell inflate that the input stream is\n * complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH)\n * or if an error happened. By default - join collected chunks,\n * free memory and fill `results` / `err` properties.\n **/\nInflate.prototype.onEnd = function (status) {\n // On success - join\n if (status === c.Z_OK) {\n if (this.options.to === 'string') {\n // Glue & convert here, until we teach pako to send\n // utf8 alligned strings to onData\n this.result = this.chunks.join('');\n } else {\n this.result = utils.flattenChunks(this.chunks);\n }\n }\n this.chunks = [];\n this.err = status;\n this.msg = this.strm.msg;\n};\n\n\n/**\n * inflate(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to decompress.\n * - options (Object): zlib inflate options.\n *\n * Decompress `data` with inflate/ungzip and `options`. Autodetect\n * format via wrapper header by default. That's why we don't provide\n * separate `ungzip` method.\n *\n * Supported options are:\n *\n * - windowBits\n *\n * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)\n * for more information.\n *\n * Sugar (options):\n *\n * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify\n * negative windowBits implicitly.\n * - `to` (String) - if equal to 'string', then result will be converted\n * from utf8 to utf16 (javascript) string. 
When string output requested,\n * chunk length can differ from `chunkSize`, depending on content.\n *\n *\n * ##### Example:\n *\n * ```javascript\n * var pako = require('pako')\n * , input = pako.deflate([1,2,3,4,5,6,7,8,9])\n * , output;\n *\n * try {\n * output = pako.inflate(input);\n * } catch (err)\n * console.log(err);\n * }\n * ```\n **/\nfunction inflate(input, options) {\n var inflator = new Inflate(options);\n\n inflator.push(input, true);\n\n // That will never happens, if you don't cheat with options :)\n if (inflator.err) { throw inflator.msg || msg[inflator.err]; }\n\n return inflator.result;\n}\n\n\n/**\n * inflateRaw(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to decompress.\n * - options (Object): zlib inflate options.\n *\n * The same as [[inflate]], but creates raw data, without wrapper\n * (header and adler32 crc).\n **/\nfunction inflateRaw(input, options) {\n options = options || {};\n options.raw = true;\n return inflate(input, options);\n}\n\n\n/**\n * ungzip(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to decompress.\n * - options (Object): zlib inflate options.\n *\n * Just shortcut to [[inflate]], because it autodetects format\n * by header.content. Done for convenience.\n **/\n\n\nexports.Inflate = Inflate;\nexports.inflate = inflate;\nexports.inflateRaw = inflateRaw;\nexports.ungzip = inflate;\n","// Top level file is just a mixin of submodules & constants\n'use strict';\n\nvar assign = require('./lib/utils/common').assign;\n\nvar deflate = require('./lib/deflate');\nvar inflate = require('./lib/inflate');\nvar constants = require('./lib/zlib/constants');\n\nvar pako = {};\n\nassign(pako, deflate, inflate, constants);\n\nmodule.exports = pako;\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\n\nimport md5 from 'js-md5'\nimport pako from 'pako'\nimport { Filter } from '../core/Filter.js';\n\n\n/**\n* Takes the File inputs from a HTML input of type \"file\" (aka. a file dialog), and reads it as a ArrayBuffer.\n* Every File given in input should be added separately using `addInput( file[i], 'uniqueID' )`.\n* The event \"ready\" must be set up ( using .on(\"ready\", function(){}) ) and will\n* be triggered when all the files given in input are translated into ArrayBuffers.\n* Once ready, all the outputs are accecible using the same uniqueID with the\n* method `getOutput(\"uniqueID\")`.\n* Gzip compressed files will be uncompressed.\n*\n* Once the filter is *updated*, you can query the `filenames` metadata (sorted by categories)\n* and also the `checksums` metadata using `.getMetadata()`. 
This later metadata \n* give a unique *md5*, very convenient to compare if two files are actually the same.\n* Note that in case the file is *gziped*, the checksum is computed on the raw file,\n* not on the *un-gziped* buffer.\n*\n* **Usage**\n* - [examples/fileToArrayBuffer.html](../examples/fileToArrayBuffer.html)\n*/\nclass FileToArrayBufferReader extends Filter {\n\n constructor(){\n super();\n this._outputCounter = 0;\n \n // filenames by categories\n this.setMetadata(\"filenames\", {});\n \n // md5 checksum by categories\n this.setMetadata(\"checksums\", {});\n }\n\n\n _run(){\n var that = this;\n this._outputCounter = 0;\n var inputCategories = this.getInputCategories();\n\n inputCategories.forEach( function(category){\n that._loadFile( category );\n })\n }\n\n\n /**\n * [PRIVATE]\n * Perform the loading for the input of the given category\n * @param {String} category - input category\n */\n _loadFile( category ){\n var that = this;\n var reader = new FileReader();\n\n reader.onloadend = function(event) {\n var result = event.target.result;\n \n var filename = that._getInput(category).name;\n var basename = filename.split(/[\\\\/]/).pop();\n var extension = basename.split('.').pop();\n var checksum = md5( result );\n \n // few metadata for recognizing files (potentially)\n that._metadata.filenames[ category ] = basename;\n that._metadata.checksums[ category ] = checksum;\n\n if( extension.localeCompare(\"pixp\") ){\n // trying to un-gzip it with Pako\n try {\n result = pako.inflate(result).buffer;\n console.log(\"File was un-gziped successfully\");\n } catch (err) {\n console.log(\"Pako: \" + err + \" (this content is not gziped)\");\n }\n }\n \n that._output[ category ] = result;\n that._fileLoadCount();\n }\n\n reader.onerror = function() {\n this._output[ category ] = null;\n that._fileLoadCount();\n console.warn( \"error reading file from category \" + category );\n //throw new Error(error_message);\n };\n\n reader.readAsArrayBuffer( this._getInput(category) );\n }\n\n\n /**\n * [PRIVATE]\n * Launch the \"ready\" event if all files are loaded\n */\n _fileLoadCount(){\n var that = this;\n this._outputCounter ++;\n\n if( this._outputCounter == this.getNumberOfInputs() ){\n that.triggerEvent(\"ready\");\n }\n }\n\n} /* END of class FileToArrayBufferReader */\n\n\nexport { FileToArrayBufferReader }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\n\nimport pako from 'pako'\nimport md5 from 'js-md5'\nimport { Filter } from '../core/Filter.js';\n\n\n/**\n* Open a files as ArrayBuffer using their URL. You must specify one or several URL\n* (String) using `addInput(\"...\")` and add function to the event \"ready\" using\n* `.on( \"ready\", function(filter){ ... })`.\n* The \"ready\" event will be called only when all input are loaded.\n* Gzip compressed files will be uncompressed.\n* Once the filter is *updated*, you can query the `filenames` metadata (sorted by categories)\n* and also the `checksums` metadata using `.getMetadata()`. 
This later metadata \n* give a unique *md5*, very convenient to compare if two files are actually the same.\n* Note that in case the file is *gziped*, the checksum is computed on the raw file,\n* not on the *un-gziped* buffer.\n*\n* **Usage**\n* - [examples/urlFileToArrayBuffer.html](../examples/urlFileToArrayBuffer.html)\n*/\nclass UrlToArrayBufferReader extends Filter {\n\n constructor(){\n super();\n this._outputCounter = 0;\n \n // filenames by categories\n this.setMetadata(\"filenames\", {});\n \n // md5 checksum by categories\n this.setMetadata(\"checksums\", {});\n }\n\n\n _run(){\n var that = this;\n\n if(! this.getNumberOfInputs() ){\n console.warn(\"No input was specified, cannot run this filer.\");\n return;\n }\n\n\n this._forEachInput( function(category, input){\n that._loadUrl(category, input)\n });\n\n }\n\n\n /**\n * [PRIVATE]\n * Perform a XMLHttpRequest with the given url and adds it to the output\n */\n _loadUrl( category, url ){\n var that = this;\n\n var xhr = new XMLHttpRequest();\n xhr.open(\"GET\", url, true);\n xhr.responseType = \"arraybuffer\";\n\n xhr.onload = function(event) {\n var arrayBuff = xhr.response;\n \n var basename = url.split(/[\\\\/]/).pop();\n var extension = basename.split('.').pop();\n var checksum = md5( arrayBuff );\n \n // few metadata for recognizing files (potentially)\n that._metadata.filenames[ category ] = basename;\n that._metadata.checksums[ category ] = checksum;\n\n // trying to un-gzip it with Pako for non pixp files\n if( extension.localeCompare(\"pixp\") ){\n try {\n arrayBuff = pako.inflate(arrayBuff).buffer;\n console.log(\"File was un-gziped successfully\");\n } catch (err) {\n console.log(\"Pako: \" + err + \" (this content is not gziped)\");\n }\n }\n \n that._output[ category ] = arrayBuff\n \n\n that._outputCounter ++;\n\n if( that._outputCounter == that.getNumberOfInputs()){\n that.triggerEvent(\"ready\");\n }\n };\n\n xhr.error = function(){\n console.log(\"here go the error\");\n }\n\n xhr.send();\n }\n\n\n} /* END of class UrlToArrayBufferReader */\n\nexport { UrlToArrayBufferReader }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* Robert D. 
Vincent\n*\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\nimport pako from 'pako'\nimport { Filter } from '../core/Filter.js';\nimport { MniVolume } from '../core/MniVolume.js';\n\n\n/**\n* Decode a HDF5 file, but is most likely to be restricted to the features that are\n* used for Minc2 file format.\n* The metadata \"debug\" can be set to true to\n* enable a verbose mode.\n* Takes an ArrayBuffer as input (0) and output a `MniVolume` (which inherit `Image3D`).\n*\n* **Usage**\n* - [examples/fileToMinc2.html](../examples/fileToMinc2.html)\n*/\nclass Minc2Decoder extends Filter{\n\n constructor(){\n super();\n this.addInputValidator(0, ArrayBuffer);\n\n this.setMetadata(\"debug\", false);\n\n this._type_enum = {\n INT8: 1,\n UINT8: 2,\n INT16: 3,\n UINT16: 4,\n INT32: 5,\n UINT32: 6,\n FLT: 7,\n DBL: 8,\n STR: 9\n };\n\n this._type_matching = [\n \"int8\",\n \"uint8\",\n \"int16\",\n \"uint16\",\n \"int32\",\n \"uint32\",\n \"float32\",\n \"float64\",\n \"undef\" // STR type is not compatible with minc\n // we deal rgb8 manually\n ];\n\n this.type_sizes = [0, 1, 1, 2, 2, 4, 4, 4, 8, 0];\n\n this._dv_offset = 0;\n this._align = 8;\n this._little_endian = true;\n this._continuation_queue = [];\n this._dv = null;//new DataView(abuf);\n this._superblk = {};\n this._start_offset = 0;\n this._huge_id = 0;\n\n }\n\n /**\n * [PRIVATE]\n */\n createLink() {\n var r = {};\n // internal/private\n r.hdr_offset = 0; // offset to object header.\n r.data_offset = 0; // offset to actual data.\n r.data_length = 0; // length of data.\n r.n_filled = 0; // counts elements written to array\n r.chunk_size = 0; // size of chunks\n r.sym_btree = 0; // offset of symbol table btree\n r.sym_lheap = 0; // offset of symbol table local heap\n // permanent/global\n r.name = \"\"; // name of this group or dataset.\n r.attributes = {}; // indexed by attribute name.\n r.children = []; // not associative for now.\n r.array = undefined; // actual data, if dataset.\n r.type = -1; // type of data.\n r.inflate = false; // true if need to inflate (gzip).\n r.dims = []; // dimension sizes.\n return r;\n }\n\n\n /**\n * [PRIVATE]\n *\n * Turns out that alignment of the messages in at least the\n * version 1 object header is actually relative to the start\n * of the header. 
So we update the start position of the\n * header here, so we can refer to it when calculating the\n * alignment in this.checkAlignment().\n */\n startAlignment() {\n this._start_offset = this._dv_offset;\n }\n\n\n /**\n * [PRIVATE]\n */\n checkAlignment() {\n var tmp = this._dv_offset - this._start_offset;\n if ((tmp % this._align) !== 0) {\n var n = this._align - (tmp % this._align);\n this._dv_offset += n;\n if (this.getMetadata(\"debug\")) {\n console.log('skipping ' + n + ' bytes at ' + tmp + ' for alignmnent');\n }\n }\n }\n\n\n /**\n * [PRIVATE]\n *\n * helper functions to manipulate the current DataView offset.\n */\n skip(n_bytes) {\n this._dv_offset += n_bytes;\n }\n\n\n /**\n * [PRIVATE]\n */\n seek(new_offset) {\n this._dv_offset = new_offset;\n }\n\n\n /**\n * [PRIVATE]\n */\n tell() {\n return this._dv_offset;\n }\n\n\n /**\n * [PRIVATE]\n *\n * helper functions for access to our DataView.\n */\n getU8() {\n var v = this._dv.getUint8(this._dv_offset);\n this._dv_offset += 1;\n return v;\n }\n\n\n /**\n * [PRIVATE]\n */\n getU16() {\n var v = this._dv.getUint16(this._dv_offset, this._little_endian);\n this._dv_offset += 2;\n return v;\n }\n\n\n /**\n * [PRIVATE]\n */\n getU32() {\n var v = this._dv.getUint32(this._dv_offset, this._little_endian);\n this._dv_offset += 4;\n return v;\n }\n\n\n /**\n * [PRIVATE]\n */\n getU64() {\n var v = this._dv.getUint64(this._dv_offset, this._little_endian);\n this._dv_offset += 8;\n return v;\n }\n\n\n /**\n * [PRIVATE]\n */\n getF32() {\n var v = this._dv.getFloat32(this._dv_offset, this._little_endian);\n this._dv_offset += 4;\n return v;\n }\n\n\n /**\n * [PRIVATE]\n */\n getF64() {\n var v = this._dv.getFloat64(this._dv_offset, this._little_endian);\n this._dv_offset += 8;\n return v;\n }\n\n\n /**\n * [PRIVATE]\n */\n getOffset(offsz) {\n var v = 0;\n offsz = offsz || this._superblk.offsz;\n if (offsz === 4) {\n v = this._dv.getUint32(this._dv_offset, this._little_endian);\n } else if (offsz === 8) {\n v = this._dv.getUint64(this._dv_offset, this._little_endian);\n } else {\n throw new Error('Unsupported value for offset size ' + offsz);\n }\n this._dv_offset += offsz;\n return v;\n }\n\n\n /**\n * [PRIVATE]\n */\n getLength() {\n var v = this._dv.getUint64(this._dv_offset, this._little_endian);\n this._dv_offset += this._superblk.lensz;\n return v;\n }\n\n\n /**\n * [PRIVATE]\n */\n getString(length) {\n var r = \"\";\n var i;\n var c;\n for (i = 0; i < length; i += 1) {\n c = this.getU8();\n if (c === 0) {\n this._dv_offset += (length - i - 1);\n break;\n }\n r += String.fromCharCode(c);\n }\n return r;\n }\n\n\n /**\n * [PRIVATE]\n */\n getArray(typ, n_bytes, new_off) {\n var value;\n var n_values;\n var new_abuf;\n var abuf = this._getInput();\n var i;\n var spp = this._dv_offset;\n if (new_off) {\n this._dv_offset = new_off;\n }\n switch (typ) {\n case this._type_enum.INT8:\n value = new Int8Array(abuf, this._dv_offset, n_bytes);\n break;\n case this._type_enum.UINT8:\n value = new Uint8Array(abuf, this._dv_offset, n_bytes);\n break;\n case this._type_enum.INT16:\n if ((this._dv_offset % 2) !== 0) {\n new_abuf = new ArrayBuffer(n_bytes);\n n_values = n_bytes / 2;\n value = new Int16Array(new_abuf);\n for (i = 0; i < n_values; i += 1) {\n value[i] = this.getU16();\n }\n } else {\n value = new Int16Array(abuf, this._dv_offset, n_bytes / 2);\n this._dv_offset += n_bytes;\n }\n break;\n case this._type_enum.UINT16:\n if ((this._dv_offset % 2) !== 0) {\n new_abuf = new ArrayBuffer(n_bytes);\n n_values = n_bytes / 2;\n value = new 
Uint16Array(new_abuf);\n for (i = 0; i < n_values; i += 1) {\n value[i] = this.getU16();\n }\n } else {\n value = new Uint16Array(abuf, this._dv_offset, n_bytes / 2);\n this._dv_offset += n_bytes;\n }\n break;\n case this._type_enum.INT32:\n if ((this._dv_offset % 4) !== 0) {\n new_abuf = new ArrayBuffer(n_bytes);\n n_values = n_bytes / 4;\n value = new Int32Array(new_abuf);\n for (i = 0; i < n_values; i += 1) {\n value[i] = this.getU32();\n }\n } else {\n value = new Int32Array(abuf, this._dv_offset, n_bytes / 4);\n this._dv_offset += n_bytes;\n }\n break;\n case this._type_enum.UINT32:\n if ((this._dv_offset % 4) !== 0) {\n new_abuf = new ArrayBuffer(n_bytes);\n n_values = n_bytes / 4;\n value = new Uint32Array(new_abuf);\n for (i = 0; i < n_values; i += 1) {\n value[i] = this.getU32();\n }\n } else {\n value = new Uint32Array(abuf, this._dv_offset, n_bytes / 4);\n this._dv_offset += n_bytes;\n }\n break;\n case this._type_enum.FLT:\n if ((this._dv_offset % 4) !== 0) {\n new_abuf = new ArrayBuffer(n_bytes);\n n_values = n_bytes / 4;\n value = new Float32Array(new_abuf);\n for (i = 0; i < n_values; i += 1) {\n value[i] = this.getF32();\n }\n } else {\n value = new Float32Array(abuf, this._dv_offset, n_bytes / 4);\n this._dv_offset += n_bytes;\n }\n break;\n case this._type_enum.DBL:\n if ((this._dv_offset % 8) !== 0) {\n new_abuf = new ArrayBuffer(n_bytes);\n n_values = n_bytes / 8;\n value = new Float64Array(new_abuf);\n for (i = 0; i < n_values; i += 1) {\n value[i] = this.getF64();\n }\n } else {\n value = new Float64Array(abuf, this._dv_offset, n_bytes / 8);\n this._dv_offset += n_bytes;\n }\n break;\n default:\n throw new Error('Bad type in this.getArray ' + typ);\n }\n if (new_off) {\n this._dv_offset = spp;\n }\n return value;\n }\n\n\n /**\n * [PRIVATE]\n *\n * Get a variably-sized integer from the DataView.\n */\n getUXX(n) {\n var v;\n var i;\n switch (n) {\n case 1:\n v = this._dv.getUint8(this._dv_offset);\n break;\n case 2:\n v = this._dv.getUint16(this._dv_offset, this._little_endian);\n break;\n case 4:\n v = this._dv.getUint32(this._dv_offset, this._little_endian);\n break;\n case 8:\n v = this._dv.getUint64(this._dv_offset, this._little_endian);\n break;\n default:\n /* Certain hdf5 types can have odd numbers of bytes. 
We try\n * to deal with that special case here.\n */\n v = 0;\n if (!this._little_endian) {\n for (i = 0; i < n; i++) {\n v = (v << 8) + this._dv.getUint8(this._dv_offset + i);\n }\n }\n else {\n for (i = n - 1; i >= 0; i--) {\n v = (v << 8) + this._dv.getUint8(this._dv_offset + i);\n }\n }\n }\n this._dv_offset += n;\n return v;\n }\n\n\n /**\n * [PRIVATE]\n *\n * Verify that the expected signature is found at this offset.\n */\n checkSignature(str) {\n var i;\n for (i = 0; i < str.length; i += 1) {\n if (this._dv.getUint8(this._dv_offset + i) !== str.charCodeAt(i)) {\n return false;\n }\n }\n this.skip(str.length);\n return true;\n }\n\n\n /**\n * [PRIVATE]\n */\n hdf5Superblock() {\n var sb = {};\n if (!this.checkSignature(\"\\u0089HDF\\r\\n\\u001A\\n\")) {\n throw new Error('Bad magic string in HDF5');\n }\n sb.sbver = this.getU8();\n if (sb.sbver > 2) {\n throw new Error('Unsupported HDF5 superblock version ' + sb.sbver);\n }\n if (sb.sbver <= 1) {\n sb.fsver = this.getU8();\n sb.rgver = this.getU8();\n this.skip(1); // reserved\n sb.shver = this.getU8();\n sb.offsz = this.getU8();\n sb.lensz = this.getU8();\n this.skip(1); // reserved\n sb.gln_k = this.getU16();\n sb.gin_k = this.getU16();\n sb.cflags = this.getU32();\n if (sb.sbver === 1) {\n sb.isin_k = this.getU16();\n this.skip(2); // reserved\n }\n sb.base_addr = this.getOffset(sb.offsz);\n sb.gfsi_addr = this.getOffset(sb.offsz);\n sb.eof_addr = this.getOffset(sb.offsz);\n sb.dib_addr = this.getOffset(sb.offsz);\n sb.root_ln_offs = this.getOffset(sb.offsz);\n sb.root_addr = this.getOffset(sb.offsz);\n sb.root_cache_type = this.getU32();\n this.skip(4);\n this.skip(16);\n } else {\n sb.offsz = this.getU8();\n sb.lensz = this.getU8();\n sb.cflags = this.getU8();\n sb.base_addr = this.getOffset(sb.offsz);\n sb.ext_addr = this.getOffset(sb.offsz);\n sb.eof_addr = this.getOffset(sb.offsz);\n sb.root_addr = this.getOffset(sb.offsz);\n sb.checksum = this.getU32();\n }\n if (this.getMetadata(\"debug\")) {\n console.log(\"HDF5 SB \" + sb.sbver + \" \" + sb.offsz + \" \" + sb.lensz + \" \" + sb.cflags);\n }\n return sb;\n }\n\n\n /**\n * [PRIVATE]\n *\n * read the v2 fractal heap header\n */\n hdf5FractalHeapHeader() {\n var fh = {};\n if (!this.checkSignature(\"FRHP\")) {\n throw new Error('Bad or missing FRHP signature');\n }\n fh.ver = this.getU8(); // Version\n fh.idlen = this.getU16(); // Heap ID length\n fh.iof_el = this.getU16(); // I/O filter's encoded length\n fh.flags = this.getU8(); // Flags\n fh.objmax = this.getU32(); // Maximum size of managed objects.\n fh.objnid = this.getLength(); // Next huge object ID\n fh.objbta = this.getOffset(); // v2 B-tree address of huge objects\n fh.nf_blk = this.getLength(); // Amount of free space in managed blocks\n fh.af_blk = this.getOffset(); // Address of managed block free space manager\n fh.heap_total = this.getLength(); // Amount of managed space in heap\n fh.heap_alloc = this.getLength(); // Amount of allocated managed space in heap\n fh.bai_offset = this.getLength(); // Offset of direct block allocation iterator\n fh.heap_nobj = this.getLength(); // Number of managed objects in heap\n fh.heap_chuge = this.getLength(); // Size of huge objects in heap\n fh.heap_nhuge = this.getLength(); // Number of huge objects in heap\n fh.heap_ctiny = this.getLength(); // Size of tiny objects in heap\n fh.heap_ntiny = this.getLength(); // Number of tiny objects in heap\n fh.table_width = this.getU16(); // Table width\n fh.start_blksz = this.getLength(); // Starting block size\n fh.max_blksz = 
this.getLength(); // Maximum direct block size\n fh.max_heapsz = this.getU16(); // Maximum heap size\n fh.rib_srows = this.getU16(); // Starting # of rows in root indirect block\n fh.root_addr = this.getOffset(); // Address of root block\n fh.rib_crows = this.getU16(); // Current # of rows in root indirect block\n\n var max_dblock_rows = Math.log2(fh.max_blksz) - Math.log2(fh.start_blksz) + 2;\n fh.K = Math.min(fh.rib_crows, max_dblock_rows) * fh.table_width;\n fh.N = (fh.rib_crows < max_dblock_rows) ? 0 : fh.K - (max_dblock_rows * fh.table_width);\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"FRHP V\" + fh.ver + \" F\" + fh.flags + \" \" + fh.objbta + \" Total:\" + fh.heap_total + \" Alloc:\" + fh.heap_alloc + \" #obj:\" + fh.heap_nobj + \" width:\" + fh.table_width + \" start_blksz:\" + fh.start_blksz + \" max_blksz:\" + fh.max_blksz + \" \" + fh.max_heapsz + \" srows:\" + fh.rib_srows + \" crows:\" + fh.rib_crows + \" \" + fh.heap_nhuge);\n console.log(\" K: \" + fh.K + \" N: \" + fh.N);\n }\n\n if (fh.iof_el > 0) {\n throw new Error(\"Filters present in fractal heap.\");\n }\n return fh;\n }\n\n\n /**\n * [PRIVATE]\n *\n * read the v2 btree header\n */\n hdf5V2BtreeHeader() {\n var bh = {};\n if (!this.checkSignature(\"BTHD\")) {\n throw new Error('Bad or missing BTHD signature');\n }\n bh.ver = this.getU8();\n bh.type = this.getU8();\n bh.nodesz = this.getU32();\n bh.recsz = this.getU16();\n bh.depth = this.getU16();\n bh.splitp = this.getU8();\n bh.mergep = this.getU8();\n bh.root_addr = this.getOffset();\n bh.root_nrec = this.getU16();\n bh.total_nrec = this.getLength();\n bh.checksum = this.getU32();\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"BTHD V\" + bh.ver + \" T\" + bh.type + \" \" + bh.nodesz + \" \" + bh.recsz + \" \" + bh.depth + \" \" + bh.root_addr + \" \" + bh.root_nrec + \" \" + bh.total_nrec);\n }\n return bh;\n }\n\n\n\n /**\n * [PRIVATE]\n *\n * Enumerates btree records in a block. Records are found both in direct\n * and indirect v2 btree blocks.\n */\n hdf5V2BtreeRecords(fh, bt_type, nrec, link) {\n var i;\n var spp; // saved position pointer\n var offset;\n var length;\n if (bt_type === 1) {\n for (i = 0; i < nrec; i++) {\n offset = this.getOffset();\n length = this.getLength();\n var id = this.getLength();\n if (this.getMetadata(\"debug\")) {\n console.log(\" -> \" + offset + \" \" + length + \" \" + id + \" \" + this._this._huge_id);\n }\n spp = this.tell();\n if (id === this._this._huge_id) {\n this.seek(offset);\n this.hdf5MsgAttribute(length, link);\n }\n this.seek(spp);\n }\n }\n else if (bt_type === 8) {\n var cb_offs;\n var cb_leng;\n /* maximum heap size is stored in bits! 
*/\n cb_offs = fh.max_heapsz / 8;\n var tmp = Math.min(fh.objmax, fh.max_blksz);\n if (tmp <= 256) {\n cb_leng = 1;\n }\n else if (tmp <= 65536) {\n cb_leng = 2;\n }\n else {\n cb_leng = 4;\n }\n for (i = 0; i < nrec; i++) {\n /* Read managed fractal heap ID.\n */\n var vt = this.getU8();\n if ((vt & 0xc0) !== 0) {\n throw new Error('Bad Fractal Heap ID version ' + vt);\n }\n var id_type = (vt & 0x30);\n var flags;\n if (id_type === 0x10) { // huge!\n this._this._huge_id = this.getUXX(7);\n }\n else if (id_type === 0x00) { // managed.\n offset = this.getUXX(cb_offs);\n length = this.getUXX(cb_leng);\n }\n else {\n throw new Error(\"Can't handle this Heap ID: \" + vt);\n }\n flags = this.getU8();\n\n /* Read the rest of the record.\n */\n this.getU32(); // creation order (IGNORE)\n this.getU32(); // hash (IGNORE)\n if (this.getMetadata(\"debug\")) {\n console.log(\" -> \" + vt + \" \" + offset + \" \" + length + \" \" + flags);\n }\n spp = this.tell();\n if (id_type === 0x10) {\n /* A \"huge\" object is found by indexing through the btree\n * present in the header\n */\n this.seek(fh.objbta);\n var bh = this.hdf5V2BtreeHeader();\n if (bh.type === 1) {\n this.seek(bh.root_addr);\n this.hdf5V2BtreeLeafNode(fh, bh.root_nrec, link);\n }\n else {\n throw new Error(\"Can only handle type-1 btrees\");\n }\n }\n else {\n /*\n * A managed object implies that the attribute message is\n * found in the associated fractal heap at the specified\n * offset in the heap. We get the actual address\n * corresponding to the offset here.\n */\n var location = this.hdf5FractalHeapOffset(fh, offset);\n this.seek(location);\n this.hdf5MsgAttribute(length, link);\n }\n this.seek(spp);\n }\n }\n else {\n throw new Error(\"Unhandled V2 btree type.\");\n }\n }\n\n\n /**\n * [PRIVATE]\n *\n * read a v2 btree leaf node\n */\n hdf5V2BtreeLeafNode(fh, nrec, link) {\n\n if (!this.checkSignature(\"BTLF\")) {\n throw new Error('Bad or missing BTLF signature');\n }\n\n var ver = this.getU8();\n var typ = this.getU8();\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"BTLF V\" + ver + \" T\" + typ + \" \" + this.tell());\n }\n this.hdf5V2BtreeRecords(fh, typ, nrec, link);\n }\n\n\n /**\n * [PRIVATE]\n *\n * read the hdf5 v2 btree internal node\n */\n hdf5V2BtreeInternalNode(fh, nrec, depth, link) {\n\n if (!this.checkSignature(\"BTIN\")) {\n throw new Error('Bad or missing BTIN signature');\n }\n var ver = this.getU8();\n var type = this.getU8();\n var i;\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"BTIN V\" + ver + \" T\" + type);\n }\n this.hdf5V2BtreeRecords(fh, type, nrec, link);\n for (i = 0; i <= nrec; i++) {\n var child_offset = this.getOffset();\n var child_nrec = this.getUXX(1); // TODO: calculate real size!!\n var child_total;\n /* TODO: unfortunately, this field is optional and\n * variably-sized. Calculating the size is non-trivial, as it\n * depends on the total depth and size of the tree. For now\n * we will just assume it is its minimum size, as I've never\n * encountered a file with depth > 1 anyway.\n */\n if (depth > 1) {\n child_total = this.getUXX(1);\n }\n if (this.getMetadata(\"debug\")) {\n console.log(\" child->\" + child_offset + \" \" + child_nrec + \" \" + child_total);\n }\n }\n }\n\n\n /**\n * [PRIVATE]\n */\n hdf5GetMsgName(n) {\n\n // JO: used to be in the global scope.\n /* Names of the various HDF5 messages.\n * Note that MESSAGE23 appears to be illegal. 
All the rest are defined,\n * although I've never encountered a BOGUS message!\n */\n var msg_names = [\n \"NIL\", \"Dataspace\", \"LinkInfo\", \"Datatype\", \"FillValue 1\", \"FillValue 2\",\n \"Link\", \"ExternalFiles\", \"Layout\", \"BOGUS\", \"GroupInfo\", \"FilterPipeline\",\n \"Attribute\", \"ObjectComment\", \"ObjectModTime 1\", \"SharedMsgTable\",\n \"ObjHdrContinue\", \"SymbolTable\", \"ObjectModTime 2\", \"BtreeKValue\",\n \"DriverInfo\", \"AttrInfo\", \"ObjectRefCnt\", \"MESSAGE23\",\n \"FileSpaceInfo\"\n ];\n\n if (n < msg_names.length) {\n return msg_names[n];\n }\n throw new Error('Unknown message type ' + n + \" \" + this.tell());\n }\n\n\n /**\n * [PRIVATE]\n */\n hdf5V1BtreeNode(link) {\n var abuf = this._getInput();\n var i;\n var bt = {};\n if (!this.checkSignature(\"TREE\")) {\n throw new Error('Bad TREE signature at ' + this.tell());\n }\n\n bt.keys = [];\n\n bt.node_type = this.getU8();\n bt.node_level = this.getU8();\n bt.entries_used = this.getU16();\n bt.left_sibling = this.getOffset();\n bt.right_sibling = this.getOffset();\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"BTREE type \" + bt.node_type + \" lvl \" +\n bt.node_level + \" n_used \" + bt.entries_used + \" \" +\n bt.left_sibling + \" \" + bt.right_sibling);\n }\n\n if (!link) {\n /* If this BTREE is associated with a group (not a dataset),\n * then its keys are single \"length\" value.\n */\n for (i = 0; i < bt.entries_used; i += 1) {\n bt.keys[i] = {};\n bt.keys[i].key_value = this.getLength();\n bt.keys[i].child_address = this.getOffset();\n if (this.getMetadata(\"debug\")) {\n console.log(\" BTREE \" + i + \" key \" +\n bt.keys[i].key_value + \" adr \" +\n bt.keys[i].child_address);\n }\n }\n } else {\n var j;\n\n /* If this BTREE is a \"chunked raw data node\" associated\n * with a dataset, then its keys are complex, consisting\n * of the chunk size in bytes, a filter mask, and a set of\n * offsets matching the dimensionality of the chunk layout.\n * The chunk size stores the actual stored length of the\n * data, so it may not equal the uncompressed chunk size.\n */\n var chunks = [];\n\n for (i = 0; i < bt.entries_used; i += 1) {\n bt.keys[i] = {};\n chunks[i] = {};\n chunks[i].chunk_size = this.getU32();\n chunks[i].filter_mask = this.getU32();\n chunks[i].chunk_offsets = [];\n for (j = 0; j < link.dims.length + 1; j += 1) {\n chunks[i].chunk_offsets.push(this.getU64());\n }\n bt.keys[i].child_address = this.getOffset();\n if (i < bt.entries_used) {\n if (this.getMetadata(\"debug\")) {\n console.log(\" BTREE \" + i +\n \" chunk_size \" + chunks[i].chunk_size +\n \" filter_mask \" + chunks[i].filter_mask +\n \" addr \" + bt.keys[i].child_address);\n }\n }\n }\n chunks[i] = {};\n chunks[i].chunk_size = this.getU32();\n chunks[i].filter_mask = this.getU32();\n chunks[i].chunk_offsets = [];\n for (j = 0; j < link.dims.length + 1; j += 1) {\n chunks[i].chunk_offsets.push(this.getU64());\n }\n\n /* If we're at a leaf node, we have data to deal with.\n * We might have to uncompress!\n */\n if (bt.node_level === 0) {\n var length;\n var offset;\n var sp;\n var dp;\n\n for (i = 0; i < bt.entries_used; i += 1) {\n length = chunks[i].chunk_size;\n offset = bt.keys[i].child_address;\n\n if (link.inflate) {\n sp = new Uint8Array(abuf, offset, length);\n dp = pako.inflate(sp);\n switch (link.type) {\n case this._type_enum.INT8:\n dp = new Int8Array(dp.buffer);\n break;\n case this._type_enum.UINT8:\n dp = new Uint8Array(dp.buffer);\n break;\n case this._type_enum.INT16:\n dp = new Int16Array(dp.buffer);\n 
break;\n case this._type_enum.UINT16:\n dp = new Uint16Array(dp.buffer);\n break;\n case this._type_enum.INT32:\n dp = new Int32Array(dp.buffer);\n break;\n case this._type_enum.UINT32:\n dp = new Uint32Array(dp.buffer);\n break;\n case this._type_enum.FLT:\n dp = new Float32Array(dp.buffer);\n break;\n case this._type_enum.DBL:\n dp = new Float64Array(dp.buffer);\n break;\n default:\n throw new Error('Unknown type code ' + link.type);\n }\n if (link.array.length - link.n_filled < dp.length) {\n dp = dp.subarray(0, link.array.length - link.n_filled);\n }\n link.array.set(dp, link.n_filled);\n link.n_filled += dp.length;\n if (this.getMetadata(\"debug\")) {\n console.log(link.name + \" \" + sp.length + \" \" + dp.length + \" \" + link.n_filled + \"/\" + link.array.length);\n }\n }\n else {\n /* no need to inflate data. */\n dp = this.getArray(link.type, length, offset);\n link.array.set(dp, link.n_filled);\n link.n_filled += dp.length;\n }\n }\n } else {\n for (i = 0; i < bt.entries_used; i += 1) {\n this.seek(bt.keys[i].child_address);\n this.hdf5V1BtreeNode(link);\n }\n }\n }\n return bt;\n }\n\n\n /**\n * [PRIVATE]\n */\n hdf5GroupSymbolTable(lh, link) {\n if (!this.checkSignature(\"SNOD\")) {\n throw new Error('Bad or missing SNOD signature');\n }\n var ver = this.getU8();\n this.skip(1);\n var n_sym = this.getU16();\n if (this.getMetadata(\"debug\")) {\n console.log(\"this.hdf5GroupSymbolTable V\" + ver + \" #\" + n_sym +\n \" '\" + link.name + \"'\");\n }\n var i;\n var link_name_offset;\n var ohdr_address;\n var cache_type;\n var child;\n var spp;\n\n for (i = 0; i < 2 * this._superblk.gln_k; i += 1) {\n link_name_offset = this.getOffset();\n ohdr_address = this.getOffset();\n cache_type = this.getU32();\n this.skip(20);\n\n if (i < n_sym) {\n child = this.createLink();\n child.hdr_offset = ohdr_address;\n if (lh) {\n spp = this.tell();\n /* The link name is a zero-terminated string\n * starting at the link_name_off relative to\n * the beginning of the data segment of the local\n * heap.\n */\n this.seek(lh.lh_dseg_off + link_name_offset);\n child.name = this.getString(lh.lh_dseg_len);\n this.seek(spp);\n }\n if (this.getMetadata(\"debug\")) {\n console.log(\" \" + i + \" O \" + link_name_offset + \" A \" +\n ohdr_address + \" T \" + cache_type + \" '\" +\n child.name + \"'\");\n }\n link.children.push(child);\n }\n }\n }\n\n\n /**\n * [PRIVATE]\n *\n * Read a v1 local heap header. These define relatively small\n * regions used primarily for storing symbol names associated with\n * a symbol table message.\n */\n hdf5LocalHeap() {\n var lh = {};\n if (!this.checkSignature(\"HEAP\")) {\n throw new Error('Bad or missing HEAP signature');\n }\n lh.lh_ver = this.getU8();\n this.skip(3);\n lh.lh_dseg_len = this.getLength();\n lh.lh_flst_len = this.getLength();\n lh.lh_dseg_off = this.getOffset();\n if (this.getMetadata(\"debug\")) {\n console.log(\"LHEAP V\" + lh.lh_ver + \" \" + lh.lh_dseg_len + \" \" +\n lh.lh_flst_len + \" \" + lh.lh_dseg_off);\n }\n return lh;\n }\n\n\n /**\n * [PRIVATE]\n *\n * Process a \"dataspace\" message. Dataspaces define the\n * dimensionality of a dataset or attribute. They define the\n * number of dimensions (rank) and the current length of each\n * dimension. It is possible to specify a \"maximum\" length that is\n * greater than or equal to the current length, but MINC doesn't\n * rely on that feature so these values are ignored. 
Finally it\n * is also possible to specify a \"permutation index\" that alters\n * storage order of the dataset, but again, MINC doesn't rely on\n * this feature, so the values are ignored.\n */\n hdf5MsgDataspace(sz, link) {\n var cb;\n var ver = this.getU8();\n var n_dim = this.getU8();\n var flag = this.getU8();\n if (ver <= 1) {\n this.skip(5);\n } else {\n this.skip(1);\n }\n\n var n_items = 1;\n var dlen = [];\n var i;\n for (i = 0; i < n_dim; i += 1) {\n dlen[i] = this.getLength();\n n_items *= dlen[i];\n }\n\n cb = (n_dim * this._superblk.lensz) + ((ver <= 1) ? 8 : 4);\n\n var dmax = [];\n if ((flag & 1) !== 0) {\n cb += n_dim * this._superblk.lensz;\n for (i = 0; i < n_dim; i += 1) {\n dmax[i] = this.getLength();\n }\n }\n\n var dind = [];\n if ((flag & 2) !== 0) {\n cb += n_dim * this._superblk.lensz;\n for (i = 0; i < n_dim; i += 1) {\n dind[i] = this.getLength();\n }\n }\n var msg = \"this.hdf5MsgDataspace V\" + ver + \" N\" + n_dim + \" F\" + flag;\n if (this.getMetadata(\"debug\")) {\n if (n_dim !== 0) {\n msg += \"[\" + dlen.join(', ') + \"]\";\n }\n console.log(msg);\n }\n if (cb < sz) {\n this.skip(sz - cb);\n }\n if (link) {\n link.dims = dlen;\n }\n return n_items;\n }\n\n\n /**\n * [PRIVATE]\n *\n *\n * link info messages may contain a fractal heap address where we\n * can find additional link messages for this object. This\n * happens, for example, when there are lots of links in a\n * particular group.\n */\n hdf5MsgLinkInfo(link) {\n var that = this;\n\n var ver = this.getU8();\n var flags = this.getU8();\n if ((flags & 1) !== 0) {\n this.getU64(); // max. creation index (IGNORE).\n }\n var fh_address = this.getOffset(); // fractal heap address\n var bt_address = this.getOffset(); // v2 btree for name index\n if ((flags & 2) !== 0) {\n this.getOffset(); // creation order index (IGNORE).\n }\n if (this.getMetadata(\"debug\")) {\n console.log(\"this.hdf5MsgLinkInfo V\" + ver + \" F\" + flags +\n \" FH \" + fh_address + \" BT \" + bt_address);\n }\n var spp = this.tell();\n if (fh_address < this._superblk.eof_addr) {\n this.seek(fh_address);\n /* If there is a valid fractal heap address in the link info message, that\n * means the fractal heap is a collection of link messages. We can ignore\n * the btree address because we can get the names from the link messages.\n */\n var fh = this.hdf5FractalHeapHeader();\n var n_msg = 0;\n this.hdf5FractalHeapEnumerate( fh, function(row, address, block_offset, block_length) {\n var end_address = address + block_length;\n while (n_msg < fh.heap_nobj && that.tell() < end_address) {\n that.hdf5MsgLink(link);\n n_msg += 1;\n }\n return true; // continue with enumeration.\n });\n }\n this.seek(spp);\n }\n\n\n /**\n * [PRIVATE]\n */\n dt_class_name(cls) {\n var names = [\n \"Fixed-Point\", \"Floating-Point\", \"Time\", \"String\",\n \"BitField\", \"Opaque\", \"Compound\", \"Reference\",\n \"Enumerated\", \"Variable-Length\", \"Array\"\n ];\n\n if (cls < names.length) {\n return names[cls];\n }\n throw new Error('Unknown datatype class: ' + cls);\n }\n\n\n /**\n * [PRIVATE]\n *\n * Process a \"datatype\" message. These messages specify the data\n * type of a single element within a dataset or attribute. Data\n * types are extremely flexible, HDF5 supports a range of options\n * for bit widths and organization atomic types. We support only\n * fixed, float, and string atomic types, and those only for\n * certain restricted (but common) cases. 
At this point we\n * provide no support for more exotic types such as bit field,\n * enumerated, array, opaque, compound, time, reference,\n * variable-length, etc.\n *\n * TODO: should support enumerated types, possibly a few others.\n */\n hdf5MsgDatatype(sz) {\n var type = {};\n var cb = 8;\n var msg = \"\";\n var bit_offs;\n var bit_prec;\n var exp_loc;\n var exp_sz;\n var mnt_loc;\n var mnt_sz;\n var exp_bias;\n\n var cv = this.getU8();\n var ver = cv >> 4;\n var cls = cv & 15;\n var bf = [];\n var i;\n for (i = 0; i < 3; i += 1) {\n bf[i] = this.getU8();\n }\n var dt_size = this.getU32();\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"this.hdf5MsgDatatype V\" + ver + \" C\" + cls +\n \" \" + this.dt_class_name(cls) +\n \" \" + bf[0] + \".\" + bf[1] + \".\" + bf[2] +\n \" \" + dt_size);\n }\n\n switch (cls) {\n case 0: /* Fixed (integer): bit 0 for byte order, bit 3 for signed */\n bit_offs = this.getU16();\n bit_prec = this.getU16();\n switch (dt_size) {\n case 4:\n type.typ_type = (bf[0] & 8) ? this._type_enum.INT32 : this._type_enum.UINT32;\n break;\n case 2:\n type.typ_type = (bf[0] & 8) ? this._type_enum.INT16 : this._type_enum.UINT16;\n break;\n case 1:\n type.typ_type = (bf[0] & 8) ? this._type_enum.INT8 : this._type_enum.UINT8;\n break;\n default:\n throw new Error('Unknown type size ' + dt_size);\n }\n type.typ_length = dt_size;\n cb += 4;\n if (this.getMetadata(\"debug\")) {\n console.log(' (' + bit_offs + ' ' + bit_prec + ')');\n }\n break;\n case 1: /* Float: uses bits 0,6 for byte order */\n msg = \"\";\n if (this.getMetadata(\"debug\")) {\n switch (bf[0] & 0x41) {\n case 0:\n msg += \"LE \";\n break;\n case 1:\n msg += \"BE \";\n break;\n case 0x41:\n msg += \"VX \";\n break;\n default:\n throw new Error('Reserved fp byte order: ' + bf[0]);\n }\n }\n bit_offs = this.getU16();\n bit_prec = this.getU16();\n exp_loc = this.getU8();\n exp_sz = this.getU8();\n mnt_loc = this.getU8();\n mnt_sz = this.getU8();\n exp_bias = this.getU32();\n if (this.getMetadata(\"debug\")) {\n msg += (bit_offs + \" \" + bit_prec + \" \" + exp_loc + \" \" + exp_sz +\n \" \" + mnt_loc + \" \" + mnt_sz + \" \" + exp_bias);\n }\n /* See if it's one of the formats we recognize.\n IEEE 64-bit or IEEE 32-bit are the only two we handle.\n */\n if (bit_prec === 64 && bit_offs === 0 &&\n exp_loc === 52 && exp_sz === 11 &&\n mnt_loc === 0 && mnt_sz === 52 &&\n exp_bias === 1023 && dt_size === 8) {\n type.typ_type = this._type_enum.DBL;\n } else if (bit_prec === 32 && bit_offs === 0 &&\n exp_loc === 23 && exp_sz === 8 &&\n mnt_loc === 0 && mnt_sz === 23 &&\n exp_bias === 127 && dt_size === 4) {\n type.typ_type = this._type_enum.FLT;\n } else {\n throw new Error(\"Unsupported floating-point type\");\n }\n if (this.getMetadata(\"debug\")) {\n console.log(msg);\n }\n type.typ_length = dt_size;\n cb += 12;\n break;\n\n case 3: // string\n /* bits 0-3 = 0: null terminate, 1: null pad, 2: space pad */\n /* bits 4-7 = 0: ASCII, 1: UTF-8 */\n type.typ_type = this._type_enum.STR;\n type.typ_length = dt_size;\n break;\n\n default:\n throw new Error('Unimplemented HDF5 data class ' + cls);\n }\n if (sz > cb) {\n this.skip(sz - cb);\n }\n return type;\n }\n\n\n /**\n * [PRIVATE]\n *\n * Process a \"layout\" message. These messages specify the location and organization\n * of data in a dataset. The organization can be either compact, contiguous, or\n * chunked. Compact data is stored in the message as a contiguous block. Contiguous\n * data is stored elsewhere in the file in a single chunk. 
Chunked data is stored within\n * a V1 Btree as a series of possibly filtered (e.g. compressed) chunks.\n */\n hdf5MsgLayout(link) {\n var msg = \"\";\n\n var ver = this.getU8();\n var cls;\n var n_dim;\n var cdsz;\n var dim = [];\n var i;\n var dtadr;\n var dtsz;\n var elsz;\n\n var n_items = 1;\n if (ver === 1 || ver === 2) {\n n_dim = this.getU8();\n cls = this.getU8();\n this.skip(5);\n if (this.getMetadata(\"debug\")) {\n msg += \"this.hdf5MsgLayout V\" + ver + \" N\" + n_dim + \" C\" + cls;\n }\n if (cls === 1 || cls === 2) { // contiguous or chunked\n var addr = this.getOffset();\n if (this.getMetadata(\"debug\")) {\n msg += \" A\" + addr;\n }\n link.data_offset = addr;\n }\n\n for (i = 0; i < n_dim; i += 1) {\n dim[i] = this.getU32();\n n_items *= dim[i];\n }\n\n if (this.getMetadata(\"debug\")) {\n msg += \"[\" + dim.join(', ') + \"]\";\n }\n\n if (cls === 2) { // chunked\n elsz = this.getU32();\n link.chunk_size = n_items * elsz;\n if (this.getMetadata(\"debug\")) {\n msg += \" E\" + elsz;\n }\n }\n if (cls === 0) { // compact\n cdsz = this.getU32();\n if (this.getMetadata(\"debug\")) {\n msg += \"(\" + cdsz + \")\";\n }\n link.data_offset = this.tell();\n link.data_length = cdsz;\n } else if (cls === 1) {\n link.data_length = n_items;\n }\n } else if (ver === 3) {\n cls = this.getU8();\n msg = \"this.hdf5MsgLayout V\" + ver + \" C\" + cls;\n\n if (cls === 0) {\n cdsz = this.getU16();\n if (this.getMetadata(\"debug\")) {\n msg += \"(\" + cdsz + \")\";\n }\n link.data_offset = this.tell();\n link.data_length = cdsz;\n } else if (cls === 1) {\n dtadr = this.getOffset();\n dtsz = this.getLength();\n if (this.getMetadata(\"debug\")) {\n msg += \"(\" + dtadr + \", \" + dtsz + \")\";\n }\n link.data_offset = dtadr;\n link.data_length = dtsz;\n } else if (cls === 2) {\n n_dim = this.getU8();\n dtadr = this.getOffset();\n link.data_offset = dtadr;\n link.chunk_size = 1;\n for (i = 0; i < n_dim - 1; i += 1) {\n dim[i] = this.getU32();\n n_items *= dim[i];\n }\n if (this.getMetadata(\"debug\")) {\n msg += \"(N\" + n_dim + \", A\" + dtadr + \" [\" + dim.join(',') + \"]\";\n }\n elsz = this.getU32();\n link.chunk_size = n_items * elsz;\n if (this.getMetadata(\"debug\")) {\n msg += \" E\" + elsz;\n }\n }\n } else {\n throw new Error(\"Illegal layout version \" + ver);\n }\n if (this.getMetadata(\"debug\")) {\n console.log(msg);\n }\n }\n\n\n /**\n * [PRIVATE]\n *\n * Read a \"filter pipeline\" message. At the moment we _only_ handle\n * deflate/inflate. 
Anything else will cause us to throw an exception.\n */\n hdf5MsgPipeline(link) {\n var ver = this.getU8();\n var nflt = this.getU8();\n\n var msg = \"this.hdf5MsgPipeline V\" + ver + \" N\" + nflt;\n if (ver === 1) {\n this.skip(6);\n }\n\n if (this.getMetadata(\"debug\")) {\n console.log(msg);\n }\n\n var i;\n var fiv;\n var nlen;\n var flags;\n var ncdv;\n for (i = 0; i < nflt; i += 1) {\n fiv = this.getU16();\n if (fiv !== 1) { /* deflate */\n throw new Error(\"Unimplemented HDF5 filter \" + fiv);\n }\n else {\n if (typeof pako !== 'object') {\n throw new Error('Need pako to inflate data.');\n }\n link.inflate = true;\n }\n if (ver === 1 || fiv > 256) {\n nlen = this.getU16();\n } else {\n nlen = 0;\n }\n\n flags = this.getU16();\n ncdv = this.getU16();\n if ((ncdv & 1) !== 0) {\n ncdv += 1;\n }\n if (nlen !== 0) {\n this.skip(nlen); // ignore name.\n }\n\n this.skip(ncdv * 4);\n\n if (this.getMetadata(\"debug\")) {\n console.log(\" \" + i + \" ID\" + fiv + \" F\" + flags + \" \" + ncdv);\n }\n }\n }\n\n\n /**\n * [PRIVATE]\n *\n * Process an \"attribute\" message. This actually defines an attribute that is\n * to be associated with a group or dataset (what I generally call a \"link\"\n * in this code. Attributes include a name, a datatype, and a dataspace, followed\n * by the actual data.\n */\n hdf5MsgAttribute(sz, link) {\n var ver = this.getU8();\n var flags = this.getU8();\n var nm_len = this.getU16();\n var dt_len = this.getU16();\n var ds_len = this.getU16();\n var msg = \"this.hdf5MsgAttribute V\" + ver + \" F\" + flags + \" \" + sz + \": \";\n\n if ((flags & 3) !== 0) {\n throw new Error('Shared dataspaces and datatypes are not supported.');\n }\n\n if (ver === 3) {\n var cset = this.getU8();\n if (this.getMetadata(\"debug\")) {\n msg += (cset === 0) ? \"ASCII\" : \"UTF-8\";\n }\n }\n if (this.getMetadata(\"debug\")) {\n msg += \"(\" + nm_len + \" \" + dt_len + \" \" + ds_len + \")\";\n }\n if (ver < 3) {\n nm_len = Math.floor((nm_len + 7) / 8) * 8;\n dt_len = Math.floor((dt_len + 7) / 8) * 8;\n ds_len = Math.floor((ds_len + 7) / 8) * 8;\n\n if (this.getMetadata(\"debug\")) {\n msg += \"/(\" + nm_len + \" \" + dt_len + \" \" + ds_len + \")\";\n }\n }\n\n var att_name = this.getString(nm_len);\n if (this.getMetadata(\"debug\")) {\n msg += \" Name: \" + att_name;\n console.log(msg);\n }\n var val_type = this.hdf5MsgDatatype(dt_len);\n var n_items = this.hdf5MsgDataspace(ds_len);\n var val_len = 0;\n if (sz > 0) {\n if (ver < 3) {\n val_len = sz - (8 + nm_len + dt_len + ds_len);\n } else {\n val_len = sz - (9 + nm_len + dt_len + ds_len);\n }\n } else {\n val_len = val_type.typ_length * n_items;\n }\n if (this.getMetadata(\"debug\")) {\n console.log(\" attribute data size \" + val_len + \" \" + this.tell());\n }\n var att_value;\n if (val_type.typ_type === this._type_enum.STR) {\n att_value = this.getString(val_len);\n } else {\n att_value = this.getArray(val_type.typ_type, val_len);\n }\n link.attributes[att_name] = att_value;\n }\n\n\n /**\n * [PRIVATE]\n *\n * Process a \"group info\" message. 
We don't actually do anything with these.\n */\n hdf5MsgGroupInfo() {\n var n_ent = 4;\n var n_lnl = 8;\n var ver = this.getU8();\n var flags = this.getU8();\n if ((flags & 1) !== 0) {\n this.getU16(); // link phase change: max compact value (IGNORE)\n this.getU16(); // link phase cange: max dense value (IGNORE)\n }\n if ((flags & 2) !== 0) {\n n_ent = this.getU16();\n n_lnl = this.getU16();\n }\n if (this.getMetadata(\"debug\")) {\n console.log(\"this.hdf5MsgGroupInfo V\" + ver + \" F\" + flags + \" ENT \" + n_ent + \" LNL \" + n_lnl);\n }\n }\n\n\n /**\n * [PRIVATE]\n *\n * Process a \"link\" message. This specifies the name and header location of either a\n * group or a dataset within the current group. It is probably also used to implement\n * internal links but we don't really support that.\n */\n hdf5MsgLink(link) {\n var ver = this.getU8();\n var ltype = 0;\n if (ver !== 1) {\n throw new Error(\"Bad link message version \" + ver);\n }\n var flags = this.getU8();\n if ((flags & 8) !== 0) {\n ltype = this.getU8();\n }\n if ((flags & 4) !== 0) {\n this.getU64(); // creation order (IGNORE)\n }\n if ((flags & 16) !== 0) {\n this.getU8(); // link name character set (IGNORE)\n }\n var cb = 1 << (flags & 3);\n var lnsz = this.getUXX(cb);\n\n var child = this.createLink();\n\n child.name = this.getString(lnsz);\n\n if ((flags & 8) === 0) {\n child.hdr_offset = this.getOffset();\n }\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"this.hdf5MsgLink V\" + ver + \" F\" + flags + \" T\" + ltype +\n \" NM \" + child.name + \" OF \" + child.hdr_offset);\n }\n link.children.push(child);\n }\n\n\n /**\n * [PRIVATE]\n *\n * The fractal heap direct block contains:\n * 1. A signature.\n * 2. a byte version.\n * 3. an offset pointing to the header (for integrity checking).\n * 4. A variably-sized block offset that gives (_I think_) the mininum block offset\n * associated with this block.\n * 5. Variably-sized data. Block size varies with row number in a slightly tricky\n * fashion. Each \"row\" consists of \"table_width\" blocks. The first two rows, row 0 and 1,\n * have blocks of the \"starting block size\". Row 2-N have blocks of size 2^(row-1) times\n * the starting block size.\n */\n hdf5FractalHeapDirectBlock(fh, row, address, callback) {\n if (!this.checkSignature(\"FHDB\")) {\n throw new Error(\"Bad or missing FHDB signature\");\n }\n var ver = this.getU8();\n if (ver !== 0) {\n throw new Error('Bad FHDB version: ' + ver);\n }\n this.getOffset(); // heap header address (IGNORE)\n var cb = Math.ceil(fh.max_heapsz / 8.0);\n var block_offset = this.getUXX(cb); // block offset\n if ((fh.flags & 2) !== 0) {\n this.getU32(); // checksum (IGNORE)\n }\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"FHDB V:\" + ver + \" R:\" + row + \" O:\" + block_offset + \" A:\" + address);\n }\n var header_length = 5 + this._superblk.offsz + cb;\n if ((fh.flags & 2) !== 0) {\n header_length += 4;\n }\n var block_length;\n if (row <= 1) {\n block_length = fh.start_blksz;\n }\n else {\n block_length = Math.pow(2, row - 1) * fh.start_blksz;\n }\n if (callback) {\n return callback(row, address, block_offset, block_length);\n }\n else {\n return true; // continue enumeration.\n }\n }\n\n\n /**\n * [PRIVATE]\n *\n * The fractal heap indirect block contains:\n * 1. A signature.\n * 2. a byte version\n * 3. an offset pointing to the header (for integrity checking).\n * 4. a variably-sized block offset that gives (_I think_) the mininum block offset\n * associated with children of this block.\n * 5. 
pointers to K direct blocks\n * 6. pointers to N indirect blocks\n * 7. A checksum. This code completely ignores checksums.\n * See calculations of K and N in this.hdf5FractalHeapHeader(). Note that there can also\n * be additional information in the header if \"filtered\" direct blocks are used. I have\n * made no attempt to support this.\n */\n hdf5FractalHeapIndirectBlock(fh, callback) {\n if (!this.checkSignature(\"FHIB\")) {\n throw new Error(\"Bad or missing FHIB signature\");\n }\n var ver = this.getU8();\n if (ver !== 0) {\n throw new Error('Bad FHIB version: ' + ver);\n }\n this.getOffset(); // heap header address (IGNORE)\n var cb = Math.ceil(fh.max_heapsz / 8.0);\n var block_offset = this.getUXX(cb); // block offset\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"FHIB V:\" + ver + \" O:\" + block_offset);\n }\n var i;\n var address;\n var db_addrs = [];\n for (i = 0; i < fh.K; i += 1) {\n address = this.getOffset();\n if (address < this._superblk.eof_addr) {\n if (this.getMetadata(\"debug\")) {\n console.log(\"direct block at \" + address);\n }\n db_addrs.push(address);\n }\n }\n\n var ib_addrs = [];\n for (i = 0; i < fh.N; i += 1) {\n address = this.getOffset();\n if (address < this._superblk.eof_addr) {\n if (this.getMetadata(\"debug\")) {\n console.log(\"indirect block at \" + address);\n }\n ib_addrs.push(address);\n }\n }\n this.getU32(); // checksum (IGNORE)\n\n /* Finished reading the indirect block, now go read its children.\n */\n for (i = 0; i < db_addrs.length; i++) {\n this.seek(db_addrs[i]);\n /* TODO: check row calculation!\n */\n if (!this.hdf5FractalHeapDirectBlock(fh, i / fh.table_width, db_addrs[i], callback)) {\n return false;\n }\n }\n for (i = 0; i < ib_addrs.length; i++) {\n this.seek(ib_addrs[i]);\n if (!this.hdf5FractalHeapIndirectBlock(fh, callback)) {\n return false;\n }\n }\n return true;\n }\n\n\n /**\n * [PRIVATE]\n *\n * enumerate over all of the direct blocks in the fractal heap.\n */\n hdf5FractalHeapEnumerate(fh, callback) {\n this.seek(fh.root_addr);\n if (fh.K === 0) {\n this.hdf5FractalHeapDirectBlock(fh, 0, fh.root_addr, callback);\n }\n else {\n this.hdf5FractalHeapIndirectBlock(fh, callback);\n }\n }\n\n\n /**\n * [PRIVATE]\n */\n hdf5FractalHeapOffset(fh, offset) {\n var location;\n this.hdf5FractalHeapEnumerate(fh, function(row, address, block_offset, block_length) {\n if (offset >= block_offset && offset < block_offset + block_length) {\n location = address + (offset - block_offset);\n return false; // stop enumeration.\n }\n return true; // continue enumeration.\n });\n return location;\n }\n\n\n /**\n * [PRIVATE]\n *\n * Attribute info messages contain pointers to a fractal heap and a v2 btree.\n * If these pointers are valid, we must follow them to find more attributes.\n * The attributes are indexed by records in the \"type 8\" btree. 
These btree\n * records\n */\n hdf5MsgAttrInfo(link) {\n var ver = this.getU8();\n if (ver !== 0) {\n throw new Error('Bad attribute information message version: ' + ver);\n }\n\n var flags = this.getU8();\n\n if ((flags & 1) !== 0) {\n this.getU16(); // maximum creation index (IGNORE)\n }\n var fh_addr = this.getOffset();\n var bt_addr = this.getOffset();\n if ((flags & 2) !== 0) {\n this.getOffset(); // attribute creation order (IGNORE)\n }\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"this.hdf5MsgAttrInfo V\" + ver + \" F\" + flags + \" HP \" + fh_addr +\n \" AN \" + bt_addr);\n }\n\n var spp = this.tell();\n var fh; // fractal heap header.\n if (fh_addr < this._superblk.eof_addr) {\n this.seek(fh_addr);\n fh = this.hdf5FractalHeapHeader();\n }\n if (bt_addr < this._superblk.eof_addr) {\n this.seek(bt_addr);\n var bh = this.hdf5V2BtreeHeader();\n if (bh.type !== 8) {\n throw new Error(\"Can only handle indexed attributes.\");\n }\n this.seek(bh.root_addr);\n if (bh.depth > 0) {\n this.hdf5V2BtreeInternalNode(fh, bh.root_nrec, bh.depth, link);\n }\n else {\n this.hdf5V2BtreeLeafNode(fh, bh.root_nrec, link);\n }\n }\n this.seek(spp);\n }\n\n\n /**\n * [PRIVATE]\n *\n * Process a single message, given a message header. Assumes that\n * the data view offset is pointing to the remainder of the\n * message.\n *\n * V1 and V2 files use different sets of messages to accomplish\n * similar things. For example, V1 files tend to use \"symbol\n * table\" messages to describe links within a group, whereas V2\n * files use \"link\" and \"linkinfo\" messages.\n */\n hdf5ProcessMessage(msg, link) {\n var cq_new = {};\n var val_type;\n\n switch (msg.hm_type) {\n case 1:\n this.hdf5MsgDataspace(msg.hm_size, link);\n break;\n case 2:\n this.hdf5MsgLinkInfo(link);\n break;\n case 3:\n val_type = this.hdf5MsgDatatype(msg.hm_size);\n if (link) {\n link.type = val_type.typ_type;\n }\n break;\n case 6:\n this.hdf5MsgLink(link);\n break;\n case 8:\n this.hdf5MsgLayout(link);\n break;\n case 10:\n this.hdf5MsgGroupInfo();\n break;\n case 11:\n this.hdf5MsgPipeline(link);\n break;\n case 12:\n this.hdf5MsgAttribute(msg.hm_size, link);\n break;\n case 16:\n /* Process an object header continuation message. These\n * basically just say this header continues with a new segment\n * with a given location and length. They can come before the\n * end of the current message segment, and multiple\n * continuation messages can occur in any particular segment.\n * This means we have to enqueue them and shift them off the\n * queue when we finish processing the current segment.\n */\n cq_new.cq_off = this.getOffset();\n cq_new.cq_len = this.getLength();\n this._continuation_queue.push(cq_new);\n if (this.getMetadata(\"debug\")) {\n console.log(\"hdf5MsgObjHdrContinue \" + cq_new.cq_off + \" \" + cq_new.cq_len);\n }\n break;\n case 17: // SymbolTable\n link.sym_btree = this.getOffset();\n link.sym_lheap = this.getOffset();\n if (this.getMetadata(\"debug\")) {\n console.log(\"hdf5MsgSymbolTable \" + link.sym_btree + \" \" + link.sym_lheap);\n }\n break;\n case 21:\n this.hdf5MsgAttrInfo(link);\n break;\n case 0:\n case 4:\n case 5:\n case 7:\n case 18:\n case 19:\n case 20:\n case 22:\n case 24:\n this.skip(msg.hm_size);\n break;\n default:\n throw new Error('Unknown message type: ' + msg.hm_type);\n }\n }\n\n\n /**\n * [PRIVATE]\n *\n * Read a V2 object header. Object headers contain a series of messages that define\n * an HDF5 object, primarily a group or a dataset. 
V2 object headers, and V2 objects\n * generally, are much less concerned about alignment than V1 objects.\n */\n hdf5V2ObjectHeader(link) {\n if (!this.checkSignature(\"OHDR\")) {\n throw new Error('Bad or missing OHDR signature');\n }\n\n var ver = this.getU8();\n var flags = this.getU8();\n\n if ((flags & 0x20) !== 0) {\n this.getU32(); // access time (IGNORE)\n this.getU32(); // modify time (IGNORE)\n this.getU32(); // change time (IGNORE)\n this.getU32(); // birth time (IGNORE)\n }\n\n if ((flags & 0x10) !== 0) {\n this.getU16(); // maximum number of compact attributes (IGNORE)\n this.getU16(); // maximum number of dense attributes (IGNORE)\n }\n\n var cb = 1 << (flags & 3);\n var ck0_size = this.getUXX(cb);\n\n var msg_num = 0;\n var msg_offs = 0;\n var msg_bytes = ck0_size;\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"this.hdf5V2ObjectHeader V\" + ver + \" F\" + flags + \" HS\" + ck0_size);\n }\n\n var hmsg;\n var cq_head;\n var spp;\n\n while (true) {\n while (msg_bytes - msg_offs >= 8) {\n hmsg = {};\n hmsg.hm_type = this.getU8();\n hmsg.hm_size = this.getU16();\n hmsg.hm_flags = this.getU8();\n if (this.getMetadata(\"debug\")) {\n console.log(\" msg\" + msg_num + \" F\" + hmsg.hm_flags + \" T \" +\n hmsg.hm_type + \" S \" + hmsg.hm_size +\n \" (\" + msg_offs + \"/\" + msg_bytes + \") \" +\n this.hdf5GetMsgName(hmsg.hm_type));\n }\n if ((flags & 0x04) !== 0) {\n hmsg.hm_corder = this.getU16();\n }\n spp = this.tell();\n this.hdf5ProcessMessage(hmsg, link);\n this.seek(spp + hmsg.hm_size); // this.skip past message.\n\n msg_offs += hmsg.hm_size + 4;\n\n msg_num += 1;\n }\n\n if ((msg_bytes - msg_offs) > 4) {\n this.skip(msg_bytes - (msg_offs + 4));\n }\n\n this.getU32(); // checksum (IGNORE)\n\n if (this._continuation_queue.length !== 0) {\n cq_head = this._continuation_queue.shift();\n this.seek(cq_head.cq_off);\n msg_bytes = cq_head.cq_len - 4;\n msg_offs = 0;\n if (this.getMetadata(\"debug\")) {\n console.log('continuing with ' + cq_head.cq_len + ' bytes at ' + this.tell());\n }\n if (!this.checkSignature(\"OCHK\")) {\n throw new Error(\"Bad v2 object continuation\");\n }\n } else {\n break;\n }\n }\n\n link.children.forEach(function (child, link_num) {\n that.seek(child.hdr_offset);\n if (that.getMetadata(\"debug\")) {\n console.log(link_num + \" \" + child.hdr_offset + \" \" + child.name);\n }\n if (this.checkSignature(\"OHDR\")) {\n that.seek(child.hdr_offset);\n that.hdf5V2ObjectHeader(child);\n }\n else {\n that.seek(child.hdr_offset);\n that.hdf5V1ObjectHeader(child);\n }\n });\n }\n\n\n /**\n * [PRIVATE]\n */\n loadData(link) {\n var that = this;\n\n if (link.chunk_size !== 0) {\n this.seek(link.data_offset);\n\n var n_bytes = 1;\n var i;\n for (i = 0; i < link.dims.length; i += 1) {\n n_bytes *= link.dims[i];\n }\n n_bytes *= this.typeSize(link.type);\n if (this.getMetadata(\"debug\")) {\n console.log('allocating ' + n_bytes + ' bytes');\n }\n var ab = new ArrayBuffer(n_bytes);\n link.n_filled = 0;\n switch (link.type) {\n case this._type_enum.INT8:\n link.array = new Int8Array(ab);\n break;\n case this._type_enum.UINT8:\n link.array = new Uint8Array(ab);\n break;\n case this._type_enum.INT16:\n link.array = new Int16Array(ab);\n break;\n case this._type_enum.UINT16:\n link.array = new Uint16Array(ab);\n break;\n case this._type_enum.INT32:\n link.array = new Int32Array(ab);\n break;\n case this._type_enum.UINT32:\n link.array = new Uint32Array(ab);\n break;\n case this._type_enum.FLT:\n link.array = new Float32Array(ab);\n break;\n case this._type_enum.DBL:\n 
link.array = new Float64Array(ab);\n break;\n default:\n throw new Error('Illegal type: ' + link.type);\n }\n this.hdf5V1BtreeNode(link);\n } else {\n if (link.data_offset > 0 && link.data_offset < this._superblk.eof_addr) {\n if (this.getMetadata(\"debug\")) {\n console.log('loading ' + link.data_length + ' bytes from ' + link.data_offset + ' to ' + link.name);\n }\n link.array = this.getArray(link.type, link.data_length,\n link.data_offset);\n } else {\n if (this.getMetadata(\"debug\")) {\n console.log('data not present for /' + link.name + '/');\n }\n }\n }\n\n link.children.forEach(function (child) {\n that.loadData(child);\n });\n }\n\n\n /**\n * [PRIVATE]\n *\n * Read a v1 object header. Object headers contain a series of\n * messages that define an HDF5 object, primarily a group or a\n * dataset. The v1 object header, like most of the v1 format, is\n * very careful about alignment. Every message must be on an\n * 8-byte alignment RELATIVE TO THE START OF THE HEADER. So if the\n * header starts on an odd boundary, messages may start on odd\n * boundaries as well. No, this doesn't make much sense.\n */\n hdf5V1ObjectHeader(link) {\n var that = this;\n var oh = {};\n this.startAlignment();\n oh.oh_ver = this.getU8();\n this.skip(1); // reserved\n oh.oh_n_msgs = this.getU16();\n oh.oh_ref_cnt = this.getU32();\n oh.oh_hdr_sz = this.getU32();\n if (oh.oh_ver !== 1) {\n throw new Error(\"Bad v1 object header version: \" + oh.oh_ver);\n }\n if (this.getMetadata(\"debug\")) {\n console.log(\"this.hdf5V1ObjectHeader V\" + oh.oh_ver +\n \" #M \" + oh.oh_n_msgs +\n \" RC \" + oh.oh_ref_cnt +\n \" HS \" + oh.oh_hdr_sz);\n }\n\n var msg_bytes = oh.oh_hdr_sz;\n var cq_head;\n var msg_num;\n var hmsg;\n var spp;\n\n for (msg_num = 0; msg_num < oh.oh_n_msgs; msg_num += 1) {\n if (msg_bytes <= 8) {\n if (this._continuation_queue.length !== 0) {\n cq_head = this._continuation_queue.shift();\n this.seek(cq_head.cq_off);\n msg_bytes = cq_head.cq_len;\n if (this.getMetadata(\"debug\")) {\n console.log('continuing with ' + msg_bytes + ' bytes at ' + this.tell());\n }\n this.startAlignment();\n } else {\n break;\n }\n }\n\n this.checkAlignment();\n\n hmsg = {};\n hmsg.hm_type = this.getU16();\n hmsg.hm_size = this.getU16();\n hmsg.hm_flags = this.getU8();\n\n if ((hmsg.hm_size % 8) !== 0) {\n throw new Error('Size is not 8-byte aligned: ' + hmsg.hm_size);\n }\n this.skip(3); // this.skip reserved\n msg_bytes -= (8 + hmsg.hm_size);\n if (this.getMetadata(\"debug\")) {\n console.log(\" msg\" + msg_num +\n \" F \" + hmsg.hm_flags +\n \" T \" + hmsg.hm_type +\n \" S \" + hmsg.hm_size +\n \"(\" + msg_bytes + \") \" + this.hdf5GetMsgName(hmsg.hm_type));\n }\n\n spp = this.tell();\n this.hdf5ProcessMessage(hmsg, link);\n this.seek(spp + hmsg.hm_size); // this.skip whole message.\n }\n\n if (link.sym_btree !== 0 && link.sym_lheap !== 0) {\n this.seek(link.sym_btree);\n var bt = this.hdf5V1BtreeNode();\n this.seek(link.sym_lheap);\n var lh = this.hdf5LocalHeap();\n var i;\n for (i = 0; i < bt.entries_used; i += 1) {\n this.seek(bt.keys[i].child_address);\n if (this.checkSignature(\"SNOD\")) {\n this.seek(bt.keys[i].child_address);\n this.hdf5GroupSymbolTable(lh, link);\n } else {\n this.seek(bt.keys[i].child_address);\n this.hdf5V1ObjectHeader(link);\n }\n }\n\n link.children.forEach(function (child) {\n that.seek(child.hdr_offset);\n that.hdf5V1ObjectHeader(child);\n });\n }\n }\n\n\n//------------------------------------------------------------------------------\n// FROM hdf5_tools.js\n\n 
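/**\n * [PRIVATE]\n *\n * Descriptive note (added): map an HDF5 atomic type code from this._type_enum\n * (1-based) to the MINC/typed-array name stored in this._type_matching\n * (0-based), hence the "- 1" in the lookup below. For example,\n * this._type_enum.FLT (7) yields "float32", while STR yields "undef"\n * because the string type has no MINC volume equivalent.\n */\n 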
getTypeMatchMinc(typeEnumVal){\n return this._type_matching[typeEnumVal - 1];\n }\n\n\n\n defined(x) {\n return typeof x !== 'undefined';\n }\n\n\n typeName(x) {\n if (! this.defined(x)) {\n return \"undefined\";\n }\n return x.constructor.name;\n }\n\n\n\n typeSize(typ) {\n if (typ >= this._type_enum.INT8 && typ < this.type_sizes.length) {\n return this.type_sizes[typ];\n }\n throw new Error('Unknown type ' + typ);\n }\n\n\n typeIsFloat(typ) {\n return (typ >= this._type_enum.FLT && typ <=this._type_enum.DBL);\n }\n\n\n /*\n * The remaining code after this point is not truly HDF5 specific -\n * it's mostly about converting the MINC file into the form\n * BrainBrowser is able to use. Therefore it is used for both HDF5\n * and NetCDF files.\n */\n\n /*\n * Join does not seem to be defined on the typed arrays in\n * javascript, so I've re-implemented it here, sadly.\n */\n join(array, string) {\n var result = \"\";\n if (array && array.length) {\n var i;\n for (i = 0; i < array.length - 1; i += 1) {\n result += array[i];\n result += string;\n }\n result += array[i];\n }\n return result;\n }\n\n /*\n * Recursively print out the structure and contents of the file.\n * Primarily useful for debugging.\n */\n printStructure(link, level) {\n var that = this;\n\n var i;\n var msg = \"\";\n for (i = 0; i < level * 2; i += 1) {\n msg += \" \";\n }\n msg += link.name + (link.children.length ? \"/\" : \"\");\n if (link.type > 0) {\n msg += ' ' + this.typeName(link.array);\n if (link.dims.length) {\n msg += '[' + link.dims.join(', ') + ']';\n }\n if (link.array) {\n msg += \":\" + link.array.length;\n } else {\n msg += \" NULL\";\n }\n }\n console.log(msg);\n\n Object.keys(link.attributes).forEach(function (name) {\n var value = link.attributes[name];\n\n msg = \"\";\n for (i = 0; i < level * 2 + 1; i += 1) {\n msg += \" \";\n }\n msg += link.name + ':' + name + \" \" +\n that.typeName(value) + \"[\" + value.length + \"] \";\n if (typeof value === \"string\") {\n msg += JSON.stringify(value);\n } else {\n msg += \"{\" + that.join(value.slice(0, 16), ', ');\n if (value.length > 16) {\n msg += \", ...\";\n }\n msg += \"}\";\n }\n console.log(msg);\n });\n\n link.children.forEach(function (child) {\n that.printStructure(child, level + 1);\n });\n }\n\n /* Find a dataset with a given name, by recursively searching through\n * the links. 
Groups will have 'type' fields of -1, since they contain\n * no data.\n * TODO (maybe): Use associative array for children?\n */\n findDataset(link, name, level) {\n var that = this;\n var result;\n if (link && link.name === name && link.type > 0) {\n result = link;\n } else {\n link.children.find( function( child ) {\n result = that.findDataset(child, name, level + 1);\n return that.defined(result);\n });\n }\n return result;\n }\n\n /* Find an attribute with a given name.\n */\n findAttribute(link, name, level) {\n var that = this;\n var result = link.attributes[name];\n if (result)\n return result;\n\n link.children.find( function (child ) {\n result = that.findAttribute( child, name, level + 1);\n return that.defined(result);\n });\n return result;\n }\n\n /**\n * @doc function\n * @name hdf5.this.scaleVoxels\n * @param {object} image The link object corresponding to the image data.\n * @param {object} image_min The link object corresponding to the image-min\n * data.\n * @param {object} image_max The link object corresponding to the image-max\n * data.\n * @param {object} valid_range An array of exactly two items corresponding\n * to the minimum and maximum valid _raw_ voxel values.\n * @param {boolean} debug True if we should print debugging information.\n * @returns A new ArrayBuffer containing the rescaled data.\n * @description\n * Convert the MINC data from voxel to real range. This returns a\n * new buffer that contains the \"real\" voxel values. It does less\n * work for floating-point volumes, since they don't need scaling.\n *\n * For debugging/testing purposes, also gathers basic voxel statistics,\n * for comparison against mincstats.\n */\n scaleVoxels(image, image_min, image_max, valid_range, debug) {\n /*\n var new_abuf = new ArrayBuffer(image.array.length *\n Float32Array.BYTES_PER_ELEMENT);\n var new_data = new Float32Array(new_abuf);\n\n */\n\n // 1D array to store the voxel data,\n // not initialized yet because it depends on the hdf5 type.\n var new_abuf = null;\n var new_data = null;\n\n // we could simply use image.type, but written types are easier to read...\n switch (this.getTypeMatchMinc(image.type)) {\n case 'int8':\n new_abuf = new ArrayBuffer(image.array.length * Int8Array.BYTES_PER_ELEMENT);\n new_data = new Int8Array(new_abuf);\n break;\n\n case 'int16':\n new_abuf = new ArrayBuffer(image.array.length * Int16Array.BYTES_PER_ELEMENT);\n new_data = new Int16Array(new_abuf);\n break;\n\n case 'int32':\n new_abuf = new ArrayBuffer(image.array.length * Int32Array.BYTES_PER_ELEMENT);\n new_data = new Int32Array(new_abuf);\n break;\n\n case 'float32':\n new_abuf = new ArrayBuffer(image.array.length * Float32Array.BYTES_PER_ELEMENT);\n new_data = new Float32Array(new_abuf);\n break;\n\n case 'float64':\n new_abuf = new ArrayBuffer(image.array.length * Float64Array.BYTES_PER_ELEMENT);\n new_data = new Float64Array(new_abuf);\n break;\n\n case 'uint8':\n new_abuf = new ArrayBuffer(image.array.length * Uint8Array.BYTES_PER_ELEMENT);\n new_data = new Uint8Array(new_abuf);\n break;\n\n case 'uint16':\n new_abuf = new ArrayBuffer(image.array.length * Uint16Array.BYTES_PER_ELEMENT);\n new_data = new Uint16Array(new_abuf);\n break;\n\n case 'uint32':\n new_abuf = new ArrayBuffer(image.array.length * Uint32Array.BYTES_PER_ELEMENT);\n new_data = new Uint32Array(new_abuf);\n break;\n\n default:\n var error_message = \"Unsupported data type: \" + header.datatype;\n console.log({ message: error_message } );\n //BrainBrowser.events.triggerEvent(\"error\", { message: 
error_message } );\n throw new Error(error_message);\n\n }\n\n\n var n_slice_dims = image.dims.length - image_min.dims.length;\n\n if (n_slice_dims < 1) {\n throw new Error(\"Too few slice dimensions: \" + image.dims.length +\n \" \" + image_min.dims.length);\n }\n var n_slice_elements = 1;\n var i;\n for (i = image_min.dims.length; i < image.dims.length; i += 1) {\n n_slice_elements *= image.dims[i];\n }\n if (debug) {\n console.log(n_slice_elements + \" voxels in slice.\");\n }\n var s = 0;\n var c = 0;\n var x = -Number.MAX_VALUE;\n var n = Number.MAX_VALUE;\n var im = image.array;\n var im_max = image_max.array;\n var im_min = image_min.array;\n if (debug) {\n console.log(\"valid range is \" + valid_range[0] + \" to \" + valid_range[1]);\n }\n\n var vrange;\n var rrange;\n var vmin = valid_range[0];\n var rmin;\n var j;\n var v;\n var is_float = this.typeIsFloat(image.type);\n for (i = 0; i < image_min.array.length; i += 1) {\n if (debug) {\n console.log(i + \" \" + im_min[i] + \" \" + im_max[i] + \" \" +\n im[i * n_slice_elements]);\n }\n if (is_float) {\n /* For floating-point volumes there is no scaling to be performed.\n * We do scan the data and make sure voxels are within the valid\n * range, and collect our statistics.\n */\n for (j = 0; j < n_slice_elements; j += 1) {\n v = im[c];\n if (v < valid_range[0] || v > valid_range[1]) {\n new_data[c] = 0.0;\n }\n else {\n new_data[c] = v;\n s += v;\n if (v > x) {\n x = v;\n }\n if (v < n) {\n n = v;\n }\n }\n c += 1;\n }\n }\n else {\n /* For integer volumes we have to scale each slice according to image-min,\n * image-max, and valid_range.\n */\n vrange = (valid_range[1] - valid_range[0]);\n rrange = (im_max[i] - im_min[i]);\n rmin = im_min[i];\n\n /*\n console.log(n_slice_elements);\n console.log(vrange);\n console.log(rrange);\n console.log(rmin);\n console.log(\"-----------------\");\n */\n\n\n for (j = 0; j < n_slice_elements; j += 1) {\n\n // v normalization to avoid \"flickering\".\n // v is scaled to the range [0, im_max[i]]\n // (possibly uint16 if the original per-slice min-max was not scaled up/down)\n v = (im[c] - vmin) / vrange * rrange + rmin;\n\n // we scale up/down to match the type of the target array\n v = v / im_max[i] * valid_range[1];\n\n\n new_data[c] = v;\n s += v;\n c += 1;\n if (v > x) {\n x = v;\n }\n if (v < n) {\n n = v;\n }\n\n }\n\n }\n }\n\n if (debug) {\n console.log(\"Min: \" + n);\n console.log(\"Max: \" + x);\n console.log(\"Sum: \" + s);\n console.log(\"Mean: \" + s / c);\n }\n\n return new_abuf;\n }\n\n /**\n * @doc function\n * @name hdf5.this.isRgbVolume\n * @param {object} header The header object representing the structure\n * of the MINC file.\n * @param {object} image The typed array object used to represent the\n * image data.\n * @returns {boolean} True if this is an RGB volume.\n * @description\n * A MINC volume is an RGB volume if all three are true:\n * 1. The voxel type is unsigned byte.\n * 2. It has a vector_dimension in the last (fastest-varying) position.\n * 3. 
The vector dimension has length 3.\n */\n isRgbVolume(header, image) {\n var order = header.order;\n return (image.array.constructor.name === 'Uint8Array' &&\n order.length > 0 &&\n order[order.length - 1] === \"vector_dimension\" &&\n header.vector_dimension.space_length === 3);\n }\n\n /**\n * @doc function\n * @name hdf5.this.rgbVoxels\n * @param {object} image The 'link' object created using createLink(),\n * that corresponds to the image within the HDF5 or NetCDF file.\n * @returns {object} A new ArrayBuffer that contains the original RGB\n * data augmented with alpha values.\n * @description\n * This function copies the RGB voxels to the destination buffer.\n * Essentially we just convert from 24 to 32 bits per voxel. This\n * is another MINC-specific function.\n */\n rgbVoxels(image) {\n var im = image.array;\n var n = im.length;\n var new_abuf = new ArrayBuffer(n / 3 * 4);\n var new_byte = new Uint8Array(new_abuf);\n var i, j = 0;\n for (i = 0; i < n; i += 3) {\n new_byte[j+0] = im[i+0];\n new_byte[j+1] = im[i+1];\n new_byte[j+2] = im[i+2];\n new_byte[j+3] = 255;\n j += 4;\n }\n return new_abuf;\n }\n\n\n //----------------------------------------------------------------------------\n // FROM minc_reader.js\n parseHeader(header_text) {\n var header;\n var error_message;\n\n try{\n header = JSON.parse(header_text);\n } catch(error) {\n error_message = \"server did not respond with valid JSON\" + \"\\n\" +\n \"Response was: \\n\" + header_text;\n\n console.log( { message: error_message });\n\n // BrainBrowser.events.triggerEvent(\"error\", { message: error_message });\n throw new Error(error_message);\n }\n\n if(header.order.length === 4) {\n header.order = header.order.slice(1);\n }\n\n header.datatype = header.datatype || \"uint8\";\n\n header.xspace.space_length = parseFloat(header.xspace.space_length);\n header.yspace.space_length = parseFloat(header.yspace.space_length);\n header.zspace.space_length = parseFloat(header.zspace.space_length);\n\n header.xspace.start = parseFloat(header.xspace.start);\n header.yspace.start = parseFloat(header.yspace.start);\n header.zspace.start = parseFloat(header.zspace.start);\n\n header.xspace.step = parseFloat(header.xspace.step);\n header.yspace.step = parseFloat(header.yspace.step);\n header.zspace.step = parseFloat(header.zspace.step);\n\n header.xspace.direction_cosines = header.xspace.direction_cosines || [1, 0, 0];\n header.yspace.direction_cosines = header.yspace.direction_cosines || [0, 1, 0];\n header.zspace.direction_cosines = header.zspace.direction_cosines || [0, 0, 1];\n\n header.xspace.direction_cosines = header.xspace.direction_cosines.map(parseFloat);\n header.yspace.direction_cosines = header.yspace.direction_cosines.map(parseFloat);\n header.zspace.direction_cosines = header.zspace.direction_cosines.map(parseFloat);\n\n /* Incrementation offsets for each dimension of the volume.\n * Note that this somewhat format-specific, so it does not\n * belong in the generic \"createVolume()\" code.\n */\n header[header.order[0]].offset = header[header.order[1]].space_length * header[header.order[2]].space_length;\n header[header.order[1]].offset = header[header.order[2]].space_length;\n header[header.order[2]].offset = 1;\n\n if(header.time) {\n header.time.space_length = parseFloat(header.time.space_length);\n header.time.start = parseFloat(header.time.start);\n header.time.step = parseFloat(header.time.step);\n header.time.offset = header.xspace.space_length * header.yspace.space_length * header.zspace.space_length;\n }\n\n return 
header;\n }\n\n\n/*\n createMincVolume(header, raw_data){\n var volume = createVolume(header, this.createMincData(header, raw_data));\n volume.type = \"minc\";\n\n volume.saveOriginAndTransform(header);\n volume.intensity_min = header.voxel_min;\n volume.intensity_max = header.voxel_max;\n\n return volume;\n\n }\n*/\n\n\n /*\n initialize the large 1D array of data depending on the type found.\n Rearange the original ArrayBuffer into a typed array.\n args:\n header: obj - header of the data\n raw_data: ArrayBuffer - sub object given by hdf5Loader\n */\n createMincData(header, raw_data){\n\n var native_data = null;\n\n switch (header.datatype) {\n case 'int8':\n native_data = new Int8Array(raw_data);\n break;\n case 'int16':\n native_data = new Int16Array(raw_data);\n break;\n case 'int32':\n native_data = new Int32Array(raw_data);\n break;\n case 'float32':\n native_data = new Float32Array(raw_data);\n break;\n case 'float64':\n native_data = new Float64Array(raw_data);\n break;\n case 'uint8':\n native_data = new Uint8Array(raw_data);\n break;\n case 'uint16':\n native_data = new Uint16Array(raw_data);\n break;\n case 'uint32':\n case 'rgb8':\n native_data = new Uint32Array(raw_data);\n break;\n default:\n var error_message = \"Unsupported data type: \" + header.datatype;\n console.log({ message: error_message } );\n //BrainBrowser.events.triggerEvent(\"error\", { message: error_message } );\n throw new Error(error_message);\n }\n\n return native_data;\n }\n\n\n\n\n //----------------------------------------------------------------------------\n\n _run(){\n var that = this;\n\n var inputBuffer = this._getInput(0);\n\n if(!inputBuffer){\n console.warn(\"Minc2Decoder requires an ArrayBuffer as input \\\"0\\\". Unable to continue.\");\n return;\n }\n\n this._dv = new DataView(inputBuffer);\n\n\n /* Patch in the missing function to get 64-bit integers.\n * Note: this won't really quite work b/c Javascript doesn't\n * have support for 64-bit integers.\n */\n this._dv.getUint64 = function (off, little_endian) {\n var l4 = that._dv.getUint32(off + 0, little_endian);\n var u4 = that._dv.getUint32(off + 4, little_endian);\n if (little_endian) {\n return (u4 << 32) + l4;\n } else {\n return (l4 << 32) + u4;\n }\n };\n\n\n var root = this.createLink();\n\n try{\n this._superblk = this.hdf5Superblock();\n }catch(e){\n //console.error(e);\n console.warn(\"The input file is not a Minc2 file.\");\n return;\n }\n\n\n this.seek(this._superblk.root_addr);\n\n if (this._superblk.sbver <= 1) {\n this.hdf5V1ObjectHeader(root);\n } else {\n this.hdf5V2ObjectHeader(root);\n }\n\n this.loadData(root);\n\n\n\n\n\n if (this.getMetadata(\"debug\")) {\n this.printStructure(root, 0);\n }\n\n /* The rest of this code is MINC-specific, so like some of the\n * functions above, it can migrate into minc.js once things have\n * stabilized.\n *\n * This code is responsible for collecting up the various pieces\n * of important data and metadata, and reorganizing them into the\n * form the volume viewer can handle.\n */\n var image = this.findDataset(root, \"image\");\n if (!this.defined(image)) {\n throw new Error(\"Can't find image dataset.\");\n }\n\n var valid_range = this.findAttribute(image, \"valid_range\", 0);\n /* If no valid_range is found, we substitute our own. 
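// Illustrative sketch (not part of the original sources): the getUint64 patch
// above cannot rely on "u4 << 32" because JavaScript shift counts are taken
// modulo 32, so the high word is never actually shifted. For values below
// 2^53 the usual workaround is a multiplication; this variant is only an
// example, with a made-up name:
function getUint64Sketch(dataView, offset, littleEndian) {
  var lo = dataView.getUint32(offset + (littleEndian ? 0 : 4), littleEndian);
  var hi = dataView.getUint32(offset + (littleEndian ? 4 : 0), littleEndian);
  return hi * 0x100000000 + lo; // exact only while the value fits in 53 bits
}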
*/\n if (!this.defined(valid_range)) {\n var min_val;\n var max_val;\n switch (image.type) {\n case this._type_enum.INT8:\n min_val = -(1 << 7);\n max_val = (1 << 7) - 1;\n break;\n case this._type_enum.UINT8:\n min_val = 0;\n max_val = (1 << 8) - 1;\n break;\n case this._type_enum.INT16:\n min_val = -(1 << 15);\n max_val = (1 << 15) - 1;\n break;\n case this._type_enum.UINT16:\n min_val = 0;\n max_val = (1 << 16) - 1;\n break;\n case this._type_enum.INT32:\n min_val = -(1 << 31);\n max_val = (1 << 31) - 1;\n break;\n case this._type_enum.UINT32:\n min_val = 0;\n max_val = (1 << 32) - 1;\n break;\n }\n valid_range = Float32Array.of(min_val, max_val);\n }\n\n\n var image_min = this.findDataset(root, \"image-min\");\n if (!this.defined(image_min)) {\n image_min = {\n array: Float32Array.of(0),\n dims: []\n };\n }\n\n var image_max = this.findDataset(root, \"image-max\");\n if (!this.defined(image_max)) {\n image_max = {\n array: Float32Array.of(1),\n dims: []\n };\n }\n\n\n /* Create the header expected by the existing brainbrowser code.\n */\n var header = {};\n var tmp = this.findAttribute(image, \"dimorder\", 0);\n if (typeof tmp !== 'string') {\n throw new Error(\"Can't find dimension order.\");\n }\n header.order = tmp.split(',');\n\n header.order.forEach(function(dimname) {\n var dim = that.findDataset(root, dimname);\n if (!that.defined(dim)) {\n throw new Error(\"Can't find dimension variable \" + dimname);\n }\n\n header[dimname] = {};\n\n tmp = that.findAttribute(dim, \"step\", 0);\n if (!that.defined(tmp)) {\n tmp = Float32Array.of(1);\n }\n header[dimname].step = tmp[0];\n\n tmp = that.findAttribute(dim, \"start\", 0);\n if (!that.defined(tmp)) {\n tmp = Float32Array.of(0);\n }\n header[dimname].start = tmp[0];\n\n tmp = that.findAttribute(dim, \"length\", 0);\n if (!that.defined(tmp)) {\n throw new Error(\"Can't find length for \" + dimname);\n }\n header[dimname].space_length = tmp[0];\n\n tmp = that.findAttribute(dim, \"direction_cosines\", 0);\n if (that.defined(tmp)) {\n // why is the bizarre call to slice needed?? it seems to work, though!\n header[dimname].direction_cosines = Array.prototype.slice.call(tmp);\n }\n else {\n if (dimname === \"xspace\") {\n header[dimname].direction_cosines = [1, 0, 0];\n } else if (dimname === \"yspace\") {\n header[dimname].direction_cosines = [0, 1, 0];\n } else if (dimname === \"zspace\") {\n header[dimname].direction_cosines = [0, 0, 1];\n }\n }\n });\n\n var new_abuf;\n\n if (this.isRgbVolume(header, image)) {\n header.order.pop();\n header.datatype = 'rgb8';\n new_abuf = this.rgbVoxels(image);\n }\n else {\n\n //header.datatype = 'float32';\n header.datatype = this.getTypeMatchMinc(image.type)\n\n new_abuf = this.scaleVoxels(image, image_min, image_max, valid_range, this.getMetadata(\"debug\"));\n }\n\n var minc_header = this.parseHeader( JSON.stringify(header) );\n var dataArray = this.createMincData(minc_header, new_abuf)\n\n // add the output to this filter\n this._addOutput(MniVolume);\n var mniVol = this.getOutput();\n mniVol.setData(dataArray, minc_header);\n mniVol.setMetadata(\"format\", \"minc2\");\n }\n\n\n\n} /* END of class Minc2Decoder */\n\nexport { Minc2Decoder }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* Robert D. 
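// Illustrative sketch (not part of the original sources): a caveat on the
// default valid_range switch above. JavaScript bitwise shifts operate on
// 32-bit signed integers, so (1 << 31) evaluates to -2147483648 and (1 << 32)
// evaluates to 1 (the shift count is reduced modulo 32); the INT32 and UINT32
// fallbacks above therefore do not produce the intended limits. The same
// limits computed without shift overflow:
var typeLimitsSketch = {
  int8:   [ -Math.pow(2, 7),  Math.pow(2, 7)  - 1 ],
  uint8:  [ 0,                Math.pow(2, 8)  - 1 ],
  int16:  [ -Math.pow(2, 15), Math.pow(2, 15) - 1 ],
  uint16: [ 0,                Math.pow(2, 16) - 1 ],
  int32:  [ -Math.pow(2, 31), Math.pow(2, 31) - 1 ],
  uint32: [ 0,                Math.pow(2, 32) - 1 ]
};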
Vincent\n*\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\nimport pako from 'pako';\nimport { Filter } from '../core/Filter.js';\nimport { MniVolume } from '../core/MniVolume.js';\n\n\n/**\n* Decodes a NIfTI file.\n* Takes an ArrayBuffer as input (0) and output a `MniVolume` (which inherit `Image3D`).\n*\n* **Usage**\n* - [examples/fileToNifti.html](../examples/fileToNifti.html)\n*/\nclass NiftiDecoder extends Filter {\n\n constructor(){\n super();\n this.addInputValidator(0, ArrayBuffer);\n this.setMetadata(\"debug\", false);\n }\n\n\n /**\n * [PRIVATE]\n */\n parseNifti1Header(raw_data) {\n var header = {\n order: [\"zspace\", \"yspace\", \"xspace\"],\n xspace: {},\n yspace: {},\n zspace: {}\n };\n var error_message = null;\n var dview = new DataView(raw_data, 0, 348);\n var bytes = new Uint8Array(raw_data, 0, 348);\n var littleEndian = true;\n\n var sizeof_hdr = dview.getUint32(0, true);\n if (sizeof_hdr === 0x0000015c) {\n littleEndian = true;\n } else if (sizeof_hdr === 0x5c010000) {\n littleEndian = false;\n } else {\n error_message = \"This does not look like a NIfTI-1 file.\";\n }\n\n var ndims = dview.getUint16(40, littleEndian);\n if (ndims < 3 || ndims > 4) {\n error_message = \"Cannot handle \" + ndims + \"-dimensional images yet.\";\n }\n\n var magic = String.fromCharCode.apply(null, bytes.subarray(344, 348));\n if (magic !== \"n+1\\0\") {\n error_message = \"Bad magic number: '\" + magic + \"'\";\n }\n\n if (error_message) {\n //throw new Error(error_message);\n console.warn(\"The input file is not a NIfTI file.\");\n return null;\n }\n\n header.xspace.space_length = dview.getUint16(42, littleEndian);\n header.yspace.space_length = dview.getUint16(44, littleEndian);\n header.zspace.space_length = dview.getUint16(46, littleEndian);\n var tlength = dview.getUint16(48, littleEndian);\n\n var datatype = dview.getUint16(70, littleEndian);\n var bitpix = dview.getUint16(72, littleEndian);\n\n var xstep = dview.getFloat32(80, littleEndian);\n var ystep = dview.getFloat32(84, littleEndian);\n var zstep = dview.getFloat32(88, littleEndian);\n var tstep = dview.getFloat32(92, littleEndian);\n\n var vox_offset = dview.getFloat32(108, littleEndian);\n if (vox_offset < 352) {\n vox_offset = 352;\n }\n\n var scl_slope = dview.getFloat32(112, littleEndian);\n var scl_inter = dview.getFloat32(116, littleEndian);\n\n var qform_code = dview.getUint16(252, littleEndian);\n var sform_code = dview.getUint16(254, littleEndian);\n\n var transform = [\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]\n ];\n\n if (tlength >= 1) {\n header.time = {};\n header.time.space_length = tlength;\n header.time.step = tstep;\n header.time.start = 0;\n header.time.name = \"time\";\n header.order = [\"time\", \"zspace\", \"yspace\", \"xspace\"];\n }\n\n /* Record the number of bytes per voxel, and note whether we need\n * to swap bytes in the voxel data.\n */\n header.bytes_per_voxel = bitpix / 8;\n header.must_swap_data = !littleEndian && header.bytes_per_voxel > 1;\n\n if (sform_code > 0) {\n /* The \"Sform\", if present, defines an affine transform which is\n * generally assumed to correspond to some standard coordinate\n * space (e.g. 
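// Illustrative sketch (not part of the original sources): the NIfTI-1 sanity
// checks used by parseNifti1Header() above, isolated into a small helper.
// sizeof_hdr at byte 0 must read 348 (which also reveals the byte order) and
// the magic string "n+1\0" sits at byte 344. The helper name is made up.
function sniffNifti1Sketch(arrayBuffer) {
  var view = new DataView(arrayBuffer);
  var littleEndian;
  var sizeofHdr = view.getUint32(0, true);
  if (sizeofHdr === 0x0000015c) {          // 348 read as little-endian
    littleEndian = true;
  } else if (sizeofHdr === 0x5c010000) {   // 348 read as big-endian
    littleEndian = false;
  } else {
    return null;                           // not a NIfTI-1 file
  }
  var magicBytes = new Uint8Array(arrayBuffer, 344, 4);
  var magic = String.fromCharCode.apply(null, magicBytes);
  return magic === "n+1\0" ? { littleEndian: littleEndian } : null;
}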
Talairach).\n */\n transform[0][0] = dview.getFloat32(280, littleEndian);\n transform[0][1] = dview.getFloat32(284, littleEndian);\n transform[0][2] = dview.getFloat32(288, littleEndian);\n transform[0][3] = dview.getFloat32(292, littleEndian);\n transform[1][0] = dview.getFloat32(296, littleEndian);\n transform[1][1] = dview.getFloat32(300, littleEndian);\n transform[1][2] = dview.getFloat32(304, littleEndian);\n transform[1][3] = dview.getFloat32(308, littleEndian);\n transform[2][0] = dview.getFloat32(312, littleEndian);\n transform[2][1] = dview.getFloat32(316, littleEndian);\n transform[2][2] = dview.getFloat32(320, littleEndian);\n transform[2][3] = dview.getFloat32(324, littleEndian);\n }\n else if (qform_code > 0) {\n /* The \"Qform\", if present, defines a quaternion which specifies\n * a less general transformation, often to scanner space.\n */\n var quatern_b = dview.getFloat32(256, littleEndian);\n var quatern_c = dview.getFloat32(260, littleEndian);\n var quatern_d = dview.getFloat32(264, littleEndian);\n var qoffset_x = dview.getFloat32(268, littleEndian);\n var qoffset_y = dview.getFloat32(272, littleEndian);\n var qoffset_z = dview.getFloat32(276, littleEndian);\n var qfac = (dview.getFloat32(76, littleEndian) < 0) ? -1.0 : 1.0;\n\n transform = this.niftiQuaternToMat44(quatern_b, quatern_c, quatern_d,\n qoffset_x, qoffset_y, qoffset_z,\n xstep, ystep, zstep, qfac);\n }\n else {\n transform[0][0] = xstep;\n transform[1][1] = ystep;\n transform[2][2] = zstep;\n }\n\n MniVolume.transformToMinc(transform, header);\n\n header.datatype = datatype;\n header.vox_offset = vox_offset;\n header.scl_slope = scl_slope;\n header.scl_inter = scl_inter;\n\n return header;\n }\n\n\n /**\n * [PRIVATE]\n * This function is a direct translation of the identical function\n * found in the standard NIfTI-1 library (nifti1_io.c).\n */\n niftiQuaternToMat44( qb, qc, qd,\n qx, qy, qz,\n dx, dy, dz, qfac )\n {\n var m = [ // 4x4 transform\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 1]\n ];\n var b = qb;\n var c = qc;\n var d = qd;\n var a, xd, yd, zd;\n\n // compute a parameter from b,c,d\n\n a = 1.0 - (b * b + c * c + d * d);\n if ( a < 1.e-7 ) { // special case\n a = 1.0 / Math.sqrt(b * b + c * c + d * d);\n b *= a; // normalize (b,c,d) vector\n c *= a;\n d *= a;\n a = 0.0; // a = 0 ==> 180 degree rotation\n } else {\n a = Math.sqrt(a); // angle = 2*arccos(a)\n }\n\n // load rotation matrix, including scaling factors for voxel sizes\n\n xd = (dx > 0.0) ? dx : 1.0; // make sure are positive\n yd = (dy > 0.0) ? dy : 1.0;\n zd = (dz > 0.0) ? 
dz : 1.0;\n\n if ( qfac < 0.0 ) // left handedness?\n zd = -zd;\n\n m[0][0] = (a * a + b * b - c * c - d * d) * xd;\n m[0][1] = 2.0 * (b * c - a * d ) * yd;\n m[0][2] = 2.0 * (b * d + a * c ) * zd;\n m[1][0] = 2.0 * (b * c + a * d ) * xd;\n m[1][1] = (a * a + c * c - b * b - d * d) * yd;\n m[1][2] = 2.0 * (c * d - a * b ) * zd;\n m[2][0] = 2.0 * (b * d - a * c ) * xd;\n m[2][1] = 2.0 * (c * d + a * b ) * yd;\n m[2][2] = (a * a + d * d - c * c - b * b) * zd;\n\n // load offsets\n m[0][3] = qx;\n m[1][3] = qy;\n m[2][3] = qz;\n\n return m;\n }\n\n\n /**\n * [PRIVATE]\n */\n createNifti1Data(header, raw_data) {\n var native_data = null;\n\n if (header.must_swap_data) {\n MniVolume.swapn(\n new Uint8Array(raw_data, header.vox_offset),\n header.bytes_per_voxel\n );\n }\n\n switch (header.datatype) {\n case 2: // DT_UNSIGNED_CHAR\n // no translation necessary; could optimize this out.\n native_data = new Uint8Array(raw_data, header.vox_offset);\n break;\n case 4: // DT_SIGNED_SHORT\n native_data = new Int16Array(raw_data, header.vox_offset);\n break;\n case 8: // DT_SIGNED_INT\n native_data = new Int32Array(raw_data, header.vox_offset);\n break;\n case 16: // DT_FLOAT\n native_data = new Float32Array(raw_data, header.vox_offset);\n break;\n case 64: // DT_DOUBLE\n native_data = new Float64Array(raw_data, header.vox_offset);\n break;\n // Values above 256 are NIfTI-specific, and rarely used.\n case 256: // DT_INT8\n native_data = new Int8Array(raw_data, header.vox_offset);\n break;\n case 512: // DT_UINT16\n native_data = new Uint16Array(raw_data, header.vox_offset);\n break;\n case 768: // DT_UINT32\n native_data = new Uint32Array(raw_data, header.vox_offset);\n break;\n default:\n // We don't yet support 64-bit, complex, RGB, and float 128 types.\n throw new Error(\"Unsupported data type: \" + header.datatype);\n }\n\n var d = 0; // Generic loop counter.\n var slope = header.scl_slope;\n var inter = header.scl_inter;\n\n // According to the NIfTI specification, a slope value of zero means\n // that the data should _not_ be scaled. Otherwise, every voxel is\n // transformed according to value = value * slope + inter\n //\n if (slope !== 0.0) {\n var float_data = new Float32Array(native_data.length);\n\n for (d = 0; d < native_data.length; d++) {\n float_data[d] = native_data[d] * slope + inter;\n }\n native_data = float_data; // Return the new float buffer.\n }\n\n if(header.order.length === 4) {\n header.order = header.order.slice(1);\n }\n\n // Incrementation offsets for each dimension of the volume.\n header[header.order[0]].offset = header[header.order[1]].space_length * header[header.order[2]].space_length;\n header[header.order[1]].offset = header[header.order[2]].space_length;\n header[header.order[2]].offset = 1;\n\n if(header.time) {\n header.time.offset = header.xspace.space_length * header.yspace.space_length * header.zspace.space_length;\n }\n\n return native_data;\n }\n\n\n //----------------------------------------------------------------------------\n\n _run(){\n var that = this;\n var inputBuffer = this._getInput(0);\n\n if(!inputBuffer){\n console.warn(\"NiftiDecoder requires an ArrayBuffer as input \\\"0\\\". 
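// Illustrative sketch (not part of the original sources): the scl_slope /
// scl_inter rule applied in createNifti1Data() above. A slope of exactly 0
// means the stored values are used unchanged; otherwise every voxel becomes
// value * slope + inter. For example, with scl_slope = 2.5 and
// scl_inter = -10, a stored value of 100 becomes 100 * 2.5 - 10 = 240.
function applyNiftiScalingSketch(rawValue, slope, inter) {
  return slope === 0.0 ? rawValue : rawValue * slope + inter;
}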
Unable to continue.\");\n return;\n }\n\n var header = this.parseNifti1Header( inputBuffer );\n\n // abort if header not valid\n if(!header)\n return;\n\n var dataArray = this.createNifti1Data(header, inputBuffer)\n\n // add the output to this filter\n this._addOutput(MniVolume);\n var mniVol = this.getOutput();\n mniVol.setData(dataArray, header);\n mniVol.setMetadata(\"format\", \"nifti\");\n\n }\n\n\n} /* END class NiftiDecoder */\n\nexport { NiftiDecoder }\n","/* FileSaver.js\n * A saveAs() FileSaver implementation.\n * 1.3.2\n * 2016-06-16 18:25:19\n *\n * By Eli Grey, http://eligrey.com\n * License: MIT\n * See https://github.com/eligrey/FileSaver.js/blob/master/LICENSE.md\n */\n\n/*global self */\n/*jslint bitwise: true, indent: 4, laxbreak: true, laxcomma: true, smarttabs: true, plusplus: true */\n\n/*! @source http://purl.eligrey.com/github/FileSaver.js/blob/master/FileSaver.js */\n\nvar saveAs = saveAs || (function(view) {\n\t\"use strict\";\n\t// IE <10 is explicitly unsupported\n\tif (typeof view === \"undefined\" || typeof navigator !== \"undefined\" && /MSIE [1-9]\\./.test(navigator.userAgent)) {\n\t\treturn;\n\t}\n\tvar\n\t\t doc = view.document\n\t\t // only get URL when necessary in case Blob.js hasn't overridden it yet\n\t\t, get_URL = function() {\n\t\t\treturn view.URL || view.webkitURL || view;\n\t\t}\n\t\t, save_link = doc.createElementNS(\"http://www.w3.org/1999/xhtml\", \"a\")\n\t\t, can_use_save_link = \"download\" in save_link\n\t\t, click = function(node) {\n\t\t\tvar event = new MouseEvent(\"click\");\n\t\t\tnode.dispatchEvent(event);\n\t\t}\n\t\t, is_safari = /constructor/i.test(view.HTMLElement) || view.safari\n\t\t, is_chrome_ios =/CriOS\\/[\\d]+/.test(navigator.userAgent)\n\t\t, throw_outside = function(ex) {\n\t\t\t(view.setImmediate || view.setTimeout)(function() {\n\t\t\t\tthrow ex;\n\t\t\t}, 0);\n\t\t}\n\t\t, force_saveable_type = \"application/octet-stream\"\n\t\t// the Blob API is fundamentally broken as there is no \"downloadfinished\" event to subscribe to\n\t\t, arbitrary_revoke_timeout = 1000 * 40 // in ms\n\t\t, revoke = function(file) {\n\t\t\tvar revoker = function() {\n\t\t\t\tif (typeof file === \"string\") { // file is an object URL\n\t\t\t\t\tget_URL().revokeObjectURL(file);\n\t\t\t\t} else { // file is a File\n\t\t\t\t\tfile.remove();\n\t\t\t\t}\n\t\t\t};\n\t\t\tsetTimeout(revoker, arbitrary_revoke_timeout);\n\t\t}\n\t\t, dispatch = function(filesaver, event_types, event) {\n\t\t\tevent_types = [].concat(event_types);\n\t\t\tvar i = event_types.length;\n\t\t\twhile (i--) {\n\t\t\t\tvar listener = filesaver[\"on\" + event_types[i]];\n\t\t\t\tif (typeof listener === \"function\") {\n\t\t\t\t\ttry {\n\t\t\t\t\t\tlistener.call(filesaver, event || filesaver);\n\t\t\t\t\t} catch (ex) {\n\t\t\t\t\t\tthrow_outside(ex);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t, auto_bom = function(blob) {\n\t\t\t// prepend BOM for UTF-8 XML and text/* types (including HTML)\n\t\t\t// note: your browser will automatically convert UTF-16 U+FEFF to EF BB BF\n\t\t\tif (/^\\s*(?:text\\/\\S*|application\\/xml|\\S*\\/\\S*\\+xml)\\s*;.*charset\\s*=\\s*utf-8/i.test(blob.type)) {\n\t\t\t\treturn new Blob([String.fromCharCode(0xFEFF), blob], {type: blob.type});\n\t\t\t}\n\t\t\treturn blob;\n\t\t}\n\t\t, FileSaver = function(blob, name, no_auto_bom) {\n\t\t\tif (!no_auto_bom) {\n\t\t\t\tblob = auto_bom(blob);\n\t\t\t}\n\t\t\t// First try a.download, then web filesystem, then object URLs\n\t\t\tvar\n\t\t\t\t filesaver = this\n\t\t\t\t, type = blob.type\n\t\t\t\t, force = 
type === force_saveable_type\n\t\t\t\t, object_url\n\t\t\t\t, dispatch_all = function() {\n\t\t\t\t\tdispatch(filesaver, \"writestart progress write writeend\".split(\" \"));\n\t\t\t\t}\n\t\t\t\t// on any filesys errors revert to saving with object URLs\n\t\t\t\t, fs_error = function() {\n\t\t\t\t\tif ((is_chrome_ios || (force && is_safari)) && view.FileReader) {\n\t\t\t\t\t\t// Safari doesn't allow downloading of blob urls\n\t\t\t\t\t\tvar reader = new FileReader();\n\t\t\t\t\t\treader.onloadend = function() {\n\t\t\t\t\t\t\tvar url = is_chrome_ios ? reader.result : reader.result.replace(/^data:[^;]*;/, 'data:attachment/file;');\n\t\t\t\t\t\t\tvar popup = view.open(url, '_blank');\n\t\t\t\t\t\t\tif(!popup) view.location.href = url;\n\t\t\t\t\t\t\turl=undefined; // release reference before dispatching\n\t\t\t\t\t\t\tfilesaver.readyState = filesaver.DONE;\n\t\t\t\t\t\t\tdispatch_all();\n\t\t\t\t\t\t};\n\t\t\t\t\t\treader.readAsDataURL(blob);\n\t\t\t\t\t\tfilesaver.readyState = filesaver.INIT;\n\t\t\t\t\t\treturn;\n\t\t\t\t\t}\n\t\t\t\t\t// don't create more object URLs than needed\n\t\t\t\t\tif (!object_url) {\n\t\t\t\t\t\tobject_url = get_URL().createObjectURL(blob);\n\t\t\t\t\t}\n\t\t\t\t\tif (force) {\n\t\t\t\t\t\tview.location.href = object_url;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tvar opened = view.open(object_url, \"_blank\");\n\t\t\t\t\t\tif (!opened) {\n\t\t\t\t\t\t\t// Apple does not allow window.open, see https://developer.apple.com/library/safari/documentation/Tools/Conceptual/SafariExtensionGuide/WorkingwithWindowsandTabs/WorkingwithWindowsandTabs.html\n\t\t\t\t\t\t\tview.location.href = object_url;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tfilesaver.readyState = filesaver.DONE;\n\t\t\t\t\tdispatch_all();\n\t\t\t\t\trevoke(object_url);\n\t\t\t\t}\n\t\t\t;\n\t\t\tfilesaver.readyState = filesaver.INIT;\n\n\t\t\tif (can_use_save_link) {\n\t\t\t\tobject_url = get_URL().createObjectURL(blob);\n\t\t\t\tsetTimeout(function() {\n\t\t\t\t\tsave_link.href = object_url;\n\t\t\t\t\tsave_link.download = name;\n\t\t\t\t\tclick(save_link);\n\t\t\t\t\tdispatch_all();\n\t\t\t\t\trevoke(object_url);\n\t\t\t\t\tfilesaver.readyState = filesaver.DONE;\n\t\t\t\t});\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tfs_error();\n\t\t}\n\t\t, FS_proto = FileSaver.prototype\n\t\t, saveAs = function(blob, name, no_auto_bom) {\n\t\t\treturn new FileSaver(blob, name || blob.name || \"download\", no_auto_bom);\n\t\t}\n\t;\n\t// IE 10+ (native saveAs)\n\tif (typeof navigator !== \"undefined\" && navigator.msSaveOrOpenBlob) {\n\t\treturn function(blob, name, no_auto_bom) {\n\t\t\tname = name || blob.name || \"download\";\n\n\t\t\tif (!no_auto_bom) {\n\t\t\t\tblob = auto_bom(blob);\n\t\t\t}\n\t\t\treturn navigator.msSaveOrOpenBlob(blob, name);\n\t\t};\n\t}\n\n\tFS_proto.abort = function(){};\n\tFS_proto.readyState = FS_proto.INIT = 0;\n\tFS_proto.WRITING = 1;\n\tFS_proto.DONE = 2;\n\n\tFS_proto.error =\n\tFS_proto.onwritestart =\n\tFS_proto.onprogress =\n\tFS_proto.onwrite =\n\tFS_proto.onabort =\n\tFS_proto.onerror =\n\tFS_proto.onwriteend =\n\t\tnull;\n\n\treturn saveAs;\n}(\n\t typeof self !== \"undefined\" && self\n\t|| typeof window !== \"undefined\" && window\n\t|| this.content\n));\n// `self` is undefined in Firefox for Android content script context\n// while `this` is nsIContentFrameMessageManager\n// with an attribute `content` that corresponds to the window\n\nif (typeof module !== \"undefined\" && module.exports) {\n module.exports.saveAs = saveAs;\n} else if ((typeof define !== \"undefined\" && define !== null) && 
(define.amd !== null)) {\n define(\"FileSaver.js\", function() {\n return saveAs;\n });\n}\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n*\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\nimport pako from 'pako';\nimport FileSaver from 'file-saver';\n//import JSZip from \"jszip\";\nimport { Filter } from '../core/Filter.js';\nimport { Image2D } from '../core/Image2D.js';\nimport { Image3D } from '../core/Image3D.js';\n\n\n/**\n* A PixpEncoder instance takes an Image2D or Image3D as input with `addInput(...)`\n* and encode it so that it can be saved as a *.pixp file.\n* An output filename can be specified using `.setMetadata(\"filename\", \"yourName.pixp\");`,\n* by default, the name is \"untitled.pixp\".\n* When `update()` is called, a gzip blog is prepared as output[0] and can then be downloaded\n* when calling the method `.download()`. The gzip blob could also be sent over AJAX\n* using a third party library.\n*\n* **Usage**\n* - [examples/savePixpFile.html](../examples/savePixpFile.html)\n*/\nclass PixpEncoder extends Filter {\n constructor(){\n super();\n this.setMetadata(\"filename\", \"untitled.pixp\");\n\n }\n\n\n /**\n * [PRIVATE]\n * overwrite the original from Filter\n * Only accept Image2D and Image3D\n */\n hasValidInput(){\n var input = this._getInput();\n return input && ( input.isOfType(Image2D.TYPE()) || input.isOfType(Image3D.TYPE()) );\n }\n\n\n _run(){\n\n if(! this.hasValidInput() ){\n console.warn(\"PixpEncoder can only encode Image2D and Image3D.\");\n return;\n }\n\n var input = this._getInput();\n\n var arrayAndMeta = {\n dataType: input.getData().constructor.name, // typed array type\n data: Array.prototype.slice.call( input.getData() ), // data of pixel/voxel\n metadata: input.getMetadataCopy(), // Image2D/Image3D._metadata\n pixpipeType: input.constructor.name // most likely \"Image2D\", \"Image3D\", \"MniVolume\", \"LineString\", etc.\n }\n\n var pixpString = JSON.stringify( arrayAndMeta );\n\n var deflator = new pako.Deflate({\n level: 6,\n //to: 'string',\n gzip: true,\n header: {\n text: true,\n time: + new Date(),\n comment: \"This file was created by Pixpipe.js\"\n }\n });\n\n deflator.push(pixpString, true);\n\n // making a blob to be saved\n this._output[0] = new Blob([deflator.result], {type: \"application/gzip\"} );\n }\n\n\n /**\n * Download the generated file\n */\n download(){\n var output = this.getOutput();\n\n if(output){\n FileSaver.saveAs( this.getOutput(), this.getMetadata(\"filename\"));\n }else{\n console.warn(\"No output computed yet.\");\n }\n }\n\n} /* END of class PixpEncoder */\n\nexport { PixpEncoder }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n*\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\nimport pako from 'pako';\nimport FileSaver from 'file-saver';\nimport { Filter } from '../core/Filter.js';\nimport { Image2D } from '../core/Image2D.js';\nimport { Image3D } from '../core/Image3D.js';\n\n\n/**\n* A PixpDecoder instance decodes a *.pixp file and output an Image2D or Image3D.\n* The input, specified by `.addInput(...)` must be an ArrayBuffer\n* (from an `UrlToArrayBufferFilter`, an `UrlToArrayBufferReader` or anothrer source ).\n*\n* **Usage**\n* - [examples/pixpFileToImage2D.html](../examples/pixpFileToImage2D.html)\n*/\nclass PixpDecoder extends Filter {\n constructor(){\n super();\n this.addInputValidator(0, ArrayBuffer);\n }\n\n\n _run(){\n\n if(! 
this.hasValidInput() ){\n console.warn(\"PixpDecoder can only decode ArrayBuffer.\");\n return;\n }\n\n var input = this._getInput();\n\n //var pixpString2 = pako.inflate(input /*, { to: 'string' }*/);\n //var pixpObject = JSON.parse( pixpString2 );\n\n var inflator = new pako.Inflate({\n level: 6,\n to: 'string'\n });\n\n inflator.push( input, true );\n\n // quit if not a gz file\n if( inflator.err ){\n console.warn(\"This file is not a Pixp file.\");\n return;\n }\n \n var pixpObject = null;\n\n try{\n pixpObject = JSON.parse( inflator.result );\n }catch(e){\n console.warn(\"Could not parse pixp file.\");\n console.error(e);\n return;\n }\n\n if( ! (pixpObject.pixpipeType in pixpipe)){\n console.warn(\"Unknown type pixpipe.\" + pixpObject.pixpipeType + \", cannot create any output.\" );\n return;\n }\n\n var constructorHost = null;\n \n try{\n constructorHost = window;\n }catch( e ){\n try{\n constructorHost = GLOBAL;\n }catch( e ){\n console.warn( \"You are not in a Javascript environment?? Weird.\" );\n return;\n }\n }\n \n if(! constructorHost[ pixpObject.dataType ]){\n console.warn( \"Data array from pixp file is unknown: \" + pixpObject.dataType );\n return;\n }\n\n var outputRawData = new constructorHost[ pixpObject.dataType ]( pixpObject.data );\n var output = new pixpipe[ pixpObject.pixpipeType ];\n output.setRawData( outputRawData );\n output.setRawMetadata( pixpObject.metadata );\n\n this._output[0] = output;\n\n }\n\n\n\n} /* END of class PixpDecoder */\n\nexport { PixpDecoder }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* Robert D. Vincent\n*\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\nimport pako from 'pako';\nimport { Filter } from '../core/Filter.js';\nimport { MniVolume } from '../core/MniVolume.js';\n\n/**\n* Decodes a MGH file.\n* Takes an ArrayBuffer as input (0) and output a `MniVolume` (which inherit `Image3D`).\n*\n* **Usage**\n* - [examples/fileToMgh.html](../examples/fileToMgh.html)\n*/\nclass MghDecoder extends Filter {\n \n constructor() {\n super();\n this.addInputValidator(0, ArrayBuffer);\n this.setMetadata(\"debug\", false);\n }\n \n \n /* Function to parse the basic MGH header. This is a 284-byte binary\n * object that begins at offset zero in the file.\n * The resulting header object will contain the following fields:\n *\n * header.order[] - An array of strings that gives the order of the\n * spatial dimensions.\n * header.xspace - Description of the X axis (patient left to right)\n * header.yspace - Description of the Y axis (patient posterior to anterior)\n * header.zspace - Description of the Z axis (patient inferior to superior)\n * header.time - Description of time axis, if any.\n\n * Non-standard fields used internally only:\n *\n * header.nvoxels - Total number of voxels in the image.\n * header.datatype - MGH data type of image.\n * header.little_endian - True if data is little endian (should be false!)\n */\n _parseMGHHeader(raw_data, callback) {\n var header = {\n order: [\"xspace\", \"yspace\", \"zspace\"],\n xspace: {},\n yspace: {},\n zspace: {}\n };\n var error_message;\n var dview = new DataView(raw_data, 0, 284);\n var little_endian = true;\n\n /* Read the header version, which should always have the value\n * 0x00000001. 
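// Illustrative sketch (not part of the original sources): how the
// PixpEncoder / PixpDecoder pair documented above is typically driven, using
// the Filter API referenced throughout this bundle (addInput, update,
// getOutput, setMetadata). myImage2D and somePixpArrayBuffer are placeholders
// supplied by the caller.
var encoderSketch = new PixpEncoder();
encoderSketch.setMetadata("filename", "myImage.pixp");
encoderSketch.addInput(myImage2D);
encoderSketch.update();
encoderSketch.download();                 // saves the gzip blob via FileSaver

var decoderSketch = new PixpDecoder();
decoderSketch.addInput(somePixpArrayBuffer);
decoderSketch.update();
var restoredImage = decoderSketch.getOutput();  // Image2D / Image3D on success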
We use this to test the endian-ness of the data,\n * but it should always be big-endian.\n */\n var hdr_version = dview.getUint32(0, true);\n if (hdr_version === 0x00000001) {\n little_endian = true;\n } else if (hdr_version === 0x01000000) {\n little_endian = false; // Generally files are big-endian.\n }\n else {\n console.warn( \"This does not look like an MGH file.\" );\n return null;\n }\n\n /* Now read the dimension lengths. There are at most 4 dimensions\n * in the file. The lengths fields are always present, but they\n * unused dimensions may have the value 0 or 1.\n */\n var ndims = 0;\n var sizes = [0, 0, 0, 0];\n var header_offset = 4;\n var nvoxels = 1;\n for (ndims = 0; ndims < 4; ndims++) {\n sizes[ndims] = dview.getUint32(header_offset, little_endian);\n if (sizes[ndims] <= 1) {\n break;\n }\n nvoxels *= sizes[ndims];\n header_offset += 4;\n }\n\n if (ndims < 3 || ndims > 4) {\n console.warn( \"Cannot handle \" + ndims + \"-dimensional images yet.\" );\n return null;\n }\n\n var datatype = dview.getUint32(20, little_endian);\n // IGNORED var dof = dview.getUint32(24, little_endian);\n var good_transform_flag = dview.getUint16(28, little_endian);\n var spacing = [1.0, 1.0, 1.0];\n var i, j;\n var dircos = [\n [-1.0, 0.0, 0.0],\n [ 0.0, 0.0, -1.0],\n [ 0.0, 1.0, 0.0],\n [ 0.0, 0.0, 0.0]\n ];\n if (good_transform_flag) {\n header_offset = 30;\n for (i = 0; i < 3; i++) {\n spacing[i] = dview.getFloat32(header_offset, little_endian);\n header_offset += 4;\n }\n for (i = 0; i < 4; i++) {\n for (j = 0; j < 3; j++) {\n dircos[i][j] = dview.getFloat32(header_offset, little_endian);\n header_offset += 4;\n }\n }\n }\n\n if ( this._metadata.debug ) {\n // Prints out the transform in a format similar to the output\n // of FreeSurfer's mri_info tool.\n //\n for (i = 0; i < 3; i++) {\n var s1 = \"\";\n for (j = 0; j < 4; j++) {\n s1 += \"xyzc\"[j] + \"_\" + \"ras\"[i] + \" \" + dircos[j][i] + \" \";\n }\n console.log(s1);\n }\n }\n\n var axis_index_from_file = [0, 1, 2];\n\n for ( var axis = 0; axis < 3; axis++) {\n var spatial_axis = 0;\n var c_x = Math.abs(dircos[axis][0]);\n var c_y = Math.abs(dircos[axis][1]);\n var c_z = Math.abs(dircos[axis][2]);\n\n header.order[axis] = \"xspace\";\n if (c_y > c_x && c_y > c_z) {\n spatial_axis = 1;\n header.order[axis] = \"yspace\";\n }\n if (c_z > c_x && c_z > c_y) {\n spatial_axis = 2;\n header.order[axis] = \"zspace\";\n }\n axis_index_from_file[axis] = spatial_axis;\n }\n\n /* If there are four dimensions, assume the last is the time\n * dimension. I use default values for step and start because as\n * far as I know MGH files do not carry any descriptive\n * information about the 4th dimension.\n */\n if (ndims === 4) {\n if (this._metadata.debug) {\n console.log(\"Creating time dimension: \" + sizes[3]);\n }\n header.time = {\n space_length: sizes[3],\n step: 1,\n start: 0,\n name: \"time\"\n };\n header.order.push(\"time\");\n }\n\n /** This is here because there are two different ways of interpreting\n * the origin of an MGH file. One can ignore the offsets in the\n * transform, using the centre of the voxel grid. Or you can correct\n * these naive grid centres using the values stored in the transform.\n * The first approach is what is used by surface files, so to get them\n * to register nicely, we want ignore_offsets to be true. 
However,\n * getting volumetric files to register correctly implies setting\n * ignore_offsets to false.\n */\n var ignore_offsets = false;\n var mgh_xform = [\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]\n ];\n for (i = 0; i < 3; i++) {\n for (j = 0; j < 3; j++) {\n mgh_xform[i][j] = dircos[j][i] * spacing[i];\n }\n }\n\n for (i = 0; i < 3; i++) {\n var temp = 0.0;\n for (j = 0; j < 3; j++) {\n temp += mgh_xform[i][j] * (sizes[j] / 2.0);\n }\n\n if (ignore_offsets) {\n mgh_xform[i][4 - 1] = -temp;\n }\n else {\n mgh_xform[i][4 - 1] = dircos[4 - 1][i] - temp;\n }\n }\n\n var transform = [\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]\n ];\n\n for (i = 0; i < 3; i++) {\n for (j = 0; j < 4; j++) {\n var volume_axis = j;\n if (j < 3) {\n volume_axis = axis_index_from_file[j];\n }\n transform[i][volume_axis] = mgh_xform[i][j];\n }\n }\n\n // Now that we have the transform, need to convert it to MINC-like\n // steps and direction_cosines.\n\n MniVolume.transformToMinc(transform, header);\n\n // Save the datatype so that we can refer to it later.\n header.datatype = datatype;\n header.little_endian = little_endian;\n header.nvoxels = nvoxels;\n\n // Save the voxel dimension lengths.\n for (i = 0; i < 3; i++) {\n header[header.order[i]].space_length = sizes[i];\n }\n\n return header;\n }\n \n \n _createMGHData(header, raw_data) {\n \n var native_data = null;\n var bytes_per_voxel = 1;\n\n switch (header.datatype) {\n case 0: // Unsigned characters.\n bytes_per_voxel = 1;\n break;\n case 1: // 4-byte signed integers.\n case 3: // 4-byte float.\n bytes_per_voxel = 4;\n break;\n case 4: // 2-byte signed integers.\n bytes_per_voxel = 2;\n break;\n default:\n console.warn( \"Unsupported data type: \" + header.datatype );\n return null;\n }\n\n var nbytes = header.nvoxels * bytes_per_voxel;\n\n if (bytes_per_voxel > 1 && !header.little_endian) {\n MniVolume.swapn( new Uint8Array(raw_data, 284, nbytes), bytes_per_voxel );\n }\n\n switch (header.datatype) {\n case 0: // unsigned char\n native_data = new Uint8Array(raw_data, 284, header.nvoxels);\n break;\n case 1: // signed int\n native_data = new Int32Array(raw_data, 284, header.nvoxels);\n break;\n case 3:\n native_data = new Float32Array(raw_data, 284, header.nvoxels);\n break;\n case 4: // signed short\n native_data = new Int16Array(raw_data, 284, header.nvoxels);\n break;\n }\n\n // Incrementation offsets for each dimension of the volume. MGH\n // files store the fastest-varying dimension _first_, so the\n // \"first\" dimension actually has the smallest offset. That is\n // why this calculation is different from that for NIfTI-1.\n //\n var offset = 1;\n for (var d = 0; d < header.order.length; d++) {\n header[header.order[d]].offset = offset;\n offset *= header[header.order[d]].space_length;\n }\n return native_data;\n\n }\n\n \n _run(){\n var inputBuffer = this._getInput(0);\n\n if(!inputBuffer){\n console.warn(\"MghDecoder requires an ArrayBuffer as input \\\"0\\\". 
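// Illustrative worked example (not part of the original sources) for the MGH
// offset loop above, which walks dimensions fastest-varying first. With
// header.order = ["xspace", "yspace", "zspace"] and space_length values of
// 256, 256 and 128, the loop produces:
//   xspace.offset = 1
//   yspace.offset = 256
//   zspace.offset = 256 * 256 = 65536
// so the first listed dimension gets the smallest stride, the reverse of the
// MINC/NIfTI offset computation seen earlier in this bundle.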
Unable to continue.\");\n return;\n }\n\n var header = this._parseMGHHeader( inputBuffer );\n\n // abort if header not valid\n if(!header)\n return;\n\n\n var dataArray = this._createMGHData(header, inputBuffer)\n \n if(!dataArray)\n return null;\n\n // add the output to this filter\n this._addOutput(MniVolume);\n var mniVol = this.getOutput();\n mniVol.setData(dataArray, header);\n mniVol.setMetadata(\"format\", \"mgh\");\n \n }\n \n} /* END of class MghDecoder */\n\nexport { MghDecoder };\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n*\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\nimport pako from 'pako';\nimport FileSaver from 'file-saver';\nimport { Filter } from '../core/Filter.js';\nimport { Image2D } from '../core/Image2D.js';\nimport { Image3D } from '../core/Image3D.js';\n\n\n/**\n* A PixBinDecoder instance decodes a *.pixp file and output an Image2D or Image3D.\n* The input, specified by `.addInput(...)` must be an ArrayBuffer\n* (from an `UrlToArrayBufferFilter`, an `UrlToArrayBufferReader` or anothrer source ).\n*\n* **Usage**\n* - [examples/pixpFileToImage2D.html](../examples/pixpFileToImage2D.html)\n*/\nclass PixBinDecoder extends Filter {\n constructor(){\n super();\n this.addInputValidator(0, ArrayBuffer);\n }\n\n\n _run(){\n\n if(! this.hasValidInput() ){\n console.warn(\"PixBinDecoder can only decode ArrayBuffer.\");\n return;\n }\n\n var input = this._getInput();\n var inputByteLength = input.byteLength;\n\n // the view to decode the buffer\n var view = new DataView( input );\n var offsetFromHere = 0;\n \n // fetch the extendedMetadata string length\n var extendedMetadataStringLength = view.getUint32( offsetFromHere );\n offsetFromHere += 4;\n \n // getting extendedMetadata\n var extendedMetadataBytes = new Uint8Array(input, offsetFromHere, extendedMetadataStringLength);\n var extendedMetadata = JSON.parse( String.fromCharCode( ...extendedMetadataBytes ) );\n offsetFromHere += extendedMetadataStringLength;\n \n // getting the data\n var constructorHost = null;\n \n try{\n constructorHost = window; // in a web browser\n }catch( e ){\n try{\n constructorHost = GLOBAL; // in node\n }catch( e ){\n console.warn( \"You are not in a Javascript environment?? Weird.\" );\n return;\n }\n }\n \n if(! constructorHost[ extendedMetadata.dataType ]){\n console.warn( \"Data array from pixb file is unknown: \" + extendedMetadata.dataType );\n return;\n }\n \n /*\n There is a known issues in JS that a TypedArray cannot be created starting at a non-multiple-of-2 start offset \n if the type of data within this array is supposed to take more than one byte (ie. 
Uint16, Float32, etc.).\n The error is stated like that (in Chrome):\n \"Uncaught RangeError: start offset of Uint16Array should be a multiple of 2\"\n When it comes to Float32, Chrome wants an offset that is multiple of 4, and so on.\n \n The workaround is to slice the buffer to take only the data part of it (basically to remove what is before)\n so that this new array starts with an offset 0, no matter what was before.\n */\n \n var data = new constructorHost[ extendedMetadata.dataType ]( input.slice( offsetFromHere ) )\n \n var output = new pixpipe[ extendedMetadata.pixpipeType ];\n output.setRawData( data );\n output.setRawMetadata( extendedMetadata.metadata );\n\n this._output[0] = output;\n }\n\n\n} /* END of class PixBinDecoder */\n\nexport { PixBinDecoder }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n*\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\n\nimport { Filter } from '../core/Filter.js';\nimport { Image3D } from '../core/Image3D.js';\n\n// decoders\nimport { Minc2Decoder } from './Minc2Decoder.js';\nimport { NiftiDecoder } from './NiftiDecoder.js';\nimport { MghDecoder } from './MghDecoder.js';\nimport { PixpDecoder } from './PixpDecoder.js';\nimport { PixBinDecoder } from './PixBinDecoder.js';\n\n\n/**\n* An instance of Image3DGenericDecoder takes a ArrayBuffer \n* as input 0 (`.addInput(myArrayBuffer)`) and output an Image3D.\n* The `update` method will perform several decoding attempts, using the readers\n* specified in the constructor.\n* In case of success (one of the registered decoder was compatible to the data)\n* the metadata `decoderConstructor` and `decoderName` are made accessible and give\n* information about the file format. If no decoder managed to decode the input buffer,\n* this filter will not have any output.\n*\n* Developers: if a new 3D dataset decoder is added, reference it here.\n*/\nclass Image3DGenericDecoder extends Filter {\n \n constructor(){\n super();\n \n this._decoders = [\n Minc2Decoder,\n NiftiDecoder,\n MghDecoder,\n PixpDecoder,\n PixBinDecoder\n ];\n }\n \n \n _run(){\n var inputBuffer = this._getInput(0);\n \n if(!inputBuffer){\n console.warn(\"The input buffer must not be null.\");\n return;\n }\n \n // try with each decoder\n for(var d=0; d\r\n\r\nfunction XMLReader(){\r\n\t\r\n}\r\n\r\nXMLReader.prototype = {\r\n\tparse:function(source,defaultNSMap,entityMap){\r\n\t\tvar domBuilder = this.domBuilder;\r\n\t\tdomBuilder.startDocument();\r\n\t\t_copy(defaultNSMap ,defaultNSMap = {})\r\n\t\tparse(source,defaultNSMap,entityMap,\r\n\t\t\t\tdomBuilder,this.errorHandler);\r\n\t\tdomBuilder.endDocument();\r\n\t}\r\n}\r\nfunction parse(source,defaultNSMapCopy,entityMap,domBuilder,errorHandler){\r\n\tfunction fixedFromCharCode(code) {\r\n\t\t// String.prototype.fromCharCode does not supports\r\n\t\t// > 2 bytes unicode chars directly\r\n\t\tif (code > 0xffff) {\r\n\t\t\tcode -= 0x10000;\r\n\t\t\tvar surrogate1 = 0xd800 + (code >> 10)\r\n\t\t\t\t, surrogate2 = 0xdc00 + (code & 0x3ff);\r\n\r\n\t\t\treturn String.fromCharCode(surrogate1, surrogate2);\r\n\t\t} else {\r\n\t\t\treturn String.fromCharCode(code);\r\n\t\t}\r\n\t}\r\n\tfunction entityReplacer(a){\r\n\t\tvar k = a.slice(1,-1);\r\n\t\tif(k in entityMap){\r\n\t\t\treturn entityMap[k]; \r\n\t\t}else if(k.charAt(0) === '#'){\r\n\t\t\treturn fixedFromCharCode(parseInt(k.substr(1).replace('x','0x')))\r\n\t\t}else{\r\n\t\t\terrorHandler.error('entity not found:'+a);\r\n\t\t\treturn 
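// Illustrative sketch (not part of the original sources): driving the
// Image3DGenericDecoder documented above. Its docstring states that the
// metadata "decoderName" and "decoderConstructor" become available on
// success; this sketch assumes they are read back with getMetadata().
// myArrayBuffer is a placeholder supplied by the caller.
var genericDecoderSketch = new Image3DGenericDecoder();
genericDecoderSketch.addInput(myArrayBuffer);
genericDecoderSketch.update();
var decodedVolume = genericDecoderSketch.getOutput();
if (decodedVolume) {
  console.log("decoded with " + genericDecoderSketch.getMetadata("decoderName"));
}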
a;\r\n\t\t}\r\n\t}\r\n\tfunction appendText(end){//has some bugs\r\n\t\tif(end>start){\r\n\t\t\tvar xt = source.substring(start,end).replace(/&#?\\w+;/g,entityReplacer);\r\n\t\t\tlocator&&position(start);\r\n\t\t\tdomBuilder.characters(xt,0,end-start);\r\n\t\t\tstart = end\r\n\t\t}\r\n\t}\r\n\tfunction position(p,m){\r\n\t\twhile(p>=lineEnd && (m = linePattern.exec(source))){\r\n\t\t\tlineStart = m.index;\r\n\t\t\tlineEnd = lineStart + m[0].length;\r\n\t\t\tlocator.lineNumber++;\r\n\t\t\t//console.log('line++:',locator,startPos,endPos)\r\n\t\t}\r\n\t\tlocator.columnNumber = p-lineStart+1;\r\n\t}\r\n\tvar lineStart = 0;\r\n\tvar lineEnd = 0;\r\n\tvar linePattern = /.*(?:\\r\\n?|\\n)|.*$/g\r\n\tvar locator = domBuilder.locator;\r\n\t\r\n\tvar parseStack = [{currentNSMap:defaultNSMapCopy}]\r\n\tvar closeMap = {};\r\n\tvar start = 0;\r\n\twhile(true){\r\n\t\ttry{\r\n\t\t\tvar tagStart = source.indexOf('<',start);\r\n\t\t\tif(tagStart<0){\r\n\t\t\t\tif(!source.substr(start).match(/^\\s*$/)){\r\n\t\t\t\t\tvar doc = domBuilder.doc;\r\n\t \t\t\tvar text = doc.createTextNode(source.substr(start));\r\n\t \t\t\tdoc.appendChild(text);\r\n\t \t\t\tdomBuilder.currentElement = text;\r\n\t\t\t\t}\r\n\t\t\t\treturn;\r\n\t\t\t}\r\n\t\t\tif(tagStart>start){\r\n\t\t\t\tappendText(tagStart);\r\n\t\t\t}\r\n\t\t\tswitch(source.charAt(tagStart+1)){\r\n\t\t\tcase '/':\r\n\t\t\t\tvar end = source.indexOf('>',tagStart+3);\r\n\t\t\t\tvar tagName = source.substring(tagStart+2,end);\r\n\t\t\t\tvar config = parseStack.pop();\r\n\t\t\t\tif(end<0){\r\n\t\t\t\t\t\r\n\t \t\ttagName = source.substring(tagStart+2).replace(/[\\s<].*/,'');\r\n\t \t\t//console.error('#@@@@@@'+tagName)\r\n\t \t\terrorHandler.error(\"end tag name: \"+tagName+' is not complete:'+config.tagName);\r\n\t \t\tend = tagStart+1+tagName.length;\r\n\t \t}else if(tagName.match(/\\s\r\n\t\t\t\tlocator&&position(tagStart);\r\n\t\t\t\tend = parseInstruction(source,tagStart,domBuilder);\r\n\t\t\t\tbreak;\r\n\t\t\tcase '!':// start){\r\n\t\t\tstart = end;\r\n\t\t}else{\r\n\t\t\t//TODO: 这里有可能sax回退,有位置错误风险\r\n\t\t\tappendText(Math.max(tagStart,start)+1);\r\n\t\t}\r\n\t}\r\n}\r\nfunction copyLocator(f,t){\r\n\tt.lineNumber = f.lineNumber;\r\n\tt.columnNumber = f.columnNumber;\r\n\treturn t;\r\n}\r\n\r\n/**\r\n * @see #appendElement(source,elStartEnd,el,selfClosed,entityReplacer,domBuilder,parseStack);\r\n * @return end of the elementStartPart(end of elementEndPart for selfClosed el)\r\n */\r\nfunction parseElementStartPart(source,start,el,currentNSMap,entityReplacer,errorHandler){\r\n\tvar attrName;\r\n\tvar value;\r\n\tvar p = ++start;\r\n\tvar s = S_TAG;//status\r\n\twhile(true){\r\n\t\tvar c = source.charAt(p);\r\n\t\tswitch(c){\r\n\t\tcase '=':\r\n\t\t\tif(s === S_ATTR){//attrName\r\n\t\t\t\tattrName = source.slice(start,p);\r\n\t\t\t\ts = S_EQ;\r\n\t\t\t}else if(s === S_ATTR_SPACE){\r\n\t\t\t\ts = S_EQ;\r\n\t\t\t}else{\r\n\t\t\t\t//fatalError: equal must after attrName or space after attrName\r\n\t\t\t\tthrow new Error('attribute equal must after attrName');\r\n\t\t\t}\r\n\t\t\tbreak;\r\n\t\tcase '\\'':\r\n\t\tcase '\"':\r\n\t\t\tif(s === S_EQ || s === S_ATTR //|| s == S_ATTR_SPACE\r\n\t\t\t\t){//equal\r\n\t\t\t\tif(s === S_ATTR){\r\n\t\t\t\t\terrorHandler.warning('attribute value must after \"=\"')\r\n\t\t\t\t\tattrName = source.slice(start,p)\r\n\t\t\t\t}\r\n\t\t\t\tstart = p+1;\r\n\t\t\t\tp = source.indexOf(c,start)\r\n\t\t\t\tif(p>0){\r\n\t\t\t\t\tvalue = 
source.slice(start,p).replace(/&#?\\w+;/g,entityReplacer);\r\n\t\t\t\t\tel.add(attrName,value,start-1);\r\n\t\t\t\t\ts = S_ATTR_END;\r\n\t\t\t\t}else{\r\n\t\t\t\t\t//fatalError: no end quot match\r\n\t\t\t\t\tthrow new Error('attribute value no end \\''+c+'\\' match');\r\n\t\t\t\t}\r\n\t\t\t}else if(s == S_ATTR_NOQUOT_VALUE){\r\n\t\t\t\tvalue = source.slice(start,p).replace(/&#?\\w+;/g,entityReplacer);\r\n\t\t\t\t//console.log(attrName,value,start,p)\r\n\t\t\t\tel.add(attrName,value,start);\r\n\t\t\t\t//console.dir(el)\r\n\t\t\t\terrorHandler.warning('attribute \"'+attrName+'\" missed start quot('+c+')!!');\r\n\t\t\t\tstart = p+1;\r\n\t\t\t\ts = S_ATTR_END\r\n\t\t\t}else{\r\n\t\t\t\t//fatalError: no equal before\r\n\t\t\t\tthrow new Error('attribute value must after \"=\"');\r\n\t\t\t}\r\n\t\t\tbreak;\r\n\t\tcase '/':\r\n\t\t\tswitch(s){\r\n\t\t\tcase S_TAG:\r\n\t\t\t\tel.setTagName(source.slice(start,p));\r\n\t\t\tcase S_ATTR_END:\r\n\t\t\tcase S_TAG_SPACE:\r\n\t\t\tcase S_TAG_CLOSE:\r\n\t\t\t\ts =S_TAG_CLOSE;\r\n\t\t\t\tel.closed = true;\r\n\t\t\tcase S_ATTR_NOQUOT_VALUE:\r\n\t\t\tcase S_ATTR:\r\n\t\t\tcase S_ATTR_SPACE:\r\n\t\t\t\tbreak;\r\n\t\t\t//case S_EQ:\r\n\t\t\tdefault:\r\n\t\t\t\tthrow new Error(\"attribute invalid close char('/')\")\r\n\t\t\t}\r\n\t\t\tbreak;\r\n\t\tcase ''://end document\r\n\t\t\t//throw new Error('unexpected end of input')\r\n\t\t\terrorHandler.error('unexpected end of input');\r\n\t\t\tif(s == S_TAG){\r\n\t\t\t\tel.setTagName(source.slice(start,p));\r\n\t\t\t}\r\n\t\t\treturn p;\r\n\t\tcase '>':\r\n\t\t\tswitch(s){\r\n\t\t\tcase S_TAG:\r\n\t\t\t\tel.setTagName(source.slice(start,p));\r\n\t\t\tcase S_ATTR_END:\r\n\t\t\tcase S_TAG_SPACE:\r\n\t\t\tcase S_TAG_CLOSE:\r\n\t\t\t\tbreak;//normal\r\n\t\t\tcase S_ATTR_NOQUOT_VALUE://Compatible state\r\n\t\t\tcase S_ATTR:\r\n\t\t\t\tvalue = source.slice(start,p);\r\n\t\t\t\tif(value.slice(-1) === '/'){\r\n\t\t\t\t\tel.closed = true;\r\n\t\t\t\t\tvalue = value.slice(0,-1)\r\n\t\t\t\t}\r\n\t\t\tcase S_ATTR_SPACE:\r\n\t\t\t\tif(s === S_ATTR_SPACE){\r\n\t\t\t\t\tvalue = attrName;\r\n\t\t\t\t}\r\n\t\t\t\tif(s == S_ATTR_NOQUOT_VALUE){\r\n\t\t\t\t\terrorHandler.warning('attribute \"'+value+'\" missed quot(\")!!');\r\n\t\t\t\t\tel.add(attrName,value.replace(/&#?\\w+;/g,entityReplacer),start)\r\n\t\t\t\t}else{\r\n\t\t\t\t\tif(currentNSMap[''] !== 'http://www.w3.org/1999/xhtml' || !value.match(/^(?:disabled|checked|selected)$/i)){\r\n\t\t\t\t\t\terrorHandler.warning('attribute \"'+value+'\" missed value!! 
\"'+value+'\" instead!!')\r\n\t\t\t\t\t}\r\n\t\t\t\t\tel.add(value,value,start)\r\n\t\t\t\t}\r\n\t\t\t\tbreak;\r\n\t\t\tcase S_EQ:\r\n\t\t\t\tthrow new Error('attribute value missed!!');\r\n\t\t\t}\r\n//\t\t\tconsole.log(tagName,tagNamePattern,tagNamePattern.test(tagName))\r\n\t\t\treturn p;\r\n\t\t/*xml space '\\x20' | #x9 | #xD | #xA; */\r\n\t\tcase '\\u0080':\r\n\t\t\tc = ' ';\r\n\t\tdefault:\r\n\t\t\tif(c<= ' '){//space\r\n\t\t\t\tswitch(s){\r\n\t\t\t\tcase S_TAG:\r\n\t\t\t\t\tel.setTagName(source.slice(start,p));//tagName\r\n\t\t\t\t\ts = S_TAG_SPACE;\r\n\t\t\t\t\tbreak;\r\n\t\t\t\tcase S_ATTR:\r\n\t\t\t\t\tattrName = source.slice(start,p)\r\n\t\t\t\t\ts = S_ATTR_SPACE;\r\n\t\t\t\t\tbreak;\r\n\t\t\t\tcase S_ATTR_NOQUOT_VALUE:\r\n\t\t\t\t\tvar value = source.slice(start,p).replace(/&#?\\w+;/g,entityReplacer);\r\n\t\t\t\t\terrorHandler.warning('attribute \"'+value+'\" missed quot(\")!!');\r\n\t\t\t\t\tel.add(attrName,value,start)\r\n\t\t\t\tcase S_ATTR_END:\r\n\t\t\t\t\ts = S_TAG_SPACE;\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t//case S_TAG_SPACE:\r\n\t\t\t\t//case S_EQ:\r\n\t\t\t\t//case S_ATTR_SPACE:\r\n\t\t\t\t//\tvoid();break;\r\n\t\t\t\t//case S_TAG_CLOSE:\r\n\t\t\t\t\t//ignore warning\r\n\t\t\t\t}\r\n\t\t\t}else{//not space\r\n//S_TAG,\tS_ATTR,\tS_EQ,\tS_ATTR_NOQUOT_VALUE\r\n//S_ATTR_SPACE,\tS_ATTR_END,\tS_TAG_SPACE, S_TAG_CLOSE\r\n\t\t\t\tswitch(s){\r\n\t\t\t\t//case S_TAG:void();break;\r\n\t\t\t\t//case S_ATTR:void();break;\r\n\t\t\t\t//case S_ATTR_NOQUOT_VALUE:void();break;\r\n\t\t\t\tcase S_ATTR_SPACE:\r\n\t\t\t\t\tvar tagName = el.tagName;\r\n\t\t\t\t\tif(currentNSMap[''] !== 'http://www.w3.org/1999/xhtml' || !attrName.match(/^(?:disabled|checked|selected)$/i)){\r\n\t\t\t\t\t\terrorHandler.warning('attribute \"'+attrName+'\" missed value!! \"'+attrName+'\" instead2!!')\r\n\t\t\t\t\t}\r\n\t\t\t\t\tel.add(attrName,attrName,start);\r\n\t\t\t\t\tstart = p;\r\n\t\t\t\t\ts = S_ATTR;\r\n\t\t\t\t\tbreak;\r\n\t\t\t\tcase S_ATTR_END:\r\n\t\t\t\t\terrorHandler.warning('attribute space is required\"'+attrName+'\"!!')\r\n\t\t\t\tcase S_TAG_SPACE:\r\n\t\t\t\t\ts = S_ATTR;\r\n\t\t\t\t\tstart = p;\r\n\t\t\t\t\tbreak;\r\n\t\t\t\tcase S_EQ:\r\n\t\t\t\t\ts = S_ATTR_NOQUOT_VALUE;\r\n\t\t\t\t\tstart = p;\r\n\t\t\t\t\tbreak;\r\n\t\t\t\tcase S_TAG_CLOSE:\r\n\t\t\t\t\tthrow new Error(\"elements closed character '/' and '>' must be connected to\");\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}//end outer switch\r\n\t\t//console.log('p++',p)\r\n\t\tp++;\r\n\t}\r\n}\r\n/**\r\n * @return true if has new namespace define\r\n */\r\nfunction appendElement(el,domBuilder,currentNSMap){\r\n\tvar tagName = el.tagName;\r\n\tvar localNSMap = null;\r\n\t//var currentNSMap = parseStack[parseStack.length-1].currentNSMap;\r\n\tvar i = el.length;\r\n\twhile(i--){\r\n\t\tvar a = el[i];\r\n\t\tvar qName = a.qName;\r\n\t\tvar value = a.value;\r\n\t\tvar nsp = qName.indexOf(':');\r\n\t\tif(nsp>0){\r\n\t\t\tvar prefix = a.prefix = qName.slice(0,nsp);\r\n\t\t\tvar localName = qName.slice(nsp+1);\r\n\t\t\tvar nsPrefix = prefix === 'xmlns' && localName\r\n\t\t}else{\r\n\t\t\tlocalName = qName;\r\n\t\t\tprefix = null\r\n\t\t\tnsPrefix = qName === 'xmlns' && ''\r\n\t\t}\r\n\t\t//can not set prefix,because prefix !== ''\r\n\t\ta.localName = localName ;\r\n\t\t//prefix == null for no ns prefix attribute \r\n\t\tif(nsPrefix !== false){//hack!!\r\n\t\t\tif(localNSMap == null){\r\n\t\t\t\tlocalNSMap = 
{}\r\n\t\t\t\t//console.log(currentNSMap,0)\r\n\t\t\t\t_copy(currentNSMap,currentNSMap={})\r\n\t\t\t\t//console.log(currentNSMap,1)\r\n\t\t\t}\r\n\t\t\tcurrentNSMap[nsPrefix] = localNSMap[nsPrefix] = value;\r\n\t\t\ta.uri = 'http://www.w3.org/2000/xmlns/'\r\n\t\t\tdomBuilder.startPrefixMapping(nsPrefix, value) \r\n\t\t}\r\n\t}\r\n\tvar i = el.length;\r\n\twhile(i--){\r\n\t\ta = el[i];\r\n\t\tvar prefix = a.prefix;\r\n\t\tif(prefix){//no prefix attribute has no namespace\r\n\t\t\tif(prefix === 'xml'){\r\n\t\t\t\ta.uri = 'http://www.w3.org/XML/1998/namespace';\r\n\t\t\t}if(prefix !== 'xmlns'){\r\n\t\t\t\ta.uri = currentNSMap[prefix || '']\r\n\t\t\t\t\r\n\t\t\t\t//{console.log('###'+a.qName,domBuilder.locator.systemId+'',currentNSMap,a.uri)}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\tvar nsp = tagName.indexOf(':');\r\n\tif(nsp>0){\r\n\t\tprefix = el.prefix = tagName.slice(0,nsp);\r\n\t\tlocalName = el.localName = tagName.slice(nsp+1);\r\n\t}else{\r\n\t\tprefix = null;//important!!\r\n\t\tlocalName = el.localName = tagName;\r\n\t}\r\n\t//no prefix element has default namespace\r\n\tvar ns = el.uri = currentNSMap[prefix || ''];\r\n\tdomBuilder.startElement(ns,localName,tagName,el);\r\n\t//endPrefixMapping and startPrefixMapping have not any help for dom builder\r\n\t//localNSMap = null\r\n\tif(el.closed){\r\n\t\tdomBuilder.endElement(ns,localName,tagName);\r\n\t\tif(localNSMap){\r\n\t\t\tfor(prefix in localNSMap){\r\n\t\t\t\tdomBuilder.endPrefixMapping(prefix) \r\n\t\t\t}\r\n\t\t}\r\n\t}else{\r\n\t\tel.currentNSMap = currentNSMap;\r\n\t\tel.localNSMap = localNSMap;\r\n\t\t//parseStack.push(el);\r\n\t\treturn true;\r\n\t}\r\n}\r\nfunction parseHtmlSpecialContent(source,elStartEnd,tagName,entityReplacer,domBuilder){\r\n\tif(/^(?:script|textarea)$/i.test(tagName)){\r\n\t\tvar elEndStart = source.indexOf('',elStartEnd);\r\n\t\tvar text = source.substring(elStartEnd+1,elEndStart);\r\n\t\tif(/[&<]/.test(text)){\r\n\t\t\tif(/^script$/i.test(tagName)){\r\n\t\t\t\t//if(!/\\]\\]>/.test(text)){\r\n\t\t\t\t\t//lexHandler.startCDATA();\r\n\t\t\t\t\tdomBuilder.characters(text,0,text.length);\r\n\t\t\t\t\t//lexHandler.endCDATA();\r\n\t\t\t\t\treturn elEndStart;\r\n\t\t\t\t//}\r\n\t\t\t}//}else{//text area\r\n\t\t\t\ttext = text.replace(/&#?\\w+;/g,entityReplacer);\r\n\t\t\t\tdomBuilder.characters(text,0,text.length);\r\n\t\t\t\treturn elEndStart;\r\n\t\t\t//}\r\n\t\t\t\r\n\t\t}\r\n\t}\r\n\treturn elStartEnd+1;\r\n}\r\nfunction fixSelfClosed(source,elStartEnd,tagName,closeMap){\r\n\t//if(tagName in closeMap){\r\n\tvar pos = closeMap[tagName];\r\n\tif(pos == null){\r\n\t\t//console.log(tagName)\r\n\t\tpos = source.lastIndexOf('')\r\n\t\tif(pos',start+4);\r\n\t\t\t//append comment source.substring(4,end)//\");\n\tcase DOCUMENT_TYPE_NODE:\n\t\tvar pubid = node.publicId;\n\t\tvar sysid = node.systemId;\n\t\tbuf.push('');\n\t\t}else if(sysid && sysid!='.'){\n\t\t\tbuf.push(' SYSTEM \"',sysid,'\">');\n\t\t}else{\n\t\t\tvar sub = node.internalSubset;\n\t\t\tif(sub){\n\t\t\t\tbuf.push(\" [\",sub,\"]\");\n\t\t\t}\n\t\t\tbuf.push(\">\");\n\t\t}\n\t\treturn;\n\tcase PROCESSING_INSTRUCTION_NODE:\n\t\treturn buf.push( \"\");\n\tcase ENTITY_REFERENCE_NODE:\n\t\treturn buf.push( '&',node.nodeName,';');\n\t//case ENTITY_NODE:\n\t//case NOTATION_NODE:\n\tdefault:\n\t\tbuf.push('??',node.nodeName);\n\t}\n}\nfunction importNode(doc,node,deep){\n\tvar node2;\n\tswitch (node.nodeType) {\n\tcase ELEMENT_NODE:\n\t\tnode2 = node.cloneNode(false);\n\t\tnode2.ownerDocument = doc;\n\t\t//var attrs = node2.attributes;\n\t\t//var len = 
attrs.length;\n\t\t//for(var i=0;i','amp':'&','quot':'\"','apos':\"'\"}\r\n\tif(locator){\r\n\t\tdomBuilder.setDocumentLocator(locator)\r\n\t}\r\n\t\r\n\tsax.errorHandler = buildErrorHandler(errorHandler,domBuilder,locator);\r\n\tsax.domBuilder = options.domBuilder || domBuilder;\r\n\tif(/\\/x?html?$/.test(mimeType)){\r\n\t\tentityMap.nbsp = '\\xa0';\r\n\t\tentityMap.copy = '\\xa9';\r\n\t\tdefaultNSMap['']= 'http://www.w3.org/1999/xhtml';\r\n\t}\r\n\tdefaultNSMap.xml = defaultNSMap.xml || 'http://www.w3.org/XML/1998/namespace';\r\n\tif(source){\r\n\t\tsax.parse(source,defaultNSMap,entityMap);\r\n\t}else{\r\n\t\tsax.errorHandler.error(\"invalid doc source\");\r\n\t}\r\n\treturn domBuilder.doc;\r\n}\r\nfunction buildErrorHandler(errorImpl,domBuilder,locator){\r\n\tif(!errorImpl){\r\n\t\tif(domBuilder instanceof DOMHandler){\r\n\t\t\treturn domBuilder;\r\n\t\t}\r\n\t\terrorImpl = domBuilder ;\r\n\t}\r\n\tvar errorHandler = {}\r\n\tvar isCallback = errorImpl instanceof Function;\r\n\tlocator = locator||{}\r\n\tfunction build(key){\r\n\t\tvar fn = errorImpl[key];\r\n\t\tif(!fn && isCallback){\r\n\t\t\tfn = errorImpl.length == 2?function(msg){errorImpl(key,msg)}:errorImpl;\r\n\t\t}\r\n\t\terrorHandler[key] = fn && function(msg){\r\n\t\t\tfn('[xmldom '+key+']\\t'+msg+_locator(locator));\r\n\t\t}||function(){};\r\n\t}\r\n\tbuild('warning');\r\n\tbuild('error');\r\n\tbuild('fatalError');\r\n\treturn errorHandler;\r\n}\r\n\r\n//console.log('#\\n\\n\\n\\n\\n\\n\\n####')\r\n/**\r\n * +ContentHandler+ErrorHandler\r\n * +LexicalHandler+EntityResolver2\r\n * -DeclHandler-DTDHandler \r\n * \r\n * DefaultHandler:EntityResolver, DTDHandler, ContentHandler, ErrorHandler\r\n * DefaultHandler2:DefaultHandler,LexicalHandler, DeclHandler, EntityResolver2\r\n * @link http://www.saxproject.org/apidoc/org/xml/sax/helpers/DefaultHandler.html\r\n */\r\nfunction DOMHandler() {\r\n this.cdata = false;\r\n}\r\nfunction position(locator,node){\r\n\tnode.lineNumber = locator.lineNumber;\r\n\tnode.columnNumber = locator.columnNumber;\r\n}\r\n/**\r\n * @see org.xml.sax.ContentHandler#startDocument\r\n * @link http://www.saxproject.org/apidoc/org/xml/sax/ContentHandler.html\r\n */ \r\nDOMHandler.prototype = {\r\n\tstartDocument : function() {\r\n \tthis.doc = new DOMImplementation().createDocument(null, null, null);\r\n \tif (this.locator) {\r\n \tthis.doc.documentURI = this.locator.systemId;\r\n \t}\r\n\t},\r\n\tstartElement:function(namespaceURI, localName, qName, attrs) {\r\n\t\tvar doc = this.doc;\r\n\t var el = doc.createElementNS(namespaceURI, qName||localName);\r\n\t var len = attrs.length;\r\n\t appendElement(this, el);\r\n\t this.currentElement = el;\r\n\t \r\n\t\tthis.locator && position(this.locator,el)\r\n\t for (var i = 0 ; i < len; i++) {\r\n\t var namespaceURI = attrs.getURI(i);\r\n\t var value = attrs.getValue(i);\r\n\t var qName = attrs.getQName(i);\r\n\t\t\tvar attr = doc.createAttributeNS(namespaceURI, qName);\r\n\t\t\tthis.locator &&position(attrs.getLocator(i),attr);\r\n\t\t\tattr.value = attr.nodeValue = value;\r\n\t\t\tel.setAttributeNode(attr)\r\n\t }\r\n\t},\r\n\tendElement:function(namespaceURI, localName, qName) {\r\n\t\tvar current = this.currentElement\r\n\t\tvar tagName = current.tagName;\r\n\t\tthis.currentElement = current.parentNode;\r\n\t},\r\n\tstartPrefixMapping:function(prefix, uri) {\r\n\t},\r\n\tendPrefixMapping:function(prefix) {\r\n\t},\r\n\tprocessingInstruction:function(target, data) {\r\n\t var ins = this.doc.createProcessingInstruction(target, data);\r\n\t this.locator && 
position(this.locator,ins)\r\n\t appendElement(this, ins);\r\n\t},\r\n\tignorableWhitespace:function(ch, start, length) {\r\n\t},\r\n\tcharacters:function(chars, start, length) {\r\n\t\tchars = _toString.apply(this,arguments)\r\n\t\t//console.log(chars)\r\n\t\tif(chars){\r\n\t\t\tif (this.cdata) {\r\n\t\t\t\tvar charNode = this.doc.createCDATASection(chars);\r\n\t\t\t} else {\r\n\t\t\t\tvar charNode = this.doc.createTextNode(chars);\r\n\t\t\t}\r\n\t\t\tif(this.currentElement){\r\n\t\t\t\tthis.currentElement.appendChild(charNode);\r\n\t\t\t}else if(/^\\s*$/.test(chars)){\r\n\t\t\t\tthis.doc.appendChild(charNode);\r\n\t\t\t\t//process xml\r\n\t\t\t}\r\n\t\t\tthis.locator && position(this.locator,charNode)\r\n\t\t}\r\n\t},\r\n\tskippedEntity:function(name) {\r\n\t},\r\n\tendDocument:function() {\r\n\t\tthis.doc.normalize();\r\n\t},\r\n\tsetDocumentLocator:function (locator) {\r\n\t if(this.locator = locator){// && !('lineNumber' in locator)){\r\n\t \tlocator.lineNumber = 0;\r\n\t }\r\n\t},\r\n\t//LexicalHandler\r\n\tcomment:function(chars, start, length) {\r\n\t\tchars = _toString.apply(this,arguments)\r\n\t var comm = this.doc.createComment(chars);\r\n\t this.locator && position(this.locator,comm)\r\n\t appendElement(this, comm);\r\n\t},\r\n\t\r\n\tstartCDATA:function() {\r\n\t //used in characters() methods\r\n\t this.cdata = true;\r\n\t},\r\n\tendCDATA:function() {\r\n\t this.cdata = false;\r\n\t},\r\n\t\r\n\tstartDTD:function(name, publicId, systemId) {\r\n\t\tvar impl = this.doc.implementation;\r\n\t if (impl && impl.createDocumentType) {\r\n\t var dt = impl.createDocumentType(name, publicId, systemId);\r\n\t this.locator && position(this.locator,dt)\r\n\t appendElement(this, dt);\r\n\t }\r\n\t},\r\n\t/**\r\n\t * @see org.xml.sax.ErrorHandler\r\n\t * @link http://www.saxproject.org/apidoc/org/xml/sax/ErrorHandler.html\r\n\t */\r\n\twarning:function(error) {\r\n\t\tconsole.warn('[xmldom warning]\\t'+error,_locator(this.locator));\r\n\t},\r\n\terror:function(error) {\r\n\t\tconsole.error('[xmldom error]\\t'+error,_locator(this.locator));\r\n\t},\r\n\tfatalError:function(error) {\r\n\t\tconsole.error('[xmldom fatalError]\\t'+error,_locator(this.locator));\r\n\t throw error;\r\n\t}\r\n}\r\nfunction _locator(l){\r\n\tif(l){\r\n\t\treturn '\\n@'+(l.systemId ||'')+'#[line:'+l.lineNumber+',col:'+l.columnNumber+']'\r\n\t}\r\n}\r\nfunction _toString(chars,start,length){\r\n\tif(typeof chars == 'string'){\r\n\t\treturn chars.substr(start,length)\r\n\t}else{//java sax connect width xmldom on rhino(what about: \"? 
&& !(chars instanceof String)\")\r\n\t\tif(chars.length >= start+length || start){\r\n\t\t\treturn new java.lang.String(chars,start,length)+'';\r\n\t\t}\r\n\t\treturn chars;\r\n\t}\r\n}\r\n\r\n/*\r\n * @link http://www.saxproject.org/apidoc/org/xml/sax/ext/LexicalHandler.html\r\n * used method of org.xml.sax.ext.LexicalHandler:\r\n * #comment(chars, start, length)\r\n * #startCDATA()\r\n * #endCDATA()\r\n * #startDTD(name, publicId, systemId)\r\n *\r\n *\r\n * IGNORED method of org.xml.sax.ext.LexicalHandler:\r\n * #endDTD()\r\n * #startEntity(name)\r\n * #endEntity(name)\r\n *\r\n *\r\n * @link http://www.saxproject.org/apidoc/org/xml/sax/ext/DeclHandler.html\r\n * IGNORED method of org.xml.sax.ext.DeclHandler\r\n * \t#attributeDecl(eName, aName, type, mode, value)\r\n * #elementDecl(name, model)\r\n * #externalEntityDecl(name, publicId, systemId)\r\n * #internalEntityDecl(name, value)\r\n * @link http://www.saxproject.org/apidoc/org/xml/sax/ext/EntityResolver2.html\r\n * IGNORED method of org.xml.sax.EntityResolver2\r\n * #resolveEntity(String name,String publicId,String baseURI,String systemId)\r\n * #resolveEntity(publicId, systemId)\r\n * #getExternalSubset(name, baseURI)\r\n * @link http://www.saxproject.org/apidoc/org/xml/sax/DTDHandler.html\r\n * IGNORED method of org.xml.sax.DTDHandler\r\n * #notationDecl(name, publicId, systemId) {};\r\n * #unparsedEntityDecl(name, publicId, systemId, notationName) {};\r\n */\r\n\"endDTD,startEntity,endEntity,attributeDecl,elementDecl,externalEntityDecl,internalEntityDecl,resolveEntity,getExternalSubset,notationDecl,unparsedEntityDecl\".replace(/\\w+/g,function(key){\r\n\tDOMHandler.prototype[key] = function(){return null}\r\n})\r\n\r\n/* Private static helpers treated below as private instance methods, so don't need to add these to the public API; we might use a Relator to also get rid of non-standard public properties */\r\nfunction appendElement (hander,node) {\r\n if (!hander.currentElement) {\r\n hander.doc.appendChild(node);\r\n } else {\r\n hander.currentElement.appendChild(node);\r\n }\r\n}//appendChild and setAttributeNS are preformance key\r\n\r\n//if(typeof require == 'function'){\r\n\tvar XMLReader = require('./sax').XMLReader;\r\n\tvar DOMImplementation = exports.DOMImplementation = require('./dom').DOMImplementation;\r\n\texports.XMLSerializer = require('./dom').XMLSerializer ;\r\n\texports.DOMParser = DOMParser;\r\n//}\r\n","\"use strict\";\n\nvar fieldTagNames = {\n // TIFF Baseline\n 0x013B: 'Artist',\n 0x0102: 'BitsPerSample',\n 0x0109: 'CellLength',\n 0x0108: 'CellWidth',\n 0x0140: 'ColorMap',\n 0x0103: 'Compression',\n 0x8298: 'Copyright',\n 0x0132: 'DateTime',\n 0x0152: 'ExtraSamples',\n 0x010A: 'FillOrder',\n 0x0121: 'FreeByteCounts',\n 0x0120: 'FreeOffsets',\n 0x0123: 'GrayResponseCurve',\n 0x0122: 'GrayResponseUnit',\n 0x013C: 'HostComputer',\n 0x010E: 'ImageDescription',\n 0x0101: 'ImageLength',\n 0x0100: 'ImageWidth',\n 0x010F: 'Make',\n 0x0119: 'MaxSampleValue',\n 0x0118: 'MinSampleValue',\n 0x0110: 'Model',\n 0x00FE: 'NewSubfileType',\n 0x0112: 'Orientation',\n 0x0106: 'PhotometricInterpretation',\n 0x011C: 'PlanarConfiguration',\n 0x0128: 'ResolutionUnit',\n 0x0116: 'RowsPerStrip',\n 0x0115: 'SamplesPerPixel',\n 0x0131: 'Software',\n 0x0117: 'StripByteCounts',\n 0x0111: 'StripOffsets',\n 0x00FF: 'SubfileType',\n 0x0107: 'Threshholding',\n 0x011A: 'XResolution',\n 0x011B: 'YResolution',\n\n // TIFF Extended\n 0x0146: 'BadFaxLines',\n 0x0147: 'CleanFaxData',\n 0x0157: 'ClipPath',\n 0x0148: 'ConsecutiveBadFaxLines',\n 
0x01B1: 'Decode',\n 0x01B2: 'DefaultImageColor',\n 0x010D: 'DocumentName',\n 0x0150: 'DotRange',\n 0x0141: 'HalftoneHints',\n 0x015A: 'Indexed',\n 0x015B: 'JPEGTables',\n 0x011D: 'PageName',\n 0x0129: 'PageNumber',\n 0x013D: 'Predictor',\n 0x013F: 'PrimaryChromaticities',\n 0x0214: 'ReferenceBlackWhite',\n 0x0153: 'SampleFormat',\n 0x0154: 'SMinSampleValue',\n 0x0155: 'SMaxSampleValue',\n 0x022F: 'StripRowCounts',\n 0x014A: 'SubIFDs',\n 0x0124: 'T4Options',\n 0x0125: 'T6Options',\n 0x0145: 'TileByteCounts',\n 0x0143: 'TileLength',\n 0x0144: 'TileOffsets',\n 0x0142: 'TileWidth',\n 0x012D: 'TransferFunction',\n 0x013E: 'WhitePoint',\n 0x0158: 'XClipPathUnits',\n 0x011E: 'XPosition',\n 0x0211: 'YCbCrCoefficients',\n 0x0213: 'YCbCrPositioning',\n 0x0212: 'YCbCrSubSampling',\n 0x0159: 'YClipPathUnits',\n 0x011F: 'YPosition',\n\n // EXIF\n 0x9202: 'ApertureValue',\n 0xA001: 'ColorSpace',\n 0x9004: 'DateTimeDigitized',\n 0x9003: 'DateTimeOriginal',\n 0x8769: 'Exif IFD',\n 0x9000: 'ExifVersion',\n 0x829A: 'ExposureTime',\n 0xA300: 'FileSource',\n 0x9209: 'Flash',\n 0xA000: 'FlashpixVersion',\n 0x829D: 'FNumber',\n 0xA420: 'ImageUniqueID',\n 0x9208: 'LightSource',\n 0x927C: 'MakerNote',\n 0x9201: 'ShutterSpeedValue',\n 0x9286: 'UserComment',\n\n // IPTC\n 0x83BB: 'IPTC',\n\n // ICC\n 0x8773: 'ICC Profile',\n\n // XMP\n 0x02BC: 'XMP',\n\n // GDAL\n 0xA480: 'GDAL_METADATA',\n 0xA481: 'GDAL_NODATA',\n\n // Photoshop\n 0x8649: 'Photoshop',\n\n // GeoTiff\n 0x830E: 'ModelPixelScale',\n 0x8482: 'ModelTiepoint',\n 0x85D8: 'ModelTransformation',\n 0x87AF: 'GeoKeyDirectory',\n 0x87B0: 'GeoDoubleParams',\n 0x87B1: 'GeoAsciiParams'\n};\n\nvar key;\nvar fieldTags = {};\nfor (key in fieldTagNames) {\n fieldTags[fieldTagNames[key]] = parseInt(key);\n}\n\nvar arrayFields = [fieldTags.BitsPerSample, fieldTags.ExtraSamples, fieldTags.SampleFormat, fieldTags.StripByteCounts, fieldTags.StripOffsets, fieldTags.StripRowCounts, fieldTags.TileByteCounts, fieldTags.TileOffsets];\n\nvar fieldTypeNames = {\n 0x0001: 'BYTE',\n 0x0002: 'ASCII',\n 0x0003: 'SHORT',\n 0x0004: 'LONG',\n 0x0005: 'RATIONAL',\n 0x0006: 'SBYTE',\n 0x0007: 'UNDEFINED',\n 0x0008: 'SSHORT',\n 0x0009: 'SLONG',\n 0x000A: 'SRATIONAL',\n 0x000B: 'FLOAT',\n 0x000C: 'DOUBLE',\n // introduced by BigTIFF\n 0x0010: 'LONG8',\n 0x0011: 'SLONG8',\n 0x0012: 'IFD8'\n};\n\nvar fieldTypes = {};\nfor (key in fieldTypeNames) {\n fieldTypes[fieldTypeNames[key]] = parseInt(key);\n}\n\nvar photometricInterpretations = {\n WhiteIsZero: 0,\n BlackIsZero: 1,\n RGB: 2,\n Palette: 3,\n TransparencyMask: 4,\n CMYK: 5,\n YCbCr: 6,\n\n CIELab: 8,\n ICCLab: 9\n};\n\nvar geoKeyNames = {\n 1024: 'GTModelTypeGeoKey',\n 1025: 'GTRasterTypeGeoKey',\n 1026: 'GTCitationGeoKey',\n 2048: 'GeographicTypeGeoKey',\n 2049: 'GeogCitationGeoKey',\n 2050: 'GeogGeodeticDatumGeoKey',\n 2051: 'GeogPrimeMeridianGeoKey',\n 2052: 'GeogLinearUnitsGeoKey',\n 2053: 'GeogLinearUnitSizeGeoKey',\n 2054: 'GeogAngularUnitsGeoKey',\n 2055: 'GeogAngularUnitSizeGeoKey',\n 2056: 'GeogEllipsoidGeoKey',\n 2057: 'GeogSemiMajorAxisGeoKey',\n 2058: 'GeogSemiMinorAxisGeoKey',\n 2059: 'GeogInvFlatteningGeoKey',\n 2060: 'GeogAzimuthUnitsGeoKey',\n 2061: 'GeogPrimeMeridianLongGeoKey',\n 2062: 'GeogTOWGS84GeoKey',\n 3072: 'ProjectedCSTypeGeoKey',\n 3073: 'PCSCitationGeoKey',\n 3074: 'ProjectionGeoKey',\n 3075: 'ProjCoordTransGeoKey',\n 3076: 'ProjLinearUnitsGeoKey',\n 3077: 'ProjLinearUnitSizeGeoKey',\n 3078: 'ProjStdParallel1GeoKey',\n 3079: 'ProjStdParallel2GeoKey',\n 3080: 'ProjNatOriginLongGeoKey',\n 3081: 
'ProjNatOriginLatGeoKey',\n 3082: 'ProjFalseEastingGeoKey',\n 3083: 'ProjFalseNorthingGeoKey',\n 3084: 'ProjFalseOriginLongGeoKey',\n 3085: 'ProjFalseOriginLatGeoKey',\n 3086: 'ProjFalseOriginEastingGeoKey',\n 3087: 'ProjFalseOriginNorthingGeoKey',\n 3088: 'ProjCenterLongGeoKey',\n 3089: 'ProjCenterLatGeoKey',\n 3090: 'ProjCenterEastingGeoKey',\n 3091: 'ProjCenterNorthingGeoKey',\n 3092: 'ProjScaleAtNatOriginGeoKey',\n 3093: 'ProjScaleAtCenterGeoKey',\n 3094: 'ProjAzimuthAngleGeoKey',\n 3095: 'ProjStraightVertPoleLongGeoKey',\n 3096: 'ProjRectifiedGridAngleGeoKey',\n 4096: 'VerticalCSTypeGeoKey',\n 4097: 'VerticalCitationGeoKey',\n 4098: 'VerticalDatumGeoKey',\n 4099: 'VerticalUnitsGeoKey'\n};\n\nvar geoKeys = {};\nfor (key in geoKeyNames) {\n geoKeys[geoKeyNames[key]] = parseInt(key);\n}\n\nvar parseXml;\n// node.js version\nif (typeof window === \"undefined\") {\n parseXml = function parseXml(xmlStr) {\n // requires xmldom module\n var DOMParser = require('xmldom').DOMParser;\n return new DOMParser().parseFromString(xmlStr, \"text/xml\");\n };\n} else if (typeof window.DOMParser !== \"undefined\") {\n parseXml = function parseXml(xmlStr) {\n return new window.DOMParser().parseFromString(xmlStr, \"text/xml\");\n };\n} else if (typeof window.ActiveXObject !== \"undefined\" && new window.ActiveXObject(\"Microsoft.XMLDOM\")) {\n parseXml = function parseXml(xmlStr) {\n var xmlDoc = new window.ActiveXObject(\"Microsoft.XMLDOM\");\n xmlDoc.async = \"false\";\n xmlDoc.loadXML(xmlStr);\n return xmlDoc;\n };\n}\n\nmodule.exports = {\n fieldTags: fieldTags,\n fieldTagNames: fieldTagNames,\n arrayFields: arrayFields,\n fieldTypes: fieldTypes,\n fieldTypeNames: fieldTypeNames,\n photometricInterpretations: photometricInterpretations,\n geoKeys: geoKeys,\n geoKeyNames: geoKeyNames,\n parseXml: parseXml\n};","\"use strict\";\n\nfunction fromWhiteIsZero(raster, max, width, height) {\n var rgbRaster = new Uint8Array(width * height * 3);\n var value;\n for (var i = 0, j = 0; i < raster.length; ++i, j += 3) {\n value = 256 - raster[i] / max * 256;\n rgbRaster[j] = value;\n rgbRaster[j + 1] = value;\n rgbRaster[j + 2] = value;\n }\n return rgbRaster;\n}\n\nfunction fromBlackIsZero(raster, max, width, height) {\n var rgbRaster = new Uint8Array(width * height * 3);\n var value;\n for (var i = 0, j = 0; i < raster.length; ++i, j += 3) {\n value = raster[i] / max * 256;\n rgbRaster[j] = value;\n rgbRaster[j + 1] = value;\n rgbRaster[j + 2] = value;\n }\n return rgbRaster;\n}\n\nfunction fromPalette(raster, colorMap, width, height) {\n var rgbRaster = new Uint8Array(width * height * 3);\n var greenOffset = colorMap.length / 3;\n var blueOffset = colorMap.length / 3 * 2;\n for (var i = 0, j = 0; i < raster.length; ++i, j += 3) {\n var mapIndex = raster[i];\n rgbRaster[j] = colorMap[mapIndex] / 65536 * 256;\n rgbRaster[j + 1] = colorMap[mapIndex + greenOffset] / 65536 * 256;\n rgbRaster[j + 2] = colorMap[mapIndex + blueOffset] / 65536 * 256;\n }\n return rgbRaster;\n}\n\nfunction fromCMYK(cmykRaster, width, height) {\n var rgbRaster = new Uint8Array(width * height * 3);\n var c, m, y, k;\n for (var i = 0, j = 0; i < cmykRaster.length; i += 4, j += 3) {\n c = cmykRaster[i];\n m = cmykRaster[i + 1];\n y = cmykRaster[i + 2];\n k = cmykRaster[i + 3];\n\n rgbRaster[j] = 255 * ((255 - c) / 256) * ((255 - k) / 256);\n rgbRaster[j + 1] = 255 * ((255 - m) / 256) * ((255 - k) / 256);\n rgbRaster[j + 2] = 255 * ((255 - y) / 256) * ((255 - k) / 256);\n }\n return rgbRaster;\n}\n\nfunction fromYCbCr(yCbCrRaster, width, 
height) {\n var rgbRaster = new Uint8Array(width * height * 3);\n var y, cb, cr;\n for (var i = 0, j = 0; i < yCbCrRaster.length; i += 3, j += 3) {\n y = yCbCrRaster[i];\n cb = yCbCrRaster[i + 1];\n cr = yCbCrRaster[i + 2];\n\n rgbRaster[j] = y + 1.40200 * (cr - 0x80);\n rgbRaster[j + 1] = y - 0.34414 * (cb - 0x80) - 0.71414 * (cr - 0x80);\n rgbRaster[j + 2] = y + 1.77200 * (cb - 0x80);\n }\n return rgbRaster;\n}\n\n// converted from here:\n// http://de.mathworks.com/matlabcentral/fileexchange/24010-lab2rgb/content/Lab2RGB.m\n// still buggy\nfunction fromCIELab(cieLabRaster, width, height) {\n var T1 = 0.008856;\n var T2 = 0.206893;\n var MAT = [3.240479, -1.537150, -0.498535, -0.969256, 1.875992, 0.041556, 0.055648, -0.204043, 1.057311];\n var rgbRaster = new Uint8Array(width * height * 3);\n var L, a, b;\n var fX, fY, fZ, XT, YT, ZT, X, Y, Z;\n for (var i = 0, j = 0; i < cieLabRaster.length; i += 3, j += 3) {\n L = cieLabRaster[i];\n a = cieLabRaster[i + 1];\n b = cieLabRaster[i + 2];\n\n // Compute Y\n fY = Math.pow((L + 16) / 116, 3);\n YT = fY > T1;\n fY = (YT !== 0) * (L / 903.3) + YT * fY;\n Y = fY;\n\n fY = YT * Math.pow(fY, 1 / 3) + (YT !== 0) * (7.787 * fY + 16 / 116);\n\n // Compute X\n fX = a / 500 + fY;\n XT = fX > T2;\n X = XT * Math.pow(fX, 3) + (XT !== 0) * ((fX - 16 / 116) / 7.787);\n\n // Compute Z\n fZ = fY - b / 200;\n ZT = fZ > T2;\n Z = ZT * Math.pow(fZ, 3) + (ZT !== 0) * ((fZ - 16 / 116) / 7.787);\n\n // Normalize for D65 white point\n X = X * 0.950456;\n Z = Z * 1.088754;\n\n rgbRaster[j] = X * MAT[0] + Y * MAT[1] + Z * MAT[2];\n rgbRaster[j + 1] = X * MAT[3] + Y * MAT[4] + Z * MAT[5];\n rgbRaster[j + 2] = X * MAT[6] + Y * MAT[7] + Z * MAT[8];\n }\n return rgbRaster;\n}\n\nmodule.exports = {\n fromWhiteIsZero: fromWhiteIsZero,\n fromBlackIsZero: fromBlackIsZero,\n fromPalette: fromPalette,\n fromCMYK: fromCMYK,\n fromYCbCr: fromYCbCr,\n fromCIELab: fromCIELab\n};","\"use strict\";\n\nfunction AbstractDecoder() {}\n\nAbstractDecoder.prototype = {\n isAsync: function isAsync() {\n // TODO: check if async reading func is enabled or not.\n return typeof this.decodeBlock === \"undefined\";\n }\n};\n\nmodule.exports = AbstractDecoder;","\"use strict\";\n\nvar AbstractDecoder = require(\"../abstractdecoder.js\");\n\nfunction RawDecoder() {}\n\nRawDecoder.prototype = Object.create(AbstractDecoder.prototype);\nRawDecoder.prototype.constructor = RawDecoder;\nRawDecoder.prototype.decodeBlock = function (buffer) {\n return buffer;\n};\n\nmodule.exports = RawDecoder;","\"use strict\";\n\n//var lzwCompress = require(\"lzwcompress\");\n\nvar AbstractDecoder = require(\"../abstractdecoder.js\");\n\nvar MIN_BITS = 9;\nvar MAX_BITS = 12;\nvar CLEAR_CODE = 256; // clear code\nvar EOI_CODE = 257; // end of information\n\nfunction LZW() {\n this.littleEndian = false;\n this.position = 0;\n\n this._makeEntryLookup = false;\n this.dictionary = [];\n}\n\nLZW.prototype = {\n constructor: LZW,\n initDictionary: function initDictionary() {\n this.dictionary = new Array(258);\n this.entryLookup = {};\n this.byteLength = MIN_BITS;\n for (var i = 0; i <= 257; i++) {\n // i really feal like i <= 257, but I get strange unknown words that way.\n this.dictionary[i] = [i];\n if (this._makeEntryLookup) {\n this.entryLookup[i] = i;\n }\n }\n },\n\n decompress: function decompress(input) {\n this._makeEntryLookup = false; // for speed\n this.initDictionary();\n this.position = 0;\n this.result = [];\n if (!input.buffer) {\n input = new Uint8Array(input);\n }\n var mydataview = new 
DataView(input.buffer);\n var code = this.getNext(mydataview);\n var oldCode;\n while (code !== EOI_CODE) {\n if (code === CLEAR_CODE) {\n this.initDictionary();\n code = this.getNext(mydataview);\n while (code === CLEAR_CODE) {\n code = this.getNext(mydataview);\n }\n if (code > CLEAR_CODE) {\n throw 'corrupted code at scanline ' + code;\n }\n if (code === EOI_CODE) {\n break;\n } else {\n var val = this.dictionary[code];\n this.appendArray(this.result, val);\n oldCode = code;\n }\n } else {\n if (this.dictionary[code] !== undefined) {\n var _val = this.dictionary[code];\n this.appendArray(this.result, _val);\n var newVal = this.dictionary[oldCode].concat(this.dictionary[code][0]);\n this.addToDictionary(newVal);\n oldCode = code;\n } else {\n var oldVal = this.dictionary[oldCode];\n if (!oldVal) {\n throw \"Bogus entry. Not in dictionary, \" + oldCode + \" / \" + this.dictionary.length + \", position: \" + this.position;\n }\n var _newVal = oldVal.concat(this.dictionary[oldCode][0]);\n this.appendArray(this.result, _newVal);\n this.addToDictionary(_newVal);\n oldCode = code;\n }\n }\n // This is strange. It seems like the\n if (this.dictionary.length >= Math.pow(2, this.byteLength) - 1) {\n this.byteLength++;\n }\n code = this.getNext(mydataview);\n }\n return new Uint8Array(this.result);\n },\n\n appendArray: function appendArray(dest, source) {\n for (var i = 0; i < source.length; i++) {\n dest.push(source[i]);\n }\n return dest;\n },\n\n haveBytesChanged: function haveBytesChanged() {\n if (this.dictionary.length >= Math.pow(2, this.byteLength)) {\n this.byteLength++;\n return true;\n }\n return false;\n },\n\n addToDictionary: function addToDictionary(arr) {\n this.dictionary.push(arr);\n if (this._makeEntryLookup) {\n this.entryLookup[arr] = this.dictionary.length - 1;\n }\n this.haveBytesChanged();\n return this.dictionary.length - 1;\n },\n\n getNext: function getNext(dataview) {\n var byte = this.getByte(dataview, this.position, this.byteLength);\n this.position += this.byteLength;\n return byte;\n },\n\n // This binary representation might actually be as fast as the completely illegible bit shift approach\n //\n getByte: function getByte(dataview, position, length) {\n var d = position % 8;\n var a = Math.floor(position / 8);\n var de = 8 - d;\n var ef = position + length - (a + 1) * 8;\n var fg = 8 * (a + 2) - (position + length);\n var dg = (a + 2) * 8 - position;\n fg = Math.max(0, fg);\n if (a >= dataview.byteLength) {\n console.warn('ran off the end of the buffer before finding EOI_CODE (end on input code)');\n return EOI_CODE;\n }\n var chunk1 = dataview.getUint8(a, this.littleEndian) & Math.pow(2, 8 - d) - 1;\n chunk1 = chunk1 << length - de;\n var chunks = chunk1;\n if (a + 1 < dataview.byteLength) {\n var chunk2 = dataview.getUint8(a + 1, this.littleEndian) >>> fg;\n chunk2 = chunk2 << Math.max(0, length - dg);\n chunks += chunk2;\n }\n if (ef > 8 && a + 2 < dataview.byteLength) {\n var hi = (a + 3) * 8 - (position + length);\n var chunk3 = dataview.getUint8(a + 2, this.littleEndian) >>> hi;\n chunks += chunk3;\n }\n return chunks;\n },\n\n // compress has not been optimized and uses a uint8 array to hold binary values.\n compress: function compress(input) {\n this._makeEntryLookup = true;\n this.initDictionary();\n this.position = 0;\n var resultBits = [];\n var omega = [];\n resultBits = this.appendArray(resultBits, this.binaryFromByte(CLEAR_CODE, this.byteLength)); // resultBits.concat(Array.from(this.binaryFromByte(this.CLEAR_CODE, this.byteLength)))\n for (var i = 
0; i < input.length; i++) {\n var k = [input[i]];\n var omk = omega.concat(k);\n if (this.entryLookup[omk] !== undefined) {\n omega = omk;\n } else {\n var _code = this.entryLookup[omega];\n var _bin = this.binaryFromByte(_code, this.byteLength);\n resultBits = this.appendArray(resultBits, _bin);\n this.addToDictionary(omk);\n omega = k;\n if (this.dictionary.length >= Math.pow(2, MAX_BITS)) {\n resultBits = this.appendArray(resultBits, this.binaryFromByte(CLEAR_CODE, this.byteLength));\n this.initDictionary();\n }\n }\n }\n var code = this.entryLookup[omega];\n var bin = this.binaryFromByte(code, this.byteLength);\n resultBits = this.appendArray(resultBits, bin);\n resultBits = resultBits = this.appendArray(resultBits, this.binaryFromByte(EOI_CODE, this.byteLength));\n this.binary = resultBits;\n this.result = this.binaryToUint8(resultBits);\n return this.result;\n },\n\n byteFromCode: function byteFromCode(code) {\n var res = this.dictionary[code];\n return res;\n },\n\n binaryFromByte: function binaryFromByte(byte) {\n var byteLength = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 8;\n\n var res = new Uint8Array(byteLength);\n for (var i = 0; i < res.length; i++) {\n var mask = Math.pow(2, i);\n var isOne = (byte & mask) > 0;\n res[res.length - 1 - i] = isOne;\n }\n return res;\n },\n\n binaryToNumber: function binaryToNumber(bin) {\n var res = 0;\n for (var i = 0; i < bin.length; i++) {\n res += Math.pow(2, bin.length - i - 1) * bin[i];\n }\n return res;\n },\n\n inputToBinary: function inputToBinary(input) {\n var inputByteLength = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 8;\n\n var res = new Uint8Array(input.length * inputByteLength);\n for (var i = 0; i < input.length; i++) {\n var bin = this.binaryFromByte(input[i], inputByteLength);\n res.set(bin, i * inputByteLength);\n }\n return res;\n },\n\n binaryToUint8: function binaryToUint8(bin) {\n var result = new Uint8Array(Math.ceil(bin.length / 8));\n var index = 0;\n for (var i = 0; i < bin.length; i += 8) {\n var val = 0;\n for (var j = 0; j < 8 && i + j < bin.length; j++) {\n val = val + bin[i + j] * Math.pow(2, 8 - j - 1);\n }\n result[index] = val;\n index++;\n }\n return result;\n }\n};\n\n// the actual decoder interface\n\nfunction LZWDecoder() {\n this.decompressor = new LZW();\n}\n\nLZWDecoder.prototype = Object.create(AbstractDecoder.prototype);\nLZWDecoder.prototype.constructor = LZWDecoder;\nLZWDecoder.prototype.decodeBlock = function (buffer) {\n return this.decompressor.decompress(buffer).buffer;\n};\n\nmodule.exports = LZWDecoder;","\"use strict\";\n\nvar AbstractDecoder = require(\"../abstractdecoder.js\");\nvar pakoInflate = require('pako/lib/inflate').inflate;\n\nfunction DeflateDecoder() {}\n\nDeflateDecoder.prototype = Object.create(AbstractDecoder.prototype);\nDeflateDecoder.prototype.constructor = DeflateDecoder;\nDeflateDecoder.prototype.decodeBlock = function (buffer) {\n return pakoInflate(new Uint8Array(buffer)).buffer;\n};\n\nmodule.exports = DeflateDecoder;","\"use strict\";\n\nvar AbstractDecoder = require(\"../abstractdecoder.js\");\n\nfunction PackbitsDecoder() {}\n\nPackbitsDecoder.prototype = Object.create(AbstractDecoder.prototype);\nPackbitsDecoder.prototype.constructor = PackbitsDecoder;\nPackbitsDecoder.prototype.decodeBlock = function (buffer) {\n var dataView = new DataView(buffer);\n var out = [];\n var i, j;\n\n for (i = 0; i < buffer.byteLength; ++i) {\n var header = dataView.getInt8(i);\n if (header < 0) {\n var next = dataView.getUint8(i + 
1);\n header = -header;\n for (j = 0; j <= header; ++j) {\n out.push(next);\n }\n i += 1;\n } else {\n for (j = 0; j <= header; ++j) {\n out.push(dataView.getUint8(i + j + 1));\n }\n i += header + 1;\n }\n }\n return new Uint8Array(out).buffer;\n};\n\nmodule.exports = PackbitsDecoder;","\"use strict\";\n\nvar globals = require(\"./globals.js\");\nvar RGB = require(\"./rgb.js\");\nvar RawDecoder = require(\"./compression/raw.js\");\nvar LZWDecoder = require(\"./compression/lzw.js\");\nvar DeflateDecoder = require(\"./compression/deflate.js\");\nvar PackbitsDecoder = require(\"./compression/packbits.js\");\n\nvar sum = function sum(array, start, end) {\n var s = 0;\n for (var i = start; i < end; ++i) {\n s += array[i];\n }\n return s;\n};\n\nvar arrayForType = function arrayForType(format, bitsPerSample, size) {\n switch (format) {\n case 1:\n // unsigned integer data\n switch (bitsPerSample) {\n case 8:\n return new Uint8Array(size);\n case 16:\n return new Uint16Array(size);\n case 32:\n return new Uint32Array(size);\n }\n break;\n case 2:\n // twos complement signed integer data\n switch (bitsPerSample) {\n case 8:\n return new Int8Array(size);\n case 16:\n return new Int16Array(size);\n case 32:\n return new Int32Array(size);\n }\n break;\n case 3:\n // floating point data\n switch (bitsPerSample) {\n case 32:\n return new Float32Array(size);\n case 64:\n return new Float64Array(size);\n }\n break;\n }\n throw Error(\"Unsupported data format/bitsPerSample\");\n};\n\n/**\n * GeoTIFF sub-file image.\n * @constructor\n * @param {Object} fileDirectory The parsed file directory\n * @param {Object} geoKeys The parsed geo-keys\n * @param {DataView} dataView The DataView for the underlying file.\n * @param {Boolean} littleEndian Whether the file is encoded in little or big endian\n * @param {Boolean} cache Whether or not decoded tiles shall be cached\n */\nfunction GeoTIFFImage(fileDirectory, geoKeys, dataView, littleEndian, cache) {\n this.fileDirectory = fileDirectory;\n this.geoKeys = geoKeys;\n this.dataView = dataView;\n this.littleEndian = littleEndian;\n this.tiles = cache ? {} : null;\n this.isTiled = fileDirectory.StripOffsets ? false : true;\n var planarConfiguration = fileDirectory.PlanarConfiguration;\n this.planarConfiguration = typeof planarConfiguration === \"undefined\" ? 
1 : planarConfiguration;\n if (this.planarConfiguration !== 1 && this.planarConfiguration !== 2) {\n throw new Error(\"Invalid planar configuration.\");\n }\n\n switch (this.fileDirectory.Compression) {\n case undefined:\n case 1:\n // no compression\n this.decoder = new RawDecoder();\n break;\n case 5:\n // LZW\n this.decoder = new LZWDecoder();\n break;\n case 6:\n // JPEG\n throw new Error(\"JPEG compression not supported.\");\n case 8:\n // Deflate\n this.decoder = new DeflateDecoder();\n break;\n //case 32946: // deflate ??\n // throw new Error(\"Deflate compression not supported.\");\n case 32773:\n // packbits\n this.decoder = new PackbitsDecoder();\n break;\n default:\n throw new Error(\"Unknown compresseion method identifier: \" + this.fileDirectory.Compression);\n }\n}\n\nGeoTIFFImage.prototype = {\n /**\n * Returns the associated parsed file directory.\n * @returns {Object} the parsed file directory\n */\n getFileDirectory: function getFileDirectory() {\n return this.fileDirectory;\n },\n /**\n * Returns the associated parsed geo keys.\n * @returns {Object} the parsed geo keys\n */\n getGeoKeys: function getGeoKeys() {\n return this.geoKeys;\n },\n /**\n * Returns the width of the image.\n * @returns {Number} the width of the image\n */\n getWidth: function getWidth() {\n return this.fileDirectory.ImageWidth;\n },\n /**\n * Returns the height of the image.\n * @returns {Number} the height of the image\n */\n getHeight: function getHeight() {\n return this.fileDirectory.ImageLength;\n },\n /**\n * Returns the number of samples per pixel.\n * @returns {Number} the number of samples per pixel\n */\n getSamplesPerPixel: function getSamplesPerPixel() {\n return this.fileDirectory.SamplesPerPixel;\n },\n /**\n * Returns the width of each tile.\n * @returns {Number} the width of each tile\n */\n getTileWidth: function getTileWidth() {\n return this.isTiled ? this.fileDirectory.TileWidth : this.getWidth();\n },\n /**\n * Returns the height of each tile.\n * @returns {Number} the height of each tile\n */\n getTileHeight: function getTileHeight() {\n return this.isTiled ? this.fileDirectory.TileLength : this.fileDirectory.RowsPerStrip;\n },\n\n /**\n * Calculates the number of bytes for each pixel across all samples. Only full\n * bytes are supported, an exception is thrown when this is not the case.\n * @returns {Number} the bytes per pixel\n */\n getBytesPerPixel: function getBytesPerPixel() {\n var bitsPerSample = 0;\n for (var i = 0; i < this.fileDirectory.BitsPerSample.length; ++i) {\n var bits = this.fileDirectory.BitsPerSample[i];\n if (bits % 8 !== 0) {\n throw new Error(\"Sample bit-width of \" + bits + \" is not supported.\");\n } else if (bits !== this.fileDirectory.BitsPerSample[0]) {\n throw new Error(\"Differing size of samples in a pixel are not supported.\");\n }\n bitsPerSample += bits;\n }\n return bitsPerSample / 8;\n },\n\n getSampleByteSize: function getSampleByteSize(i) {\n if (i >= this.fileDirectory.BitsPerSample.length) {\n throw new RangeError(\"Sample index \" + i + \" is out of range.\");\n }\n var bits = this.fileDirectory.BitsPerSample[i];\n if (bits % 8 !== 0) {\n throw new Error(\"Sample bit-width of \" + bits + \" is not supported.\");\n }\n return bits / 8;\n },\n\n getReaderForSample: function getReaderForSample(sampleIndex) {\n var format = this.fileDirectory.SampleFormat ? 
this.fileDirectory.SampleFormat[sampleIndex] : 1;\n var bitsPerSample = this.fileDirectory.BitsPerSample[sampleIndex];\n switch (format) {\n case 1:\n // unsigned integer data\n switch (bitsPerSample) {\n case 8:\n return DataView.prototype.getUint8;\n case 16:\n return DataView.prototype.getUint16;\n case 32:\n return DataView.prototype.getUint32;\n }\n break;\n case 2:\n // twos complement signed integer data\n switch (bitsPerSample) {\n case 8:\n return DataView.prototype.getInt8;\n case 16:\n return DataView.prototype.getInt16;\n case 32:\n return DataView.prototype.getInt32;\n }\n break;\n case 3:\n switch (bitsPerSample) {\n case 32:\n return DataView.prototype.getFloat32;\n case 64:\n return DataView.prototype.getFloat64;\n }\n break;\n }\n },\n\n getArrayForSample: function getArrayForSample(sampleIndex, size) {\n var format = this.fileDirectory.SampleFormat ? this.fileDirectory.SampleFormat[sampleIndex] : 1;\n var bitsPerSample = this.fileDirectory.BitsPerSample[sampleIndex];\n return arrayForType(format, bitsPerSample, size);\n },\n\n getDecoder: function getDecoder() {\n return this.decoder;\n },\n\n /**\n * Returns the decoded strip or tile.\n * @param {Number} x the strip or tile x-offset\n * @param {Number} y the tile y-offset (0 for stripped images)\n * @param {Number} plane the planar configuration (1: \"chunky\", 2: \"separate samples\")\n * @returns {(Int8Array|Uint8Array|Int16Array|Uint16Array|Int32Array|Uint32Array|Float32Array|Float64Array)}\n */\n getTileOrStrip: function getTileOrStrip(x, y, sample, callback) {\n var numTilesPerRow = Math.ceil(this.getWidth() / this.getTileWidth());\n var numTilesPerCol = Math.ceil(this.getHeight() / this.getTileHeight());\n var index;\n var tiles = this.tiles;\n if (this.planarConfiguration === 1) {\n index = y * numTilesPerRow + x;\n } else if (this.planarConfiguration === 2) {\n index = sample * numTilesPerRow * numTilesPerCol + y * numTilesPerRow + x;\n }\n\n if (tiles !== null && index in tiles) {\n if (callback) {\n return callback(null, { x: x, y: y, sample: sample, data: tiles[index] });\n }\n return tiles[index];\n } else {\n var offset, byteCount;\n if (this.isTiled) {\n offset = this.fileDirectory.TileOffsets[index];\n byteCount = this.fileDirectory.TileByteCounts[index];\n } else {\n offset = this.fileDirectory.StripOffsets[index];\n byteCount = this.fileDirectory.StripByteCounts[index];\n }\n var slice = this.dataView.buffer.slice(offset, offset + byteCount);\n if (callback) {\n return this.getDecoder().decodeBlockAsync(slice, function (error, data) {\n if (!error && tiles !== null) {\n tiles[index] = data;\n }\n callback(error, { x: x, y: y, sample: sample, data: data });\n });\n }\n var block = this.getDecoder().decodeBlock(slice);\n if (tiles !== null) {\n tiles[index] = block;\n }\n return block;\n }\n },\n\n _readRasterAsync: function _readRasterAsync(imageWindow, samples, valueArrays, interleave, callback, callbackError) {\n var tileWidth = this.getTileWidth();\n var tileHeight = this.getTileHeight();\n\n var minXTile = Math.floor(imageWindow[0] / tileWidth);\n var maxXTile = Math.ceil(imageWindow[2] / tileWidth);\n var minYTile = Math.floor(imageWindow[1] / tileHeight);\n var maxYTile = Math.ceil(imageWindow[3] / tileHeight);\n\n var numTilesPerRow = Math.ceil(this.getWidth() / tileWidth);\n\n var windowWidth = imageWindow[2] - imageWindow[0];\n var windowHeight = imageWindow[3] - imageWindow[1];\n\n var bytesPerPixel = this.getBytesPerPixel();\n var imageWidth = this.getWidth();\n\n var predictor = 
this.fileDirectory.Predictor || 1;\n\n var srcSampleOffsets = [];\n var sampleReaders = [];\n for (var i = 0; i < samples.length; ++i) {\n if (this.planarConfiguration === 1) {\n srcSampleOffsets.push(sum(this.fileDirectory.BitsPerSample, 0, samples[i]) / 8);\n } else {\n srcSampleOffsets.push(0);\n }\n sampleReaders.push(this.getReaderForSample(samples[i]));\n }\n\n var allStacked = false;\n var unfinishedTiles = 0;\n var littleEndian = this.littleEndian;\n var globalError = null;\n\n function checkFinished() {\n if (allStacked && unfinishedTiles === 0) {\n if (globalError) {\n callbackError(globalError);\n } else {\n callback(valueArrays);\n }\n }\n }\n\n function onTileGot(error, tile) {\n if (!error) {\n var dataView = new DataView(tile.data);\n\n var firstLine = tile.y * tileHeight;\n var firstCol = tile.x * tileWidth;\n var lastLine = (tile.y + 1) * tileHeight;\n var lastCol = (tile.x + 1) * tileWidth;\n var sampleIndex = tile.sample;\n\n for (var y = Math.max(0, imageWindow[1] - firstLine); y < Math.min(tileHeight, tileHeight - (lastLine - imageWindow[3])); ++y) {\n for (var x = Math.max(0, imageWindow[0] - firstCol); x < Math.min(tileWidth, tileWidth - (lastCol - imageWindow[2])); ++x) {\n var pixelOffset = (y * tileWidth + x) * bytesPerPixel;\n var value = sampleReaders[sampleIndex].call(dataView, pixelOffset + srcSampleOffsets[sampleIndex], littleEndian);\n var windowCoordinate;\n if (interleave) {\n if (predictor !== 1 && x > 0) {\n windowCoordinate = (y + firstLine - imageWindow[1]) * windowWidth * samples.length + (x + firstCol - imageWindow[0] - 1) * samples.length + sampleIndex;\n value += valueArrays[windowCoordinate];\n }\n\n windowCoordinate = (y + firstLine - imageWindow[1]) * windowWidth * samples.length + (x + firstCol - imageWindow[0]) * samples.length + sampleIndex;\n valueArrays[windowCoordinate] = value;\n } else {\n if (predictor !== 1 && x > 0) {\n windowCoordinate = (y + firstLine - imageWindow[1]) * windowWidth + x - 1 + firstCol - imageWindow[0];\n value += valueArrays[sampleIndex][windowCoordinate];\n }\n\n windowCoordinate = (y + firstLine - imageWindow[1]) * windowWidth + x + firstCol - imageWindow[0];\n valueArrays[sampleIndex][windowCoordinate] = value;\n }\n }\n }\n } else {\n globalError = error;\n }\n\n // check end condition and call callbacks\n unfinishedTiles -= 1;\n checkFinished();\n }\n\n for (var yTile = minYTile; yTile <= maxYTile; ++yTile) {\n for (var xTile = minXTile; xTile <= maxXTile; ++xTile) {\n for (var sampleIndex = 0; sampleIndex < samples.length; ++sampleIndex) {\n var sample = samples[sampleIndex];\n if (this.planarConfiguration === 2) {\n bytesPerPixel = this.getSampleByteSize(sample);\n }\n var _sampleIndex = sampleIndex;\n unfinishedTiles += 1;\n this.getTileOrStrip(xTile, yTile, sample, onTileGot);\n }\n }\n }\n allStacked = true;\n checkFinished();\n },\n\n _readRaster: function _readRaster(imageWindow, samples, valueArrays, interleave, callback, callbackError) {\n try {\n var tileWidth = this.getTileWidth();\n var tileHeight = this.getTileHeight();\n\n var minXTile = Math.floor(imageWindow[0] / tileWidth);\n var maxXTile = Math.ceil(imageWindow[2] / tileWidth);\n var minYTile = Math.floor(imageWindow[1] / tileHeight);\n var maxYTile = Math.ceil(imageWindow[3] / tileHeight);\n\n var numTilesPerRow = Math.ceil(this.getWidth() / tileWidth);\n\n var windowWidth = imageWindow[2] - imageWindow[0];\n var windowHeight = imageWindow[3] - imageWindow[1];\n\n var bytesPerPixel = this.getBytesPerPixel();\n var imageWidth = 
this.getWidth();\n\n var predictor = this.fileDirectory.Predictor || 1;\n\n var srcSampleOffsets = [];\n var sampleReaders = [];\n for (var i = 0; i < samples.length; ++i) {\n if (this.planarConfiguration === 1) {\n srcSampleOffsets.push(sum(this.fileDirectory.BitsPerSample, 0, samples[i]) / 8);\n } else {\n srcSampleOffsets.push(0);\n }\n sampleReaders.push(this.getReaderForSample(samples[i]));\n }\n\n for (var yTile = minYTile; yTile < maxYTile; ++yTile) {\n for (var xTile = minXTile; xTile < maxXTile; ++xTile) {\n var firstLine = yTile * tileHeight;\n var firstCol = xTile * tileWidth;\n var lastLine = (yTile + 1) * tileHeight;\n var lastCol = (xTile + 1) * tileWidth;\n\n for (var sampleIndex = 0; sampleIndex < samples.length; ++sampleIndex) {\n var sample = samples[sampleIndex];\n if (this.planarConfiguration === 2) {\n bytesPerPixel = this.getSampleByteSize(sample);\n }\n var tile = new DataView(this.getTileOrStrip(xTile, yTile, sample));\n\n var reader = sampleReaders[sampleIndex];\n var ymax = Math.min(tileHeight, tileHeight - (lastLine - imageWindow[3]));\n var xmax = Math.min(tileWidth, tileWidth - (lastCol - imageWindow[2]));\n var totalbytes = (ymax * tileWidth + xmax) * bytesPerPixel;\n var tileLength = new Uint8Array(tile.buffer).length;\n if (2 * tileLength !== totalbytes && this._debugMessages) {\n console.warn('dimension mismatch', tileLength, totalbytes);\n }\n for (var y = Math.max(0, imageWindow[1] - firstLine); y < ymax; ++y) {\n for (var x = Math.max(0, imageWindow[0] - firstCol); x < xmax; ++x) {\n var pixelOffset = (y * tileWidth + x) * bytesPerPixel;\n var value = 0;\n if (pixelOffset < tileLength - 1) {\n value = reader.call(tile, pixelOffset + srcSampleOffsets[sampleIndex], this.littleEndian);\n }\n\n var windowCoordinate;\n if (interleave) {\n if (predictor !== 1 && x > 0) {\n windowCoordinate = (y + firstLine - imageWindow[1]) * windowWidth * samples.length + (x + firstCol - imageWindow[0] - 1) * samples.length + sampleIndex;\n value += valueArrays[windowCoordinate];\n }\n\n windowCoordinate = (y + firstLine - imageWindow[1]) * windowWidth * samples.length + (x + firstCol - imageWindow[0]) * samples.length + sampleIndex;\n valueArrays[windowCoordinate] = value;\n } else {\n if (predictor !== 1 && x > 0) {\n windowCoordinate = (y + firstLine - imageWindow[1]) * windowWidth + x - 1 + firstCol - imageWindow[0];\n value += valueArrays[sampleIndex][windowCoordinate];\n }\n\n windowCoordinate = (y + firstLine - imageWindow[1]) * windowWidth + x + firstCol - imageWindow[0];\n valueArrays[sampleIndex][windowCoordinate] = value;\n }\n }\n }\n }\n }\n }\n callback(valueArrays);\n return valueArrays;\n } catch (error) {\n return callbackError(error);\n }\n },\n\n /**\n * This callback is called upon successful reading of a GeoTIFF image. The\n * resulting arrays are passed as a single argument.\n * @callback GeoTIFFImage~readCallback\n * @param {(TypedArray|TypedArray[])} array the requested data as a either a\n * single typed array or a list of\n * typed arrays, depending on the\n * 'interleave' option.\n */\n\n /**\n * This callback is called upon encountering an error while reading of a\n * GeoTIFF image\n * @callback GeoTIFFImage~readErrorCallback\n * @param {Error} error the encountered error\n */\n\n /**\n * Reads raster data from the image. This function reads all selected samples\n * into separate arrays of the correct type for that sample. 
When provided,\n * only a subset of the raster is read for each sample.\n *\n * @param {Object} [options] optional parameters\n * @param {Array} [options.window=whole image] the subset to read data from.\n * @param {Array} [options.samples=all samples] the selection of samples to read from.\n * @param {Boolean} [options.interleave=false] whether the data shall be read\n * in one single array or separate\n * arrays.\n * @param {GeoTIFFImage~readCallback} [callback] the success callback. this\n * parameter is mandatory for\n * asynchronous decoders (some\n * compression mechanisms).\n * @param {GeoTIFFImage~readErrorCallback} [callbackError] the error callback\n * @returns {(TypedArray|TypedArray[]|null)} in synchonous cases, the decoded\n * array(s) is/are returned. In\n * asynchronous cases, nothing is\n * returned.\n */\n readRasters: function readRasters() /* arguments are read via the 'arguments' object */{\n // parse the arguments\n var options, callback, callbackError;\n switch (arguments.length) {\n case 0:\n break;\n case 1:\n if (typeof arguments[0] === \"function\") {\n callback = arguments[0];\n } else {\n options = arguments[0];\n }\n break;\n case 2:\n if (typeof arguments[0] === \"function\") {\n callback = arguments[0];\n callbackError = arguments[1];\n } else {\n options = arguments[0];\n callback = arguments[1];\n }\n break;\n case 3:\n options = arguments[0];\n callback = arguments[1];\n callbackError = arguments[2];\n break;\n default:\n throw new Error(\"Invalid number of arguments passed.\");\n }\n\n // set up default arguments\n options = options || {};\n callbackError = callbackError || function (error) {\n console.error(error);\n };\n\n var imageWindow = options.window || [0, 0, this.getWidth(), this.getHeight()],\n samples = options.samples,\n interleave = options.interleave;\n\n // check parameters\n if (imageWindow[0] < 0 || imageWindow[1] < 0 || imageWindow[2] > this.getWidth() || imageWindow[3] > this.getHeight()) {\n throw new Error(\"Select window is out of image bounds.\");\n } else if (imageWindow[0] > imageWindow[2] || imageWindow[1] > imageWindow[3]) {\n throw new Error(\"Invalid subsets\");\n }\n\n var imageWindowWidth = imageWindow[2] - imageWindow[0];\n var imageWindowHeight = imageWindow[3] - imageWindow[1];\n var numPixels = imageWindowWidth * imageWindowHeight;\n var i;\n\n if (!samples) {\n samples = [];\n for (i = 0; i < this.fileDirectory.SamplesPerPixel; ++i) {\n samples.push(i);\n }\n } else {\n for (i = 0; i < samples.length; ++i) {\n if (samples[i] >= this.fileDirectory.SamplesPerPixel) {\n throw new RangeError(\"Invalid sample index '\" + samples[i] + \"'.\");\n }\n }\n }\n var valueArrays;\n if (interleave) {\n var format = this.fileDirectory.SampleFormat ? 
Math.max.apply(null, this.fileDirectory.SampleFormat) : 1,\n bitsPerSample = Math.max.apply(null, this.fileDirectory.BitsPerSample);\n valueArrays = arrayForType(format, bitsPerSample, numPixels * samples.length);\n } else {\n valueArrays = [];\n for (i = 0; i < samples.length; ++i) {\n valueArrays.push(this.getArrayForSample(samples[i], numPixels));\n }\n }\n\n // start reading data, sync or async\n var decoder = this.getDecoder();\n if (decoder.isAsync()) {\n if (!callback) {\n throw new Error(\"No callback specified for asynchronous raster reading.\");\n }\n return this._readRasterAsync(imageWindow, samples, valueArrays, interleave, callback, callbackError);\n } else {\n callback = callback || function () {};\n return this._readRaster(imageWindow, samples, valueArrays, interleave, callback, callbackError);\n }\n },\n\n /**\n * Reads raster data from the image as RGB. The result is always an\n * interleaved typed array.\n * Colorspaces other than RGB will be transformed to RGB, color maps expanded.\n * When no other method is applicable, the first sample is used to produce a\n * greayscale image.\n * When provided, only a subset of the raster is read for each sample.\n *\n * @param {Object} [options] optional parameters\n * @param {Array} [options.window=whole image] the subset to read data from.\n * @param {GeoTIFFImage~readCallback} callback the success callback. this\n * parameter is mandatory.\n * @param {GeoTIFFImage~readErrorCallback} [callbackError] the error callback\n */\n readRGB: function readRGB() {\n // parse the arguments\n var options = null,\n callback = null,\n callbackError = null;\n switch (arguments.length) {\n case 0:\n break;\n case 1:\n if (typeof arguments[0] === \"function\") {\n callback = arguments[0];\n } else {\n options = arguments[0];\n }\n break;\n case 2:\n if (typeof arguments[0] === \"function\") {\n callback = arguments[0];\n callbackError = arguments[1];\n } else {\n options = arguments[0];\n callback = arguments[1];\n }\n break;\n case 3:\n options = arguments[0];\n callback = arguments[1];\n callbackError = arguments[2];\n break;\n default:\n throw new Error(\"Invalid number of arguments passed.\");\n }\n\n // set up default arguments\n options = options || {};\n callbackError = callbackError || function (error) {\n console.error(error);\n };\n\n var imageWindow = options.window || [0, 0, this.getWidth(), this.getHeight()];\n\n // check parameters\n if (imageWindow[0] < 0 || imageWindow[1] < 0 || imageWindow[2] > this.getWidth() || imageWindow[3] > this.getHeight()) {\n throw new Error(\"Select window is out of image bounds.\");\n } else if (imageWindow[0] > imageWindow[2] || imageWindow[1] > imageWindow[3]) {\n throw new Error(\"Invalid subsets\");\n }\n\n var width = imageWindow[2] - imageWindow[0];\n var height = imageWindow[3] - imageWindow[1];\n\n var pi = this.fileDirectory.PhotometricInterpretation;\n\n var bits = this.fileDirectory.BitsPerSample[0];\n var max = Math.pow(2, bits);\n\n if (pi === globals.photometricInterpretations.RGB) {\n return this.readRasters({\n window: options.window,\n interleave: true\n }, callback, callbackError);\n }\n\n var samples;\n switch (pi) {\n case globals.photometricInterpretations.WhiteIsZero:\n case globals.photometricInterpretations.BlackIsZero:\n case globals.photometricInterpretations.Palette:\n samples = [0];\n break;\n case globals.photometricInterpretations.CMYK:\n samples = [0, 1, 2, 3];\n break;\n case globals.photometricInterpretations.YCbCr:\n case globals.photometricInterpretations.CIELab:\n 
samples = [0, 1, 2];\n break;\n default:\n throw new Error(\"Invalid or unsupported photometric interpretation.\");\n }\n\n var subOptions = {\n window: options.window,\n interleave: true,\n samples: samples\n };\n var fileDirectory = this.fileDirectory;\n return this.readRasters(subOptions, function (raster) {\n switch (pi) {\n case globals.photometricInterpretations.WhiteIsZero:\n return callback(RGB.fromWhiteIsZero(raster, max, width, height));\n case globals.photometricInterpretations.BlackIsZero:\n return callback(RGB.fromBlackIsZero(raster, max, width, height));\n case globals.photometricInterpretations.Palette:\n return callback(RGB.fromPalette(raster, fileDirectory.ColorMap, width, height));\n case globals.photometricInterpretations.CMYK:\n return callback(RGB.fromCMYK(raster, width, height));\n case globals.photometricInterpretations.YCbCr:\n return callback(RGB.fromYCbCr(raster, width, height));\n case globals.photometricInterpretations.CIELab:\n return callback(RGB.fromCIELab(raster, width, height));\n }\n }, callbackError);\n },\n\n /**\n * Returns an array of tiepoints.\n * @returns {Object[]}\n */\n getTiePoints: function getTiePoints() {\n if (!this.fileDirectory.ModelTiepoint) {\n return [];\n }\n\n var tiePoints = [];\n for (var i = 0; i < this.fileDirectory.ModelTiepoint.length; i += 6) {\n tiePoints.push({\n i: this.fileDirectory.ModelTiepoint[i],\n j: this.fileDirectory.ModelTiepoint[i + 1],\n k: this.fileDirectory.ModelTiepoint[i + 2],\n x: this.fileDirectory.ModelTiepoint[i + 3],\n y: this.fileDirectory.ModelTiepoint[i + 4],\n z: this.fileDirectory.ModelTiepoint[i + 5]\n });\n }\n return tiePoints;\n },\n\n /**\n * Returns the parsed GDAL metadata items.\n * @returns {Object}\n */\n getGDALMetadata: function getGDALMetadata() {\n var metadata = {};\n if (!this.fileDirectory.GDAL_METADATA) {\n return null;\n }\n var string = this.fileDirectory.GDAL_METADATA;\n var xmlDom = globals.parseXml(string.substring(0, string.length - 1));\n var result = xmlDom.evaluate(\"GDALMetadata/Item\", xmlDom, null, XPathResult.UNORDERED_NODE_SNAPSHOT_TYPE, null);\n for (var i = 0; i < result.snapshotLength; ++i) {\n var node = result.snapshotItem(i);\n metadata[node.getAttribute(\"name\")] = node.textContent;\n }\n return metadata;\n },\n\n /**\n * Returns the image origin as a XYZ-vector. When the image has no affine\n * transformation, then an exception is thrown.\n * @returns {Array} The origin as a vector\n */\n getOrigin: function getOrigin() {\n var tiePoints = this.fileDirectory.ModelTiepoint;\n if (!tiePoints || tiePoints.length !== 6) {\n throw new Error(\"The image does not have an affine transformation.\");\n }\n\n return [tiePoints[3], tiePoints[4], tiePoints[5]];\n },\n\n /**\n * Returns the image resolution as a XYZ-vector. 
When the image has no affine\n * transformation, then an exception is thrown.\n * @returns {Array} The resolution as a vector\n */\n getResolution: function getResolution() {\n if (!this.fileDirectory.ModelPixelScale) {\n throw new Error(\"The image does not have an affine transformation.\");\n }\n\n return [this.fileDirectory.ModelPixelScale[0], this.fileDirectory.ModelPixelScale[1], this.fileDirectory.ModelPixelScale[2]];\n },\n\n /**\n * Returns whether or not the pixels of the image depict an area (or point).\n * @returns {Boolean} Whether the pixels are a point\n */\n pixelIsArea: function pixelIsArea() {\n return this.geoKeys.GTRasterTypeGeoKey === 1;\n },\n\n /**\n * Returns the image bounding box as an array of 4 values: min-x, min-y,\n * max-x and max-y. When the image has no affine transformation, then an\n * exception is thrown.\n * @returns {Array} The bounding box\n */\n getBoundingBox: function getBoundingBox() {\n var origin = this.getOrigin();\n var resolution = this.getResolution();\n\n var x1 = origin[0];\n var y1 = origin[1];\n\n var x2 = x1 + resolution[0] * this.getWidth();\n var y2 = y1 + resolution[1] * this.getHeight();\n\n return [Math.min(x1, x2), Math.min(y1, y2), Math.max(x1, x2), Math.max(y1, y2)];\n }\n};\n\nmodule.exports = GeoTIFFImage;","\"use strict\";\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nvar DataView64 = function () {\n function DataView64(arrayBuffer) {\n _classCallCheck(this, DataView64);\n\n this._dataView = new DataView(arrayBuffer);\n }\n\n _createClass(DataView64, [{\n key: \"getUint64\",\n value: function getUint64(offset, littleEndian) {\n var left = this.getUint32(offset, littleEndian);\n var right = this.getUint32(offset + 4, littleEndian);\n if (littleEndian) {\n return left << 32 | right;\n }\n return right << 32 | left;\n }\n }, {\n key: \"getInt64\",\n value: function getInt64(offset, littleEndian) {\n var left, right;\n if (littleEndian) {\n left = this.getInt32(offset, littleEndian);\n right = this.getUint32(offset + 4, littleEndian);\n\n return left << 32 | right;\n }\n left = this.getUint32(offset, littleEndian);\n right = this.getInt32(offset + 4, littleEndian);\n return right << 32 | left;\n }\n }, {\n key: \"getUint8\",\n value: function getUint8(offset, littleEndian) {\n return this._dataView.getUint8(offset, littleEndian);\n }\n }, {\n key: \"getInt8\",\n value: function getInt8(offset, littleEndian) {\n return this._dataView.getInt8(offset, littleEndian);\n }\n }, {\n key: \"getUint16\",\n value: function getUint16(offset, littleEndian) {\n return this._dataView.getUint16(offset, littleEndian);\n }\n }, {\n key: \"getInt16\",\n value: function getInt16(offset, littleEndian) {\n return this._dataView.getInt16(offset, littleEndian);\n }\n }, {\n key: \"getUint32\",\n value: function getUint32(offset, littleEndian) {\n return 
this._dataView.getUint32(offset, littleEndian);\n }\n }, {\n key: \"getInt32\",\n value: function getInt32(offset, littleEndian) {\n return this._dataView.getInt32(offset, littleEndian);\n }\n }, {\n key: \"getFloat32\",\n value: function getFloat32(offset, littleEndian) {\n return this._dataView.getFloat32(offset, littleEndian);\n }\n }, {\n key: \"getFloat64\",\n value: function getFloat64(offset, littleEndian) {\n return this._dataView.getFloat64(offset, littleEndian);\n }\n }, {\n key: \"buffer\",\n get: function get() {\n return this._dataView.buffer;\n }\n }]);\n\n return DataView64;\n}();\n\nmodule.exports = DataView64;","\"use strict\";\n\nvar globals = require(\"./globals.js\");\nvar GeoTIFFImage = require(\"./geotiffimage.js\");\nvar DataView64 = require(\"./dataview64.js\");\n\nvar fieldTypes = globals.fieldTypes,\n fieldTagNames = globals.fieldTagNames,\n arrayFields = globals.arrayFields,\n geoKeyNames = globals.geoKeyNames;\n\n/**\n * The abstraction for a whole GeoTIFF file.\n * @constructor\n * @param {ArrayBuffer} rawData the raw data stream of the file as an ArrayBuffer.\n * @param {Object} [options] further options.\n * @param {Boolean} [options.cache=false] whether or not decoded tiles shall be cached.\n */\nfunction GeoTIFF(rawData, options) {\n this.dataView = new DataView64(rawData);\n options = options || {};\n this.cache = options.cache || false;\n\n var BOM = this.dataView.getUint16(0, 0);\n if (BOM === 0x4949) {\n this.littleEndian = true;\n } else if (BOM === 0x4D4D) {\n this.littleEndian = false;\n } else {\n throw new TypeError(\"Invalid byte order value.\");\n }\n\n var magicNumber = this.dataView.getUint16(2, this.littleEndian);\n if (this.dataView.getUint16(2, this.littleEndian) === 42) {\n this.bigTiff = false;\n } else if (magicNumber === 43) {\n this.bigTiff = true;\n var offsetBytesize = this.dataView.getUint16(4, this.littleEndian);\n if (offsetBytesize !== 8) {\n throw new Error(\"Unsupported offset byte-size.\");\n }\n } else {\n throw new TypeError(\"Invalid magic number.\");\n }\n\n this.fileDirectories = this.parseFileDirectories(this.getOffset(this.bigTiff ? 
8 : 4));\n}\n\nGeoTIFF.prototype = {\n getOffset: function getOffset(offset) {\n if (this.bigTiff) {\n return this.dataView.getUint64(offset, this.littleEndian);\n }\n return this.dataView.getUint32(offset, this.littleEndian);\n },\n\n getFieldTypeLength: function getFieldTypeLength(fieldType) {\n switch (fieldType) {\n case fieldTypes.BYTE:case fieldTypes.ASCII:case fieldTypes.SBYTE:case fieldTypes.UNDEFINED:\n return 1;\n case fieldTypes.SHORT:case fieldTypes.SSHORT:\n return 2;\n case fieldTypes.LONG:case fieldTypes.SLONG:case fieldTypes.FLOAT:\n return 4;\n case fieldTypes.RATIONAL:case fieldTypes.SRATIONAL:case fieldTypes.DOUBLE:\n case fieldTypes.LONG8:case fieldTypes.SLONG8:case fieldTypes.IFD8:\n return 8;\n default:\n throw new RangeError(\"Invalid field type: \" + fieldType);\n }\n },\n\n getValues: function getValues(fieldType, count, offset) {\n var values = null;\n var readMethod = null;\n var fieldTypeLength = this.getFieldTypeLength(fieldType);\n var i;\n\n switch (fieldType) {\n case fieldTypes.BYTE:case fieldTypes.ASCII:case fieldTypes.UNDEFINED:\n values = new Uint8Array(count);readMethod = this.dataView.getUint8;\n break;\n case fieldTypes.SBYTE:\n values = new Int8Array(count);readMethod = this.dataView.getInt8;\n break;\n case fieldTypes.SHORT:\n values = new Uint16Array(count);readMethod = this.dataView.getUint16;\n break;\n case fieldTypes.SSHORT:\n values = new Int16Array(count);readMethod = this.dataView.getInt16;\n break;\n case fieldTypes.LONG:\n values = new Uint32Array(count);readMethod = this.dataView.getUint32;\n break;\n case fieldTypes.SLONG:\n values = new Int32Array(count);readMethod = this.dataView.getInt32;\n break;\n case fieldTypes.LONG8:case fieldTypes.IFD8:\n values = new Array(count);readMethod = this.dataView.getUint64;\n break;\n case fieldTypes.SLONG8:\n values = new Array(count);readMethod = this.dataView.getInt64;\n break;\n case fieldTypes.RATIONAL:\n values = new Uint32Array(count * 2);readMethod = this.dataView.getUint32;\n break;\n case fieldTypes.SRATIONAL:\n values = new Int32Array(count * 2);readMethod = this.dataView.getInt32;\n break;\n case fieldTypes.FLOAT:\n values = new Float32Array(count);readMethod = this.dataView.getFloat32;\n break;\n case fieldTypes.DOUBLE:\n values = new Float64Array(count);readMethod = this.dataView.getFloat64;\n break;\n default:\n throw new RangeError(\"Invalid field type: \" + fieldType);\n }\n\n // normal fields\n if (!(fieldType === fieldTypes.RATIONAL || fieldType === fieldTypes.SRATIONAL)) {\n for (i = 0; i < count; ++i) {\n values[i] = readMethod.call(this.dataView, offset + i * fieldTypeLength, this.littleEndian);\n }\n }\n // RATIONAL or SRATIONAL\n else {\n for (i = 0; i < count; i += 2) {\n values[i] = readMethod.call(this.dataView, offset + i * fieldTypeLength, this.littleEndian);\n values[i + 1] = readMethod.call(this.dataView, offset + (i * fieldTypeLength + 4), this.littleEndian);\n }\n }\n\n if (fieldType === fieldTypes.ASCII) {\n return String.fromCharCode.apply(null, values);\n }\n return values;\n },\n\n getFieldValues: function getFieldValues(fieldTag, fieldType, typeCount, valueOffset) {\n var fieldValues;\n var fieldTypeLength = this.getFieldTypeLength(fieldType);\n\n if (fieldTypeLength * typeCount <= (this.bigTiff ? 
8 : 4)) {\n fieldValues = this.getValues(fieldType, typeCount, valueOffset);\n } else {\n var actualOffset = this.getOffset(valueOffset);\n fieldValues = this.getValues(fieldType, typeCount, actualOffset);\n }\n\n if (typeCount === 1 && arrayFields.indexOf(fieldTag) === -1 && !(fieldType === fieldTypes.RATIONAL || fieldType === fieldTypes.SRATIONAL)) {\n return fieldValues[0];\n }\n\n return fieldValues;\n },\n\n parseGeoKeyDirectory: function parseGeoKeyDirectory(fileDirectory) {\n var rawGeoKeyDirectory = fileDirectory.GeoKeyDirectory;\n if (!rawGeoKeyDirectory) {\n return null;\n }\n\n var geoKeyDirectory = {};\n for (var i = 4; i < rawGeoKeyDirectory[3] * 4; i += 4) {\n var key = geoKeyNames[rawGeoKeyDirectory[i]],\n location = rawGeoKeyDirectory[i + 1] ? fieldTagNames[rawGeoKeyDirectory[i + 1]] : null,\n count = rawGeoKeyDirectory[i + 2],\n offset = rawGeoKeyDirectory[i + 3];\n\n var value = null;\n if (!location) {\n value = offset;\n } else {\n value = fileDirectory[location];\n if (typeof value === \"undefined\" || value === null) {\n throw new Error(\"Could not get value of geoKey '\" + key + \"'.\");\n } else if (typeof value === \"string\") {\n value = value.substring(offset, offset + count - 1);\n } else if (value.subarray) {\n value = value.subarray(offset, offset + count - 1);\n }\n }\n geoKeyDirectory[key] = value;\n }\n return geoKeyDirectory;\n },\n\n parseFileDirectories: function parseFileDirectories(byteOffset) {\n var nextIFDByteOffset = byteOffset;\n var fileDirectories = [];\n\n while (nextIFDByteOffset !== 0x00000000) {\n var numDirEntries = this.bigTiff ? this.dataView.getUint64(nextIFDByteOffset, this.littleEndian) : this.dataView.getUint16(nextIFDByteOffset, this.littleEndian);\n\n var fileDirectory = {};\n var i = nextIFDByteOffset + (this.bigTiff ? 8 : 2);\n for (var entryCount = 0; entryCount < numDirEntries; i += this.bigTiff ? 20 : 12, ++entryCount) {\n var fieldTag = this.dataView.getUint16(i, this.littleEndian);\n var fieldType = this.dataView.getUint16(i + 2, this.littleEndian);\n var typeCount = this.bigTiff ? this.dataView.getUint64(i + 4, this.littleEndian) : this.dataView.getUint32(i + 4, this.littleEndian);\n\n fileDirectory[fieldTagNames[fieldTag]] = this.getFieldValues(fieldTag, fieldType, typeCount, i + (this.bigTiff ? 12 : 8));\n }\n fileDirectories.push([fileDirectory, this.parseGeoKeyDirectory(fileDirectory)]);\n\n nextIFDByteOffset = this.getOffset(i);\n }\n return fileDirectories;\n },\n\n /**\n * Get the n-th internal subfile a an image. 
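// The GeoTIFF constructor earlier in this file derives endianness and TIFF flavour
// from the first bytes of the file: 0x4949 ("II") means little-endian, 0x4D4D ("MM")
// big-endian, and the magic number that follows distinguishes classic TIFF (42) from
// BigTIFF (43). A minimal standalone restatement of that check using only a plain
// DataView; the helper name sniffTiffHeader is illustrative, not part of the library.
function sniffTiffHeader(arrayBuffer) {
  var view = new DataView(arrayBuffer);
  var bom = view.getUint16(0, false);
  var littleEndian;
  if (bom === 0x4949) { littleEndian = true; }
  else if (bom === 0x4D4D) { littleEndian = false; }
  else { throw new TypeError("Invalid byte order value."); }
  var magic = view.getUint16(2, littleEndian); // 42: classic TIFF, 43: BigTIFF
  return { littleEndian: littleEndian, bigTiff: magic === 43 };
}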
By default, the first is returned.\n *\n * @param {Number} [index=0] the index of the image to return.\n * @returns {GeoTIFFImage} the image at the given index\n */\n getImage: function getImage(index) {\n index = index || 0;\n var fileDirectoryAndGeoKey = this.fileDirectories[index];\n if (!fileDirectoryAndGeoKey) {\n throw new RangeError(\"Invalid image index\");\n }\n return new GeoTIFFImage(fileDirectoryAndGeoKey[0], fileDirectoryAndGeoKey[1], this.dataView, this.littleEndian, this.cache);\n },\n\n /**\n * Returns the count of the internal subfiles.\n *\n * @returns {Number} the number of internal subfile images\n */\n getImageCount: function getImageCount() {\n return this.fileDirectories.length;\n }\n};\n\nmodule.exports = GeoTIFF;","\"use strict\";\n\nvar GeoTIFF = require(\"./geotiff.js\");\n\n/**\n * Main parsing function for GeoTIFF files.\n * @param {(string|ArrayBuffer)} data Raw data to parse the GeoTIFF from.\n * @param {Object} [options] further options.\n * @param {Boolean} [options.cache=false] whether or not decoded tiles shall be cached.\n * @returns {GeoTIFF} the parsed geotiff file.\n */\nvar parse = function parse(data, options) {\n var rawData, i, strLen, view;\n if (typeof data === \"string\" || data instanceof String) {\n rawData = new ArrayBuffer(data.length * 2); // 2 bytes for each char\n view = new Uint16Array(rawData);\n for (i = 0, strLen = data.length; i < strLen; ++i) {\n view[i] = data.charCodeAt(i);\n }\n } else if (data instanceof ArrayBuffer) {\n rawData = data;\n } else {\n throw new Error(\"Invalid input data given.\");\n }\n return new GeoTIFF(rawData, options);\n};\n\nif (typeof module !== \"undefined\" && typeof module.exports !== \"undefined\") {\n module.exports.parse = parse;\n}\nif (typeof window !== \"undefined\") {\n window[\"GeoTIFF\"] = { parse: parse };\n}","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\nimport geotiff from 'geotiff';\nimport { Filter } from '../core/Filter.js';\nimport { Image2D } from '../core/Image2D.js';\n\n\n/**\n* Read and decode Tiff format. The decoder for BigTiff is experimental.\n* Takes an ArrayBuffer of a tiff file as input and the TiffDecoder outputs an Image2D.\n* Tiff format is very broad and this decoder, thanks to the Geotiff npm package\n* is compatible with single or multiband images, with or without compression, using\n* various bith depth and types (8bits, 32bits, etc.)\n*\n* Info: Tiff 6.0 specification http://www.npes.org/pdf/TIFF-v6.pdf\n*\n* **Usage**\n* - [examples/savePixpFile.html](../examples/fileToTiff.html)\n*\n*/\nclass TiffDecoder extends Filter {\n constructor() {\n super();\n this.addInputValidator(0, ArrayBuffer);\n }\n \n _run(){\n\n var inputBuffer = this._getInput(0);\n\n if(!inputBuffer){\n console.warn(\"TiffDecoder requires an ArrayBuffer as input \\\"0\\\". 
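// A minimal usage sketch of the parse() entry point documented above, mirroring what
// TiffDecoder does below. `buffer` is assumed to already hold the bytes of a .tif file;
// the module is reachable as geotiff.parse via the npm import, or window.GeoTIFF.parse
// in a browser.
var tiff = geotiff.parse(buffer);
var image = tiff.getImage();                          // first internal subfile (index 0)
var pixels = image.readRasters({ interleave: true }); // flat, interleaved samples
var width = image.getWidth();
var height = image.getHeight();
var samplesPerPixel = image.getSamplesPerPixel();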
Unable to continue.\");\n return;\n }\n \n var success = false;\n \n var tiffData = geotiff.parse(inputBuffer);\n var tiffImage = tiffData.getImage();\n \n var data = tiffImage.readRasters( {interleave: true} );\n var width = tiffImage.getWidth();\n var height = tiffImage.getHeight();\n var ncpp = tiffImage.getSamplesPerPixel();\n \n if(ncpp == (data.length / (width*height))){\n success = true;\n }\n \n if( success ){\n var outputImg = this._addOutput( Image2D );\n outputImg.setData( data, width, height, ncpp);\n }else{\n console.warn(\"Tiff support is experimental and this file is not compatible.\");\n }\n \n }\n \n \n} /* END of class TiffDecoder */\n\nexport { TiffDecoder }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* License MIT\n* Link https://github.com/jonathanlurie/es6module\n* Lab MCIN - http://mcin.ca/ - Montreal Neurological Institute\n*/\n\n\n/**\n* An instance of QeegModFileParser can be used to parse several file\n* (you don't need to create a QeegModFileParser instance per file to parse).\n* The Qeeg MOD file usually have the .MOD extension, though this parser does not\n* need the filename or the extension.\n*\n*/\nclass QeegModFileParser {\n constructor(){\n this._rawData = null;\n }\n \n /**\n * Feed the parser with raw data to be parsed\n * @param {ArrayBuffer} data - the raw data\n */\n setRawData( data ){\n this._rawData = data;\n }\n \n \n /**\n * Launch the parsing of the ArrayBuffer that was given with the method setRawData\n * @return {Object} - the MOD file data in a readable format\n */\n parse(){\n var qeegData = null;\n try{\n qeegData = this._parseNoException();\n }catch(e){\n //console.error( e );\n console.warn(\"This file is not compatible.\");\n }\n \n return qeegData;\n }\n \n \n /**\n * [PRIVATE]\n * This method parses the data without caring of potential exception to be raised,\n * it does not handle them. Thus, this method is unsafe to use as is and should\n * no be used directly.\n * @return {Object} - the MOD file data in a readable format\n */\n _parseNoException(){\n if( !this._rawData ){\n console.warn(\"The input buffer is null. 
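// Intended use of the QeegModFileParser described above: a single instance can be
// reused for several files. `modArrayBuffer` is assumed to hold the raw bytes of a
// .MOD file (e.g. read with a FileReader); parse() returns null when the buffer
// cannot be decoded.
var modParser = new QeegModFileParser();
modParser.setRawData(modArrayBuffer);
var modData = modParser.parse();
if (modData) {
  console.log(modData);
}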
Nothing to be parsed here.\");\n return null;\n }\n \n var inputBuffer = this._rawData\n \n var view = new DataView( inputBuffer );\n var littleEndian = true;\n \n // ------------- DECODING HEADER -------------------\n \n var header = {};\n \n // Protection Mask\n // Offset: 0, length: 2\n header.protectionMask = view.getUint16(0, littleEndian);\n \n // Comment (first byte is the real length)\n // Offset: 2, length: 81\n var commentRealLength = view.getUint8(2);\n var commentBytes = new Uint8Array(inputBuffer, 3, commentRealLength);\n header.comment = String.fromCharCode.apply(String, commentBytes);\n \n // Measure (M) Size\n // Offset: 83, length: 2\n header.measureSize = view.getUint16(83, littleEndian);\n \n // Duration (D) Size\n // Offset: 85, length: 2\n header.durationSize = view.getUint16(85, littleEndian);\n \n // First space (F) Size\n // Offset: 87, length: 2\n header.firstSpaceSize = view.getUint16(87, littleEndian);\n \n // Second space (S) Size\n // Offset: 89, length: 2\n header.secondSpaceSize = view.getUint16(89, littleEndian);\n \n // Reserved bytes\n // Offset: 91, length: 2\n header.reservedBytes = view.getUint16(91, littleEndian);\n \n // Data size\n // Offset: 93, length: 2\n header.dataSize = view.getUint16(93, littleEndian);\n \n // ------------- DECODING MATRIX -------------------\n var matrixOffset = 95;\n \n var matrixSizeElements = header.measureSize * \n header.durationSize * \n header.firstSpaceSize * \n header.secondSpaceSize;\n \n var matrixSizeBytes = matrixSizeElements * header.dataSize;\n \n var matrixData = new Float32Array(matrixSizeElements);\n \n for(var i=0; i 1) {\n n2 = nstack.pop();\n n1 = nstack.pop();\n f = binaryOps[item.value];\n item = new Instruction(INUMBER, f(n1.value, n2.value));\n nstack.push(item);\n } else if (type === IOP3 && nstack.length > 2) {\n n3 = nstack.pop();\n n2 = nstack.pop();\n n1 = nstack.pop();\n if (item.value === '?') {\n nstack.push(n1.value ? 
n2.value : n3.value);\n } else {\n f = ternaryOps[item.value];\n item = new Instruction(INUMBER, f(n1.value, n2.value, n3.value));\n nstack.push(item);\n }\n } else if (type === IOP1 && nstack.length > 0) {\n n1 = nstack.pop();\n f = unaryOps[item.value];\n item = new Instruction(INUMBER, f(n1.value));\n nstack.push(item);\n } else if (type === IEXPR) {\n while (nstack.length > 0) {\n newexpression.push(nstack.shift());\n }\n newexpression.push(new Instruction(IEXPR, simplify(item.value, unaryOps, binaryOps, ternaryOps, values)));\n } else if (type === IMEMBER && nstack.length > 0) {\n n1 = nstack.pop();\n nstack.push(new Instruction(INUMBER, n1.value[item.value]));\n } else {\n while (nstack.length > 0) {\n newexpression.push(nstack.shift());\n }\n newexpression.push(item);\n }\n }\n while (nstack.length > 0) {\n newexpression.push(nstack.shift());\n }\n return newexpression;\n}\n\nExpression.prototype.simplify = function (values) {\n values = values || {};\n return new Expression(simplify(this.tokens, this.unaryOps, this.binaryOps, this.ternaryOps, values), this.parser);\n};\n\nfunction substitute(tokens, variable, expr) {\n var newexpression = [];\n for (var i = 0, L = tokens.length; i < L; i++) {\n var item = tokens[i];\n var type = item.type;\n if (type === IVAR && item.value === variable) {\n for (var j = 0; j < expr.tokens.length; j++) {\n var expritem = expr.tokens[j];\n var replitem;\n if (expritem.type === IOP1) {\n replitem = unaryInstruction(expritem.value);\n } else if (expritem.type === IOP2) {\n replitem = binaryInstruction(expritem.value);\n } else if (expritem.type === IOP3) {\n replitem = ternaryInstruction(expritem.value);\n } else {\n replitem = new Instruction(expritem.type, expritem.value);\n }\n newexpression.push(replitem);\n }\n } else if (type === IEXPR) {\n newexpression.push(new Instruction(IEXPR, substitute(item.value, variable, expr)));\n } else {\n newexpression.push(item);\n }\n }\n return newexpression;\n}\n\nExpression.prototype.substitute = function (variable, expr) {\n if (!(expr instanceof Expression)) {\n expr = this.parser.parse(String(expr));\n }\n\n return new Expression(substitute(this.tokens, variable, expr), this.parser);\n};\n\nfunction evaluate(tokens, expr, values) {\n var nstack = [];\n var n1, n2, n3;\n var f;\n for (var i = 0, L = tokens.length; i < L; i++) {\n var item = tokens[i];\n var type = item.type;\n if (type === INUMBER) {\n nstack.push(item.value);\n } else if (type === IOP2) {\n n2 = nstack.pop();\n n1 = nstack.pop();\n f = expr.binaryOps[item.value];\n nstack.push(f(n1, n2));\n } else if (type === IOP3) {\n n3 = nstack.pop();\n n2 = nstack.pop();\n n1 = nstack.pop();\n if (item.value === '?') {\n nstack.push(evaluate(n1 ? 
n2 : n3, expr, values));\n } else {\n f = expr.ternaryOps[item.value];\n nstack.push(f(n1, n2, n3));\n }\n } else if (type === IVAR) {\n if (item.value in expr.functions) {\n nstack.push(expr.functions[item.value]);\n } else {\n var v = values[item.value];\n if (v !== undefined) {\n nstack.push(v);\n } else {\n throw new Error('undefined variable: ' + item.value);\n }\n }\n } else if (type === IOP1) {\n n1 = nstack.pop();\n f = expr.unaryOps[item.value];\n nstack.push(f(n1));\n } else if (type === IFUNCALL) {\n var argCount = item.value;\n var args = [];\n while (argCount-- > 0) {\n args.unshift(nstack.pop());\n }\n f = nstack.pop();\n if (f.apply && f.call) {\n nstack.push(f.apply(undefined, args));\n } else {\n throw new Error(f + ' is not a function');\n }\n } else if (type === IEXPR) {\n nstack.push(item.value);\n } else if (type === IMEMBER) {\n n1 = nstack.pop();\n nstack.push(n1[item.value]);\n } else {\n throw new Error('invalid Expression');\n }\n }\n if (nstack.length > 1) {\n throw new Error('invalid Expression (parity)');\n }\n return nstack[0];\n}\n\nExpression.prototype.evaluate = function (values) {\n values = values || {};\n return evaluate(this.tokens, this, values);\n};\n\nfunction expressionToString(tokens, toJS) {\n var nstack = [];\n var n1, n2, n3;\n var f;\n for (var i = 0, L = tokens.length; i < L; i++) {\n var item = tokens[i];\n var type = item.type;\n if (type === INUMBER) {\n if (typeof item.value === 'number' && item.value < 0) {\n nstack.push('(' + item.value + ')');\n } else {\n nstack.push(escapeValue(item.value));\n }\n } else if (type === IOP2) {\n n2 = nstack.pop();\n n1 = nstack.pop();\n f = item.value;\n if (toJS) {\n if (f === '^') {\n nstack.push('Math.pow(' + n1 + ', ' + n2 + ')');\n } else if (f === 'and') {\n nstack.push('(!!' + n1 + ' && !!' + n2 + ')');\n } else if (f === 'or') {\n nstack.push('(!!' + n1 + ' || !!' + n2 + ')');\n } else if (f === '||') {\n nstack.push('(String(' + n1 + ') + String(' + n2 + '))');\n } else if (f === '==') {\n nstack.push('(' + n1 + ' === ' + n2 + ')');\n } else if (f === '!=') {\n nstack.push('(' + n1 + ' !== ' + n2 + ')');\n } else {\n nstack.push('(' + n1 + ' ' + f + ' ' + n2 + ')');\n }\n } else {\n nstack.push('(' + n1 + ' ' + f + ' ' + n2 + ')');\n }\n } else if (type === IOP3) {\n n3 = nstack.pop();\n n2 = nstack.pop();\n n1 = nstack.pop();\n f = item.value;\n if (f === '?') {\n nstack.push('(' + n1 + ' ? ' + n2 + ' : ' + n3 + ')');\n } else {\n throw new Error('invalid Expression');\n }\n } else if (type === IVAR) {\n nstack.push(item.value);\n } else if (type === IOP1) {\n n1 = nstack.pop();\n f = item.value;\n if (f === '-' || f === '+') {\n nstack.push('(' + f + n1 + ')');\n } else if (toJS) {\n if (f === 'not') {\n nstack.push('(' + '!' + n1 + ')');\n } else if (f === '!') {\n nstack.push('fac(' + n1 + ')');\n } else {\n nstack.push(f + '(' + n1 + ')');\n }\n } else if (f === '!') {\n nstack.push('(' + n1 + '!)');\n } else {\n nstack.push('(' + f + ' ' + n1 + ')');\n }\n } else if (type === IFUNCALL) {\n var argCount = item.value;\n var args = [];\n while (argCount-- > 0) {\n args.unshift(nstack.pop());\n }\n f = nstack.pop();\n nstack.push(f + '(' + args.join(', ') + ')');\n } else if (type === IMEMBER) {\n n1 = nstack.pop();\n nstack.push(n1 + '.' 
+ item.value);\n } else if (type === IEXPR) {\n nstack.push('(' + expressionToString(item.value, toJS) + ')');\n } else {\n throw new Error('invalid Expression');\n }\n }\n if (nstack.length > 1) {\n throw new Error('invalid Expression (parity)');\n }\n return nstack[0];\n}\n\nExpression.prototype.toString = function () {\n return expressionToString(this.tokens, false);\n};\n\nfunction getSymbols(tokens, symbols) {\n for (var i = 0, L = tokens.length; i < L; i++) {\n var item = tokens[i];\n if (item.type === IVAR && (indexOf(symbols, item.value) === -1)) {\n symbols.push(item.value);\n } else if (item.type === IEXPR) {\n getSymbols(item.value, symbols);\n }\n }\n}\n\nExpression.prototype.symbols = function () {\n var vars = [];\n getSymbols(this.tokens, vars);\n return vars;\n};\n\nExpression.prototype.variables = function () {\n var vars = [];\n getSymbols(this.tokens, vars);\n var functions = this.functions;\n return vars.filter(function (name) {\n return !(name in functions);\n });\n};\n\nExpression.prototype.toJSFunction = function (param, variables) {\n var expr = this;\n var f = new Function(param, 'with(this.functions) with (this.ternaryOps) with (this.binaryOps) with (this.unaryOps) { return ' + expressionToString(this.simplify(variables).tokens, true) + '; }'); // eslint-disable-line no-new-func\n return function () {\n return f.apply(expr, arguments);\n };\n};\n\nfunction add(a, b) {\n return Number(a) + Number(b);\n}\nfunction sub(a, b) {\n return a - b;\n}\nfunction mul(a, b) {\n return a * b;\n}\nfunction div(a, b) {\n return a / b;\n}\nfunction mod(a, b) {\n return a % b;\n}\nfunction concat(a, b) {\n return '' + a + b;\n}\nfunction equal(a, b) {\n return a === b;\n}\nfunction notEqual(a, b) {\n return a !== b;\n}\nfunction greaterThan(a, b) {\n return a > b;\n}\nfunction lessThan(a, b) {\n return a < b;\n}\nfunction greaterThanEqual(a, b) {\n return a >= b;\n}\nfunction lessThanEqual(a, b) {\n return a <= b;\n}\nfunction andOperator(a, b) {\n return Boolean(a && b);\n}\nfunction orOperator(a, b) {\n return Boolean(a || b);\n}\nfunction sinh(a) {\n return ((Math.exp(a) - Math.exp(-a)) / 2);\n}\nfunction cosh(a) {\n return ((Math.exp(a) + Math.exp(-a)) / 2);\n}\nfunction tanh(a) {\n if (a === Infinity) return 1;\n if (a === -Infinity) return -1;\n return (Math.exp(a) - Math.exp(-a)) / (Math.exp(a) + Math.exp(-a));\n}\nfunction asinh(a) {\n if (a === -Infinity) return a;\n return Math.log(a + Math.sqrt(a * a + 1));\n}\nfunction acosh(a) {\n return Math.log(a + Math.sqrt(a * a - 1));\n}\nfunction atanh(a) {\n return (Math.log((1 + a) / (1 - a)) / 2);\n}\nfunction log10(a) {\n return Math.log(a) * Math.LOG10E;\n}\nfunction neg(a) {\n return -a;\n}\nfunction not(a) {\n return !a;\n}\nfunction trunc(a) {\n return a < 0 ? Math.ceil(a) : Math.floor(a);\n}\nfunction random(a) {\n return Math.random() * (a || 1);\n}\nfunction factorial(a) { // a!\n return gamma(a + 1);\n}\nfunction stringLength(s) {\n return String(s).length;\n}\n\nfunction hypot() {\n var sum = 0;\n var larg = 0;\n for (var i = 0, L = arguments.length; i < L; i++) {\n var arg = Math.abs(arguments[i]);\n var div;\n if (larg < arg) {\n div = larg / arg;\n sum = sum * div * div + 1;\n larg = arg;\n } else if (arg > 0) {\n div = arg / larg;\n sum += div * div;\n } else {\n sum += arg;\n }\n }\n return larg === Infinity ? Infinity : larg * Math.sqrt(sum);\n}\n\nfunction condition(cond, yep, nope) {\n return cond ? 
yep : nope;\n}\n\nfunction isInteger(value) {\n return isFinite(value) && (value === Math.round(value));\n}\n\nvar GAMMA_G = 4.7421875;\nvar GAMMA_P = [\n 0.99999999999999709182,\n 57.156235665862923517, -59.597960355475491248,\n 14.136097974741747174, -0.49191381609762019978,\n 0.33994649984811888699e-4,\n 0.46523628927048575665e-4, -0.98374475304879564677e-4,\n 0.15808870322491248884e-3, -0.21026444172410488319e-3,\n 0.21743961811521264320e-3, -0.16431810653676389022e-3,\n 0.84418223983852743293e-4, -0.26190838401581408670e-4,\n 0.36899182659531622704e-5\n];\n\n// Gamma function from math.js\nfunction gamma(n) {\n var t, x;\n\n if (isInteger(n)) {\n if (n <= 0) {\n return isFinite(n) ? Infinity : NaN;\n }\n\n if (n > 171) {\n return Infinity; // Will overflow\n }\n\n var value = n - 2;\n var res = n - 1;\n while (value > 1) {\n res *= value;\n value--;\n }\n\n if (res === 0) {\n res = 1; // 0! is per definition 1\n }\n\n return res;\n }\n\n if (n < 0.5) {\n return Math.PI / (Math.sin(Math.PI * n) * gamma(1 - n));\n }\n\n if (n >= 171.35) {\n return Infinity; // will overflow\n }\n\n if (n > 85.0) { // Extended Stirling Approx\n var twoN = n * n;\n var threeN = twoN * n;\n var fourN = threeN * n;\n var fiveN = fourN * n;\n return Math.sqrt(2 * Math.PI / n) * Math.pow((n / Math.E), n) *\n (1 + 1 / (12 * n) + 1 / (288 * twoN) - 139 / (51840 * threeN) -\n 571 / (2488320 * fourN) + 163879 / (209018880 * fiveN) +\n 5246819 / (75246796800 * fiveN * n));\n }\n\n --n;\n x = GAMMA_P[0];\n for (var i = 1; i < GAMMA_P.length; ++i) {\n x += GAMMA_P[i] / (n + i);\n }\n\n t = n + GAMMA_G + 0.5;\n return Math.sqrt(2 * Math.PI) * Math.pow(t, n + 0.5) * Math.exp(-t) * x;\n}\n\nvar TEOF = 'TEOF';\nvar TOP = 'TOP';\nvar TNUMBER = 'TNUMBER';\nvar TSTRING = 'TSTRING';\nvar TPAREN = 'TPAREN';\nvar TCOMMA = 'TCOMMA';\nvar TNAME = 'TNAME';\n\nfunction Token(type, value, line, column) {\n this.type = type;\n this.value = value;\n this.line = line;\n this.column = column;\n}\n\nToken.prototype.toString = function () {\n return this.type + ': ' + this.value;\n};\n\nfunction TokenStream(expression, unaryOps, binaryOps, ternaryOps, consts) {\n this.pos = 0;\n this.line = 0;\n this.column = 0;\n this.current = null;\n this.unaryOps = unaryOps;\n this.binaryOps = binaryOps;\n this.ternaryOps = ternaryOps;\n this.consts = consts;\n this.expression = expression;\n this.savedPosition = 0;\n this.savedCurrent = null;\n this.savedLine = 0;\n this.savedColumn = 0;\n}\n\nTokenStream.prototype.newToken = function (type, value, line, column) {\n return new Token(type, value, line != null ? line : this.line, column != null ? 
column : this.column);\n};\n\nTokenStream.prototype.save = function () {\n this.savedPosition = this.pos;\n this.savedCurrent = this.current;\n this.savedLine = this.line;\n this.savedColumn = this.column;\n};\n\nTokenStream.prototype.restore = function () {\n this.pos = this.savedPosition;\n this.current = this.savedCurrent;\n this.line = this.savedLine;\n this.column = this.savedColumn;\n};\n\nTokenStream.prototype.next = function () {\n if (this.pos >= this.expression.length) {\n return this.newToken(TEOF, 'EOF');\n }\n\n if (this.isWhitespace() || this.isComment()) {\n return this.next();\n } else if (this.isNumber() ||\n this.isOperator() ||\n this.isString() ||\n this.isParen() ||\n this.isComma() ||\n this.isNamedOp() ||\n this.isConst() ||\n this.isName()) {\n return this.current;\n } else {\n this.parseError('Unknown character \"' + this.expression.charAt(this.pos) + '\"');\n }\n};\n\nTokenStream.prototype.isString = function () {\n var r = false;\n var startLine = this.line;\n var startColumn = this.column;\n var startPos = this.pos;\n var quote = this.expression.charAt(startPos);\n\n if (quote === '\\'' || quote === '\"') {\n this.pos++;\n this.column++;\n var index = this.expression.indexOf(quote, startPos + 1);\n while (index >= 0 && this.pos < this.expression.length) {\n this.pos = index + 1;\n if (this.expression.charAt(index - 1) !== '\\\\') {\n var rawString = this.expression.substring(startPos + 1, index);\n this.current = this.newToken(TSTRING, this.unescape(rawString), startLine, startColumn);\n var newLine = rawString.indexOf('\\n');\n var lastNewline = -1;\n while (newLine >= 0) {\n this.line++;\n this.column = 0;\n lastNewline = newLine;\n newLine = rawString.indexOf('\\n', newLine + 1);\n }\n this.column += rawString.length - lastNewline;\n r = true;\n break;\n }\n index = this.expression.indexOf(quote, index + 1);\n }\n }\n return r;\n};\n\nTokenStream.prototype.isParen = function () {\n var char = this.expression.charAt(this.pos);\n if (char === '(' || char === ')') {\n this.current = this.newToken(TPAREN, char);\n this.pos++;\n this.column++;\n return true;\n }\n return false;\n};\n\nTokenStream.prototype.isComma = function () {\n var char = this.expression.charAt(this.pos);\n if (char === ',') {\n this.current = this.newToken(TCOMMA, ',');\n this.pos++;\n this.column++;\n return true;\n }\n return false;\n};\n\nTokenStream.prototype.isConst = function () {\n var startPos = this.pos;\n var i = startPos;\n for (; i < this.expression.length; i++) {\n var c = this.expression.charAt(i);\n if (c.toUpperCase() === c.toLowerCase()) {\n if (i === this.pos || (c !== '_' && c !== '.' 
&& (c < '0' || c > '9'))) {\n break;\n }\n }\n }\n if (i > startPos) {\n var str = this.expression.substring(startPos, i);\n if (str in this.consts) {\n this.current = this.newToken(TNUMBER, this.consts[str]);\n this.pos += str.length;\n this.column += str.length;\n return true;\n }\n }\n return false;\n};\n\nTokenStream.prototype.isNamedOp = function () {\n var startPos = this.pos;\n var i = startPos;\n for (; i < this.expression.length; i++) {\n var c = this.expression.charAt(i);\n if (c.toUpperCase() === c.toLowerCase()) {\n if (i === this.pos || (c !== '_' && (c < '0' || c > '9'))) {\n break;\n }\n }\n }\n if (i > startPos) {\n var str = this.expression.substring(startPos, i);\n if (str in this.binaryOps || str in this.unaryOps || str in this.ternaryOps) {\n this.current = this.newToken(TOP, str);\n this.pos += str.length;\n this.column += str.length;\n return true;\n }\n }\n return false;\n};\n\nTokenStream.prototype.isName = function () {\n var startPos = this.pos;\n var i = startPos;\n for (; i < this.expression.length; i++) {\n var c = this.expression.charAt(i);\n if (c.toUpperCase() === c.toLowerCase()) {\n if (i === this.pos || (c !== '_' && (c < '0' || c > '9'))) {\n break;\n }\n }\n }\n if (i > startPos) {\n var str = this.expression.substring(startPos, i);\n this.current = this.newToken(TNAME, str);\n this.pos += str.length;\n this.column += str.length;\n return true;\n }\n return false;\n};\n\nTokenStream.prototype.isWhitespace = function () {\n var r = false;\n var char = this.expression.charAt(this.pos);\n while (char === ' ' || char === '\\t' || char === '\\n' || char === '\\r') {\n r = true;\n this.pos++;\n this.column++;\n if (char === '\\n') {\n this.line++;\n this.column = 0;\n }\n if (this.pos >= this.expression.length) {\n break;\n }\n char = this.expression.charAt(this.pos);\n }\n return r;\n};\n\nvar codePointPattern = /^[0-9a-f]{4}$/i;\n\nTokenStream.prototype.unescape = function (v) {\n var index = v.indexOf('\\\\');\n if (index < 0) {\n return v;\n }\n\n var buffer = v.substring(0, index);\n while (index >= 0) {\n var c = v.charAt(++index);\n switch (c) {\n case '\\'':\n buffer += '\\'';\n break;\n case '\"':\n buffer += '\"';\n break;\n case '\\\\':\n buffer += '\\\\';\n break;\n case '/':\n buffer += '/';\n break;\n case 'b':\n buffer += '\\b';\n break;\n case 'f':\n buffer += '\\f';\n break;\n case 'n':\n buffer += '\\n';\n break;\n case 'r':\n buffer += '\\r';\n break;\n case 't':\n buffer += '\\t';\n break;\n case 'u':\n // interpret the following 4 characters as the hex of the unicode code point\n var codePoint = v.substring(index + 1, index + 5);\n if (!codePointPattern.test(codePoint)) {\n this.parseError('Illegal escape sequence: \\\\u' + codePoint);\n }\n buffer += String.fromCharCode(parseInt(codePoint, 16));\n index += 4;\n break;\n default:\n throw this.parseError('Illegal escape sequence: \"\\\\' + c + '\"');\n }\n ++index;\n var backslash = v.indexOf('\\\\', index);\n buffer += v.substring(index, backslash < 0 ? 
v.length : backslash);\n index = backslash;\n }\n\n return buffer;\n};\n\nTokenStream.prototype.isComment = function () {\n var char = this.expression.charAt(this.pos);\n if (char === '/' && this.expression.charAt(this.pos + 1) === '*') {\n var startPos = this.pos;\n this.pos = this.expression.indexOf('*/', this.pos) + 2;\n if (this.pos === 1) {\n this.pos = this.expression.length;\n }\n var comment = this.expression.substring(startPos, this.pos);\n var newline = comment.indexOf('\\n');\n while (newline >= 0) {\n this.line++;\n this.column = comment.length - newline;\n newline = comment.indexOf('\\n', newline + 1);\n }\n return true;\n }\n return false;\n};\n\nTokenStream.prototype.isNumber = function () {\n var valid = false;\n var pos = this.pos;\n var startPos = pos;\n var resetPos = pos;\n var column = this.column;\n var resetColumn = column;\n var foundDot = false;\n var foundDigits = false;\n var char;\n\n while (pos < this.expression.length) {\n char = this.expression.charAt(pos);\n if ((char >= '0' && char <= '9') || (!foundDot && char === '.')) {\n if (char === '.') {\n foundDot = true;\n } else {\n foundDigits = true;\n }\n pos++;\n column++;\n valid = foundDigits;\n } else {\n break;\n }\n }\n\n if (valid) {\n resetPos = pos;\n resetColumn = column;\n }\n\n if (char === 'e' || char === 'E') {\n pos++;\n column++;\n var acceptSign = true;\n var validExponent = false;\n while (pos < this.expression.length) {\n char = this.expression.charAt(pos);\n if (acceptSign && (char === '+' || char === '-')) {\n acceptSign = false;\n } else if (char >= '0' && char <= '9') {\n validExponent = true;\n acceptSign = false;\n } else {\n break;\n }\n pos++;\n column++;\n }\n\n if (!validExponent) {\n pos = resetPos;\n column = resetColumn;\n }\n }\n\n if (valid) {\n this.current = this.newToken(TNUMBER, parseFloat(this.expression.substring(startPos, pos)));\n this.pos = pos;\n this.column = column;\n } else {\n this.pos = resetPos;\n this.column = resetColumn;\n }\n return valid;\n};\n\nTokenStream.prototype.isOperator = function () {\n var char = this.expression.charAt(this.pos);\n\n if (char === '+' || char === '-' || char === '*' || char === '/' || char === '%' || char === '^' || char === '?' 
|| char === ':' || char === '.') {\n this.current = this.newToken(TOP, char);\n } else if (char === '∙' || char === '•') {\n this.current = this.newToken(TOP, '*');\n } else if (char === '>') {\n if (this.expression.charAt(this.pos + 1) === '=') {\n this.current = this.newToken(TOP, '>=');\n this.pos++;\n this.column++;\n } else {\n this.current = this.newToken(TOP, '>');\n }\n } else if (char === '<') {\n if (this.expression.charAt(this.pos + 1) === '=') {\n this.current = this.newToken(TOP, '<=');\n this.pos++;\n this.column++;\n } else {\n this.current = this.newToken(TOP, '<');\n }\n } else if (char === '|') {\n if (this.expression.charAt(this.pos + 1) === '|') {\n this.current = this.newToken(TOP, '||');\n this.pos++;\n this.column++;\n } else {\n return false;\n }\n } else if (char === '=') {\n if (this.expression.charAt(this.pos + 1) === '=') {\n this.current = this.newToken(TOP, '==');\n this.pos++;\n this.column++;\n } else {\n return false;\n }\n } else if (char === '!') {\n if (this.expression.charAt(this.pos + 1) === '=') {\n this.current = this.newToken(TOP, '!=');\n this.pos++;\n this.column++;\n } else {\n this.current = this.newToken(TOP, char);\n }\n } else {\n return false;\n }\n this.pos++;\n this.column++;\n return true;\n};\n\nTokenStream.prototype.parseError = function (msg) {\n throw new Error('parse error [' + (this.line + 1) + ':' + (this.column + 1) + ']: ' + msg);\n};\n\nvar unaryInstructionCache = {};\nfunction unaryInstruction(value) {\n var inst = unaryInstructionCache[value];\n if (!inst) {\n inst = unaryInstructionCache[value] = new Instruction(IOP1, value);\n }\n return inst;\n}\n\nvar binaryInstructionCache = {};\nfunction binaryInstruction(value) {\n var inst = binaryInstructionCache[value];\n if (!inst) {\n inst = binaryInstructionCache[value] = new Instruction(IOP2, value);\n }\n return inst;\n}\n\nvar ternaryInstructionCache = {};\nfunction ternaryInstruction(value) {\n var inst = ternaryInstructionCache[value];\n if (!inst) {\n inst = ternaryInstructionCache[value] = new Instruction(IOP3, value);\n }\n return inst;\n}\n\nfunction ParserState(parser, tokenStream) {\n this.parser = parser;\n this.tokens = tokenStream;\n this.current = null;\n this.nextToken = null;\n this.next();\n this.savedCurrent = null;\n this.savedNextToken = null;\n}\n\nParserState.prototype.next = function () {\n this.current = this.nextToken;\n return (this.nextToken = this.tokens.next());\n};\n\nParserState.prototype.tokenMatches = function (token, value) {\n if (typeof value === 'undefined') {\n return true;\n } else if (Array.isArray(value)) {\n return indexOf(value, token.value) >= 0;\n } else if (typeof value === 'function') {\n return value(token);\n } else {\n return token.value === value;\n }\n};\n\nParserState.prototype.save = function () {\n this.savedCurrent = this.current;\n this.savedNextToken = this.nextToken;\n this.tokens.save();\n};\n\nParserState.prototype.restore = function () {\n this.tokens.restore();\n this.current = this.savedCurrent;\n this.nextToken = this.savedNextToken;\n};\n\nParserState.prototype.accept = function (type, value) {\n if (this.nextToken.type === type && this.tokenMatches(this.nextToken, value)) {\n this.next();\n return true;\n }\n return false;\n};\n\nParserState.prototype.expect = function (type, value) {\n if (!this.accept(type, value)) {\n throw new Error('parse error [' + this.tokens.line + ':' + this.tokens.column + ']: Expected ' + (value || type));\n }\n};\n\nParserState.prototype.parseAtom = function (instr) {\n if 
(this.accept(TNAME)) {\n instr.push(new Instruction(IVAR, this.current.value));\n } else if (this.accept(TNUMBER)) {\n instr.push(new Instruction(INUMBER, this.current.value));\n } else if (this.accept(TSTRING)) {\n instr.push(new Instruction(INUMBER, this.current.value));\n } else if (this.accept(TPAREN, '(')) {\n this.parseExpression(instr);\n this.expect(TPAREN, ')');\n } else {\n throw new Error('unexpected ' + this.nextToken);\n }\n};\n\nParserState.prototype.parseExpression = function (instr) {\n this.parseConditionalExpression(instr);\n};\n\nParserState.prototype.parseConditionalExpression = function (instr) {\n this.parseOrExpression(instr);\n while (this.accept(TOP, '?')) {\n var trueBranch = [];\n var falseBranch = [];\n this.parseConditionalExpression(trueBranch);\n this.expect(TOP, ':');\n this.parseConditionalExpression(falseBranch);\n instr.push(new Instruction(IEXPR, trueBranch));\n instr.push(new Instruction(IEXPR, falseBranch));\n instr.push(ternaryInstruction('?'));\n }\n};\n\nParserState.prototype.parseOrExpression = function (instr) {\n this.parseAndExpression(instr);\n while (this.accept(TOP, 'or')) {\n this.parseAndExpression(instr);\n instr.push(binaryInstruction('or'));\n }\n};\n\nParserState.prototype.parseAndExpression = function (instr) {\n this.parseComparison(instr);\n while (this.accept(TOP, 'and')) {\n this.parseComparison(instr);\n instr.push(binaryInstruction('and'));\n }\n};\n\nParserState.prototype.parseComparison = function (instr) {\n this.parseAddSub(instr);\n while (this.accept(TOP, ['==', '!=', '<', '<=', '>=', '>'])) {\n var op = this.current;\n this.parseAddSub(instr);\n instr.push(binaryInstruction(op.value));\n }\n};\n\nParserState.prototype.parseAddSub = function (instr) {\n this.parseTerm(instr);\n while (this.accept(TOP, ['+', '-', '||'])) {\n var op = this.current;\n this.parseTerm(instr);\n instr.push(binaryInstruction(op.value));\n }\n};\n\nParserState.prototype.parseTerm = function (instr) {\n this.parseFactor(instr);\n while (this.accept(TOP, ['*', '/', '%'])) {\n var op = this.current;\n this.parseFactor(instr);\n instr.push(binaryInstruction(op.value));\n }\n};\n\nParserState.prototype.parseFactor = function (instr) {\n var unaryOps = this.tokens.unaryOps;\n function isPrefixOperator(token) {\n return token.value in unaryOps;\n }\n\n this.save();\n if (this.accept(TOP, isPrefixOperator)) {\n if ((this.current.value !== '-' && this.current.value !== '+' && this.nextToken.type === TPAREN && this.nextToken.value === '(')) {\n this.restore();\n this.parseExponential(instr);\n } else {\n var op = this.current;\n this.parseFactor(instr);\n instr.push(unaryInstruction(op.value));\n }\n } else {\n this.parseExponential(instr);\n }\n};\n\nParserState.prototype.parseExponential = function (instr) {\n this.parsePostfixExpression(instr);\n while (this.accept(TOP, '^')) {\n this.parseFactor(instr);\n instr.push(binaryInstruction('^'));\n }\n};\n\nParserState.prototype.parsePostfixExpression = function (instr) {\n this.parseFunctionCall(instr);\n while (this.accept(TOP, '!')) {\n instr.push(unaryInstruction('!'));\n }\n};\n\nParserState.prototype.parseFunctionCall = function (instr) {\n var unaryOps = this.tokens.unaryOps;\n function isPrefixOperator(token) {\n return token.value in unaryOps;\n }\n\n if (this.accept(TOP, isPrefixOperator)) {\n var op = this.current;\n this.parseAtom(instr);\n instr.push(unaryInstruction(op.value));\n } else {\n this.parseMemberExpression(instr);\n while (this.accept(TPAREN, '(')) {\n if (this.accept(TPAREN, ')')) {\n 
instr.push(new Instruction(IFUNCALL, 0));\n } else {\n var argCount = this.parseArgumentList(instr);\n instr.push(new Instruction(IFUNCALL, argCount));\n }\n }\n }\n};\n\nParserState.prototype.parseArgumentList = function (instr) {\n var argCount = 0;\n\n while (!this.accept(TPAREN, ')')) {\n this.parseExpression(instr);\n ++argCount;\n while (this.accept(TCOMMA)) {\n this.parseExpression(instr);\n ++argCount;\n }\n }\n\n return argCount;\n};\n\nParserState.prototype.parseMemberExpression = function (instr) {\n this.parseAtom(instr);\n while (this.accept(TOP, '.')) {\n this.expect(TNAME);\n instr.push(new Instruction(IMEMBER, this.current.value));\n }\n};\n\nfunction Parser() {\n this.unaryOps = {\n sin: Math.sin,\n cos: Math.cos,\n tan: Math.tan,\n asin: Math.asin,\n acos: Math.acos,\n atan: Math.atan,\n sinh: Math.sinh || sinh,\n cosh: Math.cosh || cosh,\n tanh: Math.tanh || tanh,\n asinh: Math.asinh || asinh,\n acosh: Math.acosh || acosh,\n atanh: Math.atanh || atanh,\n sqrt: Math.sqrt,\n log: Math.log,\n ln: Math.log,\n lg: Math.log10 || log10,\n log10: Math.log10 || log10,\n abs: Math.abs,\n ceil: Math.ceil,\n floor: Math.floor,\n round: Math.round,\n trunc: Math.trunc || trunc,\n '-': neg,\n '+': Number,\n exp: Math.exp,\n not: not,\n length: stringLength,\n '!': factorial\n };\n\n this.binaryOps = {\n '+': add,\n '-': sub,\n '*': mul,\n '/': div,\n '%': mod,\n '^': Math.pow,\n '||': concat,\n '==': equal,\n '!=': notEqual,\n '>': greaterThan,\n '<': lessThan,\n '>=': greaterThanEqual,\n '<=': lessThanEqual,\n and: andOperator,\n or: orOperator\n };\n\n this.ternaryOps = {\n '?': condition\n };\n\n this.functions = {\n random: random,\n fac: factorial,\n min: Math.min,\n max: Math.max,\n hypot: Math.hypot || hypot,\n pyt: Math.hypot || hypot, // backward compat\n pow: Math.pow,\n atan2: Math.atan2,\n 'if': condition,\n gamma: gamma\n };\n\n this.consts = {\n E: Math.E,\n PI: Math.PI,\n 'true': true,\n 'false': false\n };\n}\n\nParser.parse = function (expr) {\n return new Parser().parse(expr);\n};\n\nParser.evaluate = function (expr, variables) {\n return Parser.parse(expr).evaluate(variables);\n};\n\nParser.prototype = {\n parse: function (expr) {\n var instr = [];\n var parserState = new ParserState(this, new TokenStream(expr, this.unaryOps, this.binaryOps, this.ternaryOps, this.consts));\n parserState.parseExpression(instr);\n parserState.expect(TEOF, 'EOF');\n\n return new Expression(instr, this);\n },\n\n evaluate: function (expr, variables) {\n return this.parse(expr).evaluate(variables);\n }\n};\n\nvar parser = {\n Parser: Parser,\n Expression: Expression\n};\n\nreturn parser;\n\n})));\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\n\nimport Parser from 'expr-eval'\nimport { Image2D } from '../core/Image2D.js';\nimport { ImageToImageFilter } from '../core/ImageToImageFilter.js';\n\n\n/**\n* An instance of ImageBlendExpressionFilter takes Image2D inputs, as many as\n* we need as long as they have the same size and the same number of components\n* per pixel.\n* This filter blends images pixel values using a literal expression. 
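// A short usage sketch of the expression parser defined above, either through a
// Parser instance or the static helper:
var exprParser = new Parser();
var expression = exprParser.parse('(A + B) / 2');
expression.evaluate({ A: 10, B: 20 });   // 15
expression.variables();                  // ['A', 'B']
Parser.evaluate('2 * x + 1', { x: 3 });  // 7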
This expression\n* should be set using `setMetadata( \"expresssion\", \"A * B\" )` , where `A` and `B`\n* are the categories set in input.\n*\n* Using a blending expression is the aesiest way to test a blending but it is a\n* pretty slow process since the expresion has to be evaluated for every process.\n* To speed-up your process, it is recomended to develop a new filter that does\n* exactly (and only) the blending method you want.\n*\n* **usage** \n* - [examples/imageBlending.html](../examples/imageBlending.html)\n* - [examples/imageBlending2.html](../examples/imageBlending2.html)\n* - [examples/forEachPixelGradientBlend.html](../examples/forEachPixelGradientBlend.html)\n*\n*/\nclass ImageBlendExpressionFilter extends ImageToImageFilter {\n\n constructor(){\n super();\n }\n\n\n\n _run(){\n\n // the metadata was not set\n if(!this.hasMetadata(\"expression\")){\n console.warn(\"A filter of type ImageBlendExpressionFilter requires a blending expression.\\nUse 'setMetadata(\\\"expression\\\", \\\"...\\\")' to set it.\" );\n return;\n }\n\n if( !this.hasSameNcppInput() || !this.hasSameSizeInput() ){\n return;\n }\n\n if(!this.getNumberOfInputs()){\n console.warn(\"A filter of type ImageBlendExpressionFilter requires at least one input.\");\n return;\n }\n\n var inputCategories = this.getInputCategories();\n var firstInput = this._getInput( inputCategories[0] );\n var outputBuffer = firstInput.getDataCopy();\n var parser = new Parser.Parser();\n var expr = parser.parse( this.getMetadata(\"expression\") );\n\n for(var i=0; i N\n [-1 , 0], // [1] => W\n [ 0 , 1], // [2] => S\n [ 1 , 0] // [3] => E\n ];\n \n this._directionListConnexity8 = [\n [ 0 , -1], // [0] => N\n [-1 , -1], // [1] => NW\n [-1 , 0], // [2] => W\n [-1 , 1], // [3] => SW\n [ 0 , 1], // [4] => S\n [ 1 , 1], // [5] => SE\n [ 1 , 0], // [6] => E\n [ 1 , -1] // [7] => NE\n ];\n \n }\n \n \n _run(){\n // the input checking\n if( ! 
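// A possible usage sketch of the ImageBlendExpressionFilter documented above. The
// categories "A" and "B" become the variable names of the expression; the exact
// addInput(image, category) signature is assumed here from that documentation, and
// imageA/imageB stand for two Image2D of identical size and ncpp.
var blend = new ImageBlendExpressionFilter();
blend.addInput(imageA, "A");
blend.addInput(imageB, "B");
blend.setMetadata("expression", "(A + B) / 2");  // pixel-wise average
blend.update();
var blended = blend.getOutput();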
this.hasValidInput()){\n console.warn(\"A filter of type AngleToHueWheelHelper requires 1 input of category '0'.\");\n return;\n }\n \n var imageIn = this._getInput(0);\n var ncpp = imageIn.getNcpp();\n var width = imageIn.getWidth();\n var height = imageIn.getHeight();\n var directionList = null\n \n if( this.getMetadata(\"connexity\") == 8){\n directionList = this._directionListConnexity8;\n }else{\n directionList = this._directionListConnexity4;\n }\n \n // handy color comparison\n function isSameColor(c1, c2){\n if(c1.length != c2.length)\n return false;\n \n for(var i=0; i=width || newSeed[1]>= height){\n console.warn(\"The seed is out of image range.\");\n return;\n }\n \n var clusterColor = imageIn.getPixel( {x: newSeed[0], y: newSeed[1]} );\n var newColor = clusterColor;\n var atNorth = newSeed.slice();\n \n \n var canStartFromOriginalSeed = false;\n \n \n // test the local surrounding and avoid going North\n for(var i=0; i= width || potentialPosition[1] >= height)\n {\n return 2;\n }\n \n var potentialPositionColor = imageIn.getPixel( {x: potentialPosition[0], y: potentialPosition[1]} );\n \n // test if the new direction goes with the same color\n if( isSameColor(potentialPositionColor, clusterColor) ){\n \n if( potentialPosition[0]==listOfValidPoints[0] && // the point just found is the\n potentialPosition[1]==listOfValidPoints[1] ) // same as the very first\n {\n return 0; // break the loop\n }else{\n // we validate the point and keep moving\n movingPoint[0] = potentialPosition[0];\n movingPoint[1] = potentialPosition[1];\n listOfValidPoints.push( movingPoint[0] );\n listOfValidPoints.push( movingPoint[1] );\n }\n return 1; // continue the loop\n }\n return 2; // try directions\n }\n \n \n // start the real navigation, starting from movingPoint\n main_loop:\n while( true ){\n \n // go the previous direction on the list\n direction -= directionIncrement;\n if(direction<0)\n direction = directionList.length - directionIncrement;\n \n var score = tryPotientialPosition();\n \n if( score == 0){ \n break main_loop;\n }else if(score == 1){\n continue;\n }else{ // score == 2\n \n var nbTrials = 0;\n \n // we try the other directions\n direction_loop:\n for(var i=0; i N\n [-1 , 0], // [1] => W\n [ 0 , 1], // [2] => S\n [ 1 , 0] // [3] => E\n ];\n \n this._directionListConnexity8 = [\n [ 0 , -1], // [0] => N\n [-1 , -1], // [1] => NW\n [-1 , 0], // [2] => W\n [-1 , 1], // [3] => SW\n [ 0 , 1], // [4] => S\n [ 1 , 1], // [5] => SE\n [ 1 , 0], // [6] => E\n [ 1 , -1] // [7] => NE\n ];\n \n }\n \n \n _run(){\n \n // the input checking\n if( ! this.hasValidInput()){\n console.warn(\"A filter of type FloodFillImageFilter requires 1 input of category '0'.\");\n return;\n }\n \n var imageIn = this._getInput(0);\n var ncpp = imageIn.getNcpp();\n var width = imageIn.getWidth();\n var height = imageIn.getHeight();\n var directionList = null;\n \n if( this.getMetadata(\"connexity\") == 8){\n directionList = this._directionListConnexity8;\n }else{\n directionList = this._directionListConnexity4;\n }\n \n var replacementColor = new Array(ncpp); // red\n replacementColor[0] = 255;\n \n var paintColor = this.getMetadata(\"color\") || replacementColor;\n \n // checking color validity\n if(paintColor.length != ncpp){\n if(!(paintColor.length == 3 && ncpp ==4)){\n console.warn(\"The color to fill must have the same number of components as the input image. 
(RGB color for RGBA image is accepted)\");\n return;\n }\n }\n \n \n \n // to mark the place we've been in the filling\n var markerImage = new Image2D({width: width, height: height, color: [0]});\n var seed = this.getMetadata(\"seed\");\n var seedColor = imageIn.getPixel({x: seed[0], y: seed[1]});\n var tolerance = this.getMetadata(\"tolerance\");\n var onlyHits = this.getMetadata(\"onlyHits\");\n \n var imageOut = null;\n if(!onlyHits){\n imageOut = imageIn.clone();\n }\n \n \n // the points in this list are points at the edge, except the edge of the image\n var edgePointList = [];\n \n var pixelStack = [];\n pixelStack.push( seed );\n \n while(pixelStack.length > 0){\n \n var currentPixel = pixelStack.pop();\n var x = currentPixel[0];\n var y = currentPixel[1];\n \n if(x<0 || x>=width || y<0 || y>=height){\n continue;\n }\n \n // if the image was not filled here...\n if(markerImage.getPixel({x: x, y: y})[0] == 0){\n \n // mark as visited\n markerImage.setPixel({x: x, y: y}, [1]);\n \n // paint the image\n if(!onlyHits){\n imageOut.setPixel({x: x, y: y}, paintColor);\n }\n \n // check neighbours upon connexity degree\n var potentialPosition = [0, 0];\n var isOnEdge = false;\n \n for(var i=0; i=width || \n potentialPosition[1]<0 || potentialPosition[1]>=height ) \n { \n continue;\n }\n \n var targetColor = imageIn.getPixel({x:potentialPosition[0], y: potentialPosition[1] });\n \n var isWithinTolerance = true;\n for(var c=0; c tolerance ){\n isWithinTolerance = false;\n isOnEdge = true;\n break;\n }\n } /* END for loop color channels */\n \n if(isWithinTolerance ){\n var newCandidate = [potentialPosition[0], potentialPosition[1]];\n pixelStack.push( newCandidate );\n }\n \n } /* END for loop direction*/\n \n if(isOnEdge){\n if(x!=0 && x!=(width-1) && y!=0 && y!=(height-1)){ // we dont want the edge of the image\n edgePointList.push( currentPixel );\n }\n }\n \n } /* END if image was not filled at this position */\n \n } /* END while loop unstacking the points */\n \n if(!onlyHits){\n this._output[0] = imageOut;\n }\n \n this._output[\"edgePoints\"] = edgePointList;\n \n } /* END of _run() */\n \n \n \n \n} /* END of class FloodFillImageFilter */\n\nexport { FloodFillImageFilter }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\n\nimport { Filter } from '../core/Filter.js';\nimport { Image2D } from '../core/Image2D.js';\nimport { LineString } from '../core/LineString.js';\nimport { ContourImage2DFilter } from './ContourImage2DFilter.js';\nimport { FloodFillImageFilter } from './FloodFillImageFilter.js';\n\n\n/**\n*\n*/\nclass ContourHolesImage2DFilter extends Filter {\n \n constructor() {\n super();\n this.addInputValidator(0, Image2D);\n this.setMetadata(\"connexity\", 4);\n this.setMetadata(\"seed\", [0, 0]);\n }\n \n \n _run(){\n \n // the input checking\n if( ! 
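// Standalone use of the FloodFillImageFilter defined above, with the metadata keys
// its _run() reads ("seed", "connexity", "tolerance", "onlyHits", "color").
// `inputImage` is assumed to be an Image2D.
var fill = new FloodFillImageFilter();
fill.addInput(inputImage);
fill.setMetadata("seed", [50, 50]);        // start position [x, y]
fill.setMetadata("connexity", 4);          // 4 or 8
fill.setMetadata("tolerance", 10);         // per-component difference allowed
fill.setMetadata("onlyHits", false);       // false: also produce the painted image
fill.setMetadata("color", [255, 0, 0]);    // RGB is accepted for an RGBA image
fill.update();
var filled = fill.getOutput();                  // the painted Image2D
var edgePoints = fill.getOutput("edgePoints");  // inner-edge points of the filled region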
this.hasValidInput()){\n console.warn(\"A filter of type ContourHolesImage2DFilter requires 1 input of category '0'.\");\n return;\n }\n \n var imageIn = this._getInput(0);\n var ncpp = imageIn.getNcpp();\n var width = imageIn.getWidth();\n var height = imageIn.getHeight();\n var directionList = null;\n var contours = [];\n \n var connexity = this.getMetadata(\"connexity\");\n var seed = this.getMetadata(\"seed\");\n \n // finding the 1st contour\n var contourDetector = new ContourImage2DFilter();\n contourDetector.addInput( imageIn );\n contourDetector.setMetadata(\"connexity\", connexity);\n contourDetector.setMetadata(\"seed\", seed);\n contourDetector.update();\n \n contours.push( contourDetector.getOutput() );\n \n // From the same seed, flood fill - we dont care about the filled image, but\n // we want the hit points from it\n var filler = new FloodFillImageFilter();\n filler.addInput( imageIn );\n filler.setMetadata('onlyHits', false); // if we are not interested in the image but just want the hit points, this must be true.\n filler.setMetadata(\"connexity\", 4); // could be 4\n filler.setMetadata(\"tolerance\", 0); // in pixel value, applied to each component\n filler.setMetadata(\"seed\", seed);\n filler.update();\n \n var fillingEdgePoints = filler.getOutput(\"edgePoints\");\n \n var flyContourDetector = new ContourImage2DFilter(); // will be reused several times\n flyContourDetector.setMetadata(\"time\", false); // prevent every little contour finding to print their time\n \n // for each point found while filling, we check if already in one of the contours.\n // if not already, we launch a new contour extraction from this point (as a seed)\n // and add a new contour.\n for(var i=0; i xmax) xmax = vertices[i][0];\n if(vertices[i][1] < ymin) ymin = vertices[i][1];\n if(vertices[i][1] > ymax) ymax = vertices[i][1];\n }\n\n dx = xmax - xmin;\n dy = ymax - ymin;\n dmax = Math.max(dx, dy);\n xmid = xmin + dx * 0.5;\n ymid = ymin + dy * 0.5;\n\n return [\n [xmid - 20 * dmax, ymid - dmax],\n [xmid , ymid + 20 * dmax],\n [xmid + 20 * dmax, ymid - dmax]\n ];\n }\n\n function circumcircle(vertices, i, j, k) {\n var x1 = vertices[i][0],\n y1 = vertices[i][1],\n x2 = vertices[j][0],\n y2 = vertices[j][1],\n x3 = vertices[k][0],\n y3 = vertices[k][1],\n fabsy1y2 = Math.abs(y1 - y2),\n fabsy2y3 = Math.abs(y2 - y3),\n xc, yc, m1, m2, mx1, mx2, my1, my2, dx, dy;\n\n /* Check for coincident points */\n if(fabsy1y2 < EPSILON && fabsy2y3 < EPSILON)\n throw new Error(\"Eek! 
Coincident points!\");\n\n if(fabsy1y2 < EPSILON) {\n m2 = -((x3 - x2) / (y3 - y2));\n mx2 = (x2 + x3) / 2.0;\n my2 = (y2 + y3) / 2.0;\n xc = (x2 + x1) / 2.0;\n yc = m2 * (xc - mx2) + my2;\n }\n\n else if(fabsy2y3 < EPSILON) {\n m1 = -((x2 - x1) / (y2 - y1));\n mx1 = (x1 + x2) / 2.0;\n my1 = (y1 + y2) / 2.0;\n xc = (x3 + x2) / 2.0;\n yc = m1 * (xc - mx1) + my1;\n }\n\n else {\n m1 = -((x2 - x1) / (y2 - y1));\n m2 = -((x3 - x2) / (y3 - y2));\n mx1 = (x1 + x2) / 2.0;\n mx2 = (x2 + x3) / 2.0;\n my1 = (y1 + y2) / 2.0;\n my2 = (y2 + y3) / 2.0;\n xc = (m1 * mx1 - m2 * mx2 + my2 - my1) / (m1 - m2);\n yc = (fabsy1y2 > fabsy2y3) ?\n m1 * (xc - mx1) + my1 :\n m2 * (xc - mx2) + my2;\n }\n\n dx = x2 - xc;\n dy = y2 - yc;\n return {i: i, j: j, k: k, x: xc, y: yc, r: dx * dx + dy * dy};\n }\n\n function dedup(edges) {\n var i, j, a, b, m, n;\n\n for(j = edges.length; j; ) {\n b = edges[--j];\n a = edges[--j];\n\n for(i = j; i; ) {\n n = edges[--i];\n m = edges[--i];\n\n if((a === m && b === n) || (a === n && b === m)) {\n edges.splice(j, 2);\n edges.splice(i, 2);\n break;\n }\n }\n }\n }\n\n Delaunay = {\n triangulate: function(vertices, key) {\n var n = vertices.length,\n i, j, indices, st, open, closed, edges, dx, dy, a, b, c;\n\n /* Bail if there aren't enough vertices to form any triangles. */\n if(n < 3)\n return [];\n\n /* Slice out the actual vertices from the passed objects. (Duplicate the\n * array even if we don't, though, since we need to make a supertriangle\n * later on!) */\n vertices = vertices.slice(0);\n\n if(key)\n for(i = n; i--; )\n vertices[i] = vertices[i][key];\n\n /* Make an array of indices into the vertex array, sorted by the\n * vertices' x-position. Force stable sorting by comparing indices if\n * the x-positions are equal. */\n indices = new Array(n);\n\n for(i = n; i--; )\n indices[i] = i;\n\n indices.sort(function(i, j) {\n var diff = vertices[j][0] - vertices[i][0];\n return diff !== 0 ? diff : i - j;\n });\n\n /* Next, find the vertices of the supertriangle (which contains all other\n * triangles), and append them onto the end of a (copy of) the vertex\n * array. */\n st = supertriangle(vertices);\n vertices.push(st[0], st[1], st[2]);\n \n /* Initialize the open list (containing the supertriangle and nothing\n * else) and the closed list (which is empty since we havn't processed\n * any triangles yet). */\n open = [circumcircle(vertices, n + 0, n + 1, n + 2)];\n closed = [];\n edges = [];\n\n /* Incrementally add each vertex to the mesh. */\n for(i = indices.length; i--; edges.length = 0) {\n c = indices[i];\n\n /* For each open triangle, check to see if the current point is\n * inside it's circumcircle. If it is, remove the triangle and add\n * it's edges to an edge list. */\n for(j = open.length; j--; ) {\n /* If this point is to the right of this triangle's circumcircle,\n * then this triangle should never get checked again. Remove it\n * from the open list, add it to the closed list, and skip. */\n dx = vertices[c][0] - open[j].x;\n if(dx > 0.0 && dx * dx > open[j].r) {\n closed.push(open[j]);\n open.splice(j, 1);\n continue;\n }\n\n /* If we're outside the circumcircle, skip this triangle. */\n dy = vertices[c][1] - open[j].y;\n if(dx * dx + dy * dy - open[j].r > EPSILON)\n continue;\n\n /* Remove the triangle and add it's edges to the edge list. */\n edges.push(\n open[j].i, open[j].j,\n open[j].j, open[j].k,\n open[j].k, open[j].i\n );\n open.splice(j, 1);\n }\n\n /* Remove any doubled edges. */\n dedup(edges);\n\n /* Add a new triangle for each edge. 
*/\n for(j = edges.length; j; ) {\n b = edges[--j];\n a = edges[--j];\n open.push(circumcircle(vertices, a, b, c));\n }\n }\n\n /* Copy any remaining open triangles to the closed list, and then\n * remove any triangles that share a vertex with the supertriangle,\n * building a list of triplets that represent triangles. */\n for(i = open.length; i--; )\n closed.push(open[i]);\n open.length = 0;\n\n for(i = closed.length; i--; )\n if(closed[i].i < n && closed[i].j < n && closed[i].k < n)\n open.push(closed[i].i, closed[i].j, closed[i].k);\n\n /* Yay, we're done! */\n return open;\n },\n contains: function(tri, p) {\n /* Bounding box test first, for quick rejections. */\n if((p[0] < tri[0][0] && p[0] < tri[1][0] && p[0] < tri[2][0]) ||\n (p[0] > tri[0][0] && p[0] > tri[1][0] && p[0] > tri[2][0]) ||\n (p[1] < tri[0][1] && p[1] < tri[1][1] && p[1] < tri[2][1]) ||\n (p[1] > tri[0][1] && p[1] > tri[1][1] && p[1] > tri[2][1]))\n return null;\n\n var a = tri[1][0] - tri[0][0],\n b = tri[2][0] - tri[0][0],\n c = tri[1][1] - tri[0][1],\n d = tri[2][1] - tri[0][1],\n i = a * d - b * c;\n\n /* Degenerate tri. */\n if(i === 0.0)\n return null;\n\n var u = (d * (p[0] - tri[0][0]) - b * (p[1] - tri[0][1])) / i,\n v = (a * (p[1] - tri[0][1]) - c * (p[0] - tri[0][0])) / i;\n\n /* If we're outside the tri, fail. */\n if(u < 0.0 || v < 0.0 || (u + v) > 1.0)\n return null;\n\n return [u, v];\n }\n };\n\n if(typeof module !== \"undefined\")\n module.exports = Delaunay;\n})();\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n*\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\n\nimport { Filter } from '../core/Filter.js';\nimport { Image2D } from '../core/Image2D.js';\n\nimport Delaunay from 'delaunay-fast';\n\n/**\n* An instance of TriangulationSparseInterpolationImageFilter performs a triangulation\n* of an original dataset followed by a barycentric 2D interpolation. It is used to\n* perform a 2D linear interpolation of a sparse dataset.\n* The original dataset is specified using the method `.addInput( points )`, where\n* `points` is an `Array` of `{x: Number, y: Number, value: Number}`.\n* The triangulation is the result of a Delaunay triangulation.\n* This filter outputs an `Image2D` with interpolated values only within the boundaries\n* of the convex hull created by the triangulation. 
The size of the output must be\n* specified using the method `.setMetadata( \"outputSize\", {width: Number, height: Number})`.\n*\n* Note 1: at least 3 unaligned points are required to perform a triangulation\n* Note 2: points can be outside the boundaries of the original image\n* Note 3: interpolated values are floating point\n*\n* Note that only single-component images are outputed from this filter.\n* \n* **Usage**\n* - [examples/TriangleSparseInterpolation.html](../examples/TriangleSparseInterpolation.html)\n*/ \n\nclass TriangulationSparseInterpolationImageFilter extends Filter {\n \n constructor(){\n super()\n this.setMetadata( \"outputSize\", {width: 0, height: 0})\n }\n \n _run(){\n \n var origPoints = null;\n \n // getting the input\n if( \"0\" in this._input ){\n origPoints = this._input[ 0 ];\n }else{\n console.warn(\"No input point set were given.\");\n return;\n }\n \n var outputSize = this.getMetadata( \"outputSize\" );\n \n // checking output size\n if( outputSize.width == 0 || outputSize.height == 0 ){\n console.warn(\"The output size cannot be 0.\");\n return;\n }\n \n // remapping the point as an array of ArrayBuffer\n var points = origPoints.map( function(p){\n return [p.x, p.y];\n })\n \n // computing the list of triangles\n var triangleVertices = Delaunay.triangulate( points );\n\n // rearranging the triangles in a propper array that group by 3 the index of vertices used\n var triangles = [];\n for(var i=0; i<=triangleVertices.length-3; i+=3){\n triangles.push( [\n triangleVertices[i],\n triangleVertices[i+1],\n triangleVertices[i+2],\n ] );\n }\n\n console.log( points );\n console.log( triangles );\n \n // return the area of a triangle using Heron's formula\n // Each point A, B and C is a couple of 2D coords like [Number, Number] \n function getTriangleArea(A, B, C){\n // manhattan distances\n var _AB = [ A[0] - B[0], A[1] - B[1]];\n var _BC = [ B[0] - C[0], B[1] - C[1]];\n var _CA = [ C[0] - A[0], C[1] - A[1]];\n \n // Euclidian distances - Pythagore\n var a = Math.sqrt( _BC[0]*_BC[0] + _BC[1]*_BC[1] );\n var b = Math.sqrt( _CA[0]*_CA[0] + _CA[1]*_CA[1] );\n var c = Math.sqrt( _AB[0]*_AB[0] + _AB[1]*_AB[1] );\n \n // semiperimeter\n var s = (a + b + c) / 2;\n \n var area = Math.sqrt( s*(s-a)*(s-b)*(s-c) );\n return area;\n }\n \n // creating the output image\n var out = new pixpipe.Image2D({width: Math.round(outputSize.width), height: Math.round(outputSize.height), color: [0]})\n \n // each line of the output image...\n for(var i=0; i= ncpp ){\n console.warn(\"The component to filter must be valid.\");\n return;\n }\n \n var imageIn = this._getInput();\n var width = imageIn.getWidth();\n var height = imageIn.getHeight();\n var ncpp = imageIn.getNcpp();\n \n var minAngle = this.getMetadata(\"minAngle\");\n var maxAngle = this.getMetadata(\"maxAngle\");\n \n if(minAngle === \"auto\" || maxAngle === \"auto\"){\n minAngle = imageIn.getMin();\n maxAngle = imageIn.getMax();\n }\n \n var imageOut = new Image2D( {width: width, height: height, color: [0, 0, 0, 255] } );\n var forEachPixelFilter = new pixpipe.ForEachPixelImageFilter();\n \n // add the input input\n forEachPixelFilter.addInput( imageOut );\n\n forEachPixelFilter.on( \"pixel\", function(position, color){\n var angle = imageIn.getPixel( position )[component];\n var angle360 = ( (angle - minAngle) / (maxAngle - minAngle) ) * 360;\n var colorRGB = that._hsl2Rgba( angle360, 100, 50 );\n return colorRGB;\n });\n \n // run the filter to create a gradient image\n forEachPixelFilter.update();\n \n if( 
forEachPixelFilter.getNumberOfOutputs() == 0 ){\n console.warn(\"No output of ForEachPixelImageFilter.\");\n return;\n }\n \n // mapping the output\n this._output[ 0 ] = forEachPixelFilter.getOutput();\n \n }\n \n \n /**\n * \n * A part of this code was borrowed from github.com/netbeast/colorsys and modified.\n */\n _hsl2Rgba( h, s=100, l=100 ){\n // pseudo constants\n var HUE_MAX = 360;\n var SV_MAX = 100;\n var RGB_MAX = 255;\n \n // ouputs\n var r, g, b\n\n h = (h === HUE_MAX) ? 1 : (h % HUE_MAX / HUE_MAX)\n s = (s === SV_MAX) ? 1 : (s % SV_MAX / SV_MAX)\n l = (l === SV_MAX) ? 1 : (l % SV_MAX / SV_MAX)\n\n if (s === 0) {\n r = g = b = l // achromatic\n } else {\n var hue2rgb = function hue2rgb (p, q, t) {\n if (t < 0) t += 1\n if (t > 1) t -= 1\n if (t < 1 / 6) return p + (q - p) * 6 * t\n if (t < 1 / 2) return q\n if (t < 2 / 3) return p + (q - p) * (2 / 3 - t) * 6\n return p\n }\n\n var q = l < 0.5 ? l * (1 + s) : l + s - l * s\n var p = 2 * l - q\n r = hue2rgb(p, q, h + 1 / 3)\n g = hue2rgb(p, q, h)\n b = hue2rgb(p, q, h - 1 / 3)\n }\n \n return [ Math.round(r * RGB_MAX), \n Math.round(g * RGB_MAX), \n Math.round(b * RGB_MAX),\n 255 ];\n }\n \n \n} /* END of class AngleToHueWheelFilter */\n\nexport { AngleToHueWheelHelper }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\n\nimport { Image2D } from '../core/Image2D.js';\nimport { LineString } from '../core/LineString.js';\nimport { ImageToImageFilter } from '../core/ImageToImageFilter.js';\n\n\n/**\n* A instance of LineStringPrinterOnImage2DHelper prints a list of LineStrings on\n* an Image2D. To add the Image2D input, use `.addInput(myImage2D)`.\n* To add a LineString, use `.addLineString(ls, c );` where `ls` is a LineString \n* instance and `c` is an Array representing a color (i.e. [255, 0, 0] for red).\n*\n* **Usage**\n* - [examples/contourImage2D.html](../examples/contourImage2D.html)\n*\n*/\nclass LineStringPrinterOnImage2DHelper extends ImageToImageFilter {\n \n constructor() {\n super();\n this.addInputValidator(0, Image2D);\n this.setMetadata(\"lineStrings\", []);\n this.setMetadata(\"lineStringsColors\", []);\n }\n \n \n /**\n * Add a LineString instance to be printed on the image\n * @param {LineString} ls - a linestring to add\n * @param {Array} color - of for [R, G, B] or [R, G, B, A] \n */\n addLineString(ls, color){\n this._metadata.lineStrings.push( ls ) ;\n this._metadata.lineStringsColors.push( color ) ;\n }\n \n \n _run(){\n \n // the input checking\n if( ! 
this.hasValidInput()){\n console.warn(\"A filter of type LineStringPrinterOnImage2DHelper requires 1 input of category '0'.\");\n return;\n }\n \n var imageIn = this._getInput();\n var imageOut = imageIn.clone();\n var width = imageIn.getWidth();\n var height = imageIn.getHeight();\n var ncpp = imageIn.getNcpp();\n \n var printed = false;\n \n var lineStrings = this._metadata.lineStrings;\n var colors = this._metadata.lineStringsColors;\n \n for(var i=0; i1 ){\n console.warn(\"Each colormap segment 'index' property should be in [0, 1]\");\n return false;\n }\n }else{\n console.warn(\"Each colormap segment 'index' property should be a number.\");\n return false;\n }\n\n // the rgb property has to be an array\n if( Array.isArray( d[i].rgb ) ){\n if(d[i].rgb.length == 3){\n for(var j=0; j 255 ){\n console.warn(\"The colormap must have only values in [0, 255]\");\n return false;\n }\n }\n }else{\n console.warn(\"Each colormap segment 'rgb' should contain 3 values\");\n return false;\n }\n }\n }else{\n console.warn(\"Each colormap segment must have a 'index' property and a 'rgb' property.\");\n return false;\n }\n }else{\n console.warn(\"Each colormap segment must be a non-null object\");\n return false;\n }\n }\n return true;\n }\n\n\n /**\n * Get the color at the colormap position\n * @param {Number} position - position within the colormap in [0, 1]\n * @return {Array} color array as [r, g, b] , values being in [0, 255]\n */\n getValueAt( position ){\n if( !this._colormapDescription ){\n console.warn(\"The colormap description is not defined.\");\n return null;\n }\n\n // case 1: before the first \"index\" position\n if(position <= this._colormapDescription[0].index){\n return this._colormapDescription[0].rgb.slice()\n }\n\n // case 2: after the last \"index\" position\n if(position >= this._colormapDescription[this._colormapDescription.length - 1].index){\n return this._colormapDescription[this._colormapDescription.length - 1].rgb.slice()\n }\n\n // case 3: between 2 values of the descrition (most likely to happen)\n for(var i=0; i= this._colormapDescription[i].index && position < this._colormapDescription[i+1].index ){\n\n var unitDistanceToFirst = (position - this._colormapDescription[i].index) / (this._colormapDescription[i+1].index - this._colormapDescription[i].index);\n var unitDistanceToSecond = 1 - unitDistanceToFirst;\n\n var color = [\n Math.round(this._colormapDescription[i].rgb[0] * unitDistanceToSecond + this._colormapDescription[i+1].rgb[0] * unitDistanceToFirst), // R\n Math.round(this._colormapDescription[i].rgb[1] * unitDistanceToSecond + this._colormapDescription[i+1].rgb[1] * unitDistanceToFirst), // G\n Math.round(this._colormapDescription[i].rgb[2] * unitDistanceToSecond + this._colormapDescription[i+1].rgb[2] * unitDistanceToFirst), // B\n ]\n\n return color;\n }\n }\n }\n\n\n /**\n * Build a LUT from the colormap description\n * @param {Number} size - number of samples in the LUT\n */\n buildLut( size ){\n if( !this._colormapDescription ){\n console.warn(\"The colormap description is not defined, the LUT cannot be created\");\n return null;\n }\n\n if( size < 0 ){\n console.warn(\"Size of the colormap can not be negative.\");\n return;\n }\n\n this._LUT = new Array( size );\n\n for(var i=0; i this._LUT.length )\n return null;\n\n return this._LUT[ index ];\n }\n\n\n /**\n * Creates a horizontal Image2D of the colormap. 
The height is 1px and\n * the width is the size of the LUT currently in use.\n * @param {Boolean} flip - flips the colormap image\n * @return {Image2D} the result image\n */\n createHorizontalLutImage( flip=false ){\n if(! this._LUT ){\n console.warn(\"The LUT must be built before creating a LUT image.\");\n return;\n }\n\n var LutSize = this._LUT.length;\n var colorStrip = new Image2D({width: LutSize, height: 1, color: [0, 0, 0]});\n\n for(var i=0; i1){\n console.warn(\"The color cannot be added because its index is out of range [0, 1]\");\n return false;\n }\n\n // checking if a color is already present at the given index\n var indexAlreadyPresent = this._colormapDescription.find(function(indexAndColor){\n return indexAndColor.index == index;\n })\n\n if( indexAlreadyPresent ){\n console.warn(\"A color is already present at index \" + index);\n return false;\n }\n\n if( rgb && Array.isArray(rgb) && rgb.length == 3){\n for(var i=0; i 255){\n console.warn(\"The rgb colors must be in [0, 255]\");\n return false;\n }\n }\n }else{\n console.warn(\"The color cannot be added because its rgb array is the wrong size.\");\n return false;\n }\n\n // data integrity is ok\n this._colormapDescription.push({\"index\":index,\"rgb\":rgb.slice()})\n this._colormapDescription.sort(function(a, b) {\n return a.index - b.index;\n });\n\n return true;\n }\n\n\n /**\n * Remove the color at the given index\n * @param {Number} index - the [0, 1] index of the color to remove\n * @return {Boolean} true if successfully remove, false if not\n */\n removeColor( index ){\n if( !this._colormapDescription ){\n console.warn(\"The colormap description is empty.\");\n return false;\n }\n\n var indexAlreadyIn = this._colormapDescription.findIndex(function(element){\n return (element.index == index);\n })\n\n if( indexAlreadyIn == -1 ){\n console.warn(\"Such index does not exist.\");\n return false;\n }\n\n this._colormapDescription.splice(indexAlreadyIn, 1);\n return true;\n }\n\n\n /**\n * Get a json version of the colormap description\n * @return {String} the json string\n */\n toJson(){\n return JSON.stringify(this._colormapDescription);\n }\n\n\n} /* END of class Colormap */\n\nexport { Colormap }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\n\nimport { Filter } from '../core/Filter.js';\nimport { Image2D } from '../core/Image2D.js';\nimport { Image3D } from '../core/Image3D.js';\n\n\n/**\n* An instance of Image3DToMosaicFilter takes an Image3D as Input and output a\n* mosaic composed of each slice. 
The axis: \"xspace\", \"yspace\" or \"zspace\" can be\n* specified with `setMetadata(\"axis\", \"xspace\")`, the default being xspace.\n* The default output image is 4096x4096 but these boundaries can be changed using\n* `setMetadata(\"maxWidth\", n)` and `setMetadata(\"maxHeight\", m)`.\n* These are boundaries so the size of the output image(s) will possibly be lower\n* to not contain unused space.\n* If mosaicing the whole given Image3D does not fit in maxWidth*maxHeight, more\n* Image2D will be created and accessible through `getOutput(n)`.\n* All output image have the same size so that the last one may have dead space.\n* To know precisely the size of the output mosaic use `getMetadata(\"gridWidth\")`\n* and `getMetadata(\"gridHeight\")`, this will give the number of slices used in\n* horizontal and vertical respectively.\n*\n* **Usage**\n* - [examples/niftiToMosaic.html](../examples/niftiToMosaic.html)\n*/\nclass Image3DToMosaicFilter extends Filter{\n\n constructor(){\n super();\n this.addInputValidator(0, Image3D);\n\n // default settings\n this.setMetadata(\"maxWidth\", 4096);\n this.setMetadata(\"maxHeight\", 4096);\n this.setMetadata(\"axis\", \"xspace\");\n this.setMetadata(\"time\", 0);\n }\n\n\n _run(){\n if(! this.hasValidInput() ){\n return;\n }\n\n var inputImage3D = this._getInput(0);\n var spaceInfo = inputImage3D.getMetadata( this.getMetadata(\"axis\") );\n\n if(!spaceInfo){\n console.warn(\"Sampling axis for mosaicing was not poperly set. Has to be 'xspace', 'yspace' or 'zspace'.\");\n return;\n }\n\n var numOfSlices = spaceInfo.space_length;\n var width = spaceInfo.width;\n var height = spaceInfo.height;\n \n // dealing with time series\n var startTime = 0;\n var endTime = 1;\n \n if( inputImage3D.hasMetadata(\"time\") ){\n var timeInfo = inputImage3D.getMetadata(\"time\");\n var timeLength = timeInfo.space_length;\n \n if(this._metadata.time == -1 ){\n startTime = 0;\n endTime = timeLength;\n }else if( this._metadata.time < timeLength){\n startTime = this._metadata.time;\n endTime = startTime + 1;\n }\n }\n \n var numberOfSlicesWithTime = numOfSlices * (endTime-startTime);\n\n // number of image we can fit in the with and heigth of an output image\n var widthFit = Math.floor( this.getMetadata(\"maxWidth\") / width );\n var heightFit = Math.floor( this.getMetadata(\"maxHeight\") / height );\n\n // size of the ouput image(s)\n var outputWidth = widthFit * width;\n var outputHeight = heightFit * height;\n var slicePerOutputIm = widthFit * heightFit;\n\n // Number of output image(s) necessary to cover the whole Image3D dataset\n //var outputNecessary = Math.ceil( numOfSlices / slicePerOutputIm ); // does not work for time series\n var outputNecessary = Math.ceil( numberOfSlicesWithTime / slicePerOutputIm );\n\n // if only one output, maybe it's not filled entirely, so we can make it a bit smaller\n if( outputNecessary == 1){\n outputHeight = Math.ceil( numberOfSlicesWithTime / widthFit ) * height;\n }\n\n this.setMetadata(\"gridWidth\", outputWidth / width);\n this.setMetadata(\"gridHeight\", outputHeight / height);\n\n var outputCounter = 0;\n var sliceCounter = 0;\n var sliceIndexCurrentOutput = 0;\n\n var outImage = null;\n\n // the 3 following functions are a work around to fetch voxel along the right axis\n function fetchAlongXspace(i, j, sliceIndex, time){\n return inputImage3D.getIntensity_xyz(sliceIndex, i, j, time)\n }\n\n function fetchAlongYspace(i, j, sliceIndex, time){\n return inputImage3D.getIntensity_xyz(i, sliceIndex, j, time)\n }\n\n function 
fetchAlongZspace(i, j, sliceIndex, time){\n return inputImage3D.getIntensity_xyz(i, j, sliceIndex, time)\n }\n\n var fetchAlongAxis = null;\n\n if( this._metadata.axis === \"xspace\")\n fetchAlongAxis = fetchAlongXspace;\n else if( this._metadata.axis === \"yspace\")\n fetchAlongAxis = fetchAlongYspace;\n else if( this._metadata.axis === \"zspace\")\n fetchAlongAxis = fetchAlongZspace;\n \n if( !fetchAlongAxis ){\n console.warn(\"The axis to sample along for the mosaic was not properly set.\");\n return;\n }\n\n // to make it works no matter the ncpp\n var initPixel = new Array( inputImage3D.getMetadata(\"ncpp\") ).fill(0);\n \n for(var t=startTime; t Time: [\" + fromRecord + \" , \" + toRecord + \"] is \" + t + \" millisec.\");\n }\n\n return t;\n }else{\n console.warn(\"The two given record name must exist in the time record table.\");\n return -1;\n }\n }\n\n\n /**\n * Defines a callback. By defautl, no callback is called.\n */\n on(eventId, callback){\n this._events[ eventId ] = callback;\n }\n\n\n /**\n * Call an event with arguments.\n * Inside the callback, the \"this\" object will be the filter.\n * @param {String} eventName - name of the event to trigger\n * @param {Object} any other param can follow\n */\n triggerEvent( eventName /* any other arguments to follow */ ){\n var returnValue = null;\n\n if(this.hasEvent(eventName)){\n if( arguments.length > 1 ){\n\n // a-la-mano slicing argument array to comply with V8 JS engine optimization...\n var argToSend = [];\n for(var i=1; i 0 && options.height > 0){\n this.setMetadata(\"width\", options.width);\n this.setMetadata(\"height\", options.height);\n\n if(\"color\" in options){\n this.setMetadata(\"ncpp\", options.color.length );\n }\n\n this._data = new Float32Array( options.width * options.height * this.getMetadata(\"ncpp\") );\n var ncpp = this.getMetadata(\"ncpp\");\n\n // init with the given color\n if(\"color\" in options){\n var color = options.color;\n for(var i=0; i=0 && position.x < this._metadata.width &&\n \"y\" in position && position.y >=0 && position.y < this._metadata.height )\n {\n\n if(color.length == ncpp){\n var pos1D = this.get1dIndexFrom2dPosition( position );\n\n if(ncpp == 1){\n this._data[ pos1D ] = color[0];\n }else{\n pos1D *= ncpp;\n for(var i=0; i=0 && position.x < this._metadata.width &&\n \"y\" in position && position.y >=0 && position.y < this._metadata.height )\n {\n //var ncpp = this.getMetadata(\"ncpp\");\n var ncpp = this._metadata.ncpp;\n var color = null;\n var pos1D = this.get1dIndexFrom2dPosition( position );\n\n // \n if(ncpp == 1){\n color = [this._data[pos1D]];\n }else{\n pos1D *= ncpp;\n color = this._data.slice(pos1D, pos1D + ncpp);\n }\n \n return color;\n\n }else{\n console.warn(\"The requested position is outside the image.\");\n return null;\n }\n }\n\n\n /**\n * Get the width of the image\n * @return {Number} the width of the Image2D\n */\n getWidth(){\n return this._metadata.width;\n }\n\n\n /**\n * Get the height of the image\n * @return {Number} the height of the Image2D\n */\n getHeight(){\n return this._metadata.height;\n }\n\n\n /**\n * Get the number of components per pixel\n * @return {Number} the number of components per pixel\n */\n getComponentsPerPixel(){\n return this._metadata.ncpp;\n }\n\n \n /**\n * Alias to getComponentsPerPixel. 
Return the number of components per pixel.\n * @return {Number} ncpp\n */\n getNcpp(){\n return this.getComponentsPerPixel();\n }\n\n\n /**\n * Get the internal image data (pointer)\n * @return {TypedArray} the original data (most likely a Float32Array), dont mess up with this one.\n * in case of doubt, use getDataCopy()\n */\n getData(){\n return this._data; // return the actual array, editable!\n }\n\n\n /**\n * Get a copy of the data\n * @return {TypedArray} a deep copy of the data (most likely a Float32Array)\n */\n getDataCopy(){\n return new this._data.constructor( this._data );\n }\n\n\n /**\n * No matter the original type of the internal data, scale it into a [0, 255] uInt8Array\n * @return {Uint8Array} scaled data\n */\n getDataAsUInt8Array(){\n if(! this._data){\n console.warn(\"No data, cannot make a copy of it.\");\n return;\n }\n\n var min = this.getMin();\n var max = this.getMax();\n\n var uintData = new Uint8Array(this._data.length);\n\n for(var i=0; i= 0 && pos.x < this._metadata.width &&\n pos.y >= 0 && pos.y < this._metadata.height\n )\n }\n \n /**\n * Sample the color along a segment\n * @param {Object} posFrom - starting position of type {x: Number, y: Number}\n * @param {Object} posFrom - ending position of type {x: Number, y: Number}\n * @return {Object} array of Array like that: {\n positions: [\n {x: x0, y: y0},\n {x: x1, y: y1},\n {x: x2, y: y2},\n ...\n ],\n labels: [\n \"(x0, y0)\", \"(x1, y1)\", \"(x2, y2)\", ...\n ],\n colors: [\n [r0, r1, r2 ...],\n [g0, g1, g2 ...],\n [b0, b1, b2 ...]\n ]\n }\n return null if posFrom or posTo is outside\n */\n getSegmentSample( posFrom, posTo ){\n // both position must be inside the image\n if( !this.isInside(posFrom) || !this.isInside(posTo) )\n return null;\n \n var dx = posTo.x - posFrom.x;\n var dy = posTo.y - posFrom.y;\n var euclidianDistance = Math.sqrt( Math.pow(dx , 2) + Math.pow(dy , 2) );\n var numberOfSamples = Math.floor( euclidianDistance + 1 );\n \n // we want to sample every unit distance along the segment\n var stepX = dx / euclidianDistance;\n var stepY = dy / euclidianDistance;\n \n var ncpp = this._metadata.ncpp;\n var positions = new Array(numberOfSamples).fill(0);\n var colors = new Array(ncpp).fill(0);\n var labels = new Array(numberOfSamples).fill(0);\n \n // creating empty arrays for colors\n for(var c=0; c 0 && options.ySize > 0 && options.zSize > 0 ){\n xspace.space_length = options.xSize;\n yspace.space_length = options.ySize;\n zspace.space_length = options.zSize;\n\n yspace.offset = xspace.space_length;\n zspace.offset = xspace.space_length * yspace.space_length;\n\n this._data = new Float32Array( options.xSize * options.ySize * options.zSize * this.getMetadata(\"ncpp\") );\n this._data.fill(0);\n\n this._scanDataRange();\n this._finishHeader();\n }\n }\n }\n\n\n /**\n * Hardcode the datatype\n */\n static TYPE(){\n return \"IMAGE3D\";\n }\n\n\n /**\n * @return {Image3D} a deep copy instance of this Image3D\n */\n clone(){\n var cpImg = new Image3D();\n\n cpImg.setData(\n this._data,\n this.getMetadata(\"xspace\").space_length,\n this.getMetadata(\"yspace\").space_length,\n this.getMetadata(\"zspace\").space_length,\n {\n ncpp: this.getMetadata(\"ncpp\"),\n order: this.getMetadata(\"order\").slice(),\n deepCopy: true,\n }\n );\n\n cpImg.copyMetadataFrom( this );\n\n return cpImg;\n }\n\n\n /**\n * Set the data to this Image3D.\n * @param {Float32Array} array - 1D array of raw data stored as RGBARGBA...\n * @param {Number} xSize - length along x dimension of the Image3D\n * @param {Number} ySize - 
length along y dimension of the Image3D\n * @param {Number} zSize - length along z dimension of the Image3D\n * @param {Number} ncpp - number of components per pixel (default: 4)\n * @param {Boolean} deepCopy - if true, a copy of the data is given, if false we jsut give the pointer\n * @param {Object} options, among them:\n * - ncpp {Number} number of components per pixel. Default = 1\n * - order {Array} dimensionality order. Default = [\"zspace\", \"yspace\", \"xspace\"]\n * - deepCopy {Boolean} copy the whole array if true, or just the pointer if false. Default = false\n *\n */\n setData( array, xSize, ySize, zSize, options){\n var ncpp = 1;\n\n // number of components per pixel\n if(options && \"ncpp\" in options){\n ncpp = options.ncpp;\n }\n\n if( array.length != xSize*ySize*zSize*ncpp){\n console.warn(\"The array size does not match the width and height. Cannot init the Image3D.\");\n return;\n }\n\n // number of components per pixel\n if(options && \"ncpp\" in options){\n this.setMetadata(\"ncpp\", options.ncpp);\n }\n\n // dimensionality order\n if(options && \"order\" in options){\n this.setMetadata(\"order\", options.order);\n }\n\n // deep of shallow copy\n if(options && \"deepCopy\" in options && options.deepCopy){\n this._data = new array.constructor( array );\n }else{\n this._data = array;\n }\n\n var xspace = this.getMetadata(\"xspace\");\n var yspace = this.getMetadata(\"yspace\");\n var zspace = this.getMetadata(\"zspace\");\n\n xspace.space_length = xSize;\n yspace.space_length = ySize;\n zspace.space_length = zSize;\n\n yspace.offset = xspace.space_length;\n zspace.offset = xspace.space_length * yspace.space_length;\n\n this._scanDataRange();\n this._finishHeader();\n }\n\n\n /**\n * [PRIVATE]\n * Creates common fields all headers must contain.\n */\n _finishHeader() {\n var xspace = this.getMetadata(\"xspace\");\n var yspace = this.getMetadata(\"yspace\");\n var zspace = this.getMetadata(\"zspace\");\n\n xspace.name = \"xspace\";\n yspace.name = \"yspace\";\n zspace.name = \"zspace\";\n\n xspace.width_space = JSON.parse( JSON.stringify( yspace ) );//yspace;\n xspace.width = yspace.space_length;\n xspace.height_space = JSON.parse( JSON.stringify( zspace ) );//zspace;\n xspace.height = zspace.space_length;\n\n yspace.width_space = JSON.parse( JSON.stringify( xspace ) );//xspace;\n yspace.width = xspace.space_length;\n yspace.height_space = JSON.parse( JSON.stringify( zspace ) );//zspace;\n yspace.height = zspace.space_length;\n\n zspace.width_space = JSON.parse( JSON.stringify( xspace ) );//xspace;\n zspace.width = xspace.space_length;\n zspace.height_space = JSON.parse( JSON.stringify( yspace ) );//yspace;\n zspace.height = yspace.space_length;\n }\n\n\n /**\n * [PRIVATE]\n * Look for min and max on the dataset and add them to the header metadata\n */\n _scanDataRange(){\n var min = +Infinity;\n var max = -Infinity;\n\n for(var i=0; i 0;\n var y_positive = height_space.step > 0;\n var z_positive = axis_space.step > 0;\n\n // iterator for the result slice.\n var i = 0;\n var intensity = 0;\n var intensitySum = 0;\n var min = Infinity;\n var max = -Infinity;\n\n var maxOfVolume = this.getMetadata(\"voxel_max\");\n\n z = z_positive ? slice_num : axis_space.space_length - slice_num - 1;\n if (z >= 0 && z < axis_space.space_length) {\n tz_offset = time_offset + z * axis_space_offset;\n\n for (row = height - 1; row >= 0; row--) {\n y = y_positive ? 
row : height - row - 1;\n tzy_offset = tz_offset + y * height_space_offset;\n\n for (col = 0; col < width; col++) {\n x = x_positive ? col : width - col - 1;\n tzyx_offset = tzy_offset + x * width_space_offset;\n\n intensity = this._data[tzyx_offset];\n\n min = Math.min(min, intensity);\n max = Math.max(max, intensity);\n intensitySum += intensity;\n\n slice_data[i++] = intensity;\n }\n }\n }\n\n var outputImage = new Image2D();\n outputImage.setData( slice_data, width, height, 1);\n outputImage.setMetadata(\"min\", min);\n outputImage.setMetadata(\"max\", max);\n outputImage.setMetadata(\"avg\", intensitySum / (i-1) );\n return outputImage;\n\n }\n\n\n /**\n * Get the intensity of a given voxel, addressed by dimensionality order.\n * In case of doubt, use getIntensity_xyz instead.\n * @param {Number} i - Position within the biggest dimensionality order\n * @param {Number} j - Position within the in-the-middle dimensionality order\n * @param {Number} k - Position within the smallest dimensionality order\n */\n getIntensity_ijk(i, j, k, time = 0) {\n var order = this.getMetadata(\"order\");\n\n if (i < 0 || i >= this.getMetadata( order[0] ).space_length ||\n j < 0 || j >= this.getMetadata( order[1] ).space_length ||\n k < 0 || k >= this.getMetadata( order[2] ).space_length)\n {\n console.warn(\"getIntensity_ijk position is out of range.\");\n return 0;\n }\n\n //var time_offset = this.hasMetadata( \"time\" ) ? time * this.getMetadata( \"time\" ).offset : 0;\n var time_offset = this._metadata.time.offset * time;\n\n var xyzt_offset = (\n i * this.getMetadata( order[0] ).offset +\n j * this.getMetadata( order[1] ).offset +\n k * this.getMetadata( order[2] ).offset +\n time_offset);\n\n return this._data[xyzt_offset];\n }\n\n\n /**\n * Get the intensity of a given voxel, addressed by dimension names.\n * @param {Number} x - position within xspace\n * @param {Number} y - position within yspace\n * @param {Number} z - position within zspace\n * @param {Number} time - position in time (optional)\n */\n getIntensity_xyz(x, y, z, time = 0) {\n\n if (x < 0 || x >= this._metadata.xspace.space_length ||\n y < 0 || y >= this._metadata.yspace.space_length ||\n z < 0 || z >= this._metadata.zspace.space_length)\n {\n console.warn(\"getIntensity_xyz position is out of range.\");\n return 0;\n }\n\n //var time_offset = this.hasMetadata( \"time\" ) ? time * this.getMetadata( \"time\" ).offset : 0;\n var time_offset = this._metadata.time.offset * time;\n \n var xyzt_offset = (\n x * this._metadata.xspace.offset +\n y * this._metadata.yspace.offset +\n z * this._metadata.zspace.offset +\n time_offset);\n\n return this._data[xyzt_offset];\n }\n\n \n /**\n * Get the number of samples over time\n */\n getTimeLength(){\n return ( this.hasMetadata(\"time\") ? 
this.getMetadata(\"time\").space_length : 1 );\n }\n\n\n /**\n * Tells if a given point is inside or outside the image\n * @param {Object} pos - position like {x: Number, y: Number, z: Number}\n * @return {Boolean} true for inside, false for outside\n */\n isInside( pos ){\n return !(pos.x < 0 || pos.x >= this._metadata.xspace.space_length ||\n pos.y < 0 || pos.y >= this._metadata.yspace.space_length ||\n pos.z < 0 || pos.z >= this._metadata.zspace.space_length)\n }\n \n\n /**\n * Sample the color along a segment\n * @param {Object} posFrom - starting position of type {x: Number, y: Number, z: Number}\n * @param {Object} posFrom - ending position of type {x: Number, y: Number, z: Number}\n * @return {Object} array of Array like that: {\n positions: [\n {x: x0, y: y0, z: z0},\n {x: x1, y: y1, z: z1},\n {x: x2, y: y2, z: z2},\n ...\n ],\n labels: [\n \"(x0, y0, z0)\", \"(x1, y1, z1)\", \"(x2, y2, z2)\", ...\n ],\n colors: [\n [r0, r1, r2 ...],\n [g0, g1, g2 ...],\n [b0, b1, b2 ...]\n ]\n }\n return null if posFrom or posTo is outside\n */\n getSegmentSample( posFrom, posTo, time = 0 ){\n // both position must be inside the image\n if( !this.isInside(posFrom) || !this.isInside(posTo) )\n return null;\n \n var dx = posTo.x - posFrom.x;\n var dy = posTo.y - posFrom.y;\n var dz = posTo.z - posFrom.z;\n var euclidianDistance = Math.sqrt( Math.pow(dx , 2) + Math.pow(dy , 2) + Math.pow(dz , 2) );\n var numberOfSamples = Math.floor( euclidianDistance + 1 );\n \n // we want to sample every unit distance along the segment\n var stepX = dx / euclidianDistance;\n var stepY = dy / euclidianDistance;\n var stepZ = dz / euclidianDistance;\n \n var ncpp = this._metadata.ncpp;\n var positions = new Array(numberOfSamples).fill(0);\n var colors = new Array(ncpp).fill(0);\n var labels = new Array(numberOfSamples).fill(0);\n \n // creating empty arrays for colors\n for(var c=0; c lo_offset) {\n var tmp = byte_data[d + hi_offset];\n byte_data[d + hi_offset] = byte_data[d + lo_offset];\n byte_data[d + lo_offset] = tmp;\n hi_offset--;\n lo_offset++;\n }\n }\n }\n\n\n /**\n * Initialize a MniVolume with the data and the header.\n * @param {Array} data - TypedArray containing the data\n */\n setData( data, header ){\n var that = this;\n this._data = data;\n\n this.setMetadata( \"position\", {} );\n this.setMetadata( \"current_time\", 0 );\n\n // copying header into metadata\n var headerKeys = Object.keys(header);\n headerKeys.forEach( function(key){\n that.setMetadata( key, header[key] );\n })\n\n // find min/max\n this._scanDataRange();\n\n // set W2v matrix\n this._saveOriginAndTransform();\n\n // adding some fields to metadata header\n this._finishHeader()\n\n console.log(this._metadata);\n }\n\n\n\n\n\n /**\n * [PRIVATE}\n * Calculate the world to voxel transform and save it, so we\n * can access it efficiently. 
The transform is:\n * cxx / stepx | cxy / stepx | cxz / stepx | (-o.x * cxx - o.y * cxy - o.z * cxz) / stepx\n * cyx / stepy | cyy / stepy | cyz / stepy | (-o.x * cyx - o.y * cyy - o.z * cyz) / stepy\n * czx / stepz | czy / stepz | czz / stepz | (-o.x * czx - o.y * czy - o.z * czz) / stepz\n * 0 | 0 | 0 | 1\n *\n * Origin equation taken from (http://www.bic.mni.mcgill.ca/software/minc/minc2_format/node4.html)\n */\n _saveOriginAndTransform() {\n\n var xspace = this.getMetadata(\"xspace\");\n var yspace = this.getMetadata(\"yspace\");\n var zspace = this.getMetadata(\"zspace\");\n\n var startx = xspace.start;\n var starty = yspace.start;\n var startz = zspace.start;\n var cx = xspace.direction_cosines;\n var cy = yspace.direction_cosines;\n var cz = zspace.direction_cosines;\n var stepx = xspace.step;\n var stepy = yspace.step;\n var stepz = zspace.step;\n\n // voxel_origin\n var o = {\n x: startx * cx[0] + starty * cy[0] + startz * cz[0],\n y: startx * cx[1] + starty * cy[1] + startz * cz[1],\n z: startx * cx[2] + starty * cy[2] + startz * cz[2]\n };\n\n this.setMetadata(\"voxel_origin\", o);\n\n var tx = (-o.x * cx[0] - o.y * cx[1] - o.z * cx[2]) / stepx;\n var ty = (-o.x * cy[0] - o.y * cy[1] - o.z * cy[2]) / stepy;\n var tz = (-o.x * cz[0] - o.y * cz[1] - o.z * cz[2]) / stepz;\n\n var w2v = [\n [cx[0] / stepx, cx[1] / stepx, cx[2] / stepx, tx],\n [cy[0] / stepy, cy[1] / stepy, cy[2] / stepy, ty],\n [cz[0] / stepz, cz[1] / stepz, cz[2] / stepz, tz]\n ];\n\n this.setMetadata(\"w2v\", w2v);\n }\n\n\n} /* END of class Image3D */\n\nexport { MniVolume }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\nimport { PixpipeContainer } from './PixpipeContainer.js';\n\n\n/**\n* A LineString is a vectorial reprensation of a line or polyline, open or closed.\n* When closed, it can be considered as a polygon. \n* By default, a LineString is 2 dimensional but the dimension can be changed when\n* using the `.setData(...)` method or before any point addition with `.setNod()`.\n* To close a LineString, use `.setMetadata(\"closed\", true);`, this will not add\n* any point but will flag this LineString as \"closed\".\n*\n*/\nclass LineString extends PixpipeContainer {\n \n constructor() {\n super();\n this.setMetadata(\"closed\", false);\n this.setMetadata(\"defaultNod\", 2);\n this.setMetadata(\"nod\", 2);\n \n this._type = LineString.TYPE();\n \n // local record for saving the last point\n this._lastPoint = null;\n }\n \n \n /**\n * Hardcode the datatype\n */\n static TYPE(){\n return \"LINESTRING\";\n }\n \n \n /**\n * Set/replace the point data.\n * @param {points} points - 1D array containing coord [x, y, x, y, x, y, ...]\n * @param {Number} nod - Number of Dimensions, default = 2\n * @param {Boolean} deepCopy - pointer copy if false, deep copy if true.\n */\n setData(points, nod=-1, deepCopy=false){\n if( nod != -1){\n this.setMetadata(\"nod\", nod);\n }\n \n if(points.length % this.getMetadata(\"nod\") != 0 ){\n console.warn(\"The number of points is not compatible with the number of dimensions (nod).\");\n return;\n }\n \n if(deepCopy){\n this._data = new points.constructor( points );\n }else{\n this._data = points;\n }\n \n this._setLastPoint();\n }\n \n \n /**\n * Define the number of dimensions. 
This can be done only when this LineString\n * is still empty.\n * @param {Number} nod - Number of dimensions\n */\n setNod( nod ){\n if(!this._data || !this._data.length){\n console.warn(\"The number of dimension can be set only when this LineString is empty.\");\n return;\n }\n \n this.setMetadata(\"nod\", nod);\n }\n \n \n /**\n * Get the internal image data (pointer)\n * @return {Array} the original data, dont mess up with this one.\n * in case of doubt, use getDataCopy()\n */\n getData(){\n return this._data; // return the actual array, editable!\n }\n\n\n /**\n * Get a copy of the data\n * @return {Array} a deep copy of the data\n */\n getDataCopy(){\n return new this._data.constructor( this._data );\n }\n \n \n /**\n * Get the number of points in this linestring\n * @return {Number} nb of points\n */\n getNumberOfPoints(){\n if(!this._data){\n return 0;\n }\n \n return this._data.length / this.getMetadata(\"nod\");\n }\n \n \n /**\n * Get a point of this LineString\n * @return {Array} a point, being [x, y] if 2D or [x, y, z] if 3D\n */\n getPoint( index ){\n if(index >=0 && index < getNumberOfPoints){\n var nod = this._metadata.nod;\n return this._data.slice(index*nod, index*nod + nod);\n }else{\n console.warn(\"Index of point is out of range.\");\n return null;\n }\n }\n \n \n /**\n * Considere this LineString as closed, making it a polygon\n */\n close(){\n this.setMetadata(\"closed\", true);\n }\n \n \n /**\n * Considere this LineString as open\n */\n open(){\n this.setMetadata(\"closed\", false);\n }\n \n \n /**\n * Add a point at the end of the LineString. Keeps the polygon open.\n * @param {Array} position - [x, y] if 2D or [x, y, z] if 3D\n */\n addPoint( position ){\n if( position.length != this._metadata.nod ){\n console.warn(\"Cannot add the point becase it has a diferent number of dimensions.\");\n return;\n }\n \n if( !this._data ){\n this._data = new Array();\n }\n \n for(var i=0; i 1) {\n for (var i = 1; i < arguments.length; i++) {\n args[i - 1] = arguments[i];\n }\n }\n queue.push(new Item(fun, args));\n if (queue.length === 1 && !draining) {\n runTimeout(drainQueue);\n }\n}\n// v8 likes predictible objects\nfunction Item(fun, array) {\n this.fun = fun;\n this.array = array;\n}\nItem.prototype.run = function () {\n this.fun.apply(null, this.array);\n};\nexport var title = 'browser';\nexport var platform = 'browser';\nexport var browser = true;\nexport var env = {};\nexport var argv = [];\nexport var version = ''; // empty string to avoid regexp issues\nexport var versions = {};\nexport var release = {};\nexport var config = {};\n\nfunction noop() {}\n\nexport var on = noop;\nexport var addListener = noop;\nexport var once = noop;\nexport var off = noop;\nexport var removeListener = noop;\nexport var removeAllListeners = noop;\nexport var emit = noop;\n\nexport function binding(name) {\n throw new Error('process.binding is not supported');\n}\n\nexport function cwd () { return '/' }\nexport function chdir (dir) {\n throw new Error('process.chdir is not supported');\n};\nexport function umask() { return 0; }\n\n// from https://github.com/kumavis/browser-process-hrtime/blob/master/index.js\nvar performance = global.performance || {}\nvar performanceNow =\n performance.now ||\n performance.mozNow ||\n performance.msNow ||\n performance.oNow ||\n performance.webkitNow ||\n function(){ return (new Date()).getTime() }\n\n// generate timestamp or delta\n// see http://nodejs.org/api/process.html#process_process_hrtime\nexport function hrtime(previousTimestamp){\n var 
clocktime = performanceNow.call(performance)*1e-3\n var seconds = Math.floor(clocktime)\n var nanoseconds = Math.floor((clocktime%1)*1e9)\n if (previousTimestamp) {\n seconds = seconds - previousTimestamp[0]\n nanoseconds = nanoseconds - previousTimestamp[1]\n if (nanoseconds<0) {\n seconds--\n nanoseconds += 1e9\n }\n }\n return [seconds,nanoseconds]\n}\n\nvar startTime = new Date();\nexport function uptime() {\n var currentTime = new Date();\n var dif = currentTime - startTime;\n return dif / 1000;\n}\n\nexport default {\n nextTick: nextTick,\n title: title,\n browser: browser,\n env: env,\n argv: argv,\n version: version,\n versions: versions,\n on: on,\n addListener: addListener,\n once: once,\n off: off,\n removeListener: removeListener,\n removeAllListeners: removeAllListeners,\n emit: emit,\n binding: binding,\n cwd: cwd,\n chdir: chdir,\n umask: umask,\n hrtime: hrtime,\n platform: platform,\n release: release,\n config: config,\n uptime: uptime\n};\n","export default {};\n","\nvar lookup = []\nvar revLookup = []\nvar Arr = typeof Uint8Array !== 'undefined' ? Uint8Array : Array\nvar inited = false;\nfunction init () {\n inited = true;\n var code = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'\n for (var i = 0, len = code.length; i < len; ++i) {\n lookup[i] = code[i]\n revLookup[code.charCodeAt(i)] = i\n }\n\n revLookup['-'.charCodeAt(0)] = 62\n revLookup['_'.charCodeAt(0)] = 63\n}\n\nexport function toByteArray (b64) {\n if (!inited) {\n init();\n }\n var i, j, l, tmp, placeHolders, arr\n var len = b64.length\n\n if (len % 4 > 0) {\n throw new Error('Invalid string. Length must be a multiple of 4')\n }\n\n // the number of equal signs (place holders)\n // if there are two placeholders, than the two characters before it\n // represent one byte\n // if there is only one, then the three characters before it represent 2 bytes\n // this is just a cheap hack to not do indexOf twice\n placeHolders = b64[len - 2] === '=' ? 2 : b64[len - 1] === '=' ? 1 : 0\n\n // base64 is 4/3 + up to two characters of the original data\n arr = new Arr(len * 3 / 4 - placeHolders)\n\n // if there are placeholders, only get up to the last complete 4 chars\n l = placeHolders > 0 ? 
len - 4 : len\n\n var L = 0\n\n for (i = 0, j = 0; i < l; i += 4, j += 3) {\n tmp = (revLookup[b64.charCodeAt(i)] << 18) | (revLookup[b64.charCodeAt(i + 1)] << 12) | (revLookup[b64.charCodeAt(i + 2)] << 6) | revLookup[b64.charCodeAt(i + 3)]\n arr[L++] = (tmp >> 16) & 0xFF\n arr[L++] = (tmp >> 8) & 0xFF\n arr[L++] = tmp & 0xFF\n }\n\n if (placeHolders === 2) {\n tmp = (revLookup[b64.charCodeAt(i)] << 2) | (revLookup[b64.charCodeAt(i + 1)] >> 4)\n arr[L++] = tmp & 0xFF\n } else if (placeHolders === 1) {\n tmp = (revLookup[b64.charCodeAt(i)] << 10) | (revLookup[b64.charCodeAt(i + 1)] << 4) | (revLookup[b64.charCodeAt(i + 2)] >> 2)\n arr[L++] = (tmp >> 8) & 0xFF\n arr[L++] = tmp & 0xFF\n }\n\n return arr\n}\n\nfunction tripletToBase64 (num) {\n return lookup[num >> 18 & 0x3F] + lookup[num >> 12 & 0x3F] + lookup[num >> 6 & 0x3F] + lookup[num & 0x3F]\n}\n\nfunction encodeChunk (uint8, start, end) {\n var tmp\n var output = []\n for (var i = start; i < end; i += 3) {\n tmp = (uint8[i] << 16) + (uint8[i + 1] << 8) + (uint8[i + 2])\n output.push(tripletToBase64(tmp))\n }\n return output.join('')\n}\n\nexport function fromByteArray (uint8) {\n if (!inited) {\n init();\n }\n var tmp\n var len = uint8.length\n var extraBytes = len % 3 // if we have 1 byte left, pad 2 bytes\n var output = ''\n var parts = []\n var maxChunkLength = 16383 // must be multiple of 3\n\n // go through the array every three bytes, we'll deal with trailing stuff later\n for (var i = 0, len2 = len - extraBytes; i < len2; i += maxChunkLength) {\n parts.push(encodeChunk(uint8, i, (i + maxChunkLength) > len2 ? len2 : (i + maxChunkLength)))\n }\n\n // pad the end with zeros, but make sure to not forget the extra bytes\n if (extraBytes === 1) {\n tmp = uint8[len - 1]\n output += lookup[tmp >> 2]\n output += lookup[(tmp << 4) & 0x3F]\n output += '=='\n } else if (extraBytes === 2) {\n tmp = (uint8[len - 2] << 8) + (uint8[len - 1])\n output += lookup[tmp >> 10]\n output += lookup[(tmp >> 4) & 0x3F]\n output += lookup[(tmp << 2) & 0x3F]\n output += '='\n }\n\n parts.push(output)\n\n return parts.join('')\n}\n","\nexport function read (buffer, offset, isLE, mLen, nBytes) {\n var e, m\n var eLen = nBytes * 8 - mLen - 1\n var eMax = (1 << eLen) - 1\n var eBias = eMax >> 1\n var nBits = -7\n var i = isLE ? (nBytes - 1) : 0\n var d = isLE ? -1 : 1\n var s = buffer[offset + i]\n\n i += d\n\n e = s & ((1 << (-nBits)) - 1)\n s >>= (-nBits)\n nBits += eLen\n for (; nBits > 0; e = e * 256 + buffer[offset + i], i += d, nBits -= 8) {}\n\n m = e & ((1 << (-nBits)) - 1)\n e >>= (-nBits)\n nBits += mLen\n for (; nBits > 0; m = m * 256 + buffer[offset + i], i += d, nBits -= 8) {}\n\n if (e === 0) {\n e = 1 - eBias\n } else if (e === eMax) {\n return m ? NaN : ((s ? -1 : 1) * Infinity)\n } else {\n m = m + Math.pow(2, mLen)\n e = e - eBias\n }\n return (s ? -1 : 1) * m * Math.pow(2, e - mLen)\n}\n\nexport function write (buffer, value, offset, isLE, mLen, nBytes) {\n var e, m, c\n var eLen = nBytes * 8 - mLen - 1\n var eMax = (1 << eLen) - 1\n var eBias = eMax >> 1\n var rt = (mLen === 23 ? Math.pow(2, -24) - Math.pow(2, -77) : 0)\n var i = isLE ? 0 : (nBytes - 1)\n var d = isLE ? 1 : -1\n var s = value < 0 || (value === 0 && 1 / value < 0) ? 1 : 0\n\n value = Math.abs(value)\n\n if (isNaN(value) || value === Infinity) {\n m = isNaN(value) ? 
1 : 0\n e = eMax\n } else {\n e = Math.floor(Math.log(value) / Math.LN2)\n if (value * (c = Math.pow(2, -e)) < 1) {\n e--\n c *= 2\n }\n if (e + eBias >= 1) {\n value += rt / c\n } else {\n value += rt * Math.pow(2, 1 - eBias)\n }\n if (value * c >= 2) {\n e++\n c /= 2\n }\n\n if (e + eBias >= eMax) {\n m = 0\n e = eMax\n } else if (e + eBias >= 1) {\n m = (value * c - 1) * Math.pow(2, mLen)\n e = e + eBias\n } else {\n m = value * Math.pow(2, eBias - 1) * Math.pow(2, mLen)\n e = 0\n }\n }\n\n for (; mLen >= 8; buffer[offset + i] = m & 0xff, i += d, m /= 256, mLen -= 8) {}\n\n e = (e << mLen) | m\n eLen += mLen\n for (; eLen > 0; buffer[offset + i] = e & 0xff, i += d, e /= 256, eLen -= 8) {}\n\n buffer[offset + i - d] |= s * 128\n}\n","var toString = {}.toString;\n\nexport default Array.isArray || function (arr) {\n return toString.call(arr) == '[object Array]';\n};\n","/*!\n * The buffer module from node.js, for the browser.\n *\n * @author Feross Aboukhadijeh \n * @license MIT\n */\n/* eslint-disable no-proto */\n\n\nimport * as base64 from './base64'\nimport * as ieee754 from './ieee754'\nimport isArray from './isArray'\n\nexport var INSPECT_MAX_BYTES = 50\n\n/**\n * If `Buffer.TYPED_ARRAY_SUPPORT`:\n * === true Use Uint8Array implementation (fastest)\n * === false Use Object implementation (most compatible, even IE6)\n *\n * Browsers that support typed arrays are IE 10+, Firefox 4+, Chrome 7+, Safari 5.1+,\n * Opera 11.6+, iOS 4.2+.\n *\n * Due to various browser bugs, sometimes the Object implementation will be used even\n * when the browser supports typed arrays.\n *\n * Note:\n *\n * - Firefox 4-29 lacks support for adding new properties to `Uint8Array` instances,\n * See: https://bugzilla.mozilla.org/show_bug.cgi?id=695438.\n *\n * - Chrome 9-10 is missing the `TypedArray.prototype.subarray` function.\n *\n * - IE10 has a broken `TypedArray.prototype.subarray` function which returns arrays of\n * incorrect length in some situations.\n\n * We detect these buggy browsers and set `Buffer.TYPED_ARRAY_SUPPORT` to `false` so they\n * get the Object implementation, which is slower but behaves correctly.\n */\nBuffer.TYPED_ARRAY_SUPPORT = global.TYPED_ARRAY_SUPPORT !== undefined\n ? global.TYPED_ARRAY_SUPPORT\n : true\n\n/*\n * Export kMaxLength after typed array support is determined.\n */\nvar _kMaxLength = kMaxLength()\nexport {_kMaxLength as kMaxLength};\nfunction typedArraySupport () {\n return true;\n // rollup issues\n // try {\n // var arr = new Uint8Array(1)\n // arr.__proto__ = {\n // __proto__: Uint8Array.prototype,\n // foo: function () { return 42 }\n // }\n // return arr.foo() === 42 && // typed array instances can be augmented\n // typeof arr.subarray === 'function' && // chrome 9-10 lack `subarray`\n // arr.subarray(1, 1).byteLength === 0 // ie10 has broken `subarray`\n // } catch (e) {\n // return false\n // }\n}\n\nfunction kMaxLength () {\n return Buffer.TYPED_ARRAY_SUPPORT\n ? 
0x7fffffff\n : 0x3fffffff\n}\n\nfunction createBuffer (that, length) {\n if (kMaxLength() < length) {\n throw new RangeError('Invalid typed array length')\n }\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n // Return an augmented `Uint8Array` instance, for best performance\n that = new Uint8Array(length)\n that.__proto__ = Buffer.prototype\n } else {\n // Fallback: Return an object instance of the Buffer class\n if (that === null) {\n that = new Buffer(length)\n }\n that.length = length\n }\n\n return that\n}\n\n/**\n * The Buffer constructor returns instances of `Uint8Array` that have their\n * prototype changed to `Buffer.prototype`. Furthermore, `Buffer` is a subclass of\n * `Uint8Array`, so the returned instances will have all the node `Buffer` methods\n * and the `Uint8Array` methods. Square bracket notation works as expected -- it\n * returns a single octet.\n *\n * The `Uint8Array` prototype remains unmodified.\n */\n\nexport function Buffer (arg, encodingOrOffset, length) {\n if (!Buffer.TYPED_ARRAY_SUPPORT && !(this instanceof Buffer)) {\n return new Buffer(arg, encodingOrOffset, length)\n }\n\n // Common case.\n if (typeof arg === 'number') {\n if (typeof encodingOrOffset === 'string') {\n throw new Error(\n 'If encoding is specified then the first argument must be a string'\n )\n }\n return allocUnsafe(this, arg)\n }\n return from(this, arg, encodingOrOffset, length)\n}\n\nBuffer.poolSize = 8192 // not used by this implementation\n\n// TODO: Legacy, not needed anymore. Remove in next major version.\nBuffer._augment = function (arr) {\n arr.__proto__ = Buffer.prototype\n return arr\n}\n\nfunction from (that, value, encodingOrOffset, length) {\n if (typeof value === 'number') {\n throw new TypeError('\"value\" argument must not be a number')\n }\n\n if (typeof ArrayBuffer !== 'undefined' && value instanceof ArrayBuffer) {\n return fromArrayBuffer(that, value, encodingOrOffset, length)\n }\n\n if (typeof value === 'string') {\n return fromString(that, value, encodingOrOffset)\n }\n\n return fromObject(that, value)\n}\n\n/**\n * Functionally equivalent to Buffer(arg, encoding) but throws a TypeError\n * if value is a number.\n * Buffer.from(str[, encoding])\n * Buffer.from(array)\n * Buffer.from(buffer)\n * Buffer.from(arrayBuffer[, byteOffset[, length]])\n **/\nBuffer.from = function (value, encodingOrOffset, length) {\n return from(null, value, encodingOrOffset, length)\n}\n\nif (Buffer.TYPED_ARRAY_SUPPORT) {\n Buffer.prototype.__proto__ = Uint8Array.prototype\n Buffer.__proto__ = Uint8Array\n if (typeof Symbol !== 'undefined' && Symbol.species &&\n Buffer[Symbol.species] === Buffer) {\n // Fix subarray() in ES2016. See: https://github.com/feross/buffer/pull/97\n // Object.defineProperty(Buffer, Symbol.species, {\n // value: null,\n // configurable: true\n // })\n }\n}\n\nfunction assertSize (size) {\n if (typeof size !== 'number') {\n throw new TypeError('\"size\" argument must be a number')\n } else if (size < 0) {\n throw new RangeError('\"size\" argument must not be negative')\n }\n}\n\nfunction alloc (that, size, fill, encoding) {\n assertSize(size)\n if (size <= 0) {\n return createBuffer(that, size)\n }\n if (fill !== undefined) {\n // Only pay attention to encoding if it's a string. This\n // prevents accidentally sending in a number that would\n // be interpretted as a start offset.\n return typeof encoding === 'string'\n ? 
createBuffer(that, size).fill(fill, encoding)\n : createBuffer(that, size).fill(fill)\n }\n return createBuffer(that, size)\n}\n\n/**\n * Creates a new filled Buffer instance.\n * alloc(size[, fill[, encoding]])\n **/\nBuffer.alloc = function (size, fill, encoding) {\n return alloc(null, size, fill, encoding)\n}\n\nfunction allocUnsafe (that, size) {\n assertSize(size)\n that = createBuffer(that, size < 0 ? 0 : checked(size) | 0)\n if (!Buffer.TYPED_ARRAY_SUPPORT) {\n for (var i = 0; i < size; ++i) {\n that[i] = 0\n }\n }\n return that\n}\n\n/**\n * Equivalent to Buffer(num), by default creates a non-zero-filled Buffer instance.\n * */\nBuffer.allocUnsafe = function (size) {\n return allocUnsafe(null, size)\n}\n/**\n * Equivalent to SlowBuffer(num), by default creates a non-zero-filled Buffer instance.\n */\nBuffer.allocUnsafeSlow = function (size) {\n return allocUnsafe(null, size)\n}\n\nfunction fromString (that, string, encoding) {\n if (typeof encoding !== 'string' || encoding === '') {\n encoding = 'utf8'\n }\n\n if (!Buffer.isEncoding(encoding)) {\n throw new TypeError('\"encoding\" must be a valid string encoding')\n }\n\n var length = byteLength(string, encoding) | 0\n that = createBuffer(that, length)\n\n var actual = that.write(string, encoding)\n\n if (actual !== length) {\n // Writing a hex string, for example, that contains invalid characters will\n // cause everything after the first invalid character to be ignored. (e.g.\n // 'abxxcd' will be treated as 'ab')\n that = that.slice(0, actual)\n }\n\n return that\n}\n\nfunction fromArrayLike (that, array) {\n var length = array.length < 0 ? 0 : checked(array.length) | 0\n that = createBuffer(that, length)\n for (var i = 0; i < length; i += 1) {\n that[i] = array[i] & 255\n }\n return that\n}\n\nfunction fromArrayBuffer (that, array, byteOffset, length) {\n array.byteLength // this throws if `array` is not a valid ArrayBuffer\n\n if (byteOffset < 0 || array.byteLength < byteOffset) {\n throw new RangeError('\\'offset\\' is out of bounds')\n }\n\n if (array.byteLength < byteOffset + (length || 0)) {\n throw new RangeError('\\'length\\' is out of bounds')\n }\n\n if (byteOffset === undefined && length === undefined) {\n array = new Uint8Array(array)\n } else if (length === undefined) {\n array = new Uint8Array(array, byteOffset)\n } else {\n array = new Uint8Array(array, byteOffset, length)\n }\n\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n // Return an augmented `Uint8Array` instance, for best performance\n that = array\n that.__proto__ = Buffer.prototype\n } else {\n // Fallback: Return an object instance of the Buffer class\n that = fromArrayLike(that, array)\n }\n return that\n}\n\nfunction fromObject (that, obj) {\n if (internalIsBuffer(obj)) {\n var len = checked(obj.length) | 0\n that = createBuffer(that, len)\n\n if (that.length === 0) {\n return that\n }\n\n obj.copy(that, 0, 0, len)\n return that\n }\n\n if (obj) {\n if ((typeof ArrayBuffer !== 'undefined' &&\n obj.buffer instanceof ArrayBuffer) || 'length' in obj) {\n if (typeof obj.length !== 'number' || isnan(obj.length)) {\n return createBuffer(that, 0)\n }\n return fromArrayLike(that, obj)\n }\n\n if (obj.type === 'Buffer' && isArray(obj.data)) {\n return fromArrayLike(that, obj.data)\n }\n }\n\n throw new TypeError('First argument must be a string, Buffer, ArrayBuffer, Array, or array-like object.')\n}\n\nfunction checked (length) {\n // Note: cannot use `length < kMaxLength()` here because that fails when\n // length is NaN (which is otherwise coerced to zero.)\n if 
(length >= kMaxLength()) {\n throw new RangeError('Attempt to allocate Buffer larger than maximum ' +\n 'size: 0x' + kMaxLength().toString(16) + ' bytes')\n }\n return length | 0\n}\n\nexport function SlowBuffer (length) {\n if (+length != length) { // eslint-disable-line eqeqeq\n length = 0\n }\n return Buffer.alloc(+length)\n}\nBuffer.isBuffer = isBuffer;\nfunction internalIsBuffer (b) {\n return !!(b != null && b._isBuffer)\n}\n\nBuffer.compare = function compare (a, b) {\n if (!internalIsBuffer(a) || !internalIsBuffer(b)) {\n throw new TypeError('Arguments must be Buffers')\n }\n\n if (a === b) return 0\n\n var x = a.length\n var y = b.length\n\n for (var i = 0, len = Math.min(x, y); i < len; ++i) {\n if (a[i] !== b[i]) {\n x = a[i]\n y = b[i]\n break\n }\n }\n\n if (x < y) return -1\n if (y < x) return 1\n return 0\n}\n\nBuffer.isEncoding = function isEncoding (encoding) {\n switch (String(encoding).toLowerCase()) {\n case 'hex':\n case 'utf8':\n case 'utf-8':\n case 'ascii':\n case 'latin1':\n case 'binary':\n case 'base64':\n case 'ucs2':\n case 'ucs-2':\n case 'utf16le':\n case 'utf-16le':\n return true\n default:\n return false\n }\n}\n\nBuffer.concat = function concat (list, length) {\n if (!isArray(list)) {\n throw new TypeError('\"list\" argument must be an Array of Buffers')\n }\n\n if (list.length === 0) {\n return Buffer.alloc(0)\n }\n\n var i\n if (length === undefined) {\n length = 0\n for (i = 0; i < list.length; ++i) {\n length += list[i].length\n }\n }\n\n var buffer = Buffer.allocUnsafe(length)\n var pos = 0\n for (i = 0; i < list.length; ++i) {\n var buf = list[i]\n if (!internalIsBuffer(buf)) {\n throw new TypeError('\"list\" argument must be an Array of Buffers')\n }\n buf.copy(buffer, pos)\n pos += buf.length\n }\n return buffer\n}\n\nfunction byteLength (string, encoding) {\n if (internalIsBuffer(string)) {\n return string.length\n }\n if (typeof ArrayBuffer !== 'undefined' && typeof ArrayBuffer.isView === 'function' &&\n (ArrayBuffer.isView(string) || string instanceof ArrayBuffer)) {\n return string.byteLength\n }\n if (typeof string !== 'string') {\n string = '' + string\n }\n\n var len = string.length\n if (len === 0) return 0\n\n // Use a for loop to avoid recursion\n var loweredCase = false\n for (;;) {\n switch (encoding) {\n case 'ascii':\n case 'latin1':\n case 'binary':\n return len\n case 'utf8':\n case 'utf-8':\n case undefined:\n return utf8ToBytes(string).length\n case 'ucs2':\n case 'ucs-2':\n case 'utf16le':\n case 'utf-16le':\n return len * 2\n case 'hex':\n return len >>> 1\n case 'base64':\n return base64ToBytes(string).length\n default:\n if (loweredCase) return utf8ToBytes(string).length // assume utf8\n encoding = ('' + encoding).toLowerCase()\n loweredCase = true\n }\n }\n}\nBuffer.byteLength = byteLength\n\nfunction slowToString (encoding, start, end) {\n var loweredCase = false\n\n // No need to verify that \"this.length <= MAX_UINT32\" since it's a read-only\n // property of a typed array.\n\n // This behaves neither like String nor Uint8Array in that we set start/end\n // to their upper/lower bounds if the value passed is out of range.\n // undefined is handled specially as per ECMA-262 6th Edition,\n // Section 13.3.3.7 Runtime Semantics: KeyedBindingInitialization.\n if (start === undefined || start < 0) {\n start = 0\n }\n // Return early if start > this.length. 
Done here to prevent potential uint32\n // coercion fail below.\n if (start > this.length) {\n return ''\n }\n\n if (end === undefined || end > this.length) {\n end = this.length\n }\n\n if (end <= 0) {\n return ''\n }\n\n // Force coersion to uint32. This will also coerce falsey/NaN values to 0.\n end >>>= 0\n start >>>= 0\n\n if (end <= start) {\n return ''\n }\n\n if (!encoding) encoding = 'utf8'\n\n while (true) {\n switch (encoding) {\n case 'hex':\n return hexSlice(this, start, end)\n\n case 'utf8':\n case 'utf-8':\n return utf8Slice(this, start, end)\n\n case 'ascii':\n return asciiSlice(this, start, end)\n\n case 'latin1':\n case 'binary':\n return latin1Slice(this, start, end)\n\n case 'base64':\n return base64Slice(this, start, end)\n\n case 'ucs2':\n case 'ucs-2':\n case 'utf16le':\n case 'utf-16le':\n return utf16leSlice(this, start, end)\n\n default:\n if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding)\n encoding = (encoding + '').toLowerCase()\n loweredCase = true\n }\n }\n}\n\n// The property is used by `Buffer.isBuffer` and `is-buffer` (in Safari 5-7) to detect\n// Buffer instances.\nBuffer.prototype._isBuffer = true\n\nfunction swap (b, n, m) {\n var i = b[n]\n b[n] = b[m]\n b[m] = i\n}\n\nBuffer.prototype.swap16 = function swap16 () {\n var len = this.length\n if (len % 2 !== 0) {\n throw new RangeError('Buffer size must be a multiple of 16-bits')\n }\n for (var i = 0; i < len; i += 2) {\n swap(this, i, i + 1)\n }\n return this\n}\n\nBuffer.prototype.swap32 = function swap32 () {\n var len = this.length\n if (len % 4 !== 0) {\n throw new RangeError('Buffer size must be a multiple of 32-bits')\n }\n for (var i = 0; i < len; i += 4) {\n swap(this, i, i + 3)\n swap(this, i + 1, i + 2)\n }\n return this\n}\n\nBuffer.prototype.swap64 = function swap64 () {\n var len = this.length\n if (len % 8 !== 0) {\n throw new RangeError('Buffer size must be a multiple of 64-bits')\n }\n for (var i = 0; i < len; i += 8) {\n swap(this, i, i + 7)\n swap(this, i + 1, i + 6)\n swap(this, i + 2, i + 5)\n swap(this, i + 3, i + 4)\n }\n return this\n}\n\nBuffer.prototype.toString = function toString () {\n var length = this.length | 0\n if (length === 0) return ''\n if (arguments.length === 0) return utf8Slice(this, 0, length)\n return slowToString.apply(this, arguments)\n}\n\nBuffer.prototype.equals = function equals (b) {\n if (!internalIsBuffer(b)) throw new TypeError('Argument must be a Buffer')\n if (this === b) return true\n return Buffer.compare(this, b) === 0\n}\n\nBuffer.prototype.inspect = function inspect () {\n var str = ''\n var max = INSPECT_MAX_BYTES\n if (this.length > 0) {\n str = this.toString('hex', 0, max).match(/.{2}/g).join(' ')\n if (this.length > max) str += ' ... '\n }\n return ''\n}\n\nBuffer.prototype.compare = function compare (target, start, end, thisStart, thisEnd) {\n if (!internalIsBuffer(target)) {\n throw new TypeError('Argument must be a Buffer')\n }\n\n if (start === undefined) {\n start = 0\n }\n if (end === undefined) {\n end = target ? 
target.length : 0\n }\n if (thisStart === undefined) {\n thisStart = 0\n }\n if (thisEnd === undefined) {\n thisEnd = this.length\n }\n\n if (start < 0 || end > target.length || thisStart < 0 || thisEnd > this.length) {\n throw new RangeError('out of range index')\n }\n\n if (thisStart >= thisEnd && start >= end) {\n return 0\n }\n if (thisStart >= thisEnd) {\n return -1\n }\n if (start >= end) {\n return 1\n }\n\n start >>>= 0\n end >>>= 0\n thisStart >>>= 0\n thisEnd >>>= 0\n\n if (this === target) return 0\n\n var x = thisEnd - thisStart\n var y = end - start\n var len = Math.min(x, y)\n\n var thisCopy = this.slice(thisStart, thisEnd)\n var targetCopy = target.slice(start, end)\n\n for (var i = 0; i < len; ++i) {\n if (thisCopy[i] !== targetCopy[i]) {\n x = thisCopy[i]\n y = targetCopy[i]\n break\n }\n }\n\n if (x < y) return -1\n if (y < x) return 1\n return 0\n}\n\n// Finds either the first index of `val` in `buffer` at offset >= `byteOffset`,\n// OR the last index of `val` in `buffer` at offset <= `byteOffset`.\n//\n// Arguments:\n// - buffer - a Buffer to search\n// - val - a string, Buffer, or number\n// - byteOffset - an index into `buffer`; will be clamped to an int32\n// - encoding - an optional encoding, relevant is val is a string\n// - dir - true for indexOf, false for lastIndexOf\nfunction bidirectionalIndexOf (buffer, val, byteOffset, encoding, dir) {\n // Empty buffer means no match\n if (buffer.length === 0) return -1\n\n // Normalize byteOffset\n if (typeof byteOffset === 'string') {\n encoding = byteOffset\n byteOffset = 0\n } else if (byteOffset > 0x7fffffff) {\n byteOffset = 0x7fffffff\n } else if (byteOffset < -0x80000000) {\n byteOffset = -0x80000000\n }\n byteOffset = +byteOffset // Coerce to Number.\n if (isNaN(byteOffset)) {\n // byteOffset: it it's undefined, null, NaN, \"foo\", etc, search whole buffer\n byteOffset = dir ? 
0 : (buffer.length - 1)\n }\n\n // Normalize byteOffset: negative offsets start from the end of the buffer\n if (byteOffset < 0) byteOffset = buffer.length + byteOffset\n if (byteOffset >= buffer.length) {\n if (dir) return -1\n else byteOffset = buffer.length - 1\n } else if (byteOffset < 0) {\n if (dir) byteOffset = 0\n else return -1\n }\n\n // Normalize val\n if (typeof val === 'string') {\n val = Buffer.from(val, encoding)\n }\n\n // Finally, search either indexOf (if dir is true) or lastIndexOf\n if (internalIsBuffer(val)) {\n // Special case: looking for empty string/buffer always fails\n if (val.length === 0) {\n return -1\n }\n return arrayIndexOf(buffer, val, byteOffset, encoding, dir)\n } else if (typeof val === 'number') {\n val = val & 0xFF // Search for a byte value [0-255]\n if (Buffer.TYPED_ARRAY_SUPPORT &&\n typeof Uint8Array.prototype.indexOf === 'function') {\n if (dir) {\n return Uint8Array.prototype.indexOf.call(buffer, val, byteOffset)\n } else {\n return Uint8Array.prototype.lastIndexOf.call(buffer, val, byteOffset)\n }\n }\n return arrayIndexOf(buffer, [ val ], byteOffset, encoding, dir)\n }\n\n throw new TypeError('val must be string, number or Buffer')\n}\n\nfunction arrayIndexOf (arr, val, byteOffset, encoding, dir) {\n var indexSize = 1\n var arrLength = arr.length\n var valLength = val.length\n\n if (encoding !== undefined) {\n encoding = String(encoding).toLowerCase()\n if (encoding === 'ucs2' || encoding === 'ucs-2' ||\n encoding === 'utf16le' || encoding === 'utf-16le') {\n if (arr.length < 2 || val.length < 2) {\n return -1\n }\n indexSize = 2\n arrLength /= 2\n valLength /= 2\n byteOffset /= 2\n }\n }\n\n function read (buf, i) {\n if (indexSize === 1) {\n return buf[i]\n } else {\n return buf.readUInt16BE(i * indexSize)\n }\n }\n\n var i\n if (dir) {\n var foundIndex = -1\n for (i = byteOffset; i < arrLength; i++) {\n if (read(arr, i) === read(val, foundIndex === -1 ? 
0 : i - foundIndex)) {\n if (foundIndex === -1) foundIndex = i\n if (i - foundIndex + 1 === valLength) return foundIndex * indexSize\n } else {\n if (foundIndex !== -1) i -= i - foundIndex\n foundIndex = -1\n }\n }\n } else {\n if (byteOffset + valLength > arrLength) byteOffset = arrLength - valLength\n for (i = byteOffset; i >= 0; i--) {\n var found = true\n for (var j = 0; j < valLength; j++) {\n if (read(arr, i + j) !== read(val, j)) {\n found = false\n break\n }\n }\n if (found) return i\n }\n }\n\n return -1\n}\n\nBuffer.prototype.includes = function includes (val, byteOffset, encoding) {\n return this.indexOf(val, byteOffset, encoding) !== -1\n}\n\nBuffer.prototype.indexOf = function indexOf (val, byteOffset, encoding) {\n return bidirectionalIndexOf(this, val, byteOffset, encoding, true)\n}\n\nBuffer.prototype.lastIndexOf = function lastIndexOf (val, byteOffset, encoding) {\n return bidirectionalIndexOf(this, val, byteOffset, encoding, false)\n}\n\nfunction hexWrite (buf, string, offset, length) {\n offset = Number(offset) || 0\n var remaining = buf.length - offset\n if (!length) {\n length = remaining\n } else {\n length = Number(length)\n if (length > remaining) {\n length = remaining\n }\n }\n\n // must be an even number of digits\n var strLen = string.length\n if (strLen % 2 !== 0) throw new TypeError('Invalid hex string')\n\n if (length > strLen / 2) {\n length = strLen / 2\n }\n for (var i = 0; i < length; ++i) {\n var parsed = parseInt(string.substr(i * 2, 2), 16)\n if (isNaN(parsed)) return i\n buf[offset + i] = parsed\n }\n return i\n}\n\nfunction utf8Write (buf, string, offset, length) {\n return blitBuffer(utf8ToBytes(string, buf.length - offset), buf, offset, length)\n}\n\nfunction asciiWrite (buf, string, offset, length) {\n return blitBuffer(asciiToBytes(string), buf, offset, length)\n}\n\nfunction latin1Write (buf, string, offset, length) {\n return asciiWrite(buf, string, offset, length)\n}\n\nfunction base64Write (buf, string, offset, length) {\n return blitBuffer(base64ToBytes(string), buf, offset, length)\n}\n\nfunction ucs2Write (buf, string, offset, length) {\n return blitBuffer(utf16leToBytes(string, buf.length - offset), buf, offset, length)\n}\n\nBuffer.prototype.write = function write (string, offset, length, encoding) {\n // Buffer#write(string)\n if (offset === undefined) {\n encoding = 'utf8'\n length = this.length\n offset = 0\n // Buffer#write(string, encoding)\n } else if (length === undefined && typeof offset === 'string') {\n encoding = offset\n length = this.length\n offset = 0\n // Buffer#write(string, offset[, length][, encoding])\n } else if (isFinite(offset)) {\n offset = offset | 0\n if (isFinite(length)) {\n length = length | 0\n if (encoding === undefined) encoding = 'utf8'\n } else {\n encoding = length\n length = undefined\n }\n // legacy write(string, encoding, offset, length) - remove in v0.13\n } else {\n throw new Error(\n 'Buffer.write(string, encoding, offset[, length]) is no longer supported'\n )\n }\n\n var remaining = this.length - offset\n if (length === undefined || length > remaining) length = remaining\n\n if ((string.length > 0 && (length < 0 || offset < 0)) || offset > this.length) {\n throw new RangeError('Attempt to write outside buffer bounds')\n }\n\n if (!encoding) encoding = 'utf8'\n\n var loweredCase = false\n for (;;) {\n switch (encoding) {\n case 'hex':\n return hexWrite(this, string, offset, length)\n\n case 'utf8':\n case 'utf-8':\n return utf8Write(this, string, offset, length)\n\n case 'ascii':\n return 
asciiWrite(this, string, offset, length)\n\n case 'latin1':\n case 'binary':\n return latin1Write(this, string, offset, length)\n\n case 'base64':\n // Warning: maxLength not taken into account in base64Write\n return base64Write(this, string, offset, length)\n\n case 'ucs2':\n case 'ucs-2':\n case 'utf16le':\n case 'utf-16le':\n return ucs2Write(this, string, offset, length)\n\n default:\n if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding)\n encoding = ('' + encoding).toLowerCase()\n loweredCase = true\n }\n }\n}\n\nBuffer.prototype.toJSON = function toJSON () {\n return {\n type: 'Buffer',\n data: Array.prototype.slice.call(this._arr || this, 0)\n }\n}\n\nfunction base64Slice (buf, start, end) {\n if (start === 0 && end === buf.length) {\n return base64.fromByteArray(buf)\n } else {\n return base64.fromByteArray(buf.slice(start, end))\n }\n}\n\nfunction utf8Slice (buf, start, end) {\n end = Math.min(buf.length, end)\n var res = []\n\n var i = start\n while (i < end) {\n var firstByte = buf[i]\n var codePoint = null\n var bytesPerSequence = (firstByte > 0xEF) ? 4\n : (firstByte > 0xDF) ? 3\n : (firstByte > 0xBF) ? 2\n : 1\n\n if (i + bytesPerSequence <= end) {\n var secondByte, thirdByte, fourthByte, tempCodePoint\n\n switch (bytesPerSequence) {\n case 1:\n if (firstByte < 0x80) {\n codePoint = firstByte\n }\n break\n case 2:\n secondByte = buf[i + 1]\n if ((secondByte & 0xC0) === 0x80) {\n tempCodePoint = (firstByte & 0x1F) << 0x6 | (secondByte & 0x3F)\n if (tempCodePoint > 0x7F) {\n codePoint = tempCodePoint\n }\n }\n break\n case 3:\n secondByte = buf[i + 1]\n thirdByte = buf[i + 2]\n if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80) {\n tempCodePoint = (firstByte & 0xF) << 0xC | (secondByte & 0x3F) << 0x6 | (thirdByte & 0x3F)\n if (tempCodePoint > 0x7FF && (tempCodePoint < 0xD800 || tempCodePoint > 0xDFFF)) {\n codePoint = tempCodePoint\n }\n }\n break\n case 4:\n secondByte = buf[i + 1]\n thirdByte = buf[i + 2]\n fourthByte = buf[i + 3]\n if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80 && (fourthByte & 0xC0) === 0x80) {\n tempCodePoint = (firstByte & 0xF) << 0x12 | (secondByte & 0x3F) << 0xC | (thirdByte & 0x3F) << 0x6 | (fourthByte & 0x3F)\n if (tempCodePoint > 0xFFFF && tempCodePoint < 0x110000) {\n codePoint = tempCodePoint\n }\n }\n }\n }\n\n if (codePoint === null) {\n // we did not generate a valid codePoint so insert a\n // replacement char (U+FFFD) and advance only 1 byte\n codePoint = 0xFFFD\n bytesPerSequence = 1\n } else if (codePoint > 0xFFFF) {\n // encode to utf16 (surrogate pair dance)\n codePoint -= 0x10000\n res.push(codePoint >>> 10 & 0x3FF | 0xD800)\n codePoint = 0xDC00 | codePoint & 0x3FF\n }\n\n res.push(codePoint)\n i += bytesPerSequence\n }\n\n return decodeCodePointsArray(res)\n}\n\n// Based on http://stackoverflow.com/a/22747272/680742, the browser with\n// the lowest limit is Chrome, with 0x10000 args.\n// We go 1 magnitude less, for safety\nvar MAX_ARGUMENTS_LENGTH = 0x1000\n\nfunction decodeCodePointsArray (codePoints) {\n var len = codePoints.length\n if (len <= MAX_ARGUMENTS_LENGTH) {\n return String.fromCharCode.apply(String, codePoints) // avoid extra slice()\n }\n\n // Decode in chunks to avoid \"call stack size exceeded\".\n var res = ''\n var i = 0\n while (i < len) {\n res += String.fromCharCode.apply(\n String,\n codePoints.slice(i, i += MAX_ARGUMENTS_LENGTH)\n )\n }\n return res\n}\n\nfunction asciiSlice (buf, start, end) {\n var ret = ''\n end = Math.min(buf.length, end)\n\n for (var i = start; i 
< end; ++i) {\n ret += String.fromCharCode(buf[i] & 0x7F)\n }\n return ret\n}\n\nfunction latin1Slice (buf, start, end) {\n var ret = ''\n end = Math.min(buf.length, end)\n\n for (var i = start; i < end; ++i) {\n ret += String.fromCharCode(buf[i])\n }\n return ret\n}\n\nfunction hexSlice (buf, start, end) {\n var len = buf.length\n\n if (!start || start < 0) start = 0\n if (!end || end < 0 || end > len) end = len\n\n var out = ''\n for (var i = start; i < end; ++i) {\n out += toHex(buf[i])\n }\n return out\n}\n\nfunction utf16leSlice (buf, start, end) {\n var bytes = buf.slice(start, end)\n var res = ''\n for (var i = 0; i < bytes.length; i += 2) {\n res += String.fromCharCode(bytes[i] + bytes[i + 1] * 256)\n }\n return res\n}\n\nBuffer.prototype.slice = function slice (start, end) {\n var len = this.length\n start = ~~start\n end = end === undefined ? len : ~~end\n\n if (start < 0) {\n start += len\n if (start < 0) start = 0\n } else if (start > len) {\n start = len\n }\n\n if (end < 0) {\n end += len\n if (end < 0) end = 0\n } else if (end > len) {\n end = len\n }\n\n if (end < start) end = start\n\n var newBuf\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n newBuf = this.subarray(start, end)\n newBuf.__proto__ = Buffer.prototype\n } else {\n var sliceLen = end - start\n newBuf = new Buffer(sliceLen, undefined)\n for (var i = 0; i < sliceLen; ++i) {\n newBuf[i] = this[i + start]\n }\n }\n\n return newBuf\n}\n\n/*\n * Need to make sure that buffer isn't trying to write out of bounds.\n */\nfunction checkOffset (offset, ext, length) {\n if ((offset % 1) !== 0 || offset < 0) throw new RangeError('offset is not uint')\n if (offset + ext > length) throw new RangeError('Trying to access beyond buffer length')\n}\n\nBuffer.prototype.readUIntLE = function readUIntLE (offset, byteLength, noAssert) {\n offset = offset | 0\n byteLength = byteLength | 0\n if (!noAssert) checkOffset(offset, byteLength, this.length)\n\n var val = this[offset]\n var mul = 1\n var i = 0\n while (++i < byteLength && (mul *= 0x100)) {\n val += this[offset + i] * mul\n }\n\n return val\n}\n\nBuffer.prototype.readUIntBE = function readUIntBE (offset, byteLength, noAssert) {\n offset = offset | 0\n byteLength = byteLength | 0\n if (!noAssert) {\n checkOffset(offset, byteLength, this.length)\n }\n\n var val = this[offset + --byteLength]\n var mul = 1\n while (byteLength > 0 && (mul *= 0x100)) {\n val += this[offset + --byteLength] * mul\n }\n\n return val\n}\n\nBuffer.prototype.readUInt8 = function readUInt8 (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 1, this.length)\n return this[offset]\n}\n\nBuffer.prototype.readUInt16LE = function readUInt16LE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 2, this.length)\n return this[offset] | (this[offset + 1] << 8)\n}\n\nBuffer.prototype.readUInt16BE = function readUInt16BE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 2, this.length)\n return (this[offset] << 8) | this[offset + 1]\n}\n\nBuffer.prototype.readUInt32LE = function readUInt32LE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 4, this.length)\n\n return ((this[offset]) |\n (this[offset + 1] << 8) |\n (this[offset + 2] << 16)) +\n (this[offset + 3] * 0x1000000)\n}\n\nBuffer.prototype.readUInt32BE = function readUInt32BE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 4, this.length)\n\n return (this[offset] * 0x1000000) +\n ((this[offset + 1] << 16) |\n (this[offset + 2] << 8) |\n this[offset + 3])\n}\n\nBuffer.prototype.readIntLE = function readIntLE (offset, byteLength, 
noAssert) {\n offset = offset | 0\n byteLength = byteLength | 0\n if (!noAssert) checkOffset(offset, byteLength, this.length)\n\n var val = this[offset]\n var mul = 1\n var i = 0\n while (++i < byteLength && (mul *= 0x100)) {\n val += this[offset + i] * mul\n }\n mul *= 0x80\n\n if (val >= mul) val -= Math.pow(2, 8 * byteLength)\n\n return val\n}\n\nBuffer.prototype.readIntBE = function readIntBE (offset, byteLength, noAssert) {\n offset = offset | 0\n byteLength = byteLength | 0\n if (!noAssert) checkOffset(offset, byteLength, this.length)\n\n var i = byteLength\n var mul = 1\n var val = this[offset + --i]\n while (i > 0 && (mul *= 0x100)) {\n val += this[offset + --i] * mul\n }\n mul *= 0x80\n\n if (val >= mul) val -= Math.pow(2, 8 * byteLength)\n\n return val\n}\n\nBuffer.prototype.readInt8 = function readInt8 (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 1, this.length)\n if (!(this[offset] & 0x80)) return (this[offset])\n return ((0xff - this[offset] + 1) * -1)\n}\n\nBuffer.prototype.readInt16LE = function readInt16LE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 2, this.length)\n var val = this[offset] | (this[offset + 1] << 8)\n return (val & 0x8000) ? val | 0xFFFF0000 : val\n}\n\nBuffer.prototype.readInt16BE = function readInt16BE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 2, this.length)\n var val = this[offset + 1] | (this[offset] << 8)\n return (val & 0x8000) ? val | 0xFFFF0000 : val\n}\n\nBuffer.prototype.readInt32LE = function readInt32LE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 4, this.length)\n\n return (this[offset]) |\n (this[offset + 1] << 8) |\n (this[offset + 2] << 16) |\n (this[offset + 3] << 24)\n}\n\nBuffer.prototype.readInt32BE = function readInt32BE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 4, this.length)\n\n return (this[offset] << 24) |\n (this[offset + 1] << 16) |\n (this[offset + 2] << 8) |\n (this[offset + 3])\n}\n\nBuffer.prototype.readFloatLE = function readFloatLE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 4, this.length)\n return ieee754.read(this, offset, true, 23, 4)\n}\n\nBuffer.prototype.readFloatBE = function readFloatBE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 4, this.length)\n return ieee754.read(this, offset, false, 23, 4)\n}\n\nBuffer.prototype.readDoubleLE = function readDoubleLE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 8, this.length)\n return ieee754.read(this, offset, true, 52, 8)\n}\n\nBuffer.prototype.readDoubleBE = function readDoubleBE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 8, this.length)\n return ieee754.read(this, offset, false, 52, 8)\n}\n\nfunction checkInt (buf, value, offset, ext, max, min) {\n if (!internalIsBuffer(buf)) throw new TypeError('\"buffer\" argument must be a Buffer instance')\n if (value > max || value < min) throw new RangeError('\"value\" argument is out of bounds')\n if (offset + ext > buf.length) throw new RangeError('Index out of range')\n}\n\nBuffer.prototype.writeUIntLE = function writeUIntLE (value, offset, byteLength, noAssert) {\n value = +value\n offset = offset | 0\n byteLength = byteLength | 0\n if (!noAssert) {\n var maxBytes = Math.pow(2, 8 * byteLength) - 1\n checkInt(this, value, offset, byteLength, maxBytes, 0)\n }\n\n var mul = 1\n var i = 0\n this[offset] = value & 0xFF\n while (++i < byteLength && (mul *= 0x100)) {\n this[offset + i] = (value / mul) & 0xFF\n }\n\n return offset + byteLength\n}\n\nBuffer.prototype.writeUIntBE = function writeUIntBE (value, 
offset, byteLength, noAssert) {\n value = +value\n offset = offset | 0\n byteLength = byteLength | 0\n if (!noAssert) {\n var maxBytes = Math.pow(2, 8 * byteLength) - 1\n checkInt(this, value, offset, byteLength, maxBytes, 0)\n }\n\n var i = byteLength - 1\n var mul = 1\n this[offset + i] = value & 0xFF\n while (--i >= 0 && (mul *= 0x100)) {\n this[offset + i] = (value / mul) & 0xFF\n }\n\n return offset + byteLength\n}\n\nBuffer.prototype.writeUInt8 = function writeUInt8 (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 1, 0xff, 0)\n if (!Buffer.TYPED_ARRAY_SUPPORT) value = Math.floor(value)\n this[offset] = (value & 0xff)\n return offset + 1\n}\n\nfunction objectWriteUInt16 (buf, value, offset, littleEndian) {\n if (value < 0) value = 0xffff + value + 1\n for (var i = 0, j = Math.min(buf.length - offset, 2); i < j; ++i) {\n buf[offset + i] = (value & (0xff << (8 * (littleEndian ? i : 1 - i)))) >>>\n (littleEndian ? i : 1 - i) * 8\n }\n}\n\nBuffer.prototype.writeUInt16LE = function writeUInt16LE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0)\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset] = (value & 0xff)\n this[offset + 1] = (value >>> 8)\n } else {\n objectWriteUInt16(this, value, offset, true)\n }\n return offset + 2\n}\n\nBuffer.prototype.writeUInt16BE = function writeUInt16BE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0)\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset] = (value >>> 8)\n this[offset + 1] = (value & 0xff)\n } else {\n objectWriteUInt16(this, value, offset, false)\n }\n return offset + 2\n}\n\nfunction objectWriteUInt32 (buf, value, offset, littleEndian) {\n if (value < 0) value = 0xffffffff + value + 1\n for (var i = 0, j = Math.min(buf.length - offset, 4); i < j; ++i) {\n buf[offset + i] = (value >>> (littleEndian ? 
i : 3 - i) * 8) & 0xff\n }\n}\n\nBuffer.prototype.writeUInt32LE = function writeUInt32LE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0)\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset + 3] = (value >>> 24)\n this[offset + 2] = (value >>> 16)\n this[offset + 1] = (value >>> 8)\n this[offset] = (value & 0xff)\n } else {\n objectWriteUInt32(this, value, offset, true)\n }\n return offset + 4\n}\n\nBuffer.prototype.writeUInt32BE = function writeUInt32BE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0)\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset] = (value >>> 24)\n this[offset + 1] = (value >>> 16)\n this[offset + 2] = (value >>> 8)\n this[offset + 3] = (value & 0xff)\n } else {\n objectWriteUInt32(this, value, offset, false)\n }\n return offset + 4\n}\n\nBuffer.prototype.writeIntLE = function writeIntLE (value, offset, byteLength, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) {\n var limit = Math.pow(2, 8 * byteLength - 1)\n\n checkInt(this, value, offset, byteLength, limit - 1, -limit)\n }\n\n var i = 0\n var mul = 1\n var sub = 0\n this[offset] = value & 0xFF\n while (++i < byteLength && (mul *= 0x100)) {\n if (value < 0 && sub === 0 && this[offset + i - 1] !== 0) {\n sub = 1\n }\n this[offset + i] = ((value / mul) >> 0) - sub & 0xFF\n }\n\n return offset + byteLength\n}\n\nBuffer.prototype.writeIntBE = function writeIntBE (value, offset, byteLength, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) {\n var limit = Math.pow(2, 8 * byteLength - 1)\n\n checkInt(this, value, offset, byteLength, limit - 1, -limit)\n }\n\n var i = byteLength - 1\n var mul = 1\n var sub = 0\n this[offset + i] = value & 0xFF\n while (--i >= 0 && (mul *= 0x100)) {\n if (value < 0 && sub === 0 && this[offset + i + 1] !== 0) {\n sub = 1\n }\n this[offset + i] = ((value / mul) >> 0) - sub & 0xFF\n }\n\n return offset + byteLength\n}\n\nBuffer.prototype.writeInt8 = function writeInt8 (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 1, 0x7f, -0x80)\n if (!Buffer.TYPED_ARRAY_SUPPORT) value = Math.floor(value)\n if (value < 0) value = 0xff + value + 1\n this[offset] = (value & 0xff)\n return offset + 1\n}\n\nBuffer.prototype.writeInt16LE = function writeInt16LE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000)\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset] = (value & 0xff)\n this[offset + 1] = (value >>> 8)\n } else {\n objectWriteUInt16(this, value, offset, true)\n }\n return offset + 2\n}\n\nBuffer.prototype.writeInt16BE = function writeInt16BE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000)\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset] = (value >>> 8)\n this[offset + 1] = (value & 0xff)\n } else {\n objectWriteUInt16(this, value, offset, false)\n }\n return offset + 2\n}\n\nBuffer.prototype.writeInt32LE = function writeInt32LE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000)\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset] = (value & 0xff)\n this[offset + 1] = (value >>> 8)\n this[offset + 2] = (value >>> 16)\n this[offset + 3] = (value >>> 24)\n } else {\n 
objectWriteUInt32(this, value, offset, true)\n }\n return offset + 4\n}\n\nBuffer.prototype.writeInt32BE = function writeInt32BE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000)\n if (value < 0) value = 0xffffffff + value + 1\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset] = (value >>> 24)\n this[offset + 1] = (value >>> 16)\n this[offset + 2] = (value >>> 8)\n this[offset + 3] = (value & 0xff)\n } else {\n objectWriteUInt32(this, value, offset, false)\n }\n return offset + 4\n}\n\nfunction checkIEEE754 (buf, value, offset, ext, max, min) {\n if (offset + ext > buf.length) throw new RangeError('Index out of range')\n if (offset < 0) throw new RangeError('Index out of range')\n}\n\nfunction writeFloat (buf, value, offset, littleEndian, noAssert) {\n if (!noAssert) {\n checkIEEE754(buf, value, offset, 4, 3.4028234663852886e+38, -3.4028234663852886e+38)\n }\n ieee754.write(buf, value, offset, littleEndian, 23, 4)\n return offset + 4\n}\n\nBuffer.prototype.writeFloatLE = function writeFloatLE (value, offset, noAssert) {\n return writeFloat(this, value, offset, true, noAssert)\n}\n\nBuffer.prototype.writeFloatBE = function writeFloatBE (value, offset, noAssert) {\n return writeFloat(this, value, offset, false, noAssert)\n}\n\nfunction writeDouble (buf, value, offset, littleEndian, noAssert) {\n if (!noAssert) {\n checkIEEE754(buf, value, offset, 8, 1.7976931348623157E+308, -1.7976931348623157E+308)\n }\n ieee754.write(buf, value, offset, littleEndian, 52, 8)\n return offset + 8\n}\n\nBuffer.prototype.writeDoubleLE = function writeDoubleLE (value, offset, noAssert) {\n return writeDouble(this, value, offset, true, noAssert)\n}\n\nBuffer.prototype.writeDoubleBE = function writeDoubleBE (value, offset, noAssert) {\n return writeDouble(this, value, offset, false, noAssert)\n}\n\n// copy(targetBuffer, targetStart=0, sourceStart=0, sourceEnd=buffer.length)\nBuffer.prototype.copy = function copy (target, targetStart, start, end) {\n if (!start) start = 0\n if (!end && end !== 0) end = this.length\n if (targetStart >= target.length) targetStart = target.length\n if (!targetStart) targetStart = 0\n if (end > 0 && end < start) end = start\n\n // Copy 0 bytes; we're done\n if (end === start) return 0\n if (target.length === 0 || this.length === 0) return 0\n\n // Fatal error conditions\n if (targetStart < 0) {\n throw new RangeError('targetStart out of bounds')\n }\n if (start < 0 || start >= this.length) throw new RangeError('sourceStart out of bounds')\n if (end < 0) throw new RangeError('sourceEnd out of bounds')\n\n // Are we oob?\n if (end > this.length) end = this.length\n if (target.length - targetStart < end - start) {\n end = target.length - targetStart + start\n }\n\n var len = end - start\n var i\n\n if (this === target && start < targetStart && targetStart < end) {\n // descending copy from end\n for (i = len - 1; i >= 0; --i) {\n target[i + targetStart] = this[i + start]\n }\n } else if (len < 1000 || !Buffer.TYPED_ARRAY_SUPPORT) {\n // ascending copy from start\n for (i = 0; i < len; ++i) {\n target[i + targetStart] = this[i + start]\n }\n } else {\n Uint8Array.prototype.set.call(\n target,\n this.subarray(start, start + len),\n targetStart\n )\n }\n\n return len\n}\n\n// Usage:\n// buffer.fill(number[, offset[, end]])\n// buffer.fill(buffer[, offset[, end]])\n// buffer.fill(string[, offset[, end]][, encoding])\nBuffer.prototype.fill = function fill (val, start, end, encoding) {\n // Handle string 
cases:\n if (typeof val === 'string') {\n if (typeof start === 'string') {\n encoding = start\n start = 0\n end = this.length\n } else if (typeof end === 'string') {\n encoding = end\n end = this.length\n }\n if (val.length === 1) {\n var code = val.charCodeAt(0)\n if (code < 256) {\n val = code\n }\n }\n if (encoding !== undefined && typeof encoding !== 'string') {\n throw new TypeError('encoding must be a string')\n }\n if (typeof encoding === 'string' && !Buffer.isEncoding(encoding)) {\n throw new TypeError('Unknown encoding: ' + encoding)\n }\n } else if (typeof val === 'number') {\n val = val & 255\n }\n\n // Invalid ranges are not set to a default, so can range check early.\n if (start < 0 || this.length < start || this.length < end) {\n throw new RangeError('Out of range index')\n }\n\n if (end <= start) {\n return this\n }\n\n start = start >>> 0\n end = end === undefined ? this.length : end >>> 0\n\n if (!val) val = 0\n\n var i\n if (typeof val === 'number') {\n for (i = start; i < end; ++i) {\n this[i] = val\n }\n } else {\n var bytes = internalIsBuffer(val)\n ? val\n : utf8ToBytes(new Buffer(val, encoding).toString())\n var len = bytes.length\n for (i = 0; i < end - start; ++i) {\n this[i + start] = bytes[i % len]\n }\n }\n\n return this\n}\n\n// HELPER FUNCTIONS\n// ================\n\nvar INVALID_BASE64_RE = /[^+\\/0-9A-Za-z-_]/g\n\nfunction base64clean (str) {\n // Node strips out invalid characters like \\n and \\t from the string, base64-js does not\n str = stringtrim(str).replace(INVALID_BASE64_RE, '')\n // Node converts strings with length < 2 to ''\n if (str.length < 2) return ''\n // Node allows for non-padded base64 strings (missing trailing ===), base64-js does not\n while (str.length % 4 !== 0) {\n str = str + '='\n }\n return str\n}\n\nfunction stringtrim (str) {\n if (str.trim) return str.trim()\n return str.replace(/^\\s+|\\s+$/g, '')\n}\n\nfunction toHex (n) {\n if (n < 16) return '0' + n.toString(16)\n return n.toString(16)\n}\n\nfunction utf8ToBytes (string, units) {\n units = units || Infinity\n var codePoint\n var length = string.length\n var leadSurrogate = null\n var bytes = []\n\n for (var i = 0; i < length; ++i) {\n codePoint = string.charCodeAt(i)\n\n // is surrogate component\n if (codePoint > 0xD7FF && codePoint < 0xE000) {\n // last char was a lead\n if (!leadSurrogate) {\n // no lead yet\n if (codePoint > 0xDBFF) {\n // unexpected trail\n if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)\n continue\n } else if (i + 1 === length) {\n // unpaired lead\n if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)\n continue\n }\n\n // valid lead\n leadSurrogate = codePoint\n\n continue\n }\n\n // 2 leads in a row\n if (codePoint < 0xDC00) {\n if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)\n leadSurrogate = codePoint\n continue\n }\n\n // valid surrogate pair\n codePoint = (leadSurrogate - 0xD800 << 10 | codePoint - 0xDC00) + 0x10000\n } else if (leadSurrogate) {\n // valid bmp char, but last char was a lead\n if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)\n }\n\n leadSurrogate = null\n\n // encode utf8\n if (codePoint < 0x80) {\n if ((units -= 1) < 0) break\n bytes.push(codePoint)\n } else if (codePoint < 0x800) {\n if ((units -= 2) < 0) break\n bytes.push(\n codePoint >> 0x6 | 0xC0,\n codePoint & 0x3F | 0x80\n )\n } else if (codePoint < 0x10000) {\n if ((units -= 3) < 0) break\n bytes.push(\n codePoint >> 0xC | 0xE0,\n codePoint >> 0x6 & 0x3F | 0x80,\n codePoint & 0x3F | 0x80\n )\n } else if (codePoint < 0x110000) {\n if ((units -= 4) < 0) break\n 
bytes.push(\n codePoint >> 0x12 | 0xF0,\n codePoint >> 0xC & 0x3F | 0x80,\n codePoint >> 0x6 & 0x3F | 0x80,\n codePoint & 0x3F | 0x80\n )\n } else {\n throw new Error('Invalid code point')\n }\n }\n\n return bytes\n}\n\nfunction asciiToBytes (str) {\n var byteArray = []\n for (var i = 0; i < str.length; ++i) {\n // Node's code seems to be doing this and not & 0x7F..\n byteArray.push(str.charCodeAt(i) & 0xFF)\n }\n return byteArray\n}\n\nfunction utf16leToBytes (str, units) {\n var c, hi, lo\n var byteArray = []\n for (var i = 0; i < str.length; ++i) {\n if ((units -= 2) < 0) break\n\n c = str.charCodeAt(i)\n hi = c >> 8\n lo = c % 256\n byteArray.push(lo)\n byteArray.push(hi)\n }\n\n return byteArray\n}\n\n\nfunction base64ToBytes (str) {\n return base64.toByteArray(base64clean(str))\n}\n\nfunction blitBuffer (src, dst, offset, length) {\n for (var i = 0; i < length; ++i) {\n if ((i + offset >= dst.length) || (i >= src.length)) break\n dst[i + offset] = src[i]\n }\n return i\n}\n\nfunction isnan (val) {\n return val !== val // eslint-disable-line no-self-compare\n}\n\n\n// the following is from is-buffer, also by Feross Aboukhadijeh and with same lisence\n// The _isBuffer check is for Safari 5-7 support, because it's missing\n// Object.prototype.constructor. Remove this eventually\nexport function isBuffer(obj) {\n return obj != null && (!!obj._isBuffer || isFastBuffer(obj) || isSlowBuffer(obj))\n}\n\nfunction isFastBuffer (obj) {\n return !!obj.constructor && typeof obj.constructor.isBuffer === 'function' && obj.constructor.isBuffer(obj)\n}\n\n// For Node v0.10 support. Remove this eventually.\nfunction isSlowBuffer (obj) {\n return typeof obj.readFloatLE === 'function' && typeof obj.slice === 'function' && isFastBuffer(obj.slice(0, 0))\n}\n","/**\r\n * [js-md5]{@link https://github.com/emn178/js-md5}\r\n *\r\n * @namespace md5\r\n * @version 0.4.2\r\n * @author Chen, Yi-Cyuan [emn178@gmail.com]\r\n * @copyright Chen, Yi-Cyuan 2014-2017\r\n * @license MIT\r\n */\r\n(function () {\r\n 'use strict';\r\n\r\n var root = typeof window === 'object' ? 
window : {};\r\n var NODE_JS = !root.JS_MD5_NO_NODE_JS && typeof process === 'object' && process.versions && process.versions.node;\r\n if (NODE_JS) {\r\n root = global;\r\n }\r\n var COMMON_JS = !root.JS_MD5_NO_COMMON_JS && typeof module === 'object' && module.exports;\r\n var AMD = typeof define === 'function' && define.amd;\r\n var ARRAY_BUFFER = !root.JS_MD5_NO_ARRAY_BUFFER && typeof ArrayBuffer !== 'undefined';\r\n var HEX_CHARS = '0123456789abcdef'.split('');\r\n var EXTRA = [128, 32768, 8388608, -2147483648];\r\n var SHIFT = [0, 8, 16, 24];\r\n var OUTPUT_TYPES = ['hex', 'array', 'digest', 'buffer', 'arrayBuffer'];\r\n\r\n var blocks = [], buffer8;\r\n if (ARRAY_BUFFER) {\r\n var buffer = new ArrayBuffer(68);\r\n buffer8 = new Uint8Array(buffer);\r\n blocks = new Uint32Array(buffer);\r\n }\r\n\r\n /**\r\n * @method hex\r\n * @memberof md5\r\n * @description Output hash as hex string\r\n * @param {String|Array|Uint8Array|ArrayBuffer} message message to hash\r\n * @returns {String} Hex string\r\n * @example\r\n * md5.hex('The quick brown fox jumps over the lazy dog');\r\n * // equal to\r\n * md5('The quick brown fox jumps over the lazy dog');\r\n */\r\n /**\r\n * @method digest\r\n * @memberof md5\r\n * @description Output hash as bytes array\r\n * @param {String|Array|Uint8Array|ArrayBuffer} message message to hash\r\n * @returns {Array} Bytes array\r\n * @example\r\n * md5.digest('The quick brown fox jumps over the lazy dog');\r\n */\r\n /**\r\n * @method array\r\n * @memberof md5\r\n * @description Output hash as bytes array\r\n * @param {String|Array|Uint8Array|ArrayBuffer} message message to hash\r\n * @returns {Array} Bytes array\r\n * @example\r\n * md5.array('The quick brown fox jumps over the lazy dog');\r\n */\r\n /**\r\n * @method arrayBuffer\r\n * @memberof md5\r\n * @description Output hash as ArrayBuffer\r\n * @param {String|Array|Uint8Array|ArrayBuffer} message message to hash\r\n * @returns {ArrayBuffer} ArrayBuffer\r\n * @example\r\n * md5.arrayBuffer('The quick brown fox jumps over the lazy dog');\r\n */\r\n /**\r\n * @method buffer\r\n * @deprecated This maybe confuse with Buffer in node.js. 
Please use arrayBuffer instead.\r\n * @memberof md5\r\n * @description Output hash as ArrayBuffer\r\n * @param {String|Array|Uint8Array|ArrayBuffer} message message to hash\r\n * @returns {ArrayBuffer} ArrayBuffer\r\n * @example\r\n * md5.buffer('The quick brown fox jumps over the lazy dog');\r\n */\r\n var createOutputMethod = function (outputType) {\r\n return function (message) {\r\n return new Md5(true).update(message)[outputType]();\r\n };\r\n };\r\n\r\n /**\r\n * @method create\r\n * @memberof md5\r\n * @description Create Md5 object\r\n * @returns {Md5} Md5 object.\r\n * @example\r\n * var hash = md5.create();\r\n */\r\n /**\r\n * @method update\r\n * @memberof md5\r\n * @description Create and update Md5 object\r\n * @param {String|Array|Uint8Array|ArrayBuffer} message message to hash\r\n * @returns {Md5} Md5 object.\r\n * @example\r\n * var hash = md5.update('The quick brown fox jumps over the lazy dog');\r\n * // equal to\r\n * var hash = md5.create();\r\n * hash.update('The quick brown fox jumps over the lazy dog');\r\n */\r\n var createMethod = function () {\r\n var method = createOutputMethod('hex');\r\n if (NODE_JS) {\r\n method = nodeWrap(method);\r\n }\r\n method.create = function () {\r\n return new Md5();\r\n };\r\n method.update = function (message) {\r\n return method.create().update(message);\r\n };\r\n for (var i = 0; i < OUTPUT_TYPES.length; ++i) {\r\n var type = OUTPUT_TYPES[i];\r\n method[type] = createOutputMethod(type);\r\n }\r\n return method;\r\n };\r\n\r\n var nodeWrap = function (method) {\r\n var crypto = require('crypto');\r\n var Buffer = require('buffer').Buffer;\r\n var nodeMethod = function (message) {\r\n if (typeof message === 'string') {\r\n return crypto.createHash('md5').update(message, 'utf8').digest('hex');\r\n } else if (message.constructor === ArrayBuffer) {\r\n message = new Uint8Array(message);\r\n } else if (message.length === undefined) {\r\n return method(message);\r\n }\r\n return crypto.createHash('md5').update(new Buffer(message)).digest('hex');\r\n };\r\n return nodeMethod;\r\n };\r\n\r\n /**\r\n * Md5 class\r\n * @class Md5\r\n * @description This is internal class.\r\n * @see {@link md5.create}\r\n */\r\n function Md5(sharedMemory) {\r\n if (sharedMemory) {\r\n blocks[0] = blocks[16] = blocks[1] = blocks[2] = blocks[3] =\r\n blocks[4] = blocks[5] = blocks[6] = blocks[7] =\r\n blocks[8] = blocks[9] = blocks[10] = blocks[11] =\r\n blocks[12] = blocks[13] = blocks[14] = blocks[15] = 0;\r\n this.blocks = blocks;\r\n this.buffer8 = buffer8;\r\n } else {\r\n if (ARRAY_BUFFER) {\r\n var buffer = new ArrayBuffer(68);\r\n this.buffer8 = new Uint8Array(buffer);\r\n this.blocks = new Uint32Array(buffer);\r\n } else {\r\n this.blocks = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];\r\n }\r\n }\r\n this.h0 = this.h1 = this.h2 = this.h3 = this.start = this.bytes = 0;\r\n this.finalized = this.hashed = false;\r\n this.first = true;\r\n }\r\n\r\n /**\r\n * @method update\r\n * @memberof Md5\r\n * @instance\r\n * @description Update hash\r\n * @param {String|Array|Uint8Array|ArrayBuffer} message message to hash\r\n * @returns {Md5} Md5 object.\r\n * @see {@link md5.update}\r\n */\r\n Md5.prototype.update = function (message) {\r\n if (this.finalized) {\r\n return;\r\n }\r\n var notString = typeof(message) != 'string';\r\n if (notString && message.constructor == root.ArrayBuffer) {\r\n message = new Uint8Array(message);\r\n }\r\n var code, index = 0, i, length = message.length || 0, blocks = this.blocks;\r\n var buffer8 = this.buffer8;\r\n\r\n 
while (index < length) {\r\n if (this.hashed) {\r\n this.hashed = false;\r\n blocks[0] = blocks[16];\r\n blocks[16] = blocks[1] = blocks[2] = blocks[3] =\r\n blocks[4] = blocks[5] = blocks[6] = blocks[7] =\r\n blocks[8] = blocks[9] = blocks[10] = blocks[11] =\r\n blocks[12] = blocks[13] = blocks[14] = blocks[15] = 0;\r\n }\r\n\r\n if (notString) {\r\n if (ARRAY_BUFFER) {\r\n for (i = this.start; index < length && i < 64; ++index) {\r\n buffer8[i++] = message[index];\r\n }\r\n } else {\r\n for (i = this.start; index < length && i < 64; ++index) {\r\n blocks[i >> 2] |= message[index] << SHIFT[i++ & 3];\r\n }\r\n }\r\n } else {\r\n if (ARRAY_BUFFER) {\r\n for (i = this.start; index < length && i < 64; ++index) {\r\n code = message.charCodeAt(index);\r\n if (code < 0x80) {\r\n buffer8[i++] = code;\r\n } else if (code < 0x800) {\r\n buffer8[i++] = 0xc0 | (code >> 6);\r\n buffer8[i++] = 0x80 | (code & 0x3f);\r\n } else if (code < 0xd800 || code >= 0xe000) {\r\n buffer8[i++] = 0xe0 | (code >> 12);\r\n buffer8[i++] = 0x80 | ((code >> 6) & 0x3f);\r\n buffer8[i++] = 0x80 | (code & 0x3f);\r\n } else {\r\n code = 0x10000 + (((code & 0x3ff) << 10) | (message.charCodeAt(++index) & 0x3ff));\r\n buffer8[i++] = 0xf0 | (code >> 18);\r\n buffer8[i++] = 0x80 | ((code >> 12) & 0x3f);\r\n buffer8[i++] = 0x80 | ((code >> 6) & 0x3f);\r\n buffer8[i++] = 0x80 | (code & 0x3f);\r\n }\r\n }\r\n } else {\r\n for (i = this.start; index < length && i < 64; ++index) {\r\n code = message.charCodeAt(index);\r\n if (code < 0x80) {\r\n blocks[i >> 2] |= code << SHIFT[i++ & 3];\r\n } else if (code < 0x800) {\r\n blocks[i >> 2] |= (0xc0 | (code >> 6)) << SHIFT[i++ & 3];\r\n blocks[i >> 2] |= (0x80 | (code & 0x3f)) << SHIFT[i++ & 3];\r\n } else if (code < 0xd800 || code >= 0xe000) {\r\n blocks[i >> 2] |= (0xe0 | (code >> 12)) << SHIFT[i++ & 3];\r\n blocks[i >> 2] |= (0x80 | ((code >> 6) & 0x3f)) << SHIFT[i++ & 3];\r\n blocks[i >> 2] |= (0x80 | (code & 0x3f)) << SHIFT[i++ & 3];\r\n } else {\r\n code = 0x10000 + (((code & 0x3ff) << 10) | (message.charCodeAt(++index) & 0x3ff));\r\n blocks[i >> 2] |= (0xf0 | (code >> 18)) << SHIFT[i++ & 3];\r\n blocks[i >> 2] |= (0x80 | ((code >> 12) & 0x3f)) << SHIFT[i++ & 3];\r\n blocks[i >> 2] |= (0x80 | ((code >> 6) & 0x3f)) << SHIFT[i++ & 3];\r\n blocks[i >> 2] |= (0x80 | (code & 0x3f)) << SHIFT[i++ & 3];\r\n }\r\n }\r\n }\r\n }\r\n this.lastByteIndex = i;\r\n this.bytes += i - this.start;\r\n if (i >= 64) {\r\n this.start = i - 64;\r\n this.hash();\r\n this.hashed = true;\r\n } else {\r\n this.start = i;\r\n }\r\n }\r\n return this;\r\n };\r\n\r\n Md5.prototype.finalize = function () {\r\n if (this.finalized) {\r\n return;\r\n }\r\n this.finalized = true;\r\n var blocks = this.blocks, i = this.lastByteIndex;\r\n blocks[i >> 2] |= EXTRA[i & 3];\r\n if (i >= 56) {\r\n if (!this.hashed) {\r\n this.hash();\r\n }\r\n blocks[0] = blocks[16];\r\n blocks[16] = blocks[1] = blocks[2] = blocks[3] =\r\n blocks[4] = blocks[5] = blocks[6] = blocks[7] =\r\n blocks[8] = blocks[9] = blocks[10] = blocks[11] =\r\n blocks[12] = blocks[13] = blocks[14] = blocks[15] = 0;\r\n }\r\n blocks[14] = this.bytes << 3;\r\n this.hash();\r\n };\r\n\r\n Md5.prototype.hash = function () {\r\n var a, b, c, d, bc, da, blocks = this.blocks;\r\n\r\n if (this.first) {\r\n a = blocks[0] - 680876937;\r\n a = (a << 7 | a >>> 25) - 271733879 << 0;\r\n d = (-1732584194 ^ a & 2004318071) + blocks[1] - 117830708;\r\n d = (d << 12 | d >>> 20) + a << 0;\r\n c = (-271733879 ^ (d & (a ^ -271733879))) + blocks[2] - 1126478375;\r\n c = (c << 17 
| c >>> 15) + d << 0;\r\n b = (a ^ (c & (d ^ a))) + blocks[3] - 1316259209;\r\n b = (b << 22 | b >>> 10) + c << 0;\r\n } else {\r\n a = this.h0;\r\n b = this.h1;\r\n c = this.h2;\r\n d = this.h3;\r\n a += (d ^ (b & (c ^ d))) + blocks[0] - 680876936;\r\n a = (a << 7 | a >>> 25) + b << 0;\r\n d += (c ^ (a & (b ^ c))) + blocks[1] - 389564586;\r\n d = (d << 12 | d >>> 20) + a << 0;\r\n c += (b ^ (d & (a ^ b))) + blocks[2] + 606105819;\r\n c = (c << 17 | c >>> 15) + d << 0;\r\n b += (a ^ (c & (d ^ a))) + blocks[3] - 1044525330;\r\n b = (b << 22 | b >>> 10) + c << 0;\r\n }\r\n\r\n a += (d ^ (b & (c ^ d))) + blocks[4] - 176418897;\r\n a = (a << 7 | a >>> 25) + b << 0;\r\n d += (c ^ (a & (b ^ c))) + blocks[5] + 1200080426;\r\n d = (d << 12 | d >>> 20) + a << 0;\r\n c += (b ^ (d & (a ^ b))) + blocks[6] - 1473231341;\r\n c = (c << 17 | c >>> 15) + d << 0;\r\n b += (a ^ (c & (d ^ a))) + blocks[7] - 45705983;\r\n b = (b << 22 | b >>> 10) + c << 0;\r\n a += (d ^ (b & (c ^ d))) + blocks[8] + 1770035416;\r\n a = (a << 7 | a >>> 25) + b << 0;\r\n d += (c ^ (a & (b ^ c))) + blocks[9] - 1958414417;\r\n d = (d << 12 | d >>> 20) + a << 0;\r\n c += (b ^ (d & (a ^ b))) + blocks[10] - 42063;\r\n c = (c << 17 | c >>> 15) + d << 0;\r\n b += (a ^ (c & (d ^ a))) + blocks[11] - 1990404162;\r\n b = (b << 22 | b >>> 10) + c << 0;\r\n a += (d ^ (b & (c ^ d))) + blocks[12] + 1804603682;\r\n a = (a << 7 | a >>> 25) + b << 0;\r\n d += (c ^ (a & (b ^ c))) + blocks[13] - 40341101;\r\n d = (d << 12 | d >>> 20) + a << 0;\r\n c += (b ^ (d & (a ^ b))) + blocks[14] - 1502002290;\r\n c = (c << 17 | c >>> 15) + d << 0;\r\n b += (a ^ (c & (d ^ a))) + blocks[15] + 1236535329;\r\n b = (b << 22 | b >>> 10) + c << 0;\r\n a += (c ^ (d & (b ^ c))) + blocks[1] - 165796510;\r\n a = (a << 5 | a >>> 27) + b << 0;\r\n d += (b ^ (c & (a ^ b))) + blocks[6] - 1069501632;\r\n d = (d << 9 | d >>> 23) + a << 0;\r\n c += (a ^ (b & (d ^ a))) + blocks[11] + 643717713;\r\n c = (c << 14 | c >>> 18) + d << 0;\r\n b += (d ^ (a & (c ^ d))) + blocks[0] - 373897302;\r\n b = (b << 20 | b >>> 12) + c << 0;\r\n a += (c ^ (d & (b ^ c))) + blocks[5] - 701558691;\r\n a = (a << 5 | a >>> 27) + b << 0;\r\n d += (b ^ (c & (a ^ b))) + blocks[10] + 38016083;\r\n d = (d << 9 | d >>> 23) + a << 0;\r\n c += (a ^ (b & (d ^ a))) + blocks[15] - 660478335;\r\n c = (c << 14 | c >>> 18) + d << 0;\r\n b += (d ^ (a & (c ^ d))) + blocks[4] - 405537848;\r\n b = (b << 20 | b >>> 12) + c << 0;\r\n a += (c ^ (d & (b ^ c))) + blocks[9] + 568446438;\r\n a = (a << 5 | a >>> 27) + b << 0;\r\n d += (b ^ (c & (a ^ b))) + blocks[14] - 1019803690;\r\n d = (d << 9 | d >>> 23) + a << 0;\r\n c += (a ^ (b & (d ^ a))) + blocks[3] - 187363961;\r\n c = (c << 14 | c >>> 18) + d << 0;\r\n b += (d ^ (a & (c ^ d))) + blocks[8] + 1163531501;\r\n b = (b << 20 | b >>> 12) + c << 0;\r\n a += (c ^ (d & (b ^ c))) + blocks[13] - 1444681467;\r\n a = (a << 5 | a >>> 27) + b << 0;\r\n d += (b ^ (c & (a ^ b))) + blocks[2] - 51403784;\r\n d = (d << 9 | d >>> 23) + a << 0;\r\n c += (a ^ (b & (d ^ a))) + blocks[7] + 1735328473;\r\n c = (c << 14 | c >>> 18) + d << 0;\r\n b += (d ^ (a & (c ^ d))) + blocks[12] - 1926607734;\r\n b = (b << 20 | b >>> 12) + c << 0;\r\n bc = b ^ c;\r\n a += (bc ^ d) + blocks[5] - 378558;\r\n a = (a << 4 | a >>> 28) + b << 0;\r\n d += (bc ^ a) + blocks[8] - 2022574463;\r\n d = (d << 11 | d >>> 21) + a << 0;\r\n da = d ^ a;\r\n c += (da ^ b) + blocks[11] + 1839030562;\r\n c = (c << 16 | c >>> 16) + d << 0;\r\n b += (da ^ c) + blocks[14] - 35309556;\r\n b = (b << 23 | b >>> 9) + c << 0;\r\n bc 
= b ^ c;\r\n a += (bc ^ d) + blocks[1] - 1530992060;\r\n a = (a << 4 | a >>> 28) + b << 0;\r\n d += (bc ^ a) + blocks[4] + 1272893353;\r\n d = (d << 11 | d >>> 21) + a << 0;\r\n da = d ^ a;\r\n c += (da ^ b) + blocks[7] - 155497632;\r\n c = (c << 16 | c >>> 16) + d << 0;\r\n b += (da ^ c) + blocks[10] - 1094730640;\r\n b = (b << 23 | b >>> 9) + c << 0;\r\n bc = b ^ c;\r\n a += (bc ^ d) + blocks[13] + 681279174;\r\n a = (a << 4 | a >>> 28) + b << 0;\r\n d += (bc ^ a) + blocks[0] - 358537222;\r\n d = (d << 11 | d >>> 21) + a << 0;\r\n da = d ^ a;\r\n c += (da ^ b) + blocks[3] - 722521979;\r\n c = (c << 16 | c >>> 16) + d << 0;\r\n b += (da ^ c) + blocks[6] + 76029189;\r\n b = (b << 23 | b >>> 9) + c << 0;\r\n bc = b ^ c;\r\n a += (bc ^ d) + blocks[9] - 640364487;\r\n a = (a << 4 | a >>> 28) + b << 0;\r\n d += (bc ^ a) + blocks[12] - 421815835;\r\n d = (d << 11 | d >>> 21) + a << 0;\r\n da = d ^ a;\r\n c += (da ^ b) + blocks[15] + 530742520;\r\n c = (c << 16 | c >>> 16) + d << 0;\r\n b += (da ^ c) + blocks[2] - 995338651;\r\n b = (b << 23 | b >>> 9) + c << 0;\r\n a += (c ^ (b | ~d)) + blocks[0] - 198630844;\r\n a = (a << 6 | a >>> 26) + b << 0;\r\n d += (b ^ (a | ~c)) + blocks[7] + 1126891415;\r\n d = (d << 10 | d >>> 22) + a << 0;\r\n c += (a ^ (d | ~b)) + blocks[14] - 1416354905;\r\n c = (c << 15 | c >>> 17) + d << 0;\r\n b += (d ^ (c | ~a)) + blocks[5] - 57434055;\r\n b = (b << 21 | b >>> 11) + c << 0;\r\n a += (c ^ (b | ~d)) + blocks[12] + 1700485571;\r\n a = (a << 6 | a >>> 26) + b << 0;\r\n d += (b ^ (a | ~c)) + blocks[3] - 1894986606;\r\n d = (d << 10 | d >>> 22) + a << 0;\r\n c += (a ^ (d | ~b)) + blocks[10] - 1051523;\r\n c = (c << 15 | c >>> 17) + d << 0;\r\n b += (d ^ (c | ~a)) + blocks[1] - 2054922799;\r\n b = (b << 21 | b >>> 11) + c << 0;\r\n a += (c ^ (b | ~d)) + blocks[8] + 1873313359;\r\n a = (a << 6 | a >>> 26) + b << 0;\r\n d += (b ^ (a | ~c)) + blocks[15] - 30611744;\r\n d = (d << 10 | d >>> 22) + a << 0;\r\n c += (a ^ (d | ~b)) + blocks[6] - 1560198380;\r\n c = (c << 15 | c >>> 17) + d << 0;\r\n b += (d ^ (c | ~a)) + blocks[13] + 1309151649;\r\n b = (b << 21 | b >>> 11) + c << 0;\r\n a += (c ^ (b | ~d)) + blocks[4] - 145523070;\r\n a = (a << 6 | a >>> 26) + b << 0;\r\n d += (b ^ (a | ~c)) + blocks[11] - 1120210379;\r\n d = (d << 10 | d >>> 22) + a << 0;\r\n c += (a ^ (d | ~b)) + blocks[2] + 718787259;\r\n c = (c << 15 | c >>> 17) + d << 0;\r\n b += (d ^ (c | ~a)) + blocks[9] - 343485551;\r\n b = (b << 21 | b >>> 11) + c << 0;\r\n\r\n if (this.first) {\r\n this.h0 = a + 1732584193 << 0;\r\n this.h1 = b - 271733879 << 0;\r\n this.h2 = c - 1732584194 << 0;\r\n this.h3 = d + 271733878 << 0;\r\n this.first = false;\r\n } else {\r\n this.h0 = this.h0 + a << 0;\r\n this.h1 = this.h1 + b << 0;\r\n this.h2 = this.h2 + c << 0;\r\n this.h3 = this.h3 + d << 0;\r\n }\r\n };\r\n\r\n /**\r\n * @method hex\r\n * @memberof Md5\r\n * @instance\r\n * @description Output hash as hex string\r\n * @returns {String} Hex string\r\n * @see {@link md5.hex}\r\n * @example\r\n * hash.hex();\r\n */\r\n Md5.prototype.hex = function () {\r\n this.finalize();\r\n\r\n var h0 = this.h0, h1 = this.h1, h2 = this.h2, h3 = this.h3;\r\n\r\n return HEX_CHARS[(h0 >> 4) & 0x0F] + HEX_CHARS[h0 & 0x0F] +\r\n HEX_CHARS[(h0 >> 12) & 0x0F] + HEX_CHARS[(h0 >> 8) & 0x0F] +\r\n HEX_CHARS[(h0 >> 20) & 0x0F] + HEX_CHARS[(h0 >> 16) & 0x0F] +\r\n HEX_CHARS[(h0 >> 28) & 0x0F] + HEX_CHARS[(h0 >> 24) & 0x0F] +\r\n HEX_CHARS[(h1 >> 4) & 0x0F] + HEX_CHARS[h1 & 0x0F] +\r\n HEX_CHARS[(h1 >> 12) & 0x0F] + HEX_CHARS[(h1 >> 8) & 0x0F] 
+\r\n HEX_CHARS[(h1 >> 20) & 0x0F] + HEX_CHARS[(h1 >> 16) & 0x0F] +\r\n HEX_CHARS[(h1 >> 28) & 0x0F] + HEX_CHARS[(h1 >> 24) & 0x0F] +\r\n HEX_CHARS[(h2 >> 4) & 0x0F] + HEX_CHARS[h2 & 0x0F] +\r\n HEX_CHARS[(h2 >> 12) & 0x0F] + HEX_CHARS[(h2 >> 8) & 0x0F] +\r\n HEX_CHARS[(h2 >> 20) & 0x0F] + HEX_CHARS[(h2 >> 16) & 0x0F] +\r\n HEX_CHARS[(h2 >> 28) & 0x0F] + HEX_CHARS[(h2 >> 24) & 0x0F] +\r\n HEX_CHARS[(h3 >> 4) & 0x0F] + HEX_CHARS[h3 & 0x0F] +\r\n HEX_CHARS[(h3 >> 12) & 0x0F] + HEX_CHARS[(h3 >> 8) & 0x0F] +\r\n HEX_CHARS[(h3 >> 20) & 0x0F] + HEX_CHARS[(h3 >> 16) & 0x0F] +\r\n HEX_CHARS[(h3 >> 28) & 0x0F] + HEX_CHARS[(h3 >> 24) & 0x0F];\r\n };\r\n\r\n /**\r\n * @method toString\r\n * @memberof Md5\r\n * @instance\r\n * @description Output hash as hex string\r\n * @returns {String} Hex string\r\n * @see {@link md5.hex}\r\n * @example\r\n * hash.toString();\r\n */\r\n Md5.prototype.toString = Md5.prototype.hex;\r\n\r\n /**\r\n * @method digest\r\n * @memberof Md5\r\n * @instance\r\n * @description Output hash as bytes array\r\n * @returns {Array} Bytes array\r\n * @see {@link md5.digest}\r\n * @example\r\n * hash.digest();\r\n */\r\n Md5.prototype.digest = function () {\r\n this.finalize();\r\n\r\n var h0 = this.h0, h1 = this.h1, h2 = this.h2, h3 = this.h3;\r\n return [\r\n h0 & 0xFF, (h0 >> 8) & 0xFF, (h0 >> 16) & 0xFF, (h0 >> 24) & 0xFF,\r\n h1 & 0xFF, (h1 >> 8) & 0xFF, (h1 >> 16) & 0xFF, (h1 >> 24) & 0xFF,\r\n h2 & 0xFF, (h2 >> 8) & 0xFF, (h2 >> 16) & 0xFF, (h2 >> 24) & 0xFF,\r\n h3 & 0xFF, (h3 >> 8) & 0xFF, (h3 >> 16) & 0xFF, (h3 >> 24) & 0xFF\r\n ];\r\n };\r\n\r\n /**\r\n * @method array\r\n * @memberof Md5\r\n * @instance\r\n * @description Output hash as bytes array\r\n * @returns {Array} Bytes array\r\n * @see {@link md5.array}\r\n * @example\r\n * hash.array();\r\n */\r\n Md5.prototype.array = Md5.prototype.digest;\r\n\r\n /**\r\n * @method arrayBuffer\r\n * @memberof Md5\r\n * @instance\r\n * @description Output hash as ArrayBuffer\r\n * @returns {ArrayBuffer} ArrayBuffer\r\n * @see {@link md5.arrayBuffer}\r\n * @example\r\n * hash.arrayBuffer();\r\n */\r\n Md5.prototype.arrayBuffer = function () {\r\n this.finalize();\r\n\r\n var buffer = new ArrayBuffer(16);\r\n var blocks = new Uint32Array(buffer);\r\n blocks[0] = this.h0;\r\n blocks[1] = this.h1;\r\n blocks[2] = this.h2;\r\n blocks[3] = this.h3;\r\n return buffer;\r\n };\r\n\r\n /**\r\n * @method buffer\r\n * @deprecated This maybe confuse with Buffer in node.js. 
Please use arrayBuffer instead.\r\n * @memberof Md5\r\n * @instance\r\n * @description Output hash as ArrayBuffer\r\n * @returns {ArrayBuffer} ArrayBuffer\r\n * @see {@link md5.buffer}\r\n * @example\r\n * hash.buffer();\r\n */\r\n Md5.prototype.buffer = Md5.prototype.arrayBuffer;\r\n\r\n var exports = createMethod();\r\n\r\n if (COMMON_JS) {\r\n module.exports = exports;\r\n } else {\r\n /**\r\n * @method md5\b\r\n * @description Md5 hash function, export to global in browsers.\r\n * @param {String|Array|Uint8Array|ArrayBuffer} message message to hash\r\n * @returns {String} md5 hashes\r\n * @example\r\n * md5(''); // d41d8cd98f00b204e9800998ecf8427e\r\n * md5('The quick brown fox jumps over the lazy dog'); // 9e107d9d372bb6826bd81d3542a419d6\r\n * md5('The quick brown fox jumps over the lazy dog.'); // e4d909c290d0fb1ca068ffaddf22cbd0\r\n *\r\n * // It also supports UTF-8 encoding\r\n * md5('中文'); // a7bac2239fcdcb3a067903d8077c4a07\r\n *\r\n * // It also supports byte `Array`, `Uint8Array`, `ArrayBuffer`\r\n * md5([]); // d41d8cd98f00b204e9800998ecf8427e\r\n * md5(new Uint8Array([])); // d41d8cd98f00b204e9800998ecf8427e\r\n */\r\n root.md5 = exports;\r\n if (AMD) {\r\n define(function () {\r\n return exports;\r\n });\r\n }\r\n }\r\n})();\r\n","'use strict';\n\n\nvar TYPED_OK = (typeof Uint8Array !== 'undefined') &&\n (typeof Uint16Array !== 'undefined') &&\n (typeof Int32Array !== 'undefined');\n\n\nexports.assign = function (obj /*from1, from2, from3, ...*/) {\n var sources = Array.prototype.slice.call(arguments, 1);\n while (sources.length) {\n var source = sources.shift();\n if (!source) { continue; }\n\n if (typeof source !== 'object') {\n throw new TypeError(source + 'must be non-object');\n }\n\n for (var p in source) {\n if (source.hasOwnProperty(p)) {\n obj[p] = source[p];\n }\n }\n }\n\n return obj;\n};\n\n\n// reduce buffer size, avoiding mem copy\nexports.shrinkBuf = function (buf, size) {\n if (buf.length === size) { return buf; }\n if (buf.subarray) { return buf.subarray(0, size); }\n buf.length = size;\n return buf;\n};\n\n\nvar fnTyped = {\n arraySet: function (dest, src, src_offs, len, dest_offs) {\n if (src.subarray && dest.subarray) {\n dest.set(src.subarray(src_offs, src_offs + len), dest_offs);\n return;\n }\n // Fallback to ordinary array\n for (var i = 0; i < len; i++) {\n dest[dest_offs + i] = src[src_offs + i];\n }\n },\n // Join array of chunks to single array.\n flattenChunks: function (chunks) {\n var i, l, len, pos, chunk, result;\n\n // calculate data length\n len = 0;\n for (i = 0, l = chunks.length; i < l; i++) {\n len += chunks[i].length;\n }\n\n // join chunks\n result = new Uint8Array(len);\n pos = 0;\n for (i = 0, l = chunks.length; i < l; i++) {\n chunk = chunks[i];\n result.set(chunk, pos);\n pos += chunk.length;\n }\n\n return result;\n }\n};\n\nvar fnUntyped = {\n arraySet: function (dest, src, src_offs, len, dest_offs) {\n for (var i = 0; i < len; i++) {\n dest[dest_offs + i] = src[src_offs + i];\n }\n },\n // Join array of chunks to single array.\n flattenChunks: function (chunks) {\n return [].concat.apply([], chunks);\n }\n};\n\n\n// Enable/Disable typed arrays use, for testing\n//\nexports.setTyped = function (on) {\n if (on) {\n exports.Buf8 = Uint8Array;\n exports.Buf16 = Uint16Array;\n exports.Buf32 = Int32Array;\n exports.assign(exports, fnTyped);\n } else {\n exports.Buf8 = Array;\n exports.Buf16 = Array;\n exports.Buf32 = Array;\n exports.assign(exports, fnUntyped);\n }\n};\n\nexports.setTyped(TYPED_OK);\n","'use strict';\n\n// (C) 
1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nvar utils = require('../utils/common');\n\n/* Public constants ==========================================================*/\n/* ===========================================================================*/\n\n\n//var Z_FILTERED = 1;\n//var Z_HUFFMAN_ONLY = 2;\n//var Z_RLE = 3;\nvar Z_FIXED = 4;\n//var Z_DEFAULT_STRATEGY = 0;\n\n/* Possible values of the data_type field (though see inflate()) */\nvar Z_BINARY = 0;\nvar Z_TEXT = 1;\n//var Z_ASCII = 1; // = Z_TEXT\nvar Z_UNKNOWN = 2;\n\n/*============================================================================*/\n\n\nfunction zero(buf) { var len = buf.length; while (--len >= 0) { buf[len] = 0; } }\n\n// From zutil.h\n\nvar STORED_BLOCK = 0;\nvar STATIC_TREES = 1;\nvar DYN_TREES = 2;\n/* The three kinds of block type */\n\nvar MIN_MATCH = 3;\nvar MAX_MATCH = 258;\n/* The minimum and maximum match lengths */\n\n// From deflate.h\n/* ===========================================================================\n * Internal compression state.\n */\n\nvar LENGTH_CODES = 29;\n/* number of length codes, not counting the special END_BLOCK code */\n\nvar LITERALS = 256;\n/* number of literal bytes 0..255 */\n\nvar L_CODES = LITERALS + 1 + LENGTH_CODES;\n/* number of Literal or Length codes, including the END_BLOCK code */\n\nvar D_CODES = 30;\n/* number of distance codes */\n\nvar BL_CODES = 19;\n/* number of codes used to transfer the bit lengths */\n\nvar HEAP_SIZE = 2 * L_CODES + 1;\n/* maximum heap size */\n\nvar MAX_BITS = 15;\n/* All codes must not exceed MAX_BITS bits */\n\nvar Buf_size = 16;\n/* size of bit buffer in bi_buf */\n\n\n/* ===========================================================================\n * Constants\n */\n\nvar MAX_BL_BITS = 7;\n/* Bit length codes must not exceed MAX_BL_BITS bits */\n\nvar END_BLOCK = 256;\n/* end of block literal code */\n\nvar REP_3_6 = 16;\n/* repeat previous bit length 3-6 times (2 bits of repeat count) */\n\nvar REPZ_3_10 = 17;\n/* repeat a zero length 3-10 times (3 bits of repeat count) */\n\nvar REPZ_11_138 = 18;\n/* repeat a zero length 11-138 times (7 bits of repeat count) */\n\n/* eslint-disable comma-spacing,array-bracket-spacing */\nvar extra_lbits = /* extra bits for each length code */\n [0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0];\n\nvar extra_dbits = /* extra bits for each distance code */\n [0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13];\n\nvar extra_blbits = /* extra bits for each bit length code */\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7];\n\nvar bl_order =\n 
[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];\n/* eslint-enable comma-spacing,array-bracket-spacing */\n\n/* The lengths of the bit length codes are sent in order of decreasing\n * probability, to avoid transmitting the lengths for unused bit length codes.\n */\n\n/* ===========================================================================\n * Local data. These are initialized only once.\n */\n\n// We pre-fill arrays with 0 to avoid uninitialized gaps\n\nvar DIST_CODE_LEN = 512; /* see definition of array dist_code below */\n\n// !!!! Use flat array insdead of structure, Freq = i*2, Len = i*2+1\nvar static_ltree = new Array((L_CODES + 2) * 2);\nzero(static_ltree);\n/* The static literal tree. Since the bit lengths are imposed, there is no\n * need for the L_CODES extra codes used during heap construction. However\n * The codes 286 and 287 are needed to build a canonical tree (see _tr_init\n * below).\n */\n\nvar static_dtree = new Array(D_CODES * 2);\nzero(static_dtree);\n/* The static distance tree. (Actually a trivial tree since all codes use\n * 5 bits.)\n */\n\nvar _dist_code = new Array(DIST_CODE_LEN);\nzero(_dist_code);\n/* Distance codes. The first 256 values correspond to the distances\n * 3 .. 258, the last 256 values correspond to the top 8 bits of\n * the 15 bit distances.\n */\n\nvar _length_code = new Array(MAX_MATCH - MIN_MATCH + 1);\nzero(_length_code);\n/* length code for each normalized match length (0 == MIN_MATCH) */\n\nvar base_length = new Array(LENGTH_CODES);\nzero(base_length);\n/* First normalized length for each code (0 = MIN_MATCH) */\n\nvar base_dist = new Array(D_CODES);\nzero(base_dist);\n/* First normalized distance for each code (0 = distance of 1) */\n\n\nfunction StaticTreeDesc(static_tree, extra_bits, extra_base, elems, max_length) {\n\n this.static_tree = static_tree; /* static tree or NULL */\n this.extra_bits = extra_bits; /* extra bits for each code or NULL */\n this.extra_base = extra_base; /* base index for extra_bits */\n this.elems = elems; /* max number of elements in the tree */\n this.max_length = max_length; /* max bit length for the codes */\n\n // show if `static_tree` has data or dummy - needed for monomorphic objects\n this.has_stree = static_tree && static_tree.length;\n}\n\n\nvar static_l_desc;\nvar static_d_desc;\nvar static_bl_desc;\n\n\nfunction TreeDesc(dyn_tree, stat_desc) {\n this.dyn_tree = dyn_tree; /* the dynamic tree */\n this.max_code = 0; /* largest code with non zero frequency */\n this.stat_desc = stat_desc; /* the corresponding static tree */\n}\n\n\n\nfunction d_code(dist) {\n return dist < 256 ? 
_dist_code[dist] : _dist_code[256 + (dist >>> 7)];\n}\n\n\n/* ===========================================================================\n * Output a short LSB first on the stream.\n * IN assertion: there is enough room in pendingBuf.\n */\nfunction put_short(s, w) {\n// put_byte(s, (uch)((w) & 0xff));\n// put_byte(s, (uch)((ush)(w) >> 8));\n s.pending_buf[s.pending++] = (w) & 0xff;\n s.pending_buf[s.pending++] = (w >>> 8) & 0xff;\n}\n\n\n/* ===========================================================================\n * Send a value on a given number of bits.\n * IN assertion: length <= 16 and value fits in length bits.\n */\nfunction send_bits(s, value, length) {\n if (s.bi_valid > (Buf_size - length)) {\n s.bi_buf |= (value << s.bi_valid) & 0xffff;\n put_short(s, s.bi_buf);\n s.bi_buf = value >> (Buf_size - s.bi_valid);\n s.bi_valid += length - Buf_size;\n } else {\n s.bi_buf |= (value << s.bi_valid) & 0xffff;\n s.bi_valid += length;\n }\n}\n\n\nfunction send_code(s, c, tree) {\n send_bits(s, tree[c * 2]/*.Code*/, tree[c * 2 + 1]/*.Len*/);\n}\n\n\n/* ===========================================================================\n * Reverse the first len bits of a code, using straightforward code (a faster\n * method would use a table)\n * IN assertion: 1 <= len <= 15\n */\nfunction bi_reverse(code, len) {\n var res = 0;\n do {\n res |= code & 1;\n code >>>= 1;\n res <<= 1;\n } while (--len > 0);\n return res >>> 1;\n}\n\n\n/* ===========================================================================\n * Flush the bit buffer, keeping at most 7 bits in it.\n */\nfunction bi_flush(s) {\n if (s.bi_valid === 16) {\n put_short(s, s.bi_buf);\n s.bi_buf = 0;\n s.bi_valid = 0;\n\n } else if (s.bi_valid >= 8) {\n s.pending_buf[s.pending++] = s.bi_buf & 0xff;\n s.bi_buf >>= 8;\n s.bi_valid -= 8;\n }\n}\n\n\n/* ===========================================================================\n * Compute the optimal bit lengths for a tree and update the total bit length\n * for the current block.\n * IN assertion: the fields freq and dad are set, heap[heap_max] and\n * above are the tree nodes sorted by increasing frequency.\n * OUT assertions: the field len is set to the optimal bit length, the\n * array bl_count contains the frequencies for each bit length.\n * The length opt_len is updated; static_len is also updated if stree is\n * not null.\n */\nfunction gen_bitlen(s, desc)\n// deflate_state *s;\n// tree_desc *desc; /* the tree descriptor */\n{\n var tree = desc.dyn_tree;\n var max_code = desc.max_code;\n var stree = desc.stat_desc.static_tree;\n var has_stree = desc.stat_desc.has_stree;\n var extra = desc.stat_desc.extra_bits;\n var base = desc.stat_desc.extra_base;\n var max_length = desc.stat_desc.max_length;\n var h; /* heap index */\n var n, m; /* iterate over the tree elements */\n var bits; /* bit length */\n var xbits; /* extra bits */\n var f; /* frequency */\n var overflow = 0; /* number of elements with bit length too large */\n\n for (bits = 0; bits <= MAX_BITS; bits++) {\n s.bl_count[bits] = 0;\n }\n\n /* In a first pass, compute the optimal bit lengths (which may\n * overflow in the case of the bit length tree).\n */\n tree[s.heap[s.heap_max] * 2 + 1]/*.Len*/ = 0; /* root of the heap */\n\n for (h = s.heap_max + 1; h < HEAP_SIZE; h++) {\n n = s.heap[h];\n bits = tree[tree[n * 2 + 1]/*.Dad*/ * 2 + 1]/*.Len*/ + 1;\n if (bits > max_length) {\n bits = max_length;\n overflow++;\n }\n tree[n * 2 + 1]/*.Len*/ = bits;\n /* We overwrite tree[n].Dad which is no longer needed */\n\n if (n > 
max_code) { continue; } /* not a leaf node */\n\n s.bl_count[bits]++;\n xbits = 0;\n if (n >= base) {\n xbits = extra[n - base];\n }\n f = tree[n * 2]/*.Freq*/;\n s.opt_len += f * (bits + xbits);\n if (has_stree) {\n s.static_len += f * (stree[n * 2 + 1]/*.Len*/ + xbits);\n }\n }\n if (overflow === 0) { return; }\n\n // Trace((stderr,\"\\nbit length overflow\\n\"));\n /* This happens for example on obj2 and pic of the Calgary corpus */\n\n /* Find the first bit length which could increase: */\n do {\n bits = max_length - 1;\n while (s.bl_count[bits] === 0) { bits--; }\n s.bl_count[bits]--; /* move one leaf down the tree */\n s.bl_count[bits + 1] += 2; /* move one overflow item as its brother */\n s.bl_count[max_length]--;\n /* The brother of the overflow item also moves one step up,\n * but this does not affect bl_count[max_length]\n */\n overflow -= 2;\n } while (overflow > 0);\n\n /* Now recompute all bit lengths, scanning in increasing frequency.\n * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all\n * lengths instead of fixing only the wrong ones. This idea is taken\n * from 'ar' written by Haruhiko Okumura.)\n */\n for (bits = max_length; bits !== 0; bits--) {\n n = s.bl_count[bits];\n while (n !== 0) {\n m = s.heap[--h];\n if (m > max_code) { continue; }\n if (tree[m * 2 + 1]/*.Len*/ !== bits) {\n // Trace((stderr,\"code %d bits %d->%d\\n\", m, tree[m].Len, bits));\n s.opt_len += (bits - tree[m * 2 + 1]/*.Len*/) * tree[m * 2]/*.Freq*/;\n tree[m * 2 + 1]/*.Len*/ = bits;\n }\n n--;\n }\n }\n}\n\n\n/* ===========================================================================\n * Generate the codes for a given tree and bit counts (which need not be\n * optimal).\n * IN assertion: the array bl_count contains the bit length statistics for\n * the given tree and the field len is set for all tree elements.\n * OUT assertion: the field code is set for all tree elements of non\n * zero code length.\n */\nfunction gen_codes(tree, max_code, bl_count)\n// ct_data *tree; /* the tree to decorate */\n// int max_code; /* largest code with non zero frequency */\n// ushf *bl_count; /* number of codes at each bit length */\n{\n var next_code = new Array(MAX_BITS + 1); /* next code value for each bit length */\n var code = 0; /* running code value */\n var bits; /* bit index */\n var n; /* code index */\n\n /* The distribution counts are first used to generate the code values\n * without bit reversal.\n */\n for (bits = 1; bits <= MAX_BITS; bits++) {\n next_code[bits] = code = (code + bl_count[bits - 1]) << 1;\n }\n /* Check that the bit counts in bl_count are consistent. 
The last code\n * must be all ones.\n */\n //Assert (code + bl_count[MAX_BITS]-1 == (1< length code (0..28) */\n length = 0;\n for (code = 0; code < LENGTH_CODES - 1; code++) {\n base_length[code] = length;\n for (n = 0; n < (1 << extra_lbits[code]); n++) {\n _length_code[length++] = code;\n }\n }\n //Assert (length == 256, \"tr_static_init: length != 256\");\n /* Note that the length 255 (match length 258) can be represented\n * in two different ways: code 284 + 5 bits or code 285, so we\n * overwrite length_code[255] to use the best encoding:\n */\n _length_code[length - 1] = code;\n\n /* Initialize the mapping dist (0..32K) -> dist code (0..29) */\n dist = 0;\n for (code = 0; code < 16; code++) {\n base_dist[code] = dist;\n for (n = 0; n < (1 << extra_dbits[code]); n++) {\n _dist_code[dist++] = code;\n }\n }\n //Assert (dist == 256, \"tr_static_init: dist != 256\");\n dist >>= 7; /* from now on, all distances are divided by 128 */\n for (; code < D_CODES; code++) {\n base_dist[code] = dist << 7;\n for (n = 0; n < (1 << (extra_dbits[code] - 7)); n++) {\n _dist_code[256 + dist++] = code;\n }\n }\n //Assert (dist == 256, \"tr_static_init: 256+dist != 512\");\n\n /* Construct the codes of the static literal tree */\n for (bits = 0; bits <= MAX_BITS; bits++) {\n bl_count[bits] = 0;\n }\n\n n = 0;\n while (n <= 143) {\n static_ltree[n * 2 + 1]/*.Len*/ = 8;\n n++;\n bl_count[8]++;\n }\n while (n <= 255) {\n static_ltree[n * 2 + 1]/*.Len*/ = 9;\n n++;\n bl_count[9]++;\n }\n while (n <= 279) {\n static_ltree[n * 2 + 1]/*.Len*/ = 7;\n n++;\n bl_count[7]++;\n }\n while (n <= 287) {\n static_ltree[n * 2 + 1]/*.Len*/ = 8;\n n++;\n bl_count[8]++;\n }\n /* Codes 286 and 287 do not exist, but we must include them in the\n * tree construction to get a canonical Huffman tree (longest code\n * all ones)\n */\n gen_codes(static_ltree, L_CODES + 1, bl_count);\n\n /* The static distance tree is trivial: */\n for (n = 0; n < D_CODES; n++) {\n static_dtree[n * 2 + 1]/*.Len*/ = 5;\n static_dtree[n * 2]/*.Code*/ = bi_reverse(n, 5);\n }\n\n // Now data ready and we can init static trees\n static_l_desc = new StaticTreeDesc(static_ltree, extra_lbits, LITERALS + 1, L_CODES, MAX_BITS);\n static_d_desc = new StaticTreeDesc(static_dtree, extra_dbits, 0, D_CODES, MAX_BITS);\n static_bl_desc = new StaticTreeDesc(new Array(0), extra_blbits, 0, BL_CODES, MAX_BL_BITS);\n\n //static_init_done = true;\n}\n\n\n/* ===========================================================================\n * Initialize a new block.\n */\nfunction init_block(s) {\n var n; /* iterates over tree elements */\n\n /* Initialize the trees. 
*/\n for (n = 0; n < L_CODES; n++) { s.dyn_ltree[n * 2]/*.Freq*/ = 0; }\n for (n = 0; n < D_CODES; n++) { s.dyn_dtree[n * 2]/*.Freq*/ = 0; }\n for (n = 0; n < BL_CODES; n++) { s.bl_tree[n * 2]/*.Freq*/ = 0; }\n\n s.dyn_ltree[END_BLOCK * 2]/*.Freq*/ = 1;\n s.opt_len = s.static_len = 0;\n s.last_lit = s.matches = 0;\n}\n\n\n/* ===========================================================================\n * Flush the bit buffer and align the output on a byte boundary\n */\nfunction bi_windup(s)\n{\n if (s.bi_valid > 8) {\n put_short(s, s.bi_buf);\n } else if (s.bi_valid > 0) {\n //put_byte(s, (Byte)s->bi_buf);\n s.pending_buf[s.pending++] = s.bi_buf;\n }\n s.bi_buf = 0;\n s.bi_valid = 0;\n}\n\n/* ===========================================================================\n * Copy a stored block, storing first the length and its\n * one's complement if requested.\n */\nfunction copy_block(s, buf, len, header)\n//DeflateState *s;\n//charf *buf; /* the input data */\n//unsigned len; /* its length */\n//int header; /* true if block header must be written */\n{\n bi_windup(s); /* align on byte boundary */\n\n if (header) {\n put_short(s, len);\n put_short(s, ~len);\n }\n// while (len--) {\n// put_byte(s, *buf++);\n// }\n utils.arraySet(s.pending_buf, s.window, buf, len, s.pending);\n s.pending += len;\n}\n\n/* ===========================================================================\n * Compares to subtrees, using the tree depth as tie breaker when\n * the subtrees have equal frequency. This minimizes the worst case length.\n */\nfunction smaller(tree, n, m, depth) {\n var _n2 = n * 2;\n var _m2 = m * 2;\n return (tree[_n2]/*.Freq*/ < tree[_m2]/*.Freq*/ ||\n (tree[_n2]/*.Freq*/ === tree[_m2]/*.Freq*/ && depth[n] <= depth[m]));\n}\n\n/* ===========================================================================\n * Restore the heap property by moving down the tree starting at node k,\n * exchanging a node with the smallest of its two sons if necessary, stopping\n * when the heap property is re-established (each father smaller than its\n * two sons).\n */\nfunction pqdownheap(s, tree, k)\n// deflate_state *s;\n// ct_data *tree; /* the tree to restore */\n// int k; /* node to move down */\n{\n var v = s.heap[k];\n var j = k << 1; /* left son of k */\n while (j <= s.heap_len) {\n /* Set j to the smallest of the two sons: */\n if (j < s.heap_len &&\n smaller(tree, s.heap[j + 1], s.heap[j], s.depth)) {\n j++;\n }\n /* Exit if v is smaller than both sons */\n if (smaller(tree, v, s.heap[j], s.depth)) { break; }\n\n /* Exchange v with the smallest son */\n s.heap[k] = s.heap[j];\n k = j;\n\n /* And continue down the tree, setting j to the left son of k */\n j <<= 1;\n }\n s.heap[k] = v;\n}\n\n\n// inlined manually\n// var SMALLEST = 1;\n\n/* ===========================================================================\n * Send the block data compressed using the given Huffman trees\n */\nfunction compress_block(s, ltree, dtree)\n// deflate_state *s;\n// const ct_data *ltree; /* literal tree */\n// const ct_data *dtree; /* distance tree */\n{\n var dist; /* distance of matched string */\n var lc; /* match length or unmatched char (if dist == 0) */\n var lx = 0; /* running index in l_buf */\n var code; /* the code to send */\n var extra; /* number of extra bits to send */\n\n if (s.last_lit !== 0) {\n do {\n dist = (s.pending_buf[s.d_buf + lx * 2] << 8) | (s.pending_buf[s.d_buf + lx * 2 + 1]);\n lc = s.pending_buf[s.l_buf + lx];\n lx++;\n\n if (dist === 0) {\n send_code(s, lc, ltree); /* send a literal byte 
*/\n //Tracecv(isgraph(lc), (stderr,\" '%c' \", lc));\n } else {\n /* Here, lc is the match length - MIN_MATCH */\n code = _length_code[lc];\n send_code(s, code + LITERALS + 1, ltree); /* send the length code */\n extra = extra_lbits[code];\n if (extra !== 0) {\n lc -= base_length[code];\n send_bits(s, lc, extra); /* send the extra length bits */\n }\n dist--; /* dist is now the match distance - 1 */\n code = d_code(dist);\n //Assert (code < D_CODES, \"bad d_code\");\n\n send_code(s, code, dtree); /* send the distance code */\n extra = extra_dbits[code];\n if (extra !== 0) {\n dist -= base_dist[code];\n send_bits(s, dist, extra); /* send the extra distance bits */\n }\n } /* literal or match pair ? */\n\n /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */\n //Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx,\n // \"pendingBuf overflow\");\n\n } while (lx < s.last_lit);\n }\n\n send_code(s, END_BLOCK, ltree);\n}\n\n\n/* ===========================================================================\n * Construct one Huffman tree and assigns the code bit strings and lengths.\n * Update the total bit length for the current block.\n * IN assertion: the field freq is set for all tree elements.\n * OUT assertions: the fields len and code are set to the optimal bit length\n * and corresponding code. The length opt_len is updated; static_len is\n * also updated if stree is not null. The field max_code is set.\n */\nfunction build_tree(s, desc)\n// deflate_state *s;\n// tree_desc *desc; /* the tree descriptor */\n{\n var tree = desc.dyn_tree;\n var stree = desc.stat_desc.static_tree;\n var has_stree = desc.stat_desc.has_stree;\n var elems = desc.stat_desc.elems;\n var n, m; /* iterate over heap elements */\n var max_code = -1; /* largest code with non zero frequency */\n var node; /* new node being created */\n\n /* Construct the initial heap, with least frequent element in\n * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].\n * heap[0] is not used.\n */\n s.heap_len = 0;\n s.heap_max = HEAP_SIZE;\n\n for (n = 0; n < elems; n++) {\n if (tree[n * 2]/*.Freq*/ !== 0) {\n s.heap[++s.heap_len] = max_code = n;\n s.depth[n] = 0;\n\n } else {\n tree[n * 2 + 1]/*.Len*/ = 0;\n }\n }\n\n /* The pkzip format requires that at least one distance code exists,\n * and that at least one bit should be sent even if there is only one\n * possible code. So to avoid special checks later on we force at least\n * two codes of non zero frequency.\n */\n while (s.heap_len < 2) {\n node = s.heap[++s.heap_len] = (max_code < 2 ? ++max_code : 0);\n tree[node * 2]/*.Freq*/ = 1;\n s.depth[node] = 0;\n s.opt_len--;\n\n if (has_stree) {\n s.static_len -= stree[node * 2 + 1]/*.Len*/;\n }\n /* node is 0 or 1 so it does not have extra bits */\n }\n desc.max_code = max_code;\n\n /* The elements heap[heap_len/2+1 .. 
heap_len] are leaves of the tree,\n * establish sub-heaps of increasing lengths:\n */\n for (n = (s.heap_len >> 1/*int /2*/); n >= 1; n--) { pqdownheap(s, tree, n); }\n\n /* Construct the Huffman tree by repeatedly combining the least two\n * frequent nodes.\n */\n node = elems; /* next internal node of the tree */\n do {\n //pqremove(s, tree, n); /* n = node of least frequency */\n /*** pqremove ***/\n n = s.heap[1/*SMALLEST*/];\n s.heap[1/*SMALLEST*/] = s.heap[s.heap_len--];\n pqdownheap(s, tree, 1/*SMALLEST*/);\n /***/\n\n m = s.heap[1/*SMALLEST*/]; /* m = node of next least frequency */\n\n s.heap[--s.heap_max] = n; /* keep the nodes sorted by frequency */\n s.heap[--s.heap_max] = m;\n\n /* Create a new node father of n and m */\n tree[node * 2]/*.Freq*/ = tree[n * 2]/*.Freq*/ + tree[m * 2]/*.Freq*/;\n s.depth[node] = (s.depth[n] >= s.depth[m] ? s.depth[n] : s.depth[m]) + 1;\n tree[n * 2 + 1]/*.Dad*/ = tree[m * 2 + 1]/*.Dad*/ = node;\n\n /* and insert the new node in the heap */\n s.heap[1/*SMALLEST*/] = node++;\n pqdownheap(s, tree, 1/*SMALLEST*/);\n\n } while (s.heap_len >= 2);\n\n s.heap[--s.heap_max] = s.heap[1/*SMALLEST*/];\n\n /* At this point, the fields freq and dad are set. We can now\n * generate the bit lengths.\n */\n gen_bitlen(s, desc);\n\n /* The field len is now set, we can generate the bit codes */\n gen_codes(tree, max_code, s.bl_count);\n}\n\n\n/* ===========================================================================\n * Scan a literal or distance tree to determine the frequencies of the codes\n * in the bit length tree.\n */\nfunction scan_tree(s, tree, max_code)\n// deflate_state *s;\n// ct_data *tree; /* the tree to be scanned */\n// int max_code; /* and its largest code of non zero frequency */\n{\n var n; /* iterates over all tree elements */\n var prevlen = -1; /* last emitted length */\n var curlen; /* length of current code */\n\n var nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */\n\n var count = 0; /* repeat count of the current code */\n var max_count = 7; /* max repeat count */\n var min_count = 4; /* min repeat count */\n\n if (nextlen === 0) {\n max_count = 138;\n min_count = 3;\n }\n tree[(max_code + 1) * 2 + 1]/*.Len*/ = 0xffff; /* guard */\n\n for (n = 0; n <= max_code; n++) {\n curlen = nextlen;\n nextlen = tree[(n + 1) * 2 + 1]/*.Len*/;\n\n if (++count < max_count && curlen === nextlen) {\n continue;\n\n } else if (count < min_count) {\n s.bl_tree[curlen * 2]/*.Freq*/ += count;\n\n } else if (curlen !== 0) {\n\n if (curlen !== prevlen) { s.bl_tree[curlen * 2]/*.Freq*/++; }\n s.bl_tree[REP_3_6 * 2]/*.Freq*/++;\n\n } else if (count <= 10) {\n s.bl_tree[REPZ_3_10 * 2]/*.Freq*/++;\n\n } else {\n s.bl_tree[REPZ_11_138 * 2]/*.Freq*/++;\n }\n\n count = 0;\n prevlen = curlen;\n\n if (nextlen === 0) {\n max_count = 138;\n min_count = 3;\n\n } else if (curlen === nextlen) {\n max_count = 6;\n min_count = 3;\n\n } else {\n max_count = 7;\n min_count = 4;\n }\n }\n}\n\n\n/* ===========================================================================\n * Send a literal or distance tree in compressed form, using the codes in\n * bl_tree.\n */\nfunction send_tree(s, tree, max_code)\n// deflate_state *s;\n// ct_data *tree; /* the tree to be scanned */\n// int max_code; /* and its largest code of non zero frequency */\n{\n var n; /* iterates over all tree elements */\n var prevlen = -1; /* last emitted length */\n var curlen; /* length of current code */\n\n var nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */\n\n var count = 0; /* 
repeat count of the current code */\n var max_count = 7; /* max repeat count */\n var min_count = 4; /* min repeat count */\n\n /* tree[max_code+1].Len = -1; */ /* guard already set */\n if (nextlen === 0) {\n max_count = 138;\n min_count = 3;\n }\n\n for (n = 0; n <= max_code; n++) {\n curlen = nextlen;\n nextlen = tree[(n + 1) * 2 + 1]/*.Len*/;\n\n if (++count < max_count && curlen === nextlen) {\n continue;\n\n } else if (count < min_count) {\n do { send_code(s, curlen, s.bl_tree); } while (--count !== 0);\n\n } else if (curlen !== 0) {\n if (curlen !== prevlen) {\n send_code(s, curlen, s.bl_tree);\n count--;\n }\n //Assert(count >= 3 && count <= 6, \" 3_6?\");\n send_code(s, REP_3_6, s.bl_tree);\n send_bits(s, count - 3, 2);\n\n } else if (count <= 10) {\n send_code(s, REPZ_3_10, s.bl_tree);\n send_bits(s, count - 3, 3);\n\n } else {\n send_code(s, REPZ_11_138, s.bl_tree);\n send_bits(s, count - 11, 7);\n }\n\n count = 0;\n prevlen = curlen;\n if (nextlen === 0) {\n max_count = 138;\n min_count = 3;\n\n } else if (curlen === nextlen) {\n max_count = 6;\n min_count = 3;\n\n } else {\n max_count = 7;\n min_count = 4;\n }\n }\n}\n\n\n/* ===========================================================================\n * Construct the Huffman tree for the bit lengths and return the index in\n * bl_order of the last bit length code to send.\n */\nfunction build_bl_tree(s) {\n var max_blindex; /* index of last bit length code of non zero freq */\n\n /* Determine the bit length frequencies for literal and distance trees */\n scan_tree(s, s.dyn_ltree, s.l_desc.max_code);\n scan_tree(s, s.dyn_dtree, s.d_desc.max_code);\n\n /* Build the bit length tree: */\n build_tree(s, s.bl_desc);\n /* opt_len now includes the length of the tree representations, except\n * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.\n */\n\n /* Determine the number of bit length codes to send. The pkzip format\n * requires that at least 4 bit length codes be sent. 
(appnote.txt says\n * 3 but the actual value used is 4.)\n */\n for (max_blindex = BL_CODES - 1; max_blindex >= 3; max_blindex--) {\n if (s.bl_tree[bl_order[max_blindex] * 2 + 1]/*.Len*/ !== 0) {\n break;\n }\n }\n /* Update opt_len to include the bit length tree and counts */\n s.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4;\n //Tracev((stderr, \"\\ndyn trees: dyn %ld, stat %ld\",\n // s->opt_len, s->static_len));\n\n return max_blindex;\n}\n\n\n/* ===========================================================================\n * Send the header for a block using dynamic Huffman trees: the counts, the\n * lengths of the bit length codes, the literal tree and the distance tree.\n * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.\n */\nfunction send_all_trees(s, lcodes, dcodes, blcodes)\n// deflate_state *s;\n// int lcodes, dcodes, blcodes; /* number of codes for each tree */\n{\n var rank; /* index in bl_order */\n\n //Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, \"not enough codes\");\n //Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,\n // \"too many codes\");\n //Tracev((stderr, \"\\nbl counts: \"));\n send_bits(s, lcodes - 257, 5); /* not +255 as stated in appnote.txt */\n send_bits(s, dcodes - 1, 5);\n send_bits(s, blcodes - 4, 4); /* not -3 as stated in appnote.txt */\n for (rank = 0; rank < blcodes; rank++) {\n //Tracev((stderr, \"\\nbl code %2d \", bl_order[rank]));\n send_bits(s, s.bl_tree[bl_order[rank] * 2 + 1]/*.Len*/, 3);\n }\n //Tracev((stderr, \"\\nbl tree: sent %ld\", s->bits_sent));\n\n send_tree(s, s.dyn_ltree, lcodes - 1); /* literal tree */\n //Tracev((stderr, \"\\nlit tree: sent %ld\", s->bits_sent));\n\n send_tree(s, s.dyn_dtree, dcodes - 1); /* distance tree */\n //Tracev((stderr, \"\\ndist tree: sent %ld\", s->bits_sent));\n}\n\n\n/* ===========================================================================\n * Check if the data type is TEXT or BINARY, using the following algorithm:\n * - TEXT if the two conditions below are satisfied:\n * a) There are no non-portable control characters belonging to the\n * \"black list\" (0..6, 14..25, 28..31).\n * b) There is at least one printable character belonging to the\n * \"white list\" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255).\n * - BINARY otherwise.\n * - The following partially-portable control characters form a\n * \"gray list\" that is ignored in this detection algorithm:\n * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}).\n * IN assertion: the fields Freq of dyn_ltree are set.\n */\nfunction detect_data_type(s) {\n /* black_mask is the bit mask of black-listed bytes\n * set bits 0..6, 14..25, and 28..31\n * 0xf3ffc07f = binary 11110011111111111100000001111111\n */\n var black_mask = 0xf3ffc07f;\n var n;\n\n /* Check for non-textual (\"black-listed\") bytes. */\n for (n = 0; n <= 31; n++, black_mask >>>= 1) {\n if ((black_mask & 1) && (s.dyn_ltree[n * 2]/*.Freq*/ !== 0)) {\n return Z_BINARY;\n }\n }\n\n /* Check for textual (\"white-listed\") bytes. 
*/\n if (s.dyn_ltree[9 * 2]/*.Freq*/ !== 0 || s.dyn_ltree[10 * 2]/*.Freq*/ !== 0 ||\n s.dyn_ltree[13 * 2]/*.Freq*/ !== 0) {\n return Z_TEXT;\n }\n for (n = 32; n < LITERALS; n++) {\n if (s.dyn_ltree[n * 2]/*.Freq*/ !== 0) {\n return Z_TEXT;\n }\n }\n\n /* There are no \"black-listed\" or \"white-listed\" bytes:\n * this stream either is empty or has tolerated (\"gray-listed\") bytes only.\n */\n return Z_BINARY;\n}\n\n\nvar static_init_done = false;\n\n/* ===========================================================================\n * Initialize the tree data structures for a new zlib stream.\n */\nfunction _tr_init(s)\n{\n\n if (!static_init_done) {\n tr_static_init();\n static_init_done = true;\n }\n\n s.l_desc = new TreeDesc(s.dyn_ltree, static_l_desc);\n s.d_desc = new TreeDesc(s.dyn_dtree, static_d_desc);\n s.bl_desc = new TreeDesc(s.bl_tree, static_bl_desc);\n\n s.bi_buf = 0;\n s.bi_valid = 0;\n\n /* Initialize the first block of the first file: */\n init_block(s);\n}\n\n\n/* ===========================================================================\n * Send a stored block\n */\nfunction _tr_stored_block(s, buf, stored_len, last)\n//DeflateState *s;\n//charf *buf; /* input block */\n//ulg stored_len; /* length of input block */\n//int last; /* one if this is the last block for a file */\n{\n send_bits(s, (STORED_BLOCK << 1) + (last ? 1 : 0), 3); /* send block type */\n copy_block(s, buf, stored_len, true); /* with header */\n}\n\n\n/* ===========================================================================\n * Send one empty static block to give enough lookahead for inflate.\n * This takes 10 bits, of which 7 may remain in the bit buffer.\n */\nfunction _tr_align(s) {\n send_bits(s, STATIC_TREES << 1, 3);\n send_code(s, END_BLOCK, static_ltree);\n bi_flush(s);\n}\n\n\n/* ===========================================================================\n * Determine the best encoding for the current block: dynamic trees, static\n * trees or store, and output the encoded block to the zip file.\n */\nfunction _tr_flush_block(s, buf, stored_len, last)\n//DeflateState *s;\n//charf *buf; /* input block, or NULL if too old */\n//ulg stored_len; /* length of input block */\n//int last; /* one if this is the last block for a file */\n{\n var opt_lenb, static_lenb; /* opt_len and static_len in bytes */\n var max_blindex = 0; /* index of last bit length code of non zero freq */\n\n /* Build the Huffman trees unless a stored block is forced */\n if (s.level > 0) {\n\n /* Check if the file is binary or text */\n if (s.strm.data_type === Z_UNKNOWN) {\n s.strm.data_type = detect_data_type(s);\n }\n\n /* Construct the literal and distance trees */\n build_tree(s, s.l_desc);\n // Tracev((stderr, \"\\nlit data: dyn %ld, stat %ld\", s->opt_len,\n // s->static_len));\n\n build_tree(s, s.d_desc);\n // Tracev((stderr, \"\\ndist data: dyn %ld, stat %ld\", s->opt_len,\n // s->static_len));\n /* At this point, opt_len and static_len are the total bit lengths of\n * the compressed block data, excluding the tree representations.\n */\n\n /* Build the bit length tree for the above two trees, and get the index\n * in bl_order of the last bit length code to send.\n */\n max_blindex = build_bl_tree(s);\n\n /* Determine the best encoding. Compute the block lengths in bytes. 
*/\n opt_lenb = (s.opt_len + 3 + 7) >>> 3;\n static_lenb = (s.static_len + 3 + 7) >>> 3;\n\n // Tracev((stderr, \"\\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u \",\n // opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,\n // s->last_lit));\n\n if (static_lenb <= opt_lenb) { opt_lenb = static_lenb; }\n\n } else {\n // Assert(buf != (char*)0, \"lost buf\");\n opt_lenb = static_lenb = stored_len + 5; /* force a stored block */\n }\n\n if ((stored_len + 4 <= opt_lenb) && (buf !== -1)) {\n /* 4: two words for the lengths */\n\n /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.\n * Otherwise we can't have processed more than WSIZE input bytes since\n * the last block flush, because compression would have been\n * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to\n * transform a block into a stored block.\n */\n _tr_stored_block(s, buf, stored_len, last);\n\n } else if (s.strategy === Z_FIXED || static_lenb === opt_lenb) {\n\n send_bits(s, (STATIC_TREES << 1) + (last ? 1 : 0), 3);\n compress_block(s, static_ltree, static_dtree);\n\n } else {\n send_bits(s, (DYN_TREES << 1) + (last ? 1 : 0), 3);\n send_all_trees(s, s.l_desc.max_code + 1, s.d_desc.max_code + 1, max_blindex + 1);\n compress_block(s, s.dyn_ltree, s.dyn_dtree);\n }\n // Assert (s->compressed_len == s->bits_sent, \"bad compressed size\");\n /* The above check is made mod 2^32, for files larger than 512 MB\n * and uLong implemented on 32 bits.\n */\n init_block(s);\n\n if (last) {\n bi_windup(s);\n }\n // Tracev((stderr,\"\\ncomprlen %lu(%lu) \", s->compressed_len>>3,\n // s->compressed_len-7*last));\n}\n\n/* ===========================================================================\n * Save the match info and tally the frequency counts. Return true if\n * the current block must be flushed.\n */\nfunction _tr_tally(s, dist, lc)\n// deflate_state *s;\n// unsigned dist; /* distance of matched string */\n// unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */\n{\n //var out_length, in_length, dcode;\n\n s.pending_buf[s.d_buf + s.last_lit * 2] = (dist >>> 8) & 0xff;\n s.pending_buf[s.d_buf + s.last_lit * 2 + 1] = dist & 0xff;\n\n s.pending_buf[s.l_buf + s.last_lit] = lc & 0xff;\n s.last_lit++;\n\n if (dist === 0) {\n /* lc is the unmatched char */\n s.dyn_ltree[lc * 2]/*.Freq*/++;\n } else {\n s.matches++;\n /* Here, lc is the match length - MIN_MATCH */\n dist--; /* dist = match distance - 1 */\n //Assert((ush)dist < (ush)MAX_DIST(s) &&\n // (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&\n // (ush)d_code(dist) < (ush)D_CODES, \"_tr_tally: bad match\");\n\n s.dyn_ltree[(_length_code[lc] + LITERALS + 1) * 2]/*.Freq*/++;\n s.dyn_dtree[d_code(dist) * 2]/*.Freq*/++;\n }\n\n// (!) 
This block is disabled in zlib defailts,\n// don't enable it for binary compatibility\n\n//#ifdef TRUNCATE_BLOCK\n// /* Try to guess if it is profitable to stop the current block here */\n// if ((s.last_lit & 0x1fff) === 0 && s.level > 2) {\n// /* Compute an upper bound for the compressed length */\n// out_length = s.last_lit*8;\n// in_length = s.strstart - s.block_start;\n//\n// for (dcode = 0; dcode < D_CODES; dcode++) {\n// out_length += s.dyn_dtree[dcode*2]/*.Freq*/ * (5 + extra_dbits[dcode]);\n// }\n// out_length >>>= 3;\n// //Tracev((stderr,\"\\nlast_lit %u, in %ld, out ~%ld(%ld%%) \",\n// // s->last_lit, in_length, out_length,\n// // 100L - out_length*100L/in_length));\n// if (s.matches < (s.last_lit>>1)/*int /2*/ && out_length < (in_length>>1)/*int /2*/) {\n// return true;\n// }\n// }\n//#endif\n\n return (s.last_lit === s.lit_bufsize - 1);\n /* We avoid equality with lit_bufsize because of wraparound at 64K\n * on 16 bit machines and because stored blocks are restricted to\n * 64K-1 bytes.\n */\n}\n\nexports._tr_init = _tr_init;\nexports._tr_stored_block = _tr_stored_block;\nexports._tr_flush_block = _tr_flush_block;\nexports._tr_tally = _tr_tally;\nexports._tr_align = _tr_align;\n","'use strict';\n\n// Note: adler32 takes 12% for level 0 and 2% for level 6.\n// It doesn't worth to make additional optimizationa as in original.\n// Small size is preferable.\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nfunction adler32(adler, buf, len, pos) {\n var s1 = (adler & 0xffff) |0,\n s2 = ((adler >>> 16) & 0xffff) |0,\n n = 0;\n\n while (len !== 0) {\n // Set limit ~ twice less than 5552, to keep\n // s2 in 31-bits, because we force signed ints.\n // in other case %= will fail.\n n = len > 2000 ? 2000 : len;\n len -= n;\n\n do {\n s1 = (s1 + buf[pos++]) |0;\n s2 = (s2 + s1) |0;\n } while (--n);\n\n s1 %= 65521;\n s2 %= 65521;\n }\n\n return (s1 | (s2 << 16)) |0;\n}\n\n\nmodule.exports = adler32;\n","'use strict';\n\n// Note: we can't get significant speed boost here.\n// So write code to minimize size - no pregenerated tables\n// and array tools dependencies.\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. 
The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\n// Use ordinary array, since untyped makes no boost here\nfunction makeTable() {\n var c, table = [];\n\n for (var n = 0; n < 256; n++) {\n c = n;\n for (var k = 0; k < 8; k++) {\n c = ((c & 1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1));\n }\n table[n] = c;\n }\n\n return table;\n}\n\n// Create table on load. Just 255 signed longs. Not a problem.\nvar crcTable = makeTable();\n\n\nfunction crc32(crc, buf, len, pos) {\n var t = crcTable,\n end = pos + len;\n\n crc ^= -1;\n\n for (var i = pos; i < end; i++) {\n crc = (crc >>> 8) ^ t[(crc ^ buf[i]) & 0xFF];\n }\n\n return (crc ^ (-1)); // >>> 0;\n}\n\n\nmodule.exports = crc32;\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nmodule.exports = {\n 2: 'need dictionary', /* Z_NEED_DICT 2 */\n 1: 'stream end', /* Z_STREAM_END 1 */\n 0: '', /* Z_OK 0 */\n '-1': 'file error', /* Z_ERRNO (-1) */\n '-2': 'stream error', /* Z_STREAM_ERROR (-2) */\n '-3': 'data error', /* Z_DATA_ERROR (-3) */\n '-4': 'insufficient memory', /* Z_MEM_ERROR (-4) */\n '-5': 'buffer error', /* Z_BUF_ERROR (-5) */\n '-6': 'incompatible version' /* Z_VERSION_ERROR (-6) */\n};\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. 
This notice may not be removed or altered from any source distribution.\n\nvar utils = require('../utils/common');\nvar trees = require('./trees');\nvar adler32 = require('./adler32');\nvar crc32 = require('./crc32');\nvar msg = require('./messages');\n\n/* Public constants ==========================================================*/\n/* ===========================================================================*/\n\n\n/* Allowed flush values; see deflate() and inflate() below for details */\nvar Z_NO_FLUSH = 0;\nvar Z_PARTIAL_FLUSH = 1;\n//var Z_SYNC_FLUSH = 2;\nvar Z_FULL_FLUSH = 3;\nvar Z_FINISH = 4;\nvar Z_BLOCK = 5;\n//var Z_TREES = 6;\n\n\n/* Return codes for the compression/decompression functions. Negative values\n * are errors, positive values are used for special but normal events.\n */\nvar Z_OK = 0;\nvar Z_STREAM_END = 1;\n//var Z_NEED_DICT = 2;\n//var Z_ERRNO = -1;\nvar Z_STREAM_ERROR = -2;\nvar Z_DATA_ERROR = -3;\n//var Z_MEM_ERROR = -4;\nvar Z_BUF_ERROR = -5;\n//var Z_VERSION_ERROR = -6;\n\n\n/* compression levels */\n//var Z_NO_COMPRESSION = 0;\n//var Z_BEST_SPEED = 1;\n//var Z_BEST_COMPRESSION = 9;\nvar Z_DEFAULT_COMPRESSION = -1;\n\n\nvar Z_FILTERED = 1;\nvar Z_HUFFMAN_ONLY = 2;\nvar Z_RLE = 3;\nvar Z_FIXED = 4;\nvar Z_DEFAULT_STRATEGY = 0;\n\n/* Possible values of the data_type field (though see inflate()) */\n//var Z_BINARY = 0;\n//var Z_TEXT = 1;\n//var Z_ASCII = 1; // = Z_TEXT\nvar Z_UNKNOWN = 2;\n\n\n/* The deflate compression method */\nvar Z_DEFLATED = 8;\n\n/*============================================================================*/\n\n\nvar MAX_MEM_LEVEL = 9;\n/* Maximum value for memLevel in deflateInit2 */\nvar MAX_WBITS = 15;\n/* 32K LZ77 window */\nvar DEF_MEM_LEVEL = 8;\n\n\nvar LENGTH_CODES = 29;\n/* number of length codes, not counting the special END_BLOCK code */\nvar LITERALS = 256;\n/* number of literal bytes 0..255 */\nvar L_CODES = LITERALS + 1 + LENGTH_CODES;\n/* number of Literal or Length codes, including the END_BLOCK code */\nvar D_CODES = 30;\n/* number of distance codes */\nvar BL_CODES = 19;\n/* number of codes used to transfer the bit lengths */\nvar HEAP_SIZE = 2 * L_CODES + 1;\n/* maximum heap size */\nvar MAX_BITS = 15;\n/* All codes must not exceed MAX_BITS bits */\n\nvar MIN_MATCH = 3;\nvar MAX_MATCH = 258;\nvar MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1);\n\nvar PRESET_DICT = 0x20;\n\nvar INIT_STATE = 42;\nvar EXTRA_STATE = 69;\nvar NAME_STATE = 73;\nvar COMMENT_STATE = 91;\nvar HCRC_STATE = 103;\nvar BUSY_STATE = 113;\nvar FINISH_STATE = 666;\n\nvar BS_NEED_MORE = 1; /* block not completed, need more input or more output */\nvar BS_BLOCK_DONE = 2; /* block flush performed */\nvar BS_FINISH_STARTED = 3; /* finish started, need only more output at next deflate */\nvar BS_FINISH_DONE = 4; /* finish done, accept no more input or output */\n\nvar OS_CODE = 0x03; // Unix :) . Don't detect, use this default.\n\nfunction err(strm, errorCode) {\n strm.msg = msg[errorCode];\n return errorCode;\n}\n\nfunction rank(f) {\n return ((f) << 1) - ((f) > 4 ? 9 : 0);\n}\n\nfunction zero(buf) { var len = buf.length; while (--len >= 0) { buf[len] = 0; } }\n\n\n/* =========================================================================\n * Flush as much pending output as possible. 
All deflate() output goes\n * through this function so some applications may wish to modify it\n * to avoid allocating a large strm->output buffer and copying into it.\n * (See also read_buf()).\n */\nfunction flush_pending(strm) {\n var s = strm.state;\n\n //_tr_flush_bits(s);\n var len = s.pending;\n if (len > strm.avail_out) {\n len = strm.avail_out;\n }\n if (len === 0) { return; }\n\n utils.arraySet(strm.output, s.pending_buf, s.pending_out, len, strm.next_out);\n strm.next_out += len;\n s.pending_out += len;\n strm.total_out += len;\n strm.avail_out -= len;\n s.pending -= len;\n if (s.pending === 0) {\n s.pending_out = 0;\n }\n}\n\n\nfunction flush_block_only(s, last) {\n trees._tr_flush_block(s, (s.block_start >= 0 ? s.block_start : -1), s.strstart - s.block_start, last);\n s.block_start = s.strstart;\n flush_pending(s.strm);\n}\n\n\nfunction put_byte(s, b) {\n s.pending_buf[s.pending++] = b;\n}\n\n\n/* =========================================================================\n * Put a short in the pending buffer. The 16-bit value is put in MSB order.\n * IN assertion: the stream state is correct and there is enough room in\n * pending_buf.\n */\nfunction putShortMSB(s, b) {\n// put_byte(s, (Byte)(b >> 8));\n// put_byte(s, (Byte)(b & 0xff));\n s.pending_buf[s.pending++] = (b >>> 8) & 0xff;\n s.pending_buf[s.pending++] = b & 0xff;\n}\n\n\n/* ===========================================================================\n * Read a new buffer from the current input stream, update the adler32\n * and total number of bytes read. All deflate() input goes through\n * this function so some applications may wish to modify it to avoid\n * allocating a large strm->input buffer and copying from it.\n * (See also flush_pending()).\n */\nfunction read_buf(strm, buf, start, size) {\n var len = strm.avail_in;\n\n if (len > size) { len = size; }\n if (len === 0) { return 0; }\n\n strm.avail_in -= len;\n\n // zmemcpy(buf, strm->next_in, len);\n utils.arraySet(buf, strm.input, strm.next_in, len, start);\n if (strm.state.wrap === 1) {\n strm.adler = adler32(strm.adler, buf, len, start);\n }\n\n else if (strm.state.wrap === 2) {\n strm.adler = crc32(strm.adler, buf, len, start);\n }\n\n strm.next_in += len;\n strm.total_in += len;\n\n return len;\n}\n\n\n/* ===========================================================================\n * Set match_start to the longest match starting at the given string and\n * return its length. Matches shorter or equal to prev_length are discarded,\n * in which case the result is equal to prev_length and match_start is\n * garbage.\n * IN assertions: cur_match is the head of the hash chain for the current\n * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1\n * OUT assertion: the match length is not greater than s->lookahead.\n */\nfunction longest_match(s, cur_match) {\n var chain_length = s.max_chain_length; /* max hash chain length */\n var scan = s.strstart; /* current string */\n var match; /* matched string */\n var len; /* length of current match */\n var best_len = s.prev_length; /* best match length so far */\n var nice_match = s.nice_match; /* stop if match long enough */\n var limit = (s.strstart > (s.w_size - MIN_LOOKAHEAD)) ?\n s.strstart - (s.w_size - MIN_LOOKAHEAD) : 0/*NIL*/;\n\n var _win = s.window; // shortcut\n\n var wmask = s.w_mask;\n var prev = s.prev;\n\n /* Stop when cur_match becomes <= limit. 
To simplify the code,\n * we prevent matches with the string of window index 0.\n */\n\n var strend = s.strstart + MAX_MATCH;\n var scan_end1 = _win[scan + best_len - 1];\n var scan_end = _win[scan + best_len];\n\n /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.\n * It is easy to get rid of this optimization if necessary.\n */\n // Assert(s->hash_bits >= 8 && MAX_MATCH == 258, \"Code too clever\");\n\n /* Do not waste too much time if we already have a good match: */\n if (s.prev_length >= s.good_match) {\n chain_length >>= 2;\n }\n /* Do not look for matches beyond the end of the input. This is necessary\n * to make deflate deterministic.\n */\n if (nice_match > s.lookahead) { nice_match = s.lookahead; }\n\n // Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, \"need lookahead\");\n\n do {\n // Assert(cur_match < s->strstart, \"no future\");\n match = cur_match;\n\n /* Skip to next match if the match length cannot increase\n * or if the match length is less than 2. Note that the checks below\n * for insufficient lookahead only occur occasionally for performance\n * reasons. Therefore uninitialized memory will be accessed, and\n * conditional jumps will be made that depend on those values.\n * However the length of the match is limited to the lookahead, so\n * the output of deflate is not affected by the uninitialized values.\n */\n\n if (_win[match + best_len] !== scan_end ||\n _win[match + best_len - 1] !== scan_end1 ||\n _win[match] !== _win[scan] ||\n _win[++match] !== _win[scan + 1]) {\n continue;\n }\n\n /* The check at best_len-1 can be removed because it will be made\n * again later. (This heuristic is not always a win.)\n * It is not necessary to compare scan[2] and match[2] since they\n * are always equal when the other bytes match, given that\n * the hash keys are equal and that HASH_BITS >= 8.\n */\n scan += 2;\n match++;\n // Assert(*scan == *match, \"match[2]?\");\n\n /* We check for insufficient lookahead only every 8th comparison;\n * the 256th check will be made at strstart+258.\n */\n do {\n /*jshint noempty:false*/\n } while (_win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&\n _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&\n _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&\n _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&\n scan < strend);\n\n // Assert(scan <= s->window+(unsigned)(s->window_size-1), \"wild scan\");\n\n len = MAX_MATCH - (strend - scan);\n scan = strend - MAX_MATCH;\n\n if (len > best_len) {\n s.match_start = cur_match;\n best_len = len;\n if (len >= nice_match) {\n break;\n }\n scan_end1 = _win[scan + best_len - 1];\n scan_end = _win[scan + best_len];\n }\n } while ((cur_match = prev[cur_match & wmask]) > limit && --chain_length !== 0);\n\n if (best_len <= s.lookahead) {\n return best_len;\n }\n return s.lookahead;\n}\n\n\n/* ===========================================================================\n * Fill the window when the lookahead becomes insufficient.\n * Updates strstart and lookahead.\n *\n * IN assertion: lookahead < MIN_LOOKAHEAD\n * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD\n * At least one byte has been read, or avail_in == 0; reads are\n * performed for at least two bytes (required for the zip translate_eol\n * option -- not supported here).\n */\nfunction fill_window(s) {\n var _w_size = s.w_size;\n var p, n, m, more, str;\n\n //Assert(s->lookahead < MIN_LOOKAHEAD, \"already enough lookahead\");\n\n do {\n more = 
s.window_size - s.lookahead - s.strstart;\n\n // JS ints have 32 bit, block below not needed\n /* Deal with !@#$% 64K limit: */\n //if (sizeof(int) <= 2) {\n // if (more == 0 && s->strstart == 0 && s->lookahead == 0) {\n // more = wsize;\n //\n // } else if (more == (unsigned)(-1)) {\n // /* Very unlikely, but possible on 16 bit machine if\n // * strstart == 0 && lookahead == 1 (input done a byte at time)\n // */\n // more--;\n // }\n //}\n\n\n /* If the window is almost full and there is insufficient lookahead,\n * move the upper half to the lower one to make room in the upper half.\n */\n if (s.strstart >= _w_size + (_w_size - MIN_LOOKAHEAD)) {\n\n utils.arraySet(s.window, s.window, _w_size, _w_size, 0);\n s.match_start -= _w_size;\n s.strstart -= _w_size;\n /* we now have strstart >= MAX_DIST */\n s.block_start -= _w_size;\n\n /* Slide the hash table (could be avoided with 32 bit values\n at the expense of memory usage). We slide even when level == 0\n to keep the hash table consistent if we switch back to level > 0\n later. (Using level 0 permanently is not an optimal usage of\n zlib, so we don't care about this pathological case.)\n */\n\n n = s.hash_size;\n p = n;\n do {\n m = s.head[--p];\n s.head[p] = (m >= _w_size ? m - _w_size : 0);\n } while (--n);\n\n n = _w_size;\n p = n;\n do {\n m = s.prev[--p];\n s.prev[p] = (m >= _w_size ? m - _w_size : 0);\n /* If n is not on any hash chain, prev[n] is garbage but\n * its value will never be used.\n */\n } while (--n);\n\n more += _w_size;\n }\n if (s.strm.avail_in === 0) {\n break;\n }\n\n /* If there was no sliding:\n * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&\n * more == window_size - lookahead - strstart\n * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)\n * => more >= window_size - 2*WSIZE + 2\n * In the BIG_MEM or MMAP case (not yet supported),\n * window_size == input_size + MIN_LOOKAHEAD &&\n * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.\n * Otherwise, window_size == 2*WSIZE so more >= 2.\n * If there was sliding, more >= WSIZE. So in all cases, more >= 2.\n */\n //Assert(more >= 2, \"more < 2\");\n n = read_buf(s.strm, s.window, s.strstart + s.lookahead, more);\n s.lookahead += n;\n\n /* Initialize the hash value now that we have some input: */\n if (s.lookahead + s.insert >= MIN_MATCH) {\n str = s.strstart - s.insert;\n s.ins_h = s.window[str];\n\n /* UPDATE_HASH(s, s->ins_h, s->window[str + 1]); */\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + 1]) & s.hash_mask;\n//#if MIN_MATCH != 3\n// Call update_hash() MIN_MATCH-3 more times\n//#endif\n while (s.insert) {\n /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + MIN_MATCH - 1]) & s.hash_mask;\n\n s.prev[str & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = str;\n str++;\n s.insert--;\n if (s.lookahead + s.insert < MIN_MATCH) {\n break;\n }\n }\n }\n /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,\n * but this is not important since only literal bytes will be emitted.\n */\n\n } while (s.lookahead < MIN_LOOKAHEAD && s.strm.avail_in !== 0);\n\n /* If the WIN_INIT bytes after the end of the current data have never been\n * written, then zero those bytes in order to avoid memory check reports of\n * the use of uninitialized (or uninitialised as Julian writes) bytes by\n * the longest match routines. Update the high water mark for the next\n * time through here. 
WIN_INIT is set to MAX_MATCH since the longest match\n * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.\n */\n// if (s.high_water < s.window_size) {\n// var curr = s.strstart + s.lookahead;\n// var init = 0;\n//\n// if (s.high_water < curr) {\n// /* Previous high water mark below current data -- zero WIN_INIT\n// * bytes or up to end of window, whichever is less.\n// */\n// init = s.window_size - curr;\n// if (init > WIN_INIT)\n// init = WIN_INIT;\n// zmemzero(s->window + curr, (unsigned)init);\n// s->high_water = curr + init;\n// }\n// else if (s->high_water < (ulg)curr + WIN_INIT) {\n// /* High water mark at or above current data, but below current data\n// * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up\n// * to end of window, whichever is less.\n// */\n// init = (ulg)curr + WIN_INIT - s->high_water;\n// if (init > s->window_size - s->high_water)\n// init = s->window_size - s->high_water;\n// zmemzero(s->window + s->high_water, (unsigned)init);\n// s->high_water += init;\n// }\n// }\n//\n// Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,\n// \"not enough room for search\");\n}\n\n/* ===========================================================================\n * Copy without compression as much as possible from the input stream, return\n * the current block state.\n * This function does not insert new strings in the dictionary since\n * uncompressible data is probably not useful. This function is used\n * only for the level=0 compression option.\n * NOTE: this function should be optimized to avoid extra copying from\n * window to pending_buf.\n */\nfunction deflate_stored(s, flush) {\n /* Stored blocks are limited to 0xffff bytes, pending_buf is limited\n * to pending_buf_size, and each stored block has a 5 byte header:\n */\n var max_block_size = 0xffff;\n\n if (max_block_size > s.pending_buf_size - 5) {\n max_block_size = s.pending_buf_size - 5;\n }\n\n /* Copy as much as possible from input to output: */\n for (;;) {\n /* Fill the window as much as possible: */\n if (s.lookahead <= 1) {\n\n //Assert(s->strstart < s->w_size+MAX_DIST(s) ||\n // s->block_start >= (long)s->w_size, \"slide too late\");\n// if (!(s.strstart < s.w_size + (s.w_size - MIN_LOOKAHEAD) ||\n// s.block_start >= s.w_size)) {\n// throw new Error(\"slide too late\");\n// }\n\n fill_window(s);\n if (s.lookahead === 0 && flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n\n if (s.lookahead === 0) {\n break;\n }\n /* flush the current block */\n }\n //Assert(s->block_start >= 0L, \"block gone\");\n// if (s.block_start < 0) throw new Error(\"block gone\");\n\n s.strstart += s.lookahead;\n s.lookahead = 0;\n\n /* Emit a stored block if pending_buf will be full: */\n var max_start = s.block_start + max_block_size;\n\n if (s.strstart === 0 || s.strstart >= max_start) {\n /* strstart == 0 is possible when wraparound on 16-bit machine */\n s.lookahead = s.strstart - max_start;\n s.strstart = max_start;\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n\n\n }\n /* Flush if we may have to slide, otherwise block_start may become\n * negative and the data will be gone:\n */\n if (s.strstart - s.block_start >= (s.w_size - MIN_LOOKAHEAD)) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n }\n\n s.insert = 0;\n\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out 
=== 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n\n if (s.strstart > s.block_start) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n\n return BS_NEED_MORE;\n}\n\n/* ===========================================================================\n * Compress as much as possible from the input stream, return the current\n * block state.\n * This function does not perform lazy evaluation of matches and inserts\n * new strings in the dictionary only for unmatched strings or for short\n * matches. It is used only for the fast compression options.\n */\nfunction deflate_fast(s, flush) {\n var hash_head; /* head of the hash chain */\n var bflush; /* set if current block must be flushed */\n\n for (;;) {\n /* Make sure that we always have enough lookahead, except\n * at the end of the input file. We need MAX_MATCH bytes\n * for the next match, plus MIN_MATCH bytes to insert the\n * string following the next match.\n */\n if (s.lookahead < MIN_LOOKAHEAD) {\n fill_window(s);\n if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n if (s.lookahead === 0) {\n break; /* flush the current block */\n }\n }\n\n /* Insert the string window[strstart .. strstart+2] in the\n * dictionary, and set hash_head to the head of the hash chain:\n */\n hash_head = 0/*NIL*/;\n if (s.lookahead >= MIN_MATCH) {\n /*** INSERT_STRING(s, s.strstart, hash_head); ***/\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;\n hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = s.strstart;\n /***/\n }\n\n /* Find the longest match, discarding those <= prev_length.\n * At this point we have always match_length < MIN_MATCH\n */\n if (hash_head !== 0/*NIL*/ && ((s.strstart - hash_head) <= (s.w_size - MIN_LOOKAHEAD))) {\n /* To simplify the code, we prevent matches with the string\n * of window index 0 (in particular we have to avoid a match\n * of the string with itself at the start of the input file).\n */\n s.match_length = longest_match(s, hash_head);\n /* longest_match() sets match_start */\n }\n if (s.match_length >= MIN_MATCH) {\n // check_match(s, s.strstart, s.match_start, s.match_length); // for debug only\n\n /*** _tr_tally_dist(s, s.strstart - s.match_start,\n s.match_length - MIN_MATCH, bflush); ***/\n bflush = trees._tr_tally(s, s.strstart - s.match_start, s.match_length - MIN_MATCH);\n\n s.lookahead -= s.match_length;\n\n /* Insert new strings in the hash table only if the match length\n * is not too large. 
This saves time but degrades compression.\n */\n if (s.match_length <= s.max_lazy_match/*max_insert_length*/ && s.lookahead >= MIN_MATCH) {\n s.match_length--; /* string at strstart already in table */\n do {\n s.strstart++;\n /*** INSERT_STRING(s, s.strstart, hash_head); ***/\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;\n hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = s.strstart;\n /***/\n /* strstart never exceeds WSIZE-MAX_MATCH, so there are\n * always MIN_MATCH bytes ahead.\n */\n } while (--s.match_length !== 0);\n s.strstart++;\n } else\n {\n s.strstart += s.match_length;\n s.match_length = 0;\n s.ins_h = s.window[s.strstart];\n /* UPDATE_HASH(s, s.ins_h, s.window[s.strstart+1]); */\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + 1]) & s.hash_mask;\n\n//#if MIN_MATCH != 3\n// Call UPDATE_HASH() MIN_MATCH-3 more times\n//#endif\n /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not\n * matter since it will be recomputed at next deflate call.\n */\n }\n } else {\n /* No match, output a literal byte */\n //Tracevv((stderr,\"%c\", s.window[s.strstart]));\n /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/\n bflush = trees._tr_tally(s, 0, s.window[s.strstart]);\n\n s.lookahead--;\n s.strstart++;\n }\n if (bflush) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n }\n s.insert = ((s.strstart < (MIN_MATCH - 1)) ? s.strstart : MIN_MATCH - 1);\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out === 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n if (s.last_lit) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n return BS_BLOCK_DONE;\n}\n\n/* ===========================================================================\n * Same as above, but achieves better compression. We use a lazy\n * evaluation for matches: a match is finally adopted only if there is\n * no better match at the next window position.\n */\nfunction deflate_slow(s, flush) {\n var hash_head; /* head of hash chain */\n var bflush; /* set if current block must be flushed */\n\n var max_insert;\n\n /* Process the input block. */\n for (;;) {\n /* Make sure that we always have enough lookahead, except\n * at the end of the input file. We need MAX_MATCH bytes\n * for the next match, plus MIN_MATCH bytes to insert the\n * string following the next match.\n */\n if (s.lookahead < MIN_LOOKAHEAD) {\n fill_window(s);\n if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n if (s.lookahead === 0) { break; } /* flush the current block */\n }\n\n /* Insert the string window[strstart .. 
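// --- Illustrative sketch only, not part of the generated bundle ---
// The INSERT_STRING / UPDATE_HASH expansions used throughout fill_window and
// deflate_fast above all reduce to the same rolling hash: shift the running
// hash left by hash_shift, XOR in the newest byte, and mask with hash_mask.
// hash_shift is chosen as ceil(hash_bits / MIN_MATCH) so that after
// MIN_MATCH (= 3) updates the oldest byte has been shifted out of the masked
// range. The standalone helper below is hypothetical and only restates that
// update rule.
function rollingHash(window, start, hashBits) {
  var MIN_MATCH = 3;
  var hashShift = Math.ceil(hashBits / MIN_MATCH);
  var hashMask = (1 << hashBits) - 1;
  var h = 0;
  for (var k = 0; k < MIN_MATCH; k++) {
    h = ((h << hashShift) ^ window[start + k]) & hashMask;
  }
  return h; // same value deflate keeps in s.ins_h for the string at `start`
}
// e.g. rollingHash(new Uint8Array([0x61, 0x62, 0x63]), 0, 15)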
strstart+2] in the\n * dictionary, and set hash_head to the head of the hash chain:\n */\n hash_head = 0/*NIL*/;\n if (s.lookahead >= MIN_MATCH) {\n /*** INSERT_STRING(s, s.strstart, hash_head); ***/\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;\n hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = s.strstart;\n /***/\n }\n\n /* Find the longest match, discarding those <= prev_length.\n */\n s.prev_length = s.match_length;\n s.prev_match = s.match_start;\n s.match_length = MIN_MATCH - 1;\n\n if (hash_head !== 0/*NIL*/ && s.prev_length < s.max_lazy_match &&\n s.strstart - hash_head <= (s.w_size - MIN_LOOKAHEAD)/*MAX_DIST(s)*/) {\n /* To simplify the code, we prevent matches with the string\n * of window index 0 (in particular we have to avoid a match\n * of the string with itself at the start of the input file).\n */\n s.match_length = longest_match(s, hash_head);\n /* longest_match() sets match_start */\n\n if (s.match_length <= 5 &&\n (s.strategy === Z_FILTERED || (s.match_length === MIN_MATCH && s.strstart - s.match_start > 4096/*TOO_FAR*/))) {\n\n /* If prev_match is also MIN_MATCH, match_start is garbage\n * but we will ignore the current match anyway.\n */\n s.match_length = MIN_MATCH - 1;\n }\n }\n /* If there was a match at the previous step and the current\n * match is not better, output the previous match:\n */\n if (s.prev_length >= MIN_MATCH && s.match_length <= s.prev_length) {\n max_insert = s.strstart + s.lookahead - MIN_MATCH;\n /* Do not insert strings in hash table beyond this. */\n\n //check_match(s, s.strstart-1, s.prev_match, s.prev_length);\n\n /***_tr_tally_dist(s, s.strstart - 1 - s.prev_match,\n s.prev_length - MIN_MATCH, bflush);***/\n bflush = trees._tr_tally(s, s.strstart - 1 - s.prev_match, s.prev_length - MIN_MATCH);\n /* Insert in hash table all strings up to the end of the match.\n * strstart-1 and strstart are already inserted. If there is not\n * enough lookahead, the last two strings are not inserted in\n * the hash table.\n */\n s.lookahead -= s.prev_length - 1;\n s.prev_length -= 2;\n do {\n if (++s.strstart <= max_insert) {\n /*** INSERT_STRING(s, s.strstart, hash_head); ***/\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;\n hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = s.strstart;\n /***/\n }\n } while (--s.prev_length !== 0);\n s.match_available = 0;\n s.match_length = MIN_MATCH - 1;\n s.strstart++;\n\n if (bflush) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n\n } else if (s.match_available) {\n /* If there was no match at the previous position, output a\n * single literal. 
If there was a match but the current match\n * is longer, truncate the previous match to a single literal.\n */\n //Tracevv((stderr,\"%c\", s->window[s->strstart-1]));\n /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/\n bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]);\n\n if (bflush) {\n /*** FLUSH_BLOCK_ONLY(s, 0) ***/\n flush_block_only(s, false);\n /***/\n }\n s.strstart++;\n s.lookahead--;\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n } else {\n /* There is no previous match to compare with, wait for\n * the next step to decide.\n */\n s.match_available = 1;\n s.strstart++;\n s.lookahead--;\n }\n }\n //Assert (flush != Z_NO_FLUSH, \"no flush?\");\n if (s.match_available) {\n //Tracevv((stderr,\"%c\", s->window[s->strstart-1]));\n /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/\n bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]);\n\n s.match_available = 0;\n }\n s.insert = s.strstart < MIN_MATCH - 1 ? s.strstart : MIN_MATCH - 1;\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out === 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n if (s.last_lit) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n\n return BS_BLOCK_DONE;\n}\n\n\n/* ===========================================================================\n * For Z_RLE, simply look for runs of bytes, generate matches only of distance\n * one. Do not maintain a hash table. (It will be regenerated if this run of\n * deflate switches away from Z_RLE.)\n */\nfunction deflate_rle(s, flush) {\n var bflush; /* set if current block must be flushed */\n var prev; /* byte at distance one to match */\n var scan, strend; /* scan goes up to strend for length of run */\n\n var _win = s.window;\n\n for (;;) {\n /* Make sure that we always have enough lookahead, except\n * at the end of the input file. 
We need MAX_MATCH bytes\n * for the longest run, plus one for the unrolled loop.\n */\n if (s.lookahead <= MAX_MATCH) {\n fill_window(s);\n if (s.lookahead <= MAX_MATCH && flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n if (s.lookahead === 0) { break; } /* flush the current block */\n }\n\n /* See how many times the previous byte repeats */\n s.match_length = 0;\n if (s.lookahead >= MIN_MATCH && s.strstart > 0) {\n scan = s.strstart - 1;\n prev = _win[scan];\n if (prev === _win[++scan] && prev === _win[++scan] && prev === _win[++scan]) {\n strend = s.strstart + MAX_MATCH;\n do {\n /*jshint noempty:false*/\n } while (prev === _win[++scan] && prev === _win[++scan] &&\n prev === _win[++scan] && prev === _win[++scan] &&\n prev === _win[++scan] && prev === _win[++scan] &&\n prev === _win[++scan] && prev === _win[++scan] &&\n scan < strend);\n s.match_length = MAX_MATCH - (strend - scan);\n if (s.match_length > s.lookahead) {\n s.match_length = s.lookahead;\n }\n }\n //Assert(scan <= s->window+(uInt)(s->window_size-1), \"wild scan\");\n }\n\n /* Emit match if have run of MIN_MATCH or longer, else emit literal */\n if (s.match_length >= MIN_MATCH) {\n //check_match(s, s.strstart, s.strstart - 1, s.match_length);\n\n /*** _tr_tally_dist(s, 1, s.match_length - MIN_MATCH, bflush); ***/\n bflush = trees._tr_tally(s, 1, s.match_length - MIN_MATCH);\n\n s.lookahead -= s.match_length;\n s.strstart += s.match_length;\n s.match_length = 0;\n } else {\n /* No match, output a literal byte */\n //Tracevv((stderr,\"%c\", s->window[s->strstart]));\n /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/\n bflush = trees._tr_tally(s, 0, s.window[s.strstart]);\n\n s.lookahead--;\n s.strstart++;\n }\n if (bflush) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n }\n s.insert = 0;\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out === 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n if (s.last_lit) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n return BS_BLOCK_DONE;\n}\n\n/* ===========================================================================\n * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table.\n * (It will be regenerated if this run of deflate switches away from Huffman.)\n */\nfunction deflate_huff(s, flush) {\n var bflush; /* set if current block must be flushed */\n\n for (;;) {\n /* Make sure that we have a literal to write. 
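// --- Illustrative sketch only, not part of the generated bundle ---
// deflate_rle above only ever emits distance-1 matches: it checks how many
// times the byte just before strstart repeats and caps the run at
// MAX_MATCH (= 258); runs shorter than MIN_MATCH (= 3) are emitted as
// literals. The hypothetical helper below restates that scan without the
// unrolled comparisons or window bookkeeping.
function runLengthAt(buf, strstart, lookahead) {
  var MAX_MATCH = 258;
  if (strstart === 0 || lookahead < 3) { return 0; }
  var prev = buf[strstart - 1];
  var len = 0;
  while (len < MAX_MATCH && len < lookahead && buf[strstart + len] === prev) {
    len++;
  }
  return len >= 3 ? len : 0; // short runs fall back to literal output
}
// e.g. runLengthAt(new Uint8Array([7, 7, 7, 7, 7, 8]), 1, 5) === 4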
*/\n if (s.lookahead === 0) {\n fill_window(s);\n if (s.lookahead === 0) {\n if (flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n break; /* flush the current block */\n }\n }\n\n /* Output a literal byte */\n s.match_length = 0;\n //Tracevv((stderr,\"%c\", s->window[s->strstart]));\n /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/\n bflush = trees._tr_tally(s, 0, s.window[s.strstart]);\n s.lookahead--;\n s.strstart++;\n if (bflush) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n }\n s.insert = 0;\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out === 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n if (s.last_lit) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n return BS_BLOCK_DONE;\n}\n\n/* Values for max_lazy_match, good_match and max_chain_length, depending on\n * the desired pack level (0..9). The values given below have been tuned to\n * exclude worst case performance for pathological files. Better values may be\n * found for specific files.\n */\nfunction Config(good_length, max_lazy, nice_length, max_chain, func) {\n this.good_length = good_length;\n this.max_lazy = max_lazy;\n this.nice_length = nice_length;\n this.max_chain = max_chain;\n this.func = func;\n}\n\nvar configuration_table;\n\nconfiguration_table = [\n /* good lazy nice chain */\n new Config(0, 0, 0, 0, deflate_stored), /* 0 store only */\n new Config(4, 4, 8, 4, deflate_fast), /* 1 max speed, no lazy matches */\n new Config(4, 5, 16, 8, deflate_fast), /* 2 */\n new Config(4, 6, 32, 32, deflate_fast), /* 3 */\n\n new Config(4, 4, 16, 16, deflate_slow), /* 4 lazy matches */\n new Config(8, 16, 32, 32, deflate_slow), /* 5 */\n new Config(8, 16, 128, 128, deflate_slow), /* 6 */\n new Config(8, 32, 128, 256, deflate_slow), /* 7 */\n new Config(32, 128, 258, 1024, deflate_slow), /* 8 */\n new Config(32, 258, 258, 4096, deflate_slow) /* 9 max compression */\n];\n\n\n/* ===========================================================================\n * Initialize the \"longest match\" routines for a new zlib stream\n */\nfunction lm_init(s) {\n s.window_size = 2 * s.w_size;\n\n /*** CLEAR_HASH(s); ***/\n zero(s.head); // Fill with NIL (= 0);\n\n /* Set the default configuration parameters:\n */\n s.max_lazy_match = configuration_table[s.level].max_lazy;\n s.good_match = configuration_table[s.level].good_length;\n s.nice_match = configuration_table[s.level].nice_length;\n s.max_chain_length = configuration_table[s.level].max_chain;\n\n s.strstart = 0;\n s.block_start = 0;\n s.lookahead = 0;\n s.insert = 0;\n s.match_length = s.prev_length = MIN_MATCH - 1;\n s.match_available = 0;\n s.ins_h = 0;\n}\n\n\nfunction DeflateState() {\n this.strm = null; /* pointer back to this zlib stream */\n this.status = 0; /* as the name implies */\n this.pending_buf = null; /* output still pending */\n this.pending_buf_size = 0; /* size of pending_buf */\n this.pending_out = 0; /* next pending byte to output to the stream */\n this.pending = 0; /* nb of bytes in the pending buffer */\n this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */\n this.gzhead = null; /* gzip header information to write */\n this.gzindex = 0; /* where in extra, name, or comment */\n this.method = Z_DEFLATED; /* can only be DEFLATED */\n this.last_flush = -1; /* value of flush param for 
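// --- Illustrative sketch only, not part of the generated bundle ---
// configuration_table above is what ties the user-facing compression level to
// a block strategy: level 0 stores data verbatim, levels 1-3 use the greedy
// matcher, and levels 4-9 use the lazy matcher with progressively larger
// good/lazy/nice/chain limits. The helper name below is hypothetical.
function strategyForLevel(level) {
  if (level === 0) { return 'deflate_stored'; } // store only, no matching
  if (level <= 3) { return 'deflate_fast'; }    // greedy matches, short chains
  return 'deflate_slow';                        // lazy evaluation of matches
}
// e.g. strategyForLevel(6) === 'deflate_slow'  (6 is the default level)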
previous deflate call */\n\n this.w_size = 0; /* LZ77 window size (32K by default) */\n this.w_bits = 0; /* log2(w_size) (8..16) */\n this.w_mask = 0; /* w_size - 1 */\n\n this.window = null;\n /* Sliding window. Input bytes are read into the second half of the window,\n * and move to the first half later to keep a dictionary of at least wSize\n * bytes. With this organization, matches are limited to a distance of\n * wSize-MAX_MATCH bytes, but this ensures that IO is always\n * performed with a length multiple of the block size.\n */\n\n this.window_size = 0;\n /* Actual size of window: 2*wSize, except when the user input buffer\n * is directly used as sliding window.\n */\n\n this.prev = null;\n /* Link to older string with same hash index. To limit the size of this\n * array to 64K, this link is maintained only for the last 32K strings.\n * An index in this array is thus a window index modulo 32K.\n */\n\n this.head = null; /* Heads of the hash chains or NIL. */\n\n this.ins_h = 0; /* hash index of string to be inserted */\n this.hash_size = 0; /* number of elements in hash table */\n this.hash_bits = 0; /* log2(hash_size) */\n this.hash_mask = 0; /* hash_size-1 */\n\n this.hash_shift = 0;\n /* Number of bits by which ins_h must be shifted at each input\n * step. It must be such that after MIN_MATCH steps, the oldest\n * byte no longer takes part in the hash key, that is:\n * hash_shift * MIN_MATCH >= hash_bits\n */\n\n this.block_start = 0;\n /* Window position at the beginning of the current output block. Gets\n * negative when the window is moved backwards.\n */\n\n this.match_length = 0; /* length of best match */\n this.prev_match = 0; /* previous match */\n this.match_available = 0; /* set if previous match exists */\n this.strstart = 0; /* start of string to insert */\n this.match_start = 0; /* start of matching string */\n this.lookahead = 0; /* number of valid bytes ahead in window */\n\n this.prev_length = 0;\n /* Length of the best match at previous step. Matches not greater than this\n * are discarded. This is used in the lazy match evaluation.\n */\n\n this.max_chain_length = 0;\n /* To speed up deflation, hash chains are never searched beyond this\n * length. A higher limit improves compression ratio but degrades the\n * speed.\n */\n\n this.max_lazy_match = 0;\n /* Attempt to find a better match only when the current match is strictly\n * smaller than this value. This mechanism is used only for compression\n * levels >= 4.\n */\n // That's alias to max_lazy_match, don't use directly\n //this.max_insert_length = 0;\n /* Insert new strings in the hash table only if the match length is not\n * greater than this length. 
This saves time but degrades compression.\n * max_insert_length is used only for compression levels <= 3.\n */\n\n this.level = 0; /* compression level (1..9) */\n this.strategy = 0; /* favor or force Huffman coding*/\n\n this.good_match = 0;\n /* Use a faster search when the previous match is longer than this */\n\n this.nice_match = 0; /* Stop searching when current match exceeds this */\n\n /* used by trees.c: */\n\n /* Didn't use ct_data typedef below to suppress compiler warning */\n\n // struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */\n // struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */\n // struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */\n\n // Use flat array of DOUBLE size, with interleaved fata,\n // because JS does not support effective\n this.dyn_ltree = new utils.Buf16(HEAP_SIZE * 2);\n this.dyn_dtree = new utils.Buf16((2 * D_CODES + 1) * 2);\n this.bl_tree = new utils.Buf16((2 * BL_CODES + 1) * 2);\n zero(this.dyn_ltree);\n zero(this.dyn_dtree);\n zero(this.bl_tree);\n\n this.l_desc = null; /* desc. for literal tree */\n this.d_desc = null; /* desc. for distance tree */\n this.bl_desc = null; /* desc. for bit length tree */\n\n //ush bl_count[MAX_BITS+1];\n this.bl_count = new utils.Buf16(MAX_BITS + 1);\n /* number of codes at each bit length for an optimal tree */\n\n //int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */\n this.heap = new utils.Buf16(2 * L_CODES + 1); /* heap used to build the Huffman trees */\n zero(this.heap);\n\n this.heap_len = 0; /* number of elements in the heap */\n this.heap_max = 0; /* element of largest frequency */\n /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.\n * The same heap array is used to build all trees.\n */\n\n this.depth = new utils.Buf16(2 * L_CODES + 1); //uch depth[2*L_CODES+1];\n zero(this.depth);\n /* Depth of each subtree used as tie breaker for trees of equal frequency\n */\n\n this.l_buf = 0; /* buffer index for literals or lengths */\n\n this.lit_bufsize = 0;\n /* Size of match buffer for literals/lengths. There are 4 reasons for\n * limiting lit_bufsize to 64K:\n * - frequencies can be kept in 16 bit counters\n * - if compression is not successful for the first block, all input\n * data is still in the window so we can still emit a stored block even\n * when input comes from standard input. (This can also be done for\n * all blocks if lit_bufsize is not greater than 32K.)\n * - if compression is not successful for a file smaller than 64K, we can\n * even emit a stored file instead of a stored block (saving 5 bytes).\n * This is applicable only for zip (not gzip or zlib).\n * - creating new Huffman trees less frequently may not provide fast\n * adaptation to changes in the input data statistics. (Take for\n * example a binary file with poorly compressible code followed by\n * a highly compressible string table.) Smaller buffer sizes give\n * fast adaptation but have of course the overhead of transmitting\n * trees more frequently.\n * - I can't count above 4\n */\n\n this.last_lit = 0; /* running index in l_buf */\n\n this.d_buf = 0;\n /* Buffer index for distances. To simplify the code, d_buf and l_buf have\n * the same number of elements. 
To use different lengths, an extra flag\n * array would be necessary.\n */\n\n this.opt_len = 0; /* bit length of current block with optimal trees */\n this.static_len = 0; /* bit length of current block with static trees */\n this.matches = 0; /* number of string matches in current block */\n this.insert = 0; /* bytes at end of window left to insert */\n\n\n this.bi_buf = 0;\n /* Output buffer. bits are inserted starting at the bottom (least\n * significant bits).\n */\n this.bi_valid = 0;\n /* Number of valid bits in bi_buf. All bits above the last valid bit\n * are always zero.\n */\n\n // Used for window memory init. We safely ignore it for JS. That makes\n // sense only for pointers and memory check tools.\n //this.high_water = 0;\n /* High water mark offset in window for initialized bytes -- bytes above\n * this are set to zero in order to avoid memory check warnings when\n * longest match routines access bytes past the input. This is then\n * updated to the new high water mark.\n */\n}\n\n\nfunction deflateResetKeep(strm) {\n var s;\n\n if (!strm || !strm.state) {\n return err(strm, Z_STREAM_ERROR);\n }\n\n strm.total_in = strm.total_out = 0;\n strm.data_type = Z_UNKNOWN;\n\n s = strm.state;\n s.pending = 0;\n s.pending_out = 0;\n\n if (s.wrap < 0) {\n s.wrap = -s.wrap;\n /* was made negative by deflate(..., Z_FINISH); */\n }\n s.status = (s.wrap ? INIT_STATE : BUSY_STATE);\n strm.adler = (s.wrap === 2) ?\n 0 // crc32(0, Z_NULL, 0)\n :\n 1; // adler32(0, Z_NULL, 0)\n s.last_flush = Z_NO_FLUSH;\n trees._tr_init(s);\n return Z_OK;\n}\n\n\nfunction deflateReset(strm) {\n var ret = deflateResetKeep(strm);\n if (ret === Z_OK) {\n lm_init(strm.state);\n }\n return ret;\n}\n\n\nfunction deflateSetHeader(strm, head) {\n if (!strm || !strm.state) { return Z_STREAM_ERROR; }\n if (strm.state.wrap !== 2) { return Z_STREAM_ERROR; }\n strm.state.gzhead = head;\n return Z_OK;\n}\n\n\nfunction deflateInit2(strm, level, method, windowBits, memLevel, strategy) {\n if (!strm) { // === Z_NULL\n return Z_STREAM_ERROR;\n }\n var wrap = 1;\n\n if (level === Z_DEFAULT_COMPRESSION) {\n level = 6;\n }\n\n if (windowBits < 0) { /* suppress zlib wrapper */\n wrap = 0;\n windowBits = -windowBits;\n }\n\n else if (windowBits > 15) {\n wrap = 2; /* write gzip wrapper instead */\n windowBits -= 16;\n }\n\n\n if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method !== Z_DEFLATED ||\n windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||\n strategy < 0 || strategy > Z_FIXED) {\n return err(strm, Z_STREAM_ERROR);\n }\n\n\n if (windowBits === 8) {\n windowBits = 9;\n }\n /* until 256-byte window bug fixed */\n\n var s = new DeflateState();\n\n strm.state = s;\n s.strm = strm;\n\n s.wrap = wrap;\n s.gzhead = null;\n s.w_bits = windowBits;\n s.w_size = 1 << s.w_bits;\n s.w_mask = s.w_size - 1;\n\n s.hash_bits = memLevel + 7;\n s.hash_size = 1 << s.hash_bits;\n s.hash_mask = s.hash_size - 1;\n s.hash_shift = ~~((s.hash_bits + MIN_MATCH - 1) / MIN_MATCH);\n\n s.window = new utils.Buf8(s.w_size * 2);\n s.head = new utils.Buf16(s.hash_size);\n s.prev = new utils.Buf16(s.w_size);\n\n // Don't need mem init magic for JS.\n //s.high_water = 0; /* nothing written to s->window yet */\n\n s.lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */\n\n s.pending_buf_size = s.lit_bufsize * 4;\n\n //overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);\n //s->pending_buf = (uchf *) overlay;\n s.pending_buf = new utils.Buf8(s.pending_buf_size);\n\n // It is offset from `s.pending_buf` (size is `s.lit_bufsize 
* 2`)\n //s->d_buf = overlay + s->lit_bufsize/sizeof(ush);\n s.d_buf = 1 * s.lit_bufsize;\n\n //s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;\n s.l_buf = (1 + 2) * s.lit_bufsize;\n\n s.level = level;\n s.strategy = strategy;\n s.method = method;\n\n return deflateReset(strm);\n}\n\nfunction deflateInit(strm, level) {\n return deflateInit2(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);\n}\n\n\nfunction deflate(strm, flush) {\n var old_flush, s;\n var beg, val; // for gzip header write only\n\n if (!strm || !strm.state ||\n flush > Z_BLOCK || flush < 0) {\n return strm ? err(strm, Z_STREAM_ERROR) : Z_STREAM_ERROR;\n }\n\n s = strm.state;\n\n if (!strm.output ||\n (!strm.input && strm.avail_in !== 0) ||\n (s.status === FINISH_STATE && flush !== Z_FINISH)) {\n return err(strm, (strm.avail_out === 0) ? Z_BUF_ERROR : Z_STREAM_ERROR);\n }\n\n s.strm = strm; /* just in case */\n old_flush = s.last_flush;\n s.last_flush = flush;\n\n /* Write the header */\n if (s.status === INIT_STATE) {\n\n if (s.wrap === 2) { // GZIP header\n strm.adler = 0; //crc32(0L, Z_NULL, 0);\n put_byte(s, 31);\n put_byte(s, 139);\n put_byte(s, 8);\n if (!s.gzhead) { // s->gzhead == Z_NULL\n put_byte(s, 0);\n put_byte(s, 0);\n put_byte(s, 0);\n put_byte(s, 0);\n put_byte(s, 0);\n put_byte(s, s.level === 9 ? 2 :\n (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ?\n 4 : 0));\n put_byte(s, OS_CODE);\n s.status = BUSY_STATE;\n }\n else {\n put_byte(s, (s.gzhead.text ? 1 : 0) +\n (s.gzhead.hcrc ? 2 : 0) +\n (!s.gzhead.extra ? 0 : 4) +\n (!s.gzhead.name ? 0 : 8) +\n (!s.gzhead.comment ? 0 : 16)\n );\n put_byte(s, s.gzhead.time & 0xff);\n put_byte(s, (s.gzhead.time >> 8) & 0xff);\n put_byte(s, (s.gzhead.time >> 16) & 0xff);\n put_byte(s, (s.gzhead.time >> 24) & 0xff);\n put_byte(s, s.level === 9 ? 
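// --- Illustrative sketch only, not part of the generated bundle ---
// With the default memLevel of 8 used by deflateInit, the buffer sizes set up
// in deflateInit2 above work out as below; the object literal only restates
// that arithmetic and is not an API of the bundle.
function bufferLayout(memLevel) {
  var litBufsize = 1 << (memLevel + 6);      // 16384 for memLevel 8
  return {
    lit_bufsize: litBufsize,
    pending_buf_size: litBufsize * 4,        // 65536
    d_buf: 1 * litBufsize,                   // distance slots start at 16384
    l_buf: (1 + 2) * litBufsize,             // literal/length slots start at 49152
    hash_size: 1 << (memLevel + 7)           // 32768 hash chain heads
  };
}
// e.g. bufferLayout(8).pending_buf_size === 65536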
2 :\n (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ?\n 4 : 0));\n put_byte(s, s.gzhead.os & 0xff);\n if (s.gzhead.extra && s.gzhead.extra.length) {\n put_byte(s, s.gzhead.extra.length & 0xff);\n put_byte(s, (s.gzhead.extra.length >> 8) & 0xff);\n }\n if (s.gzhead.hcrc) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending, 0);\n }\n s.gzindex = 0;\n s.status = EXTRA_STATE;\n }\n }\n else // DEFLATE header\n {\n var header = (Z_DEFLATED + ((s.w_bits - 8) << 4)) << 8;\n var level_flags = -1;\n\n if (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2) {\n level_flags = 0;\n } else if (s.level < 6) {\n level_flags = 1;\n } else if (s.level === 6) {\n level_flags = 2;\n } else {\n level_flags = 3;\n }\n header |= (level_flags << 6);\n if (s.strstart !== 0) { header |= PRESET_DICT; }\n header += 31 - (header % 31);\n\n s.status = BUSY_STATE;\n putShortMSB(s, header);\n\n /* Save the adler32 of the preset dictionary: */\n if (s.strstart !== 0) {\n putShortMSB(s, strm.adler >>> 16);\n putShortMSB(s, strm.adler & 0xffff);\n }\n strm.adler = 1; // adler32(0L, Z_NULL, 0);\n }\n }\n\n//#ifdef GZIP\n if (s.status === EXTRA_STATE) {\n if (s.gzhead.extra/* != Z_NULL*/) {\n beg = s.pending; /* start of bytes to update crc */\n\n while (s.gzindex < (s.gzhead.extra.length & 0xffff)) {\n if (s.pending === s.pending_buf_size) {\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n flush_pending(strm);\n beg = s.pending;\n if (s.pending === s.pending_buf_size) {\n break;\n }\n }\n put_byte(s, s.gzhead.extra[s.gzindex] & 0xff);\n s.gzindex++;\n }\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n if (s.gzindex === s.gzhead.extra.length) {\n s.gzindex = 0;\n s.status = NAME_STATE;\n }\n }\n else {\n s.status = NAME_STATE;\n }\n }\n if (s.status === NAME_STATE) {\n if (s.gzhead.name/* != Z_NULL*/) {\n beg = s.pending; /* start of bytes to update crc */\n //int val;\n\n do {\n if (s.pending === s.pending_buf_size) {\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n flush_pending(strm);\n beg = s.pending;\n if (s.pending === s.pending_buf_size) {\n val = 1;\n break;\n }\n }\n // JS specific: little magic to add zero terminator to end of string\n if (s.gzindex < s.gzhead.name.length) {\n val = s.gzhead.name.charCodeAt(s.gzindex++) & 0xff;\n } else {\n val = 0;\n }\n put_byte(s, val);\n } while (val !== 0);\n\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n if (val === 0) {\n s.gzindex = 0;\n s.status = COMMENT_STATE;\n }\n }\n else {\n s.status = COMMENT_STATE;\n }\n }\n if (s.status === COMMENT_STATE) {\n if (s.gzhead.comment/* != Z_NULL*/) {\n beg = s.pending; /* start of bytes to update crc */\n //int val;\n\n do {\n if (s.pending === s.pending_buf_size) {\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n flush_pending(strm);\n beg = s.pending;\n if (s.pending === s.pending_buf_size) {\n val = 1;\n break;\n }\n }\n // JS specific: little magic to add zero terminator to end of string\n if (s.gzindex < s.gzhead.comment.length) {\n val = s.gzhead.comment.charCodeAt(s.gzindex++) & 0xff;\n } else {\n val = 0;\n }\n put_byte(s, val);\n } while (val !== 0);\n\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n if (val 
=== 0) {\n s.status = HCRC_STATE;\n }\n }\n else {\n s.status = HCRC_STATE;\n }\n }\n if (s.status === HCRC_STATE) {\n if (s.gzhead.hcrc) {\n if (s.pending + 2 > s.pending_buf_size) {\n flush_pending(strm);\n }\n if (s.pending + 2 <= s.pending_buf_size) {\n put_byte(s, strm.adler & 0xff);\n put_byte(s, (strm.adler >> 8) & 0xff);\n strm.adler = 0; //crc32(0L, Z_NULL, 0);\n s.status = BUSY_STATE;\n }\n }\n else {\n s.status = BUSY_STATE;\n }\n }\n//#endif\n\n /* Flush as much pending output as possible */\n if (s.pending !== 0) {\n flush_pending(strm);\n if (strm.avail_out === 0) {\n /* Since avail_out is 0, deflate will be called again with\n * more output space, but possibly with both pending and\n * avail_in equal to zero. There won't be anything to do,\n * but this is not an error situation so make sure we\n * return OK instead of BUF_ERROR at next call of deflate:\n */\n s.last_flush = -1;\n return Z_OK;\n }\n\n /* Make sure there is something to do and avoid duplicate consecutive\n * flushes. For repeated and useless calls with Z_FINISH, we keep\n * returning Z_STREAM_END instead of Z_BUF_ERROR.\n */\n } else if (strm.avail_in === 0 && rank(flush) <= rank(old_flush) &&\n flush !== Z_FINISH) {\n return err(strm, Z_BUF_ERROR);\n }\n\n /* User must not provide more input after the first FINISH: */\n if (s.status === FINISH_STATE && strm.avail_in !== 0) {\n return err(strm, Z_BUF_ERROR);\n }\n\n /* Start a new block or continue the current one.\n */\n if (strm.avail_in !== 0 || s.lookahead !== 0 ||\n (flush !== Z_NO_FLUSH && s.status !== FINISH_STATE)) {\n var bstate = (s.strategy === Z_HUFFMAN_ONLY) ? deflate_huff(s, flush) :\n (s.strategy === Z_RLE ? deflate_rle(s, flush) :\n configuration_table[s.level].func(s, flush));\n\n if (bstate === BS_FINISH_STARTED || bstate === BS_FINISH_DONE) {\n s.status = FINISH_STATE;\n }\n if (bstate === BS_NEED_MORE || bstate === BS_FINISH_STARTED) {\n if (strm.avail_out === 0) {\n s.last_flush = -1;\n /* avoid BUF_ERROR next call, see above */\n }\n return Z_OK;\n /* If flush != Z_NO_FLUSH && avail_out == 0, the next call\n * of deflate should use the same flush parameter to make sure\n * that the flush is complete. So we don't have to output an\n * empty block here, this will be done at next call. 
This also\n * ensures that for a very small output buffer, we emit at most\n * one empty block.\n */\n }\n if (bstate === BS_BLOCK_DONE) {\n if (flush === Z_PARTIAL_FLUSH) {\n trees._tr_align(s);\n }\n else if (flush !== Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */\n\n trees._tr_stored_block(s, 0, 0, false);\n /* For a full flush, this empty block will be recognized\n * as a special marker by inflate_sync().\n */\n if (flush === Z_FULL_FLUSH) {\n /*** CLEAR_HASH(s); ***/ /* forget history */\n zero(s.head); // Fill with NIL (= 0);\n\n if (s.lookahead === 0) {\n s.strstart = 0;\n s.block_start = 0;\n s.insert = 0;\n }\n }\n }\n flush_pending(strm);\n if (strm.avail_out === 0) {\n s.last_flush = -1; /* avoid BUF_ERROR at next call, see above */\n return Z_OK;\n }\n }\n }\n //Assert(strm->avail_out > 0, \"bug2\");\n //if (strm.avail_out <= 0) { throw new Error(\"bug2\");}\n\n if (flush !== Z_FINISH) { return Z_OK; }\n if (s.wrap <= 0) { return Z_STREAM_END; }\n\n /* Write the trailer */\n if (s.wrap === 2) {\n put_byte(s, strm.adler & 0xff);\n put_byte(s, (strm.adler >> 8) & 0xff);\n put_byte(s, (strm.adler >> 16) & 0xff);\n put_byte(s, (strm.adler >> 24) & 0xff);\n put_byte(s, strm.total_in & 0xff);\n put_byte(s, (strm.total_in >> 8) & 0xff);\n put_byte(s, (strm.total_in >> 16) & 0xff);\n put_byte(s, (strm.total_in >> 24) & 0xff);\n }\n else\n {\n putShortMSB(s, strm.adler >>> 16);\n putShortMSB(s, strm.adler & 0xffff);\n }\n\n flush_pending(strm);\n /* If avail_out is zero, the application will call deflate again\n * to flush the rest.\n */\n if (s.wrap > 0) { s.wrap = -s.wrap; }\n /* write the trailer only once! */\n return s.pending !== 0 ? Z_OK : Z_STREAM_END;\n}\n\nfunction deflateEnd(strm) {\n var status;\n\n if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) {\n return Z_STREAM_ERROR;\n }\n\n status = strm.state.status;\n if (status !== INIT_STATE &&\n status !== EXTRA_STATE &&\n status !== NAME_STATE &&\n status !== COMMENT_STATE &&\n status !== HCRC_STATE &&\n status !== BUSY_STATE &&\n status !== FINISH_STATE\n ) {\n return err(strm, Z_STREAM_ERROR);\n }\n\n strm.state = null;\n\n return status === BUSY_STATE ? 
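// --- Illustrative sketch only, not part of the generated bundle ---
// The trailer code above emits, for the gzip wrapper (wrap === 2), the CRC-32
// of the raw input followed by the total input byte count, both
// least-significant byte first; the zlib wrapper instead writes the Adler-32
// checksum big-endian via putShortMSB. Hypothetical helper, gzip case only:
function gzipTrailerBytes(crc, totalIn) {
  return [
    crc & 0xff, (crc >> 8) & 0xff, (crc >> 16) & 0xff, (crc >> 24) & 0xff,
    totalIn & 0xff, (totalIn >> 8) & 0xff, (totalIn >> 16) & 0xff, (totalIn >> 24) & 0xff
  ];
}
// e.g. gzipTrailerBytes(0x12345678, 10) -> [0x78, 0x56, 0x34, 0x12, 10, 0, 0, 0]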
err(strm, Z_DATA_ERROR) : Z_OK;\n}\n\n\n/* =========================================================================\n * Initializes the compression dictionary from the given byte\n * sequence without producing any compressed output.\n */\nfunction deflateSetDictionary(strm, dictionary) {\n var dictLength = dictionary.length;\n\n var s;\n var str, n;\n var wrap;\n var avail;\n var next;\n var input;\n var tmpDict;\n\n if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) {\n return Z_STREAM_ERROR;\n }\n\n s = strm.state;\n wrap = s.wrap;\n\n if (wrap === 2 || (wrap === 1 && s.status !== INIT_STATE) || s.lookahead) {\n return Z_STREAM_ERROR;\n }\n\n /* when using zlib wrappers, compute Adler-32 for provided dictionary */\n if (wrap === 1) {\n /* adler32(strm->adler, dictionary, dictLength); */\n strm.adler = adler32(strm.adler, dictionary, dictLength, 0);\n }\n\n s.wrap = 0; /* avoid computing Adler-32 in read_buf */\n\n /* if dictionary would fill window, just replace the history */\n if (dictLength >= s.w_size) {\n if (wrap === 0) { /* already empty otherwise */\n /*** CLEAR_HASH(s); ***/\n zero(s.head); // Fill with NIL (= 0);\n s.strstart = 0;\n s.block_start = 0;\n s.insert = 0;\n }\n /* use the tail */\n // dictionary = dictionary.slice(dictLength - s.w_size);\n tmpDict = new utils.Buf8(s.w_size);\n utils.arraySet(tmpDict, dictionary, dictLength - s.w_size, s.w_size, 0);\n dictionary = tmpDict;\n dictLength = s.w_size;\n }\n /* insert dictionary into window and hash */\n avail = strm.avail_in;\n next = strm.next_in;\n input = strm.input;\n strm.avail_in = dictLength;\n strm.next_in = 0;\n strm.input = dictionary;\n fill_window(s);\n while (s.lookahead >= MIN_MATCH) {\n str = s.strstart;\n n = s.lookahead - (MIN_MATCH - 1);\n do {\n /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + MIN_MATCH - 1]) & s.hash_mask;\n\n s.prev[str & s.w_mask] = s.head[s.ins_h];\n\n s.head[s.ins_h] = str;\n str++;\n } while (--n);\n s.strstart = str;\n s.lookahead = MIN_MATCH - 1;\n fill_window(s);\n }\n s.strstart += s.lookahead;\n s.block_start = s.strstart;\n s.insert = s.lookahead;\n s.lookahead = 0;\n s.match_length = s.prev_length = MIN_MATCH - 1;\n s.match_available = 0;\n strm.next_in = next;\n strm.input = input;\n strm.avail_in = avail;\n s.wrap = wrap;\n return Z_OK;\n}\n\n\nexports.deflateInit = deflateInit;\nexports.deflateInit2 = deflateInit2;\nexports.deflateReset = deflateReset;\nexports.deflateResetKeep = deflateResetKeep;\nexports.deflateSetHeader = deflateSetHeader;\nexports.deflate = deflate;\nexports.deflateEnd = deflateEnd;\nexports.deflateSetDictionary = deflateSetDictionary;\nexports.deflateInfo = 'pako deflate (from Nodeca project)';\n\n/* Not implemented\nexports.deflateBound = deflateBound;\nexports.deflateCopy = deflateCopy;\nexports.deflateParams = deflateParams;\nexports.deflatePending = deflatePending;\nexports.deflatePrime = deflatePrime;\nexports.deflateTune = deflateTune;\n*/\n","// String encode/decode helpers\n'use strict';\n\n\nvar utils = require('./common');\n\n\n// Quick check if we can use fast array to bin string conversion\n//\n// - apply(Array) can fail on Android 2.2\n// - apply(Uint8Array) can fail on iOS 5.1 Safary\n//\nvar STR_APPLY_OK = true;\nvar STR_APPLY_UIA_OK = true;\n\ntry { String.fromCharCode.apply(null, [ 0 ]); } catch (__) { STR_APPLY_OK = false; }\ntry { String.fromCharCode.apply(null, new Uint8Array(1)); } catch (__) { STR_APPLY_UIA_OK = false; }\n\n\n// Table with utf8 lengths 
(calculated by first byte of sequence)\n// Note, that 5 & 6-byte values and some 4-byte values can not be represented in JS,\n// because max possible codepoint is 0x10ffff\nvar _utf8len = new utils.Buf8(256);\nfor (var q = 0; q < 256; q++) {\n _utf8len[q] = (q >= 252 ? 6 : q >= 248 ? 5 : q >= 240 ? 4 : q >= 224 ? 3 : q >= 192 ? 2 : 1);\n}\n_utf8len[254] = _utf8len[254] = 1; // Invalid sequence start\n\n\n// convert string to array (typed, when possible)\nexports.string2buf = function (str) {\n var buf, c, c2, m_pos, i, str_len = str.length, buf_len = 0;\n\n // count binary size\n for (m_pos = 0; m_pos < str_len; m_pos++) {\n c = str.charCodeAt(m_pos);\n if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) {\n c2 = str.charCodeAt(m_pos + 1);\n if ((c2 & 0xfc00) === 0xdc00) {\n c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);\n m_pos++;\n }\n }\n buf_len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 3 : 4;\n }\n\n // allocate buffer\n buf = new utils.Buf8(buf_len);\n\n // convert\n for (i = 0, m_pos = 0; i < buf_len; m_pos++) {\n c = str.charCodeAt(m_pos);\n if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) {\n c2 = str.charCodeAt(m_pos + 1);\n if ((c2 & 0xfc00) === 0xdc00) {\n c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);\n m_pos++;\n }\n }\n if (c < 0x80) {\n /* one byte */\n buf[i++] = c;\n } else if (c < 0x800) {\n /* two bytes */\n buf[i++] = 0xC0 | (c >>> 6);\n buf[i++] = 0x80 | (c & 0x3f);\n } else if (c < 0x10000) {\n /* three bytes */\n buf[i++] = 0xE0 | (c >>> 12);\n buf[i++] = 0x80 | (c >>> 6 & 0x3f);\n buf[i++] = 0x80 | (c & 0x3f);\n } else {\n /* four bytes */\n buf[i++] = 0xf0 | (c >>> 18);\n buf[i++] = 0x80 | (c >>> 12 & 0x3f);\n buf[i++] = 0x80 | (c >>> 6 & 0x3f);\n buf[i++] = 0x80 | (c & 0x3f);\n }\n }\n\n return buf;\n};\n\n// Helper (used in 2 places)\nfunction buf2binstring(buf, len) {\n // use fallback for big arrays to avoid stack overflow\n if (len < 65537) {\n if ((buf.subarray && STR_APPLY_UIA_OK) || (!buf.subarray && STR_APPLY_OK)) {\n return String.fromCharCode.apply(null, utils.shrinkBuf(buf, len));\n }\n }\n\n var result = '';\n for (var i = 0; i < len; i++) {\n result += String.fromCharCode(buf[i]);\n }\n return result;\n}\n\n\n// Convert byte array to binary string\nexports.buf2binstring = function (buf) {\n return buf2binstring(buf, buf.length);\n};\n\n\n// Convert binary string (typed, when possible)\nexports.binstring2buf = function (str) {\n var buf = new utils.Buf8(str.length);\n for (var i = 0, len = buf.length; i < len; i++) {\n buf[i] = str.charCodeAt(i);\n }\n return buf;\n};\n\n\n// convert array to string\nexports.buf2string = function (buf, max) {\n var i, out, c, c_len;\n var len = max || buf.length;\n\n // Reserve max possible length (2 words per char)\n // NB: by unknown reasons, Array is significantly faster for\n // String.fromCharCode.apply than Uint16Array.\n var utf16buf = new Array(len * 2);\n\n for (out = 0, i = 0; i < len;) {\n c = buf[i++];\n // quick process ascii\n if (c < 0x80) { utf16buf[out++] = c; continue; }\n\n c_len = _utf8len[c];\n // skip 5 & 6 byte codes\n if (c_len > 4) { utf16buf[out++] = 0xfffd; i += c_len - 1; continue; }\n\n // apply mask on first byte\n c &= c_len === 2 ? 0x1f : c_len === 3 ? 
0x0f : 0x07;\n // join the rest\n while (c_len > 1 && i < len) {\n c = (c << 6) | (buf[i++] & 0x3f);\n c_len--;\n }\n\n // terminated by end of string?\n if (c_len > 1) { utf16buf[out++] = 0xfffd; continue; }\n\n if (c < 0x10000) {\n utf16buf[out++] = c;\n } else {\n c -= 0x10000;\n utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff);\n utf16buf[out++] = 0xdc00 | (c & 0x3ff);\n }\n }\n\n return buf2binstring(utf16buf, out);\n};\n\n\n// Calculate max possible position in utf8 buffer,\n// that will not break sequence. If that's not possible\n// - (very small limits) return max size as is.\n//\n// buf[] - utf8 bytes array\n// max - length limit (mandatory);\nexports.utf8border = function (buf, max) {\n var pos;\n\n max = max || buf.length;\n if (max > buf.length) { max = buf.length; }\n\n // go back from last position, until start of sequence found\n pos = max - 1;\n while (pos >= 0 && (buf[pos] & 0xC0) === 0x80) { pos--; }\n\n // Fuckup - very small and broken sequence,\n // return max, because we should return something anyway.\n if (pos < 0) { return max; }\n\n // If we came to start of buffer - that means vuffer is too small,\n // return max too.\n if (pos === 0) { return max; }\n\n return (pos + _utf8len[buf[pos]] > max) ? pos : max;\n};\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. 
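// --- Illustrative sketch only, not part of the generated bundle ---
// string2buf above sizes its output by walking UTF-16 code units, merging
// surrogate pairs into single code points, and charging 1/2/3/4 bytes per
// code point. The hypothetical helper below performs just that counting pass,
// so the allocation logic can be checked in isolation.
function utf8ByteLength(str) {
  var len = 0;
  for (var i = 0; i < str.length; i++) {
    var c = str.charCodeAt(i);
    // merge a surrogate pair into one code point, as string2buf does
    if ((c & 0xfc00) === 0xd800 && i + 1 < str.length) {
      var c2 = str.charCodeAt(i + 1);
      if ((c2 & 0xfc00) === 0xdc00) {
        c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);
        i++;
      }
    }
    len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 3 : 4;
  }
  return len;
}
// e.g. utf8ByteLength('héllo') === 6, utf8ByteLength('\ud83d\ude00') === 4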
This notice may not be removed or altered from any source distribution.\n\nfunction ZStream() {\n /* next input byte */\n this.input = null; // JS specific, because we have no pointers\n this.next_in = 0;\n /* number of bytes available at input */\n this.avail_in = 0;\n /* total number of input bytes read so far */\n this.total_in = 0;\n /* next output byte should be put there */\n this.output = null; // JS specific, because we have no pointers\n this.next_out = 0;\n /* remaining free space at output */\n this.avail_out = 0;\n /* total number of bytes output so far */\n this.total_out = 0;\n /* last error message, NULL if no error */\n this.msg = ''/*Z_NULL*/;\n /* not visible by applications */\n this.state = null;\n /* best guess about the data type: binary or text */\n this.data_type = 2/*Z_UNKNOWN*/;\n /* adler32 value of the uncompressed data */\n this.adler = 0;\n}\n\nmodule.exports = ZStream;\n","'use strict';\n\n\nvar zlib_deflate = require('./zlib/deflate');\nvar utils = require('./utils/common');\nvar strings = require('./utils/strings');\nvar msg = require('./zlib/messages');\nvar ZStream = require('./zlib/zstream');\n\nvar toString = Object.prototype.toString;\n\n/* Public constants ==========================================================*/\n/* ===========================================================================*/\n\nvar Z_NO_FLUSH = 0;\nvar Z_FINISH = 4;\n\nvar Z_OK = 0;\nvar Z_STREAM_END = 1;\nvar Z_SYNC_FLUSH = 2;\n\nvar Z_DEFAULT_COMPRESSION = -1;\n\nvar Z_DEFAULT_STRATEGY = 0;\n\nvar Z_DEFLATED = 8;\n\n/* ===========================================================================*/\n\n\n/**\n * class Deflate\n *\n * Generic JS-style wrapper for zlib calls. If you don't need\n * streaming behaviour - use more simple functions: [[deflate]],\n * [[deflateRaw]] and [[gzip]].\n **/\n\n/* internal\n * Deflate.chunks -> Array\n *\n * Chunks of output data, if [[Deflate#onData]] not overriden.\n **/\n\n/**\n * Deflate.result -> Uint8Array|Array\n *\n * Compressed result, generated by default [[Deflate#onData]]\n * and [[Deflate#onEnd]] handlers. Filled after you push last chunk\n * (call [[Deflate#push]] with `Z_FINISH` / `true` param) or if you\n * push a chunk with explicit flush (call [[Deflate#push]] with\n * `Z_SYNC_FLUSH` param).\n **/\n\n/**\n * Deflate.err -> Number\n *\n * Error code after deflate finished. 0 (Z_OK) on success.\n * You will not need it in real life, because deflate errors\n * are possible only on wrong options or bad `onData` / `onEnd`\n * custom handlers.\n **/\n\n/**\n * Deflate.msg -> String\n *\n * Error message, if [[Deflate.err]] != 0\n **/\n\n\n/**\n * new Deflate(options)\n * - options (Object): zlib deflate options.\n *\n * Creates new deflator instance with specified params. Throws exception\n * on bad params. 
Supported options:\n *\n * - `level`\n * - `windowBits`\n * - `memLevel`\n * - `strategy`\n * - `dictionary`\n *\n * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)\n * for more information on these.\n *\n * Additional options, for internal needs:\n *\n * - `chunkSize` - size of generated data chunks (16K by default)\n * - `raw` (Boolean) - do raw deflate\n * - `gzip` (Boolean) - create gzip wrapper\n * - `to` (String) - if equal to 'string', then result will be \"binary string\"\n * (each char code [0..255])\n * - `header` (Object) - custom header for gzip\n * - `text` (Boolean) - true if compressed data believed to be text\n * - `time` (Number) - modification time, unix timestamp\n * - `os` (Number) - operation system code\n * - `extra` (Array) - array of bytes with extra data (max 65536)\n * - `name` (String) - file name (binary string)\n * - `comment` (String) - comment (binary string)\n * - `hcrc` (Boolean) - true if header crc should be added\n *\n * ##### Example:\n *\n * ```javascript\n * var pako = require('pako')\n * , chunk1 = Uint8Array([1,2,3,4,5,6,7,8,9])\n * , chunk2 = Uint8Array([10,11,12,13,14,15,16,17,18,19]);\n *\n * var deflate = new pako.Deflate({ level: 3});\n *\n * deflate.push(chunk1, false);\n * deflate.push(chunk2, true); // true -> last chunk\n *\n * if (deflate.err) { throw new Error(deflate.err); }\n *\n * console.log(deflate.result);\n * ```\n **/\nfunction Deflate(options) {\n if (!(this instanceof Deflate)) return new Deflate(options);\n\n this.options = utils.assign({\n level: Z_DEFAULT_COMPRESSION,\n method: Z_DEFLATED,\n chunkSize: 16384,\n windowBits: 15,\n memLevel: 8,\n strategy: Z_DEFAULT_STRATEGY,\n to: ''\n }, options || {});\n\n var opt = this.options;\n\n if (opt.raw && (opt.windowBits > 0)) {\n opt.windowBits = -opt.windowBits;\n }\n\n else if (opt.gzip && (opt.windowBits > 0) && (opt.windowBits < 16)) {\n opt.windowBits += 16;\n }\n\n this.err = 0; // error code, if happens (0 = Z_OK)\n this.msg = ''; // error message\n this.ended = false; // used to avoid multiple onEnd() calls\n this.chunks = []; // chunks of compressed data\n\n this.strm = new ZStream();\n this.strm.avail_out = 0;\n\n var status = zlib_deflate.deflateInit2(\n this.strm,\n opt.level,\n opt.method,\n opt.windowBits,\n opt.memLevel,\n opt.strategy\n );\n\n if (status !== Z_OK) {\n throw new Error(msg[status]);\n }\n\n if (opt.header) {\n zlib_deflate.deflateSetHeader(this.strm, opt.header);\n }\n\n if (opt.dictionary) {\n var dict;\n // Convert data if needed\n if (typeof opt.dictionary === 'string') {\n // If we need to compress text, change encoding to utf8.\n dict = strings.string2buf(opt.dictionary);\n } else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') {\n dict = new Uint8Array(opt.dictionary);\n } else {\n dict = opt.dictionary;\n }\n\n status = zlib_deflate.deflateSetDictionary(this.strm, dict);\n\n if (status !== Z_OK) {\n throw new Error(msg[status]);\n }\n\n this._dict_set = true;\n }\n}\n\n/**\n * Deflate#push(data[, mode]) -> Boolean\n * - data (Uint8Array|Array|ArrayBuffer|String): input data. Strings will be\n * converted to utf8 byte sequence.\n * - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes.\n * See constants. Skipped or `false` means Z_NO_FLUSH, `true` meansh Z_FINISH.\n *\n * Sends input data to deflate pipe, generating [[Deflate#onData]] calls with\n * new compressed chunks. Returns `true` on success. The last data block must have\n * mode Z_FINISH (or `true`). 
That will flush internal pending buffers and call\n * [[Deflate#onEnd]]. For interim explicit flushes (without ending the stream) you\n * can use mode Z_SYNC_FLUSH, keeping the compression context.\n *\n * On fail call [[Deflate#onEnd]] with error code and return false.\n *\n * We strongly recommend to use `Uint8Array` on input for best speed (output\n * array format is detected automatically). Also, don't skip last param and always\n * use the same type in your code (boolean or number). That will improve JS speed.\n *\n * For regular `Array`-s make sure all elements are [0..255].\n *\n * ##### Example\n *\n * ```javascript\n * push(chunk, false); // push one of data chunks\n * ...\n * push(chunk, true); // push last chunk\n * ```\n **/\nDeflate.prototype.push = function (data, mode) {\n var strm = this.strm;\n var chunkSize = this.options.chunkSize;\n var status, _mode;\n\n if (this.ended) { return false; }\n\n _mode = (mode === ~~mode) ? mode : ((mode === true) ? Z_FINISH : Z_NO_FLUSH);\n\n // Convert data if needed\n if (typeof data === 'string') {\n // If we need to compress text, change encoding to utf8.\n strm.input = strings.string2buf(data);\n } else if (toString.call(data) === '[object ArrayBuffer]') {\n strm.input = new Uint8Array(data);\n } else {\n strm.input = data;\n }\n\n strm.next_in = 0;\n strm.avail_in = strm.input.length;\n\n do {\n if (strm.avail_out === 0) {\n strm.output = new utils.Buf8(chunkSize);\n strm.next_out = 0;\n strm.avail_out = chunkSize;\n }\n status = zlib_deflate.deflate(strm, _mode); /* no bad return value */\n\n if (status !== Z_STREAM_END && status !== Z_OK) {\n this.onEnd(status);\n this.ended = true;\n return false;\n }\n if (strm.avail_out === 0 || (strm.avail_in === 0 && (_mode === Z_FINISH || _mode === Z_SYNC_FLUSH))) {\n if (this.options.to === 'string') {\n this.onData(strings.buf2binstring(utils.shrinkBuf(strm.output, strm.next_out)));\n } else {\n this.onData(utils.shrinkBuf(strm.output, strm.next_out));\n }\n }\n } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== Z_STREAM_END);\n\n // Finalize on the last chunk.\n if (_mode === Z_FINISH) {\n status = zlib_deflate.deflateEnd(this.strm);\n this.onEnd(status);\n this.ended = true;\n return status === Z_OK;\n }\n\n // callback interim results if Z_SYNC_FLUSH.\n if (_mode === Z_SYNC_FLUSH) {\n this.onEnd(Z_OK);\n strm.avail_out = 0;\n return true;\n }\n\n return true;\n};\n\n\n/**\n * Deflate#onData(chunk) -> Void\n * - chunk (Uint8Array|Array|String): ouput data. Type of array depends\n * on js engine support. When string output requested, each chunk\n * will be string.\n *\n * By default, stores data blocks in `chunks[]` property and glue\n * those in `onEnd`. Override this handler, if you need another behaviour.\n **/\nDeflate.prototype.onData = function (chunk) {\n this.chunks.push(chunk);\n};\n\n\n/**\n * Deflate#onEnd(status) -> Void\n * - status (Number): deflate status. 0 (Z_OK) on success,\n * other if not.\n *\n * Called once after you tell deflate that the input stream is\n * complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH)\n * or if an error happened. 
By default - join collected chunks,\n * free memory and fill `results` / `err` properties.\n **/\nDeflate.prototype.onEnd = function (status) {\n // On success - join\n if (status === Z_OK) {\n if (this.options.to === 'string') {\n this.result = this.chunks.join('');\n } else {\n this.result = utils.flattenChunks(this.chunks);\n }\n }\n this.chunks = [];\n this.err = status;\n this.msg = this.strm.msg;\n};\n\n\n/**\n * deflate(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to compress.\n * - options (Object): zlib deflate options.\n *\n * Compress `data` with deflate algorithm and `options`.\n *\n * Supported options are:\n *\n * - level\n * - windowBits\n * - memLevel\n * - strategy\n * - dictionary\n *\n * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)\n * for more information on these.\n *\n * Sugar (options):\n *\n * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify\n * negative windowBits implicitly.\n * - `to` (String) - if equal to 'string', then result will be \"binary string\"\n * (each char code [0..255])\n *\n * ##### Example:\n *\n * ```javascript\n * var pako = require('pako')\n * , data = Uint8Array([1,2,3,4,5,6,7,8,9]);\n *\n * console.log(pako.deflate(data));\n * ```\n **/\nfunction deflate(input, options) {\n var deflator = new Deflate(options);\n\n deflator.push(input, true);\n\n // That will never happens, if you don't cheat with options :)\n if (deflator.err) { throw deflator.msg || msg[deflator.err]; }\n\n return deflator.result;\n}\n\n\n/**\n * deflateRaw(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to compress.\n * - options (Object): zlib deflate options.\n *\n * The same as [[deflate]], but creates raw data, without wrapper\n * (header and adler32 crc).\n **/\nfunction deflateRaw(input, options) {\n options = options || {};\n options.raw = true;\n return deflate(input, options);\n}\n\n\n/**\n * gzip(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to compress.\n * - options (Object): zlib deflate options.\n *\n * The same as [[deflate]], but create gzip wrapper instead of\n * deflate one.\n **/\nfunction gzip(input, options) {\n options = options || {};\n options.gzip = true;\n return deflate(input, options);\n}\n\n\nexports.Deflate = Deflate;\nexports.deflate = deflate;\nexports.deflateRaw = deflateRaw;\nexports.gzip = gzip;\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. 
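// --- Illustrative usage sketch only, not part of the generated bundle ---
// Based on the Deflate / push / onData documentation above: push interim
// chunks with `false`, the last one with `true` (Z_FINISH), and either read
// `result` afterwards or override onData to stream chunks as they are
// produced. Assumes pako is available via require('pako'), as in the docs.
var pako = require('pako');

var deflator = new pako.Deflate({ level: 6, gzip: true });

deflator.onData = function (chunk) {
  // chunk is a Uint8Array (or a binary string when `to: 'string'` is set)
  console.log('got ' + chunk.length + ' compressed bytes');
};

deflator.push(new Uint8Array([1, 2, 3, 4, 5]), false); // interim chunk
deflator.push(new Uint8Array([6, 7, 8, 9, 10]), true); // last chunk -> Z_FINISH

if (deflator.err) { throw new Error(deflator.msg || String(deflator.err)); }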
This notice may not be removed or altered from any source distribution.\n\n// See state defs from inflate.js\nvar BAD = 30; /* got a data error -- remain here until reset */\nvar TYPE = 12; /* i: waiting for type bits, including last-flag bit */\n\n/*\n Decode literal, length, and distance codes and write out the resulting\n literal and match bytes until either not enough input or output is\n available, an end-of-block is encountered, or a data error is encountered.\n When large enough input and output buffers are supplied to inflate(), for\n example, a 16K input buffer and a 64K output buffer, more than 95% of the\n inflate execution time is spent in this routine.\n\n Entry assumptions:\n\n state.mode === LEN\n strm.avail_in >= 6\n strm.avail_out >= 258\n start >= strm.avail_out\n state.bits < 8\n\n On return, state.mode is one of:\n\n LEN -- ran out of enough output space or enough available input\n TYPE -- reached end of block code, inflate() to interpret next block\n BAD -- error in block data\n\n Notes:\n\n - The maximum input bits used by a length/distance pair is 15 bits for the\n length code, 5 bits for the length extra, 15 bits for the distance code,\n and 13 bits for the distance extra. This totals 48 bits, or six bytes.\n Therefore if strm.avail_in >= 6, then there is enough input to avoid\n checking for available input while decoding.\n\n - The maximum bytes that a single length/distance pair can output is 258\n bytes, which is the maximum length that can be coded. inflate_fast()\n requires strm.avail_out >= 258 for each loop to avoid checking for\n output space.\n */\nmodule.exports = function inflate_fast(strm, start) {\n var state;\n var _in; /* local strm.input */\n var last; /* have enough input while in < last */\n var _out; /* local strm.output */\n var beg; /* inflate()'s initial strm.output */\n var end; /* while out < end, enough space available */\n//#ifdef INFLATE_STRICT\n var dmax; /* maximum distance from zlib header */\n//#endif\n var wsize; /* window size or zero if not using window */\n var whave; /* valid bytes in the window */\n var wnext; /* window write index */\n // Use `s_window` instead `window`, avoid conflict with instrumentation tools\n var s_window; /* allocated sliding window, if wsize != 0 */\n var hold; /* local strm.hold */\n var bits; /* local strm.bits */\n var lcode; /* local strm.lencode */\n var dcode; /* local strm.distcode */\n var lmask; /* mask for first level of length codes */\n var dmask; /* mask for first level of distance codes */\n var here; /* retrieved table entry */\n var op; /* code bits, operation, extra bits, or */\n /* window position, window bytes to copy */\n var len; /* match length, unused bytes */\n var dist; /* match distance */\n var from; /* where to copy match from */\n var from_source;\n\n\n var input, output; // JS specific, because we have no pointers\n\n /* copy state to local variables */\n state = strm.state;\n //here = state.here;\n _in = strm.next_in;\n input = strm.input;\n last = _in + (strm.avail_in - 5);\n _out = strm.next_out;\n output = strm.output;\n beg = _out - (start - strm.avail_out);\n end = _out + (strm.avail_out - 257);\n//#ifdef INFLATE_STRICT\n dmax = state.dmax;\n//#endif\n wsize = state.wsize;\n whave = state.whave;\n wnext = state.wnext;\n s_window = state.window;\n hold = state.hold;\n bits = state.bits;\n lcode = state.lencode;\n dcode = state.distcode;\n lmask = (1 << state.lenbits) - 1;\n dmask = (1 << state.distbits) - 1;\n\n\n /* decode literals and length/distances until 
end-of-block or not enough\n input data or output space */\n\n top:\n do {\n if (bits < 15) {\n hold += input[_in++] << bits;\n bits += 8;\n hold += input[_in++] << bits;\n bits += 8;\n }\n\n here = lcode[hold & lmask];\n\n dolen:\n for (;;) { // Goto emulation\n op = here >>> 24/*here.bits*/;\n hold >>>= op;\n bits -= op;\n op = (here >>> 16) & 0xff/*here.op*/;\n if (op === 0) { /* literal */\n //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?\n // \"inflate: literal '%c'\\n\" :\n // \"inflate: literal 0x%02x\\n\", here.val));\n output[_out++] = here & 0xffff/*here.val*/;\n }\n else if (op & 16) { /* length base */\n len = here & 0xffff/*here.val*/;\n op &= 15; /* number of extra bits */\n if (op) {\n if (bits < op) {\n hold += input[_in++] << bits;\n bits += 8;\n }\n len += hold & ((1 << op) - 1);\n hold >>>= op;\n bits -= op;\n }\n //Tracevv((stderr, \"inflate: length %u\\n\", len));\n if (bits < 15) {\n hold += input[_in++] << bits;\n bits += 8;\n hold += input[_in++] << bits;\n bits += 8;\n }\n here = dcode[hold & dmask];\n\n dodist:\n for (;;) { // goto emulation\n op = here >>> 24/*here.bits*/;\n hold >>>= op;\n bits -= op;\n op = (here >>> 16) & 0xff/*here.op*/;\n\n if (op & 16) { /* distance base */\n dist = here & 0xffff/*here.val*/;\n op &= 15; /* number of extra bits */\n if (bits < op) {\n hold += input[_in++] << bits;\n bits += 8;\n if (bits < op) {\n hold += input[_in++] << bits;\n bits += 8;\n }\n }\n dist += hold & ((1 << op) - 1);\n//#ifdef INFLATE_STRICT\n if (dist > dmax) {\n strm.msg = 'invalid distance too far back';\n state.mode = BAD;\n break top;\n }\n//#endif\n hold >>>= op;\n bits -= op;\n //Tracevv((stderr, \"inflate: distance %u\\n\", dist));\n op = _out - beg; /* max distance in output */\n if (dist > op) { /* see if copy from window */\n op = dist - op; /* distance back in window */\n if (op > whave) {\n if (state.sane) {\n strm.msg = 'invalid distance too far back';\n state.mode = BAD;\n break top;\n }\n\n// (!) 
This block is disabled in zlib defaults,\n// don't enable it for binary compatibility\n//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR\n// if (len <= op - whave) {\n// do {\n// output[_out++] = 0;\n// } while (--len);\n// continue top;\n// }\n// len -= op - whave;\n// do {\n// output[_out++] = 0;\n// } while (--op > whave);\n// if (op === 0) {\n// from = _out - dist;\n// do {\n// output[_out++] = output[from++];\n// } while (--len);\n// continue top;\n// }\n//#endif\n }\n from = 0; // window index\n from_source = s_window;\n if (wnext === 0) { /* very common case */\n from += wsize - op;\n if (op < len) { /* some from window */\n len -= op;\n do {\n output[_out++] = s_window[from++];\n } while (--op);\n from = _out - dist; /* rest from output */\n from_source = output;\n }\n }\n else if (wnext < op) { /* wrap around window */\n from += wsize + wnext - op;\n op -= wnext;\n if (op < len) { /* some from end of window */\n len -= op;\n do {\n output[_out++] = s_window[from++];\n } while (--op);\n from = 0;\n if (wnext < len) { /* some from start of window */\n op = wnext;\n len -= op;\n do {\n output[_out++] = s_window[from++];\n } while (--op);\n from = _out - dist; /* rest from output */\n from_source = output;\n }\n }\n }\n else { /* contiguous in window */\n from += wnext - op;\n if (op < len) { /* some from window */\n len -= op;\n do {\n output[_out++] = s_window[from++];\n } while (--op);\n from = _out - dist; /* rest from output */\n from_source = output;\n }\n }\n while (len > 2) {\n output[_out++] = from_source[from++];\n output[_out++] = from_source[from++];\n output[_out++] = from_source[from++];\n len -= 3;\n }\n if (len) {\n output[_out++] = from_source[from++];\n if (len > 1) {\n output[_out++] = from_source[from++];\n }\n }\n }\n else {\n from = _out - dist; /* copy direct from output */\n do { /* minimum length is three */\n output[_out++] = output[from++];\n output[_out++] = output[from++];\n output[_out++] = output[from++];\n len -= 3;\n } while (len > 2);\n if (len) {\n output[_out++] = output[from++];\n if (len > 1) {\n output[_out++] = output[from++];\n }\n }\n }\n }\n else if ((op & 64) === 0) { /* 2nd level distance code */\n here = dcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))];\n continue dodist;\n }\n else {\n strm.msg = 'invalid distance code';\n state.mode = BAD;\n break top;\n }\n\n break; // need to emulate goto via \"continue\"\n }\n }\n else if ((op & 64) === 0) { /* 2nd level length code */\n here = lcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))];\n continue dolen;\n }\n else if (op & 32) { /* end-of-block */\n //Tracevv((stderr, \"inflate: end of block\\n\"));\n state.mode = TYPE;\n break top;\n }\n else {\n strm.msg = 'invalid literal/length code';\n state.mode = BAD;\n break top;\n }\n\n break; // need to emulate goto via \"continue\"\n }\n } while (_in < last && _out < end);\n\n /* return unused bytes (on entry, bits < 8, so in won't go too far back) */\n len = bits >> 3;\n _in -= len;\n bits -= len << 3;\n hold &= (1 << bits) - 1;\n\n /* update state and return */\n strm.next_in = _in;\n strm.next_out = _out;\n strm.avail_in = (_in < last ? 5 + (last - _in) : 5 - (_in - last));\n strm.avail_out = (_out < end ? 257 + (end - _out) : 257 - (_out - end));\n state.hold = hold;\n state.bits = bits;\n return;\n};\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. 
In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nvar utils = require('../utils/common');\n\nvar MAXBITS = 15;\nvar ENOUGH_LENS = 852;\nvar ENOUGH_DISTS = 592;\n//var ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS);\n\nvar CODES = 0;\nvar LENS = 1;\nvar DISTS = 2;\n\nvar lbase = [ /* Length codes 257..285 base */\n 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,\n 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0\n];\n\nvar lext = [ /* Length codes 257..285 extra */\n 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,\n 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 72, 78\n];\n\nvar dbase = [ /* Distance codes 0..29 base */\n 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,\n 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,\n 8193, 12289, 16385, 24577, 0, 0\n];\n\nvar dext = [ /* Distance codes 0..29 extra */\n 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22,\n 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,\n 28, 28, 29, 29, 64, 64\n];\n\nmodule.exports = function inflate_table(type, lens, lens_index, codes, table, table_index, work, opts)\n{\n var bits = opts.bits;\n //here = opts.here; /* table entry for duplication */\n\n var len = 0; /* a code's length in bits */\n var sym = 0; /* index of code symbols */\n var min = 0, max = 0; /* minimum and maximum code lengths */\n var root = 0; /* number of index bits for root table */\n var curr = 0; /* number of index bits for current table */\n var drop = 0; /* code bits to drop for sub-table */\n var left = 0; /* number of prefix codes available */\n var used = 0; /* code entries in table used */\n var huff = 0; /* Huffman code */\n var incr; /* for incrementing code, index */\n var fill; /* index for replicating entries */\n var low; /* low bits for current root entry */\n var mask; /* mask for low root bits */\n var next; /* next available space in table */\n var base = null; /* base value table to use */\n var base_index = 0;\n// var shoextra; /* extra bits table to use */\n var end; /* use base and extra for symbol > end */\n var count = new utils.Buf16(MAXBITS + 1); //[MAXBITS+1]; /* number of codes of each length */\n var offs = new utils.Buf16(MAXBITS + 1); //[MAXBITS+1]; /* offsets in table for each length */\n var extra = null;\n var extra_index = 0;\n\n var here_bits, here_op, here_val;\n\n /*\n Process a set of code lengths to create a canonical Huffman code. The\n code lengths are lens[0..codes-1]. Each length corresponds to the\n symbols 0..codes-1. The Huffman code is generated by first sorting the\n symbols by length from short to long, and retaining the symbol order\n for codes with equal lengths. 
Then the code starts with all zero bits\n for the first code of the shortest length, and the codes are integer\n increments for the same length, and zeros are appended as the length\n increases. For the deflate format, these bits are stored backwards\n from their more natural integer increment ordering, and so when the\n decoding tables are built in the large loop below, the integer codes\n are incremented backwards.\n\n This routine assumes, but does not check, that all of the entries in\n lens[] are in the range 0..MAXBITS. The caller must assure this.\n 1..MAXBITS is interpreted as that code length. zero means that that\n symbol does not occur in this code.\n\n The codes are sorted by computing a count of codes for each length,\n creating from that a table of starting indices for each length in the\n sorted table, and then entering the symbols in order in the sorted\n table. The sorted table is work[], with that space being provided by\n the caller.\n\n The length counts are used for other purposes as well, i.e. finding\n the minimum and maximum length codes, determining if there are any\n codes at all, checking for a valid set of lengths, and looking ahead\n at length counts to determine sub-table sizes when building the\n decoding tables.\n */\n\n /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */\n for (len = 0; len <= MAXBITS; len++) {\n count[len] = 0;\n }\n for (sym = 0; sym < codes; sym++) {\n count[lens[lens_index + sym]]++;\n }\n\n /* bound code lengths, force root to be within code lengths */\n root = bits;\n for (max = MAXBITS; max >= 1; max--) {\n if (count[max] !== 0) { break; }\n }\n if (root > max) {\n root = max;\n }\n if (max === 0) { /* no symbols to code at all */\n //table.op[opts.table_index] = 64; //here.op = (var char)64; /* invalid code marker */\n //table.bits[opts.table_index] = 1; //here.bits = (var char)1;\n //table.val[opts.table_index++] = 0; //here.val = (var short)0;\n table[table_index++] = (1 << 24) | (64 << 16) | 0;\n\n\n //table.op[opts.table_index] = 64;\n //table.bits[opts.table_index] = 1;\n //table.val[opts.table_index++] = 0;\n table[table_index++] = (1 << 24) | (64 << 16) | 0;\n\n opts.bits = 1;\n return 0; /* no symbols, but wait for decoding to report error */\n }\n for (min = 1; min < max; min++) {\n if (count[min] !== 0) { break; }\n }\n if (root < min) {\n root = min;\n }\n\n /* check for an over-subscribed or incomplete set of lengths */\n left = 1;\n for (len = 1; len <= MAXBITS; len++) {\n left <<= 1;\n left -= count[len];\n if (left < 0) {\n return -1;\n } /* over-subscribed */\n }\n if (left > 0 && (type === CODES || max !== 1)) {\n return -1; /* incomplete set */\n }\n\n /* generate offsets into symbol table for each length for sorting */\n offs[1] = 0;\n for (len = 1; len < MAXBITS; len++) {\n offs[len + 1] = offs[len] + count[len];\n }\n\n /* sort symbols by length, by symbol order within each length */\n for (sym = 0; sym < codes; sym++) {\n if (lens[lens_index + sym] !== 0) {\n work[offs[lens[lens_index + sym]]++] = sym;\n }\n }\n\n /*\n Create and fill in decoding tables. In this loop, the table being\n filled is at next and has curr index bits. The code being used is huff\n with length len. That code is converted to an index by dropping drop\n bits off of the bottom. For codes where len is less than drop + curr,\n those top drop + curr - len bits are incremented through all values to\n fill the table with replicated entries.\n\n root is the number of index bits for the root table. 
When len exceeds\n root, sub-tables are created pointed to by the root entry with an index\n of the low root bits of huff. This is saved in low to check for when a\n new sub-table should be started. drop is zero when the root table is\n being filled, and drop is root when sub-tables are being filled.\n\n When a new sub-table is needed, it is necessary to look ahead in the\n code lengths to determine what size sub-table is needed. The length\n counts are used for this, and so count[] is decremented as codes are\n entered in the tables.\n\n used keeps track of how many table entries have been allocated from the\n provided *table space. It is checked for LENS and DIST tables against\n the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in\n the initial root table size constants. See the comments in inftrees.h\n for more information.\n\n sym increments through all symbols, and the loop terminates when\n all codes of length max, i.e. all codes, have been processed. This\n routine permits incomplete codes, so another loop after this one fills\n in the rest of the decoding tables with invalid code markers.\n */\n\n /* set up for code type */\n // poor man optimization - use if-else instead of switch,\n // to avoid deopts in old v8\n if (type === CODES) {\n base = extra = work; /* dummy value--not used */\n end = 19;\n\n } else if (type === LENS) {\n base = lbase;\n base_index -= 257;\n extra = lext;\n extra_index -= 257;\n end = 256;\n\n } else { /* DISTS */\n base = dbase;\n extra = dext;\n end = -1;\n }\n\n /* initialize opts for loop */\n huff = 0; /* starting code */\n sym = 0; /* starting code symbol */\n len = min; /* starting code length */\n next = table_index; /* current table to fill in */\n curr = root; /* current table index bits */\n drop = 0; /* current bits to drop from code for index */\n low = -1; /* trigger new sub-table when len > root */\n used = 1 << root; /* use root table entries */\n mask = used - 1; /* mask for comparing low */\n\n /* check available table space */\n if ((type === LENS && used > ENOUGH_LENS) ||\n (type === DISTS && used > ENOUGH_DISTS)) {\n return 1;\n }\n\n /* process all codes and make table entries */\n for (;;) {\n /* create table entry */\n here_bits = len - drop;\n if (work[sym] < end) {\n here_op = 0;\n here_val = work[sym];\n }\n else if (work[sym] > end) {\n here_op = extra[extra_index + work[sym]];\n here_val = base[base_index + work[sym]];\n }\n else {\n here_op = 32 + 64; /* end of block */\n here_val = 0;\n }\n\n /* replicate for those indices with low len bits equal to huff */\n incr = 1 << (len - drop);\n fill = 1 << curr;\n min = fill; /* save offset to next table */\n do {\n fill -= incr;\n table[next + (huff >> drop) + fill] = (here_bits << 24) | (here_op << 16) | here_val |0;\n } while (fill !== 0);\n\n /* backwards increment the len-bit code huff */\n incr = 1 << (len - 1);\n while (huff & incr) {\n incr >>= 1;\n }\n if (incr !== 0) {\n huff &= incr - 1;\n huff += incr;\n } else {\n huff = 0;\n }\n\n /* go to next symbol, update count, len */\n sym++;\n if (--count[len] === 0) {\n if (len === max) { break; }\n len = lens[lens_index + work[sym]];\n }\n\n /* create new sub-table if needed */\n if (len > root && (huff & mask) !== low) {\n /* if first time, transition to sub-tables */\n if (drop === 0) {\n drop = root;\n }\n\n /* increment past last table */\n next += min; /* here min is 1 << curr */\n\n /* determine length of next table */\n curr = len - drop;\n left = 1 << curr;\n while (curr + drop < max) {\n left -= 
count[curr + drop];\n if (left <= 0) { break; }\n curr++;\n left <<= 1;\n }\n\n /* check for enough space */\n used += 1 << curr;\n if ((type === LENS && used > ENOUGH_LENS) ||\n (type === DISTS && used > ENOUGH_DISTS)) {\n return 1;\n }\n\n /* point entry in root table to sub-table */\n low = huff & mask;\n /*table.op[low] = curr;\n table.bits[low] = root;\n table.val[low] = next - opts.table_index;*/\n table[low] = (root << 24) | (curr << 16) | (next - table_index) |0;\n }\n }\n\n /* fill in remaining table entry if code is incomplete (guaranteed to have\n at most one remaining entry, since if the code is incomplete, the\n maximum code length that was allowed to get this far is one bit) */\n if (huff !== 0) {\n //table.op[next + huff] = 64; /* invalid code marker */\n //table.bits[next + huff] = len - drop;\n //table.val[next + huff] = 0;\n table[next + huff] = ((len - drop) << 24) | (64 << 16) |0;\n }\n\n /* set return parameters */\n //opts.table_index += used;\n opts.bits = root;\n return 0;\n};\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nvar utils = require('../utils/common');\nvar adler32 = require('./adler32');\nvar crc32 = require('./crc32');\nvar inflate_fast = require('./inffast');\nvar inflate_table = require('./inftrees');\n\nvar CODES = 0;\nvar LENS = 1;\nvar DISTS = 2;\n\n/* Public constants ==========================================================*/\n/* ===========================================================================*/\n\n\n/* Allowed flush values; see deflate() and inflate() below for details */\n//var Z_NO_FLUSH = 0;\n//var Z_PARTIAL_FLUSH = 1;\n//var Z_SYNC_FLUSH = 2;\n//var Z_FULL_FLUSH = 3;\nvar Z_FINISH = 4;\nvar Z_BLOCK = 5;\nvar Z_TREES = 6;\n\n\n/* Return codes for the compression/decompression functions. 
Negative values\n * are errors, positive values are used for special but normal events.\n */\nvar Z_OK = 0;\nvar Z_STREAM_END = 1;\nvar Z_NEED_DICT = 2;\n//var Z_ERRNO = -1;\nvar Z_STREAM_ERROR = -2;\nvar Z_DATA_ERROR = -3;\nvar Z_MEM_ERROR = -4;\nvar Z_BUF_ERROR = -5;\n//var Z_VERSION_ERROR = -6;\n\n/* The deflate compression method */\nvar Z_DEFLATED = 8;\n\n\n/* STATES ====================================================================*/\n/* ===========================================================================*/\n\n\nvar HEAD = 1; /* i: waiting for magic header */\nvar FLAGS = 2; /* i: waiting for method and flags (gzip) */\nvar TIME = 3; /* i: waiting for modification time (gzip) */\nvar OS = 4; /* i: waiting for extra flags and operating system (gzip) */\nvar EXLEN = 5; /* i: waiting for extra length (gzip) */\nvar EXTRA = 6; /* i: waiting for extra bytes (gzip) */\nvar NAME = 7; /* i: waiting for end of file name (gzip) */\nvar COMMENT = 8; /* i: waiting for end of comment (gzip) */\nvar HCRC = 9; /* i: waiting for header crc (gzip) */\nvar DICTID = 10; /* i: waiting for dictionary check value */\nvar DICT = 11; /* waiting for inflateSetDictionary() call */\nvar TYPE = 12; /* i: waiting for type bits, including last-flag bit */\nvar TYPEDO = 13; /* i: same, but skip check to exit inflate on new block */\nvar STORED = 14; /* i: waiting for stored size (length and complement) */\nvar COPY_ = 15; /* i/o: same as COPY below, but only first time in */\nvar COPY = 16; /* i/o: waiting for input or output to copy stored block */\nvar TABLE = 17; /* i: waiting for dynamic block table lengths */\nvar LENLENS = 18; /* i: waiting for code length code lengths */\nvar CODELENS = 19; /* i: waiting for length/lit and distance code lengths */\nvar LEN_ = 20; /* i: same as LEN below, but only first time in */\nvar LEN = 21; /* i: waiting for length/lit/eob code */\nvar LENEXT = 22; /* i: waiting for length extra bits */\nvar DIST = 23; /* i: waiting for distance code */\nvar DISTEXT = 24; /* i: waiting for distance extra bits */\nvar MATCH = 25; /* o: waiting for output space to copy string */\nvar LIT = 26; /* o: waiting for output space to write literal */\nvar CHECK = 27; /* i: waiting for 32-bit check value */\nvar LENGTH = 28; /* i: waiting for 32-bit length (gzip) */\nvar DONE = 29; /* finished check, done -- remain here until reset */\nvar BAD = 30; /* got a data error -- remain here until reset */\nvar MEM = 31; /* got an inflate() memory error -- remain here until reset */\nvar SYNC = 32; /* looking for synchronization bytes to restart inflate() */\n\n/* ===========================================================================*/\n\n\n\nvar ENOUGH_LENS = 852;\nvar ENOUGH_DISTS = 592;\n//var ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS);\n\nvar MAX_WBITS = 15;\n/* 32K LZ77 window */\nvar DEF_WBITS = MAX_WBITS;\n\n\nfunction zswap32(q) {\n return (((q >>> 24) & 0xff) +\n ((q >>> 8) & 0xff00) +\n ((q & 0xff00) << 8) +\n ((q & 0xff) << 24));\n}\n\n\nfunction InflateState() {\n this.mode = 0; /* current inflate mode */\n this.last = false; /* true if processing last block */\n this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */\n this.havedict = false; /* true if dictionary provided */\n this.flags = 0; /* gzip header method and flags (0 if zlib) */\n this.dmax = 0; /* zlib header max distance (INFLATE_STRICT) */\n this.check = 0; /* protected copy of check value */\n this.total = 0; /* protected copy of output count */\n // TODO: may be {}\n this.head = null; /* where to save gzip header 
information */\n\n /* sliding window */\n this.wbits = 0; /* log base 2 of requested window size */\n this.wsize = 0; /* window size or zero if not using window */\n this.whave = 0; /* valid bytes in the window */\n this.wnext = 0; /* window write index */\n this.window = null; /* allocated sliding window, if needed */\n\n /* bit accumulator */\n this.hold = 0; /* input bit accumulator */\n this.bits = 0; /* number of bits in \"in\" */\n\n /* for string and stored block copying */\n this.length = 0; /* literal or length of data to copy */\n this.offset = 0; /* distance back to copy string from */\n\n /* for table and code decoding */\n this.extra = 0; /* extra bits needed */\n\n /* fixed and dynamic code tables */\n this.lencode = null; /* starting table for length/literal codes */\n this.distcode = null; /* starting table for distance codes */\n this.lenbits = 0; /* index bits for lencode */\n this.distbits = 0; /* index bits for distcode */\n\n /* dynamic table building */\n this.ncode = 0; /* number of code length code lengths */\n this.nlen = 0; /* number of length code lengths */\n this.ndist = 0; /* number of distance code lengths */\n this.have = 0; /* number of code lengths in lens[] */\n this.next = null; /* next available space in codes[] */\n\n this.lens = new utils.Buf16(320); /* temporary storage for code lengths */\n this.work = new utils.Buf16(288); /* work area for code table building */\n\n /*\n because we don't have pointers in js, we use lencode and distcode directly\n as buffers so we don't need codes\n */\n //this.codes = new utils.Buf32(ENOUGH); /* space for code tables */\n this.lendyn = null; /* dynamic table for length/literal codes (JS specific) */\n this.distdyn = null; /* dynamic table for distance codes (JS specific) */\n this.sane = 0; /* if false, allow invalid distance too far */\n this.back = 0; /* bits back of last unprocessed length/lit */\n this.was = 0; /* initial length of match */\n}\n\nfunction inflateResetKeep(strm) {\n var state;\n\n if (!strm || !strm.state) { return Z_STREAM_ERROR; }\n state = strm.state;\n strm.total_in = strm.total_out = state.total = 0;\n strm.msg = ''; /*Z_NULL*/\n if (state.wrap) { /* to support ill-conceived Java test suite */\n strm.adler = state.wrap & 1;\n }\n state.mode = HEAD;\n state.last = 0;\n state.havedict = 0;\n state.dmax = 32768;\n state.head = null/*Z_NULL*/;\n state.hold = 0;\n state.bits = 0;\n //state.lencode = state.distcode = state.next = state.codes;\n state.lencode = state.lendyn = new utils.Buf32(ENOUGH_LENS);\n state.distcode = state.distdyn = new utils.Buf32(ENOUGH_DISTS);\n\n state.sane = 1;\n state.back = -1;\n //Tracev((stderr, \"inflate: reset\\n\"));\n return Z_OK;\n}\n\nfunction inflateReset(strm) {\n var state;\n\n if (!strm || !strm.state) { return Z_STREAM_ERROR; }\n state = strm.state;\n state.wsize = 0;\n state.whave = 0;\n state.wnext = 0;\n return inflateResetKeep(strm);\n\n}\n\nfunction inflateReset2(strm, windowBits) {\n var wrap;\n var state;\n\n /* get the state */\n if (!strm || !strm.state) { return Z_STREAM_ERROR; }\n state = strm.state;\n\n /* extract wrap request from windowBits parameter */\n if (windowBits < 0) {\n wrap = 0;\n windowBits = -windowBits;\n }\n else {\n wrap = (windowBits >> 4) + 1;\n if (windowBits < 48) {\n windowBits &= 15;\n }\n }\n\n /* set number of window bits, free window if different */\n if (windowBits && (windowBits < 8 || windowBits > 15)) {\n return Z_STREAM_ERROR;\n }\n if (state.window !== null && state.wbits !== windowBits) {\n state.window = 
null;\n }\n\n /* update state and reset the rest of it */\n state.wrap = wrap;\n state.wbits = windowBits;\n return inflateReset(strm);\n}\n\nfunction inflateInit2(strm, windowBits) {\n var ret;\n var state;\n\n if (!strm) { return Z_STREAM_ERROR; }\n //strm.msg = Z_NULL; /* in case we return an error */\n\n state = new InflateState();\n\n //if (state === Z_NULL) return Z_MEM_ERROR;\n //Tracev((stderr, \"inflate: allocated\\n\"));\n strm.state = state;\n state.window = null/*Z_NULL*/;\n ret = inflateReset2(strm, windowBits);\n if (ret !== Z_OK) {\n strm.state = null/*Z_NULL*/;\n }\n return ret;\n}\n\nfunction inflateInit(strm) {\n return inflateInit2(strm, DEF_WBITS);\n}\n\n\n/*\n Return state with length and distance decoding tables and index sizes set to\n fixed code decoding. Normally this returns fixed tables from inffixed.h.\n If BUILDFIXED is defined, then instead this routine builds the tables the\n first time it's called, and returns those tables the first time and\n thereafter. This reduces the size of the code by about 2K bytes, in\n exchange for a little execution time. However, BUILDFIXED should not be\n used for threaded applications, since the rewriting of the tables and virgin\n may not be thread-safe.\n */\nvar virgin = true;\n\nvar lenfix, distfix; // We have no pointers in JS, so keep tables separate\n\nfunction fixedtables(state) {\n /* build fixed huffman tables if first call (may not be thread safe) */\n if (virgin) {\n var sym;\n\n lenfix = new utils.Buf32(512);\n distfix = new utils.Buf32(32);\n\n /* literal/length table */\n sym = 0;\n while (sym < 144) { state.lens[sym++] = 8; }\n while (sym < 256) { state.lens[sym++] = 9; }\n while (sym < 280) { state.lens[sym++] = 7; }\n while (sym < 288) { state.lens[sym++] = 8; }\n\n inflate_table(LENS, state.lens, 0, 288, lenfix, 0, state.work, { bits: 9 });\n\n /* distance table */\n sym = 0;\n while (sym < 32) { state.lens[sym++] = 5; }\n\n inflate_table(DISTS, state.lens, 0, 32, distfix, 0, state.work, { bits: 5 });\n\n /* do this just once */\n virgin = false;\n }\n\n state.lencode = lenfix;\n state.lenbits = 9;\n state.distcode = distfix;\n state.distbits = 5;\n}\n\n\n/*\n Update the window with the last wsize (normally 32K) bytes written before\n returning. If window does not exist yet, create it. 
This is only called\n when a window is already in use, or when output has been written during this\n inflate call, but the end of the deflate stream has not been reached yet.\n It is also called to create a window for dictionary data when a dictionary\n is loaded.\n\n Providing output buffers larger than 32K to inflate() should provide a speed\n advantage, since only the last 32K of output is copied to the sliding window\n upon return from inflate(), and since all distances after the first 32K of\n output will fall in the output data, making match copies simpler and faster.\n The advantage may be dependent on the size of the processor's data caches.\n */\nfunction updatewindow(strm, src, end, copy) {\n var dist;\n var state = strm.state;\n\n /* if it hasn't been done already, allocate space for the window */\n if (state.window === null) {\n state.wsize = 1 << state.wbits;\n state.wnext = 0;\n state.whave = 0;\n\n state.window = new utils.Buf8(state.wsize);\n }\n\n /* copy state->wsize or less output bytes into the circular window */\n if (copy >= state.wsize) {\n utils.arraySet(state.window, src, end - state.wsize, state.wsize, 0);\n state.wnext = 0;\n state.whave = state.wsize;\n }\n else {\n dist = state.wsize - state.wnext;\n if (dist > copy) {\n dist = copy;\n }\n //zmemcpy(state->window + state->wnext, end - copy, dist);\n utils.arraySet(state.window, src, end - copy, dist, state.wnext);\n copy -= dist;\n if (copy) {\n //zmemcpy(state->window, end - copy, copy);\n utils.arraySet(state.window, src, end - copy, copy, 0);\n state.wnext = copy;\n state.whave = state.wsize;\n }\n else {\n state.wnext += dist;\n if (state.wnext === state.wsize) { state.wnext = 0; }\n if (state.whave < state.wsize) { state.whave += dist; }\n }\n }\n return 0;\n}\n\nfunction inflate(strm, flush) {\n var state;\n var input, output; // input/output buffers\n var next; /* next input INDEX */\n var put; /* next output INDEX */\n var have, left; /* available input and output */\n var hold; /* bit buffer */\n var bits; /* bits in bit buffer */\n var _in, _out; /* save starting available input and output */\n var copy; /* number of stored or match bytes to copy */\n var from; /* where to copy match bytes from */\n var from_source;\n var here = 0; /* current decoding table entry */\n var here_bits, here_op, here_val; // paked \"here\" denormalized (JS specific)\n //var last; /* parent table entry */\n var last_bits, last_op, last_val; // paked \"last\" denormalized (JS specific)\n var len; /* length to copy for repeats, bits to drop */\n var ret; /* return code */\n var hbuf = new utils.Buf8(4); /* buffer for gzip header crc calculation */\n var opts;\n\n var n; // temporary var for NEED_BITS\n\n var order = /* permutation of code lengths */\n [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ];\n\n\n if (!strm || !strm.state || !strm.output ||\n (!strm.input && strm.avail_in !== 0)) {\n return Z_STREAM_ERROR;\n }\n\n state = strm.state;\n if (state.mode === TYPE) { state.mode = TYPEDO; } /* skip check */\n\n\n //--- LOAD() ---\n put = strm.next_out;\n output = strm.output;\n left = strm.avail_out;\n next = strm.next_in;\n input = strm.input;\n have = strm.avail_in;\n hold = state.hold;\n bits = state.bits;\n //---\n\n _in = have;\n _out = left;\n ret = Z_OK;\n\n inf_leave: // goto emulation\n for (;;) {\n switch (state.mode) {\n case HEAD:\n if (state.wrap === 0) {\n state.mode = TYPEDO;\n break;\n }\n //=== NEEDBITS(16);\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold 
+= input[next++] << bits;\n bits += 8;\n }\n //===//\n if ((state.wrap & 2) && hold === 0x8b1f) { /* gzip header */\n state.check = 0/*crc32(0L, Z_NULL, 0)*/;\n //=== CRC2(state.check, hold);\n hbuf[0] = hold & 0xff;\n hbuf[1] = (hold >>> 8) & 0xff;\n state.check = crc32(state.check, hbuf, 2, 0);\n //===//\n\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = FLAGS;\n break;\n }\n state.flags = 0; /* expect zlib header */\n if (state.head) {\n state.head.done = false;\n }\n if (!(state.wrap & 1) || /* check if zlib header allowed */\n (((hold & 0xff)/*BITS(8)*/ << 8) + (hold >> 8)) % 31) {\n strm.msg = 'incorrect header check';\n state.mode = BAD;\n break;\n }\n if ((hold & 0x0f)/*BITS(4)*/ !== Z_DEFLATED) {\n strm.msg = 'unknown compression method';\n state.mode = BAD;\n break;\n }\n //--- DROPBITS(4) ---//\n hold >>>= 4;\n bits -= 4;\n //---//\n len = (hold & 0x0f)/*BITS(4)*/ + 8;\n if (state.wbits === 0) {\n state.wbits = len;\n }\n else if (len > state.wbits) {\n strm.msg = 'invalid window size';\n state.mode = BAD;\n break;\n }\n state.dmax = 1 << len;\n //Tracev((stderr, \"inflate: zlib header ok\\n\"));\n strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/;\n state.mode = hold & 0x200 ? DICTID : TYPE;\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n break;\n case FLAGS:\n //=== NEEDBITS(16); */\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.flags = hold;\n if ((state.flags & 0xff) !== Z_DEFLATED) {\n strm.msg = 'unknown compression method';\n state.mode = BAD;\n break;\n }\n if (state.flags & 0xe000) {\n strm.msg = 'unknown header flags set';\n state.mode = BAD;\n break;\n }\n if (state.head) {\n state.head.text = ((hold >> 8) & 1);\n }\n if (state.flags & 0x0200) {\n //=== CRC2(state.check, hold);\n hbuf[0] = hold & 0xff;\n hbuf[1] = (hold >>> 8) & 0xff;\n state.check = crc32(state.check, hbuf, 2, 0);\n //===//\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = TIME;\n /* falls through */\n case TIME:\n //=== NEEDBITS(32); */\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if (state.head) {\n state.head.time = hold;\n }\n if (state.flags & 0x0200) {\n //=== CRC4(state.check, hold)\n hbuf[0] = hold & 0xff;\n hbuf[1] = (hold >>> 8) & 0xff;\n hbuf[2] = (hold >>> 16) & 0xff;\n hbuf[3] = (hold >>> 24) & 0xff;\n state.check = crc32(state.check, hbuf, 4, 0);\n //===\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = OS;\n /* falls through */\n case OS:\n //=== NEEDBITS(16); */\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if (state.head) {\n state.head.xflags = (hold & 0xff);\n state.head.os = (hold >> 8);\n }\n if (state.flags & 0x0200) {\n //=== CRC2(state.check, hold);\n hbuf[0] = hold & 0xff;\n hbuf[1] = (hold >>> 8) & 0xff;\n state.check = crc32(state.check, hbuf, 2, 0);\n //===//\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = EXLEN;\n /* falls through */\n case EXLEN:\n if (state.flags & 0x0400) {\n //=== NEEDBITS(16); */\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.length = hold;\n if (state.head) {\n state.head.extra_len = hold;\n }\n if (state.flags & 0x0200) {\n //=== CRC2(state.check, hold);\n hbuf[0] = hold & 0xff;\n hbuf[1] = 
(hold >>> 8) & 0xff;\n state.check = crc32(state.check, hbuf, 2, 0);\n //===//\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n }\n else if (state.head) {\n state.head.extra = null/*Z_NULL*/;\n }\n state.mode = EXTRA;\n /* falls through */\n case EXTRA:\n if (state.flags & 0x0400) {\n copy = state.length;\n if (copy > have) { copy = have; }\n if (copy) {\n if (state.head) {\n len = state.head.extra_len - state.length;\n if (!state.head.extra) {\n // Use untyped array for more convenient processing later\n state.head.extra = new Array(state.head.extra_len);\n }\n utils.arraySet(\n state.head.extra,\n input,\n next,\n // extra field is limited to 65536 bytes\n // - no need for additional size check\n copy,\n /*len + copy > state.head.extra_max - len ? state.head.extra_max : copy,*/\n len\n );\n //zmemcpy(state.head.extra + len, next,\n // len + copy > state.head.extra_max ?\n // state.head.extra_max - len : copy);\n }\n if (state.flags & 0x0200) {\n state.check = crc32(state.check, input, copy, next);\n }\n have -= copy;\n next += copy;\n state.length -= copy;\n }\n if (state.length) { break inf_leave; }\n }\n state.length = 0;\n state.mode = NAME;\n /* falls through */\n case NAME:\n if (state.flags & 0x0800) {\n if (have === 0) { break inf_leave; }\n copy = 0;\n do {\n // TODO: 2 or 1 bytes?\n len = input[next + copy++];\n /* use constant limit because in js we should not preallocate memory */\n if (state.head && len &&\n (state.length < 65536 /*state.head.name_max*/)) {\n state.head.name += String.fromCharCode(len);\n }\n } while (len && copy < have);\n\n if (state.flags & 0x0200) {\n state.check = crc32(state.check, input, copy, next);\n }\n have -= copy;\n next += copy;\n if (len) { break inf_leave; }\n }\n else if (state.head) {\n state.head.name = null;\n }\n state.length = 0;\n state.mode = COMMENT;\n /* falls through */\n case COMMENT:\n if (state.flags & 0x1000) {\n if (have === 0) { break inf_leave; }\n copy = 0;\n do {\n len = input[next + copy++];\n /* use constant limit because in js we should not preallocate memory */\n if (state.head && len &&\n (state.length < 65536 /*state.head.comm_max*/)) {\n state.head.comment += String.fromCharCode(len);\n }\n } while (len && copy < have);\n if (state.flags & 0x0200) {\n state.check = crc32(state.check, input, copy, next);\n }\n have -= copy;\n next += copy;\n if (len) { break inf_leave; }\n }\n else if (state.head) {\n state.head.comment = null;\n }\n state.mode = HCRC;\n /* falls through */\n case HCRC:\n if (state.flags & 0x0200) {\n //=== NEEDBITS(16); */\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if (hold !== (state.check & 0xffff)) {\n strm.msg = 'header crc mismatch';\n state.mode = BAD;\n break;\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n }\n if (state.head) {\n state.head.hcrc = ((state.flags >> 9) & 1);\n state.head.done = true;\n }\n strm.adler = state.check = 0;\n state.mode = TYPE;\n break;\n case DICTID:\n //=== NEEDBITS(32); */\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n strm.adler = state.check = zswap32(hold);\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = DICT;\n /* falls through */\n case DICT:\n if (state.havedict === 0) {\n //--- RESTORE() ---\n strm.next_out = put;\n strm.avail_out = left;\n strm.next_in = next;\n strm.avail_in = have;\n state.hold = hold;\n state.bits = bits;\n //---\n 
return Z_NEED_DICT;\n }\n strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/;\n state.mode = TYPE;\n /* falls through */\n case TYPE:\n if (flush === Z_BLOCK || flush === Z_TREES) { break inf_leave; }\n /* falls through */\n case TYPEDO:\n if (state.last) {\n //--- BYTEBITS() ---//\n hold >>>= bits & 7;\n bits -= bits & 7;\n //---//\n state.mode = CHECK;\n break;\n }\n //=== NEEDBITS(3); */\n while (bits < 3) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.last = (hold & 0x01)/*BITS(1)*/;\n //--- DROPBITS(1) ---//\n hold >>>= 1;\n bits -= 1;\n //---//\n\n switch ((hold & 0x03)/*BITS(2)*/) {\n case 0: /* stored block */\n //Tracev((stderr, \"inflate: stored block%s\\n\",\n // state.last ? \" (last)\" : \"\"));\n state.mode = STORED;\n break;\n case 1: /* fixed block */\n fixedtables(state);\n //Tracev((stderr, \"inflate: fixed codes block%s\\n\",\n // state.last ? \" (last)\" : \"\"));\n state.mode = LEN_; /* decode codes */\n if (flush === Z_TREES) {\n //--- DROPBITS(2) ---//\n hold >>>= 2;\n bits -= 2;\n //---//\n break inf_leave;\n }\n break;\n case 2: /* dynamic block */\n //Tracev((stderr, \"inflate: dynamic codes block%s\\n\",\n // state.last ? \" (last)\" : \"\"));\n state.mode = TABLE;\n break;\n case 3:\n strm.msg = 'invalid block type';\n state.mode = BAD;\n }\n //--- DROPBITS(2) ---//\n hold >>>= 2;\n bits -= 2;\n //---//\n break;\n case STORED:\n //--- BYTEBITS() ---// /* go to byte boundary */\n hold >>>= bits & 7;\n bits -= bits & 7;\n //---//\n //=== NEEDBITS(32); */\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if ((hold & 0xffff) !== ((hold >>> 16) ^ 0xffff)) {\n strm.msg = 'invalid stored block lengths';\n state.mode = BAD;\n break;\n }\n state.length = hold & 0xffff;\n //Tracev((stderr, \"inflate: stored length %u\\n\",\n // state.length));\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = COPY_;\n if (flush === Z_TREES) { break inf_leave; }\n /* falls through */\n case COPY_:\n state.mode = COPY;\n /* falls through */\n case COPY:\n copy = state.length;\n if (copy) {\n if (copy > have) { copy = have; }\n if (copy > left) { copy = left; }\n if (copy === 0) { break inf_leave; }\n //--- zmemcpy(put, next, copy); ---\n utils.arraySet(output, input, next, copy, put);\n //---//\n have -= copy;\n next += copy;\n left -= copy;\n put += copy;\n state.length -= copy;\n break;\n }\n //Tracev((stderr, \"inflate: stored end\\n\"));\n state.mode = TYPE;\n break;\n case TABLE:\n //=== NEEDBITS(14); */\n while (bits < 14) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.nlen = (hold & 0x1f)/*BITS(5)*/ + 257;\n //--- DROPBITS(5) ---//\n hold >>>= 5;\n bits -= 5;\n //---//\n state.ndist = (hold & 0x1f)/*BITS(5)*/ + 1;\n //--- DROPBITS(5) ---//\n hold >>>= 5;\n bits -= 5;\n //---//\n state.ncode = (hold & 0x0f)/*BITS(4)*/ + 4;\n //--- DROPBITS(4) ---//\n hold >>>= 4;\n bits -= 4;\n //---//\n//#ifndef PKZIP_BUG_WORKAROUND\n if (state.nlen > 286 || state.ndist > 30) {\n strm.msg = 'too many length or distance symbols';\n state.mode = BAD;\n break;\n }\n//#endif\n //Tracev((stderr, \"inflate: table sizes ok\\n\"));\n state.have = 0;\n state.mode = LENLENS;\n /* falls through */\n case LENLENS:\n while (state.have < state.ncode) {\n //=== NEEDBITS(3);\n while (bits < 3) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << 
bits;\n bits += 8;\n }\n //===//\n state.lens[order[state.have++]] = (hold & 0x07);//BITS(3);\n //--- DROPBITS(3) ---//\n hold >>>= 3;\n bits -= 3;\n //---//\n }\n while (state.have < 19) {\n state.lens[order[state.have++]] = 0;\n }\n // We have separate tables & no pointers. 2 commented lines below not needed.\n //state.next = state.codes;\n //state.lencode = state.next;\n // Switch to use dynamic table\n state.lencode = state.lendyn;\n state.lenbits = 7;\n\n opts = { bits: state.lenbits };\n ret = inflate_table(CODES, state.lens, 0, 19, state.lencode, 0, state.work, opts);\n state.lenbits = opts.bits;\n\n if (ret) {\n strm.msg = 'invalid code lengths set';\n state.mode = BAD;\n break;\n }\n //Tracev((stderr, \"inflate: code lengths ok\\n\"));\n state.have = 0;\n state.mode = CODELENS;\n /* falls through */\n case CODELENS:\n while (state.have < state.nlen + state.ndist) {\n for (;;) {\n here = state.lencode[hold & ((1 << state.lenbits) - 1)];/*BITS(state.lenbits)*/\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if ((here_bits) <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n if (here_val < 16) {\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n state.lens[state.have++] = here_val;\n }\n else {\n if (here_val === 16) {\n //=== NEEDBITS(here.bits + 2);\n n = here_bits + 2;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n if (state.have === 0) {\n strm.msg = 'invalid bit length repeat';\n state.mode = BAD;\n break;\n }\n len = state.lens[state.have - 1];\n copy = 3 + (hold & 0x03);//BITS(2);\n //--- DROPBITS(2) ---//\n hold >>>= 2;\n bits -= 2;\n //---//\n }\n else if (here_val === 17) {\n //=== NEEDBITS(here.bits + 3);\n n = here_bits + 3;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n len = 0;\n copy = 3 + (hold & 0x07);//BITS(3);\n //--- DROPBITS(3) ---//\n hold >>>= 3;\n bits -= 3;\n //---//\n }\n else {\n //=== NEEDBITS(here.bits + 7);\n n = here_bits + 7;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n len = 0;\n copy = 11 + (hold & 0x7f);//BITS(7);\n //--- DROPBITS(7) ---//\n hold >>>= 7;\n bits -= 7;\n //---//\n }\n if (state.have + copy > state.nlen + state.ndist) {\n strm.msg = 'invalid bit length repeat';\n state.mode = BAD;\n break;\n }\n while (copy--) {\n state.lens[state.have++] = len;\n }\n }\n }\n\n /* handle error breaks in while */\n if (state.mode === BAD) { break; }\n\n /* check for end-of-block code (better have one) */\n if (state.lens[256] === 0) {\n strm.msg = 'invalid code -- missing end-of-block';\n state.mode = BAD;\n break;\n }\n\n /* build code tables -- note: do not change the lenbits or distbits\n values here (9 and 6) without reading the comments in inftrees.h\n concerning the ENOUGH constants, which depend on those values */\n state.lenbits = 9;\n\n opts = { bits: state.lenbits };\n ret = inflate_table(LENS, state.lens, 0, state.nlen, state.lencode, 0, 
state.work, opts);\n // We have separate tables & no pointers. 2 commented lines below not needed.\n // state.next_index = opts.table_index;\n state.lenbits = opts.bits;\n // state.lencode = state.next;\n\n if (ret) {\n strm.msg = 'invalid literal/lengths set';\n state.mode = BAD;\n break;\n }\n\n state.distbits = 6;\n //state.distcode.copy(state.codes);\n // Switch to use dynamic table\n state.distcode = state.distdyn;\n opts = { bits: state.distbits };\n ret = inflate_table(DISTS, state.lens, state.nlen, state.ndist, state.distcode, 0, state.work, opts);\n // We have separate tables & no pointers. 2 commented lines below not needed.\n // state.next_index = opts.table_index;\n state.distbits = opts.bits;\n // state.distcode = state.next;\n\n if (ret) {\n strm.msg = 'invalid distances set';\n state.mode = BAD;\n break;\n }\n //Tracev((stderr, 'inflate: codes ok\\n'));\n state.mode = LEN_;\n if (flush === Z_TREES) { break inf_leave; }\n /* falls through */\n case LEN_:\n state.mode = LEN;\n /* falls through */\n case LEN:\n if (have >= 6 && left >= 258) {\n //--- RESTORE() ---\n strm.next_out = put;\n strm.avail_out = left;\n strm.next_in = next;\n strm.avail_in = have;\n state.hold = hold;\n state.bits = bits;\n //---\n inflate_fast(strm, _out);\n //--- LOAD() ---\n put = strm.next_out;\n output = strm.output;\n left = strm.avail_out;\n next = strm.next_in;\n input = strm.input;\n have = strm.avail_in;\n hold = state.hold;\n bits = state.bits;\n //---\n\n if (state.mode === TYPE) {\n state.back = -1;\n }\n break;\n }\n state.back = 0;\n for (;;) {\n here = state.lencode[hold & ((1 << state.lenbits) - 1)]; /*BITS(state.lenbits)*/\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if (here_bits <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n if (here_op && (here_op & 0xf0) === 0) {\n last_bits = here_bits;\n last_op = here_op;\n last_val = here_val;\n for (;;) {\n here = state.lencode[last_val +\n ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)];\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if ((last_bits + here_bits) <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n //--- DROPBITS(last.bits) ---//\n hold >>>= last_bits;\n bits -= last_bits;\n //---//\n state.back += last_bits;\n }\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n state.back += here_bits;\n state.length = here_val;\n if (here_op === 0) {\n //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?\n // \"inflate: literal '%c'\\n\" :\n // \"inflate: literal 0x%02x\\n\", here.val));\n state.mode = LIT;\n break;\n }\n if (here_op & 32) {\n //Tracevv((stderr, \"inflate: end of block\\n\"));\n state.back = -1;\n state.mode = TYPE;\n break;\n }\n if (here_op & 64) {\n strm.msg = 'invalid literal/length code';\n state.mode = BAD;\n break;\n }\n state.extra = here_op & 15;\n state.mode = LENEXT;\n /* falls through */\n case LENEXT:\n if (state.extra) {\n //=== NEEDBITS(state.extra);\n n = state.extra;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.length += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/;\n //--- DROPBITS(state.extra) ---//\n hold >>>= 
state.extra;\n bits -= state.extra;\n //---//\n state.back += state.extra;\n }\n //Tracevv((stderr, \"inflate: length %u\\n\", state.length));\n state.was = state.length;\n state.mode = DIST;\n /* falls through */\n case DIST:\n for (;;) {\n here = state.distcode[hold & ((1 << state.distbits) - 1)];/*BITS(state.distbits)*/\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if ((here_bits) <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n if ((here_op & 0xf0) === 0) {\n last_bits = here_bits;\n last_op = here_op;\n last_val = here_val;\n for (;;) {\n here = state.distcode[last_val +\n ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)];\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if ((last_bits + here_bits) <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n //--- DROPBITS(last.bits) ---//\n hold >>>= last_bits;\n bits -= last_bits;\n //---//\n state.back += last_bits;\n }\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n state.back += here_bits;\n if (here_op & 64) {\n strm.msg = 'invalid distance code';\n state.mode = BAD;\n break;\n }\n state.offset = here_val;\n state.extra = (here_op) & 15;\n state.mode = DISTEXT;\n /* falls through */\n case DISTEXT:\n if (state.extra) {\n //=== NEEDBITS(state.extra);\n n = state.extra;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.offset += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/;\n //--- DROPBITS(state.extra) ---//\n hold >>>= state.extra;\n bits -= state.extra;\n //---//\n state.back += state.extra;\n }\n//#ifdef INFLATE_STRICT\n if (state.offset > state.dmax) {\n strm.msg = 'invalid distance too far back';\n state.mode = BAD;\n break;\n }\n//#endif\n //Tracevv((stderr, \"inflate: distance %u\\n\", state.offset));\n state.mode = MATCH;\n /* falls through */\n case MATCH:\n if (left === 0) { break inf_leave; }\n copy = _out - left;\n if (state.offset > copy) { /* copy from window */\n copy = state.offset - copy;\n if (copy > state.whave) {\n if (state.sane) {\n strm.msg = 'invalid distance too far back';\n state.mode = BAD;\n break;\n }\n// (!) 
This block is disabled in zlib defaults,\n// don't enable it for binary compatibility\n//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR\n// Trace((stderr, \"inflate.c too far\\n\"));\n// copy -= state.whave;\n// if (copy > state.length) { copy = state.length; }\n// if (copy > left) { copy = left; }\n// left -= copy;\n// state.length -= copy;\n// do {\n// output[put++] = 0;\n// } while (--copy);\n// if (state.length === 0) { state.mode = LEN; }\n// break;\n//#endif\n }\n if (copy > state.wnext) {\n copy -= state.wnext;\n from = state.wsize - copy;\n }\n else {\n from = state.wnext - copy;\n }\n if (copy > state.length) { copy = state.length; }\n from_source = state.window;\n }\n else { /* copy from output */\n from_source = output;\n from = put - state.offset;\n copy = state.length;\n }\n if (copy > left) { copy = left; }\n left -= copy;\n state.length -= copy;\n do {\n output[put++] = from_source[from++];\n } while (--copy);\n if (state.length === 0) { state.mode = LEN; }\n break;\n case LIT:\n if (left === 0) { break inf_leave; }\n output[put++] = state.length;\n left--;\n state.mode = LEN;\n break;\n case CHECK:\n if (state.wrap) {\n //=== NEEDBITS(32);\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n // Use '|' instead of '+' to make sure that result is signed\n hold |= input[next++] << bits;\n bits += 8;\n }\n //===//\n _out -= left;\n strm.total_out += _out;\n state.total += _out;\n if (_out) {\n strm.adler = state.check =\n /*UPDATE(state.check, put - _out, _out);*/\n (state.flags ? crc32(state.check, output, _out, put - _out) : adler32(state.check, output, _out, put - _out));\n\n }\n _out = left;\n // NB: crc32 stored as signed 32-bit int, zswap32 returns signed too\n if ((state.flags ? hold : zswap32(hold)) !== state.check) {\n strm.msg = 'incorrect data check';\n state.mode = BAD;\n break;\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n //Tracev((stderr, \"inflate: check matches trailer\\n\"));\n }\n state.mode = LENGTH;\n /* falls through */\n case LENGTH:\n if (state.wrap && state.flags) {\n //=== NEEDBITS(32);\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if (hold !== (state.total & 0xffffffff)) {\n strm.msg = 'incorrect length check';\n state.mode = BAD;\n break;\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n //Tracev((stderr, \"inflate: length matches trailer\\n\"));\n }\n state.mode = DONE;\n /* falls through */\n case DONE:\n ret = Z_STREAM_END;\n break inf_leave;\n case BAD:\n ret = Z_DATA_ERROR;\n break inf_leave;\n case MEM:\n return Z_MEM_ERROR;\n case SYNC:\n /* falls through */\n default:\n return Z_STREAM_ERROR;\n }\n }\n\n // inf_leave <- here is real place for \"goto inf_leave\", emulated via \"break inf_leave\"\n\n /*\n Return from inflate(), updating the total counts and the check value.\n If there was no progress during the inflate() call, return a buffer\n error. 
Call updatewindow() to create and/or update the window state.\n Note: a memory error from inflate() is non-recoverable.\n */\n\n //--- RESTORE() ---\n strm.next_out = put;\n strm.avail_out = left;\n strm.next_in = next;\n strm.avail_in = have;\n state.hold = hold;\n state.bits = bits;\n //---\n\n if (state.wsize || (_out !== strm.avail_out && state.mode < BAD &&\n (state.mode < CHECK || flush !== Z_FINISH))) {\n if (updatewindow(strm, strm.output, strm.next_out, _out - strm.avail_out)) {\n state.mode = MEM;\n return Z_MEM_ERROR;\n }\n }\n _in -= strm.avail_in;\n _out -= strm.avail_out;\n strm.total_in += _in;\n strm.total_out += _out;\n state.total += _out;\n if (state.wrap && _out) {\n strm.adler = state.check = /*UPDATE(state.check, strm.next_out - _out, _out);*/\n (state.flags ? crc32(state.check, output, _out, strm.next_out - _out) : adler32(state.check, output, _out, strm.next_out - _out));\n }\n strm.data_type = state.bits + (state.last ? 64 : 0) +\n (state.mode === TYPE ? 128 : 0) +\n (state.mode === LEN_ || state.mode === COPY_ ? 256 : 0);\n if (((_in === 0 && _out === 0) || flush === Z_FINISH) && ret === Z_OK) {\n ret = Z_BUF_ERROR;\n }\n return ret;\n}\n\nfunction inflateEnd(strm) {\n\n if (!strm || !strm.state /*|| strm->zfree == (free_func)0*/) {\n return Z_STREAM_ERROR;\n }\n\n var state = strm.state;\n if (state.window) {\n state.window = null;\n }\n strm.state = null;\n return Z_OK;\n}\n\nfunction inflateGetHeader(strm, head) {\n var state;\n\n /* check state */\n if (!strm || !strm.state) { return Z_STREAM_ERROR; }\n state = strm.state;\n if ((state.wrap & 2) === 0) { return Z_STREAM_ERROR; }\n\n /* save header structure */\n state.head = head;\n head.done = false;\n return Z_OK;\n}\n\nfunction inflateSetDictionary(strm, dictionary) {\n var dictLength = dictionary.length;\n\n var state;\n var dictid;\n var ret;\n\n /* check state */\n if (!strm /* == Z_NULL */ || !strm.state /* == Z_NULL */) { return Z_STREAM_ERROR; }\n state = strm.state;\n\n if (state.wrap !== 0 && state.mode !== DICT) {\n return Z_STREAM_ERROR;\n }\n\n /* check for correct dictionary identifier */\n if (state.mode === DICT) {\n dictid = 1; /* adler32(0, null, 0)*/\n /* dictid = adler32(dictid, dictionary, dictLength); */\n dictid = adler32(dictid, dictionary, dictLength, 0);\n if (dictid !== state.check) {\n return Z_DATA_ERROR;\n }\n }\n /* copy dictionary to window using updatewindow(), which will amend the\n existing dictionary if appropriate */\n ret = updatewindow(strm, dictionary, dictLength, dictLength);\n if (ret) {\n state.mode = MEM;\n return Z_MEM_ERROR;\n }\n state.havedict = 1;\n // Tracev((stderr, \"inflate: dictionary set\\n\"));\n return Z_OK;\n}\n\nexports.inflateReset = inflateReset;\nexports.inflateReset2 = inflateReset2;\nexports.inflateResetKeep = inflateResetKeep;\nexports.inflateInit = inflateInit;\nexports.inflateInit2 = inflateInit2;\nexports.inflate = inflate;\nexports.inflateEnd = inflateEnd;\nexports.inflateGetHeader = inflateGetHeader;\nexports.inflateSetDictionary = inflateSetDictionary;\nexports.inflateInfo = 'pako inflate (from Nodeca project)';\n\n/* Not implemented\nexports.inflateCopy = inflateCopy;\nexports.inflateGetDictionary = inflateGetDictionary;\nexports.inflateMark = inflateMark;\nexports.inflatePrime = inflatePrime;\nexports.inflateSync = inflateSync;\nexports.inflateSyncPoint = inflateSyncPoint;\nexports.inflateUndermine = inflateUndermine;\n*/\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey 
Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nmodule.exports = {\n\n /* Allowed flush values; see deflate() and inflate() below for details */\n Z_NO_FLUSH: 0,\n Z_PARTIAL_FLUSH: 1,\n Z_SYNC_FLUSH: 2,\n Z_FULL_FLUSH: 3,\n Z_FINISH: 4,\n Z_BLOCK: 5,\n Z_TREES: 6,\n\n /* Return codes for the compression/decompression functions. Negative values\n * are errors, positive values are used for special but normal events.\n */\n Z_OK: 0,\n Z_STREAM_END: 1,\n Z_NEED_DICT: 2,\n Z_ERRNO: -1,\n Z_STREAM_ERROR: -2,\n Z_DATA_ERROR: -3,\n //Z_MEM_ERROR: -4,\n Z_BUF_ERROR: -5,\n //Z_VERSION_ERROR: -6,\n\n /* compression levels */\n Z_NO_COMPRESSION: 0,\n Z_BEST_SPEED: 1,\n Z_BEST_COMPRESSION: 9,\n Z_DEFAULT_COMPRESSION: -1,\n\n\n Z_FILTERED: 1,\n Z_HUFFMAN_ONLY: 2,\n Z_RLE: 3,\n Z_FIXED: 4,\n Z_DEFAULT_STRATEGY: 0,\n\n /* Possible values of the data_type field (though see inflate()) */\n Z_BINARY: 0,\n Z_TEXT: 1,\n //Z_ASCII: 1, // = Z_TEXT (deprecated)\n Z_UNKNOWN: 2,\n\n /* The deflate compression method */\n Z_DEFLATED: 8\n //Z_NULL: null // Use -1 or null inline, depending on var type\n};\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. 
This notice may not be removed or altered from any source distribution.\n\nfunction GZheader() {\n /* true if compressed data believed to be text */\n this.text = 0;\n /* modification time */\n this.time = 0;\n /* extra flags (not used when writing a gzip file) */\n this.xflags = 0;\n /* operating system */\n this.os = 0;\n /* pointer to extra field or Z_NULL if none */\n this.extra = null;\n /* extra field length (valid if extra != Z_NULL) */\n this.extra_len = 0; // Actually, we don't need it in JS,\n // but leave for few code modifications\n\n //\n // Setup limits is not necessary because in js we should not preallocate memory\n // for inflate use constant limit in 65536 bytes\n //\n\n /* space at extra (only when reading header) */\n // this.extra_max = 0;\n /* pointer to zero-terminated file name or Z_NULL */\n this.name = '';\n /* space at name (only when reading header) */\n // this.name_max = 0;\n /* pointer to zero-terminated comment or Z_NULL */\n this.comment = '';\n /* space at comment (only when reading header) */\n // this.comm_max = 0;\n /* true if there was or will be a header crc */\n this.hcrc = 0;\n /* true when done reading gzip header (not used when writing a gzip file) */\n this.done = false;\n}\n\nmodule.exports = GZheader;\n","'use strict';\n\n\nvar zlib_inflate = require('./zlib/inflate');\nvar utils = require('./utils/common');\nvar strings = require('./utils/strings');\nvar c = require('./zlib/constants');\nvar msg = require('./zlib/messages');\nvar ZStream = require('./zlib/zstream');\nvar GZheader = require('./zlib/gzheader');\n\nvar toString = Object.prototype.toString;\n\n/**\n * class Inflate\n *\n * Generic JS-style wrapper for zlib calls. If you don't need\n * streaming behaviour - use more simple functions: [[inflate]]\n * and [[inflateRaw]].\n **/\n\n/* internal\n * inflate.chunks -> Array\n *\n * Chunks of output data, if [[Inflate#onData]] not overriden.\n **/\n\n/**\n * Inflate.result -> Uint8Array|Array|String\n *\n * Uncompressed result, generated by default [[Inflate#onData]]\n * and [[Inflate#onEnd]] handlers. Filled after you push last chunk\n * (call [[Inflate#push]] with `Z_FINISH` / `true` param) or if you\n * push a chunk with explicit flush (call [[Inflate#push]] with\n * `Z_SYNC_FLUSH` param).\n **/\n\n/**\n * Inflate.err -> Number\n *\n * Error code after inflate finished. 0 (Z_OK) on success.\n * Should be checked if broken data possible.\n **/\n\n/**\n * Inflate.msg -> String\n *\n * Error message, if [[Inflate.err]] != 0\n **/\n\n\n/**\n * new Inflate(options)\n * - options (Object): zlib inflate options.\n *\n * Creates new inflator instance with specified params. Throws exception\n * on bad params. Supported options:\n *\n * - `windowBits`\n * - `dictionary`\n *\n * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)\n * for more information on these.\n *\n * Additional options, for internal needs:\n *\n * - `chunkSize` - size of generated data chunks (16K by default)\n * - `raw` (Boolean) - do raw inflate\n * - `to` (String) - if equal to 'string', then result will be converted\n * from utf8 to utf16 (javascript) string. 
When string output requested,\n * chunk length can differ from `chunkSize`, depending on content.\n *\n * By default, when no options set, autodetect deflate/gzip data format via\n * wrapper header.\n *\n * ##### Example:\n *\n * ```javascript\n * var pako = require('pako')\n * , chunk1 = Uint8Array([1,2,3,4,5,6,7,8,9])\n * , chunk2 = Uint8Array([10,11,12,13,14,15,16,17,18,19]);\n *\n * var inflate = new pako.Inflate({ level: 3});\n *\n * inflate.push(chunk1, false);\n * inflate.push(chunk2, true); // true -> last chunk\n *\n * if (inflate.err) { throw new Error(inflate.err); }\n *\n * console.log(inflate.result);\n * ```\n **/\nfunction Inflate(options) {\n if (!(this instanceof Inflate)) return new Inflate(options);\n\n this.options = utils.assign({\n chunkSize: 16384,\n windowBits: 0,\n to: ''\n }, options || {});\n\n var opt = this.options;\n\n // Force window size for `raw` data, if not set directly,\n // because we have no header for autodetect.\n if (opt.raw && (opt.windowBits >= 0) && (opt.windowBits < 16)) {\n opt.windowBits = -opt.windowBits;\n if (opt.windowBits === 0) { opt.windowBits = -15; }\n }\n\n // If `windowBits` not defined (and mode not raw) - set autodetect flag for gzip/deflate\n if ((opt.windowBits >= 0) && (opt.windowBits < 16) &&\n !(options && options.windowBits)) {\n opt.windowBits += 32;\n }\n\n // Gzip header has no info about windows size, we can do autodetect only\n // for deflate. So, if window size not set, force it to max when gzip possible\n if ((opt.windowBits > 15) && (opt.windowBits < 48)) {\n // bit 3 (16) -> gzipped data\n // bit 4 (32) -> autodetect gzip/deflate\n if ((opt.windowBits & 15) === 0) {\n opt.windowBits |= 15;\n }\n }\n\n this.err = 0; // error code, if happens (0 = Z_OK)\n this.msg = ''; // error message\n this.ended = false; // used to avoid multiple onEnd() calls\n this.chunks = []; // chunks of compressed data\n\n this.strm = new ZStream();\n this.strm.avail_out = 0;\n\n var status = zlib_inflate.inflateInit2(\n this.strm,\n opt.windowBits\n );\n\n if (status !== c.Z_OK) {\n throw new Error(msg[status]);\n }\n\n this.header = new GZheader();\n\n zlib_inflate.inflateGetHeader(this.strm, this.header);\n}\n\n/**\n * Inflate#push(data[, mode]) -> Boolean\n * - data (Uint8Array|Array|ArrayBuffer|String): input data\n * - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes.\n * See constants. Skipped or `false` means Z_NO_FLUSH, `true` meansh Z_FINISH.\n *\n * Sends input data to inflate pipe, generating [[Inflate#onData]] calls with\n * new output chunks. Returns `true` on success. The last data block must have\n * mode Z_FINISH (or `true`). That will flush internal pending buffers and call\n * [[Inflate#onEnd]]. For interim explicit flushes (without ending the stream) you\n * can use mode Z_SYNC_FLUSH, keeping the decompression context.\n *\n * On fail call [[Inflate#onEnd]] with error code and return false.\n *\n * We strongly recommend to use `Uint8Array` on input for best speed (output\n * format is detected automatically). Also, don't skip last param and always\n * use the same type in your code (boolean or number). 
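// Hedged usage sketch for the streaming wrapper just defined (assumes the bundled
// pako ~1.x API): gzip some bytes, push them as the last chunk, then read both the
// parsed GZheader and the decompressed result.
var pako = require('pako');
var gz = pako.gzip(new Uint8Array([1, 2, 3, 4]));
var inflator = new pako.Inflate();       // windowBits autodetects the zlib/gzip wrapper
inflator.push(gz, true);                 // true -> Z_FINISH (last chunk)
if (inflator.err) { throw new Error(inflator.msg); }
console.log(inflator.header.done);       // true once the gzip header was parsed
console.log(inflator.result.length);     // 4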
That will improve JS speed.\n *\n * For regular `Array`-s make sure all elements are [0..255].\n *\n * ##### Example\n *\n * ```javascript\n * push(chunk, false); // push one of data chunks\n * ...\n * push(chunk, true); // push last chunk\n * ```\n **/\nInflate.prototype.push = function (data, mode) {\n var strm = this.strm;\n var chunkSize = this.options.chunkSize;\n var dictionary = this.options.dictionary;\n var status, _mode;\n var next_out_utf8, tail, utf8str;\n var dict;\n\n // Flag to properly process Z_BUF_ERROR on testing inflate call\n // when we check that all output data was flushed.\n var allowBufError = false;\n\n if (this.ended) { return false; }\n _mode = (mode === ~~mode) ? mode : ((mode === true) ? c.Z_FINISH : c.Z_NO_FLUSH);\n\n // Convert data if needed\n if (typeof data === 'string') {\n // Only binary strings can be decompressed on practice\n strm.input = strings.binstring2buf(data);\n } else if (toString.call(data) === '[object ArrayBuffer]') {\n strm.input = new Uint8Array(data);\n } else {\n strm.input = data;\n }\n\n strm.next_in = 0;\n strm.avail_in = strm.input.length;\n\n do {\n if (strm.avail_out === 0) {\n strm.output = new utils.Buf8(chunkSize);\n strm.next_out = 0;\n strm.avail_out = chunkSize;\n }\n\n status = zlib_inflate.inflate(strm, c.Z_NO_FLUSH); /* no bad return value */\n\n if (status === c.Z_NEED_DICT && dictionary) {\n // Convert data if needed\n if (typeof dictionary === 'string') {\n dict = strings.string2buf(dictionary);\n } else if (toString.call(dictionary) === '[object ArrayBuffer]') {\n dict = new Uint8Array(dictionary);\n } else {\n dict = dictionary;\n }\n\n status = zlib_inflate.inflateSetDictionary(this.strm, dict);\n\n }\n\n if (status === c.Z_BUF_ERROR && allowBufError === true) {\n status = c.Z_OK;\n allowBufError = false;\n }\n\n if (status !== c.Z_STREAM_END && status !== c.Z_OK) {\n this.onEnd(status);\n this.ended = true;\n return false;\n }\n\n if (strm.next_out) {\n if (strm.avail_out === 0 || status === c.Z_STREAM_END || (strm.avail_in === 0 && (_mode === c.Z_FINISH || _mode === c.Z_SYNC_FLUSH))) {\n\n if (this.options.to === 'string') {\n\n next_out_utf8 = strings.utf8border(strm.output, strm.next_out);\n\n tail = strm.next_out - next_out_utf8;\n utf8str = strings.buf2string(strm.output, next_out_utf8);\n\n // move tail\n strm.next_out = tail;\n strm.avail_out = chunkSize - tail;\n if (tail) { utils.arraySet(strm.output, strm.output, next_out_utf8, tail, 0); }\n\n this.onData(utf8str);\n\n } else {\n this.onData(utils.shrinkBuf(strm.output, strm.next_out));\n }\n }\n }\n\n // When no more input data, we should check that internal inflate buffers\n // are flushed. The only way to do it when avail_out = 0 - run one more\n // inflate pass. But if output data not exists, inflate return Z_BUF_ERROR.\n // Here we set flag to process this error properly.\n //\n // NOTE. 
Deflate does not return error in this case and does not needs such\n // logic.\n if (strm.avail_in === 0 && strm.avail_out === 0) {\n allowBufError = true;\n }\n\n } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== c.Z_STREAM_END);\n\n if (status === c.Z_STREAM_END) {\n _mode = c.Z_FINISH;\n }\n\n // Finalize on the last chunk.\n if (_mode === c.Z_FINISH) {\n status = zlib_inflate.inflateEnd(this.strm);\n this.onEnd(status);\n this.ended = true;\n return status === c.Z_OK;\n }\n\n // callback interim results if Z_SYNC_FLUSH.\n if (_mode === c.Z_SYNC_FLUSH) {\n this.onEnd(c.Z_OK);\n strm.avail_out = 0;\n return true;\n }\n\n return true;\n};\n\n\n/**\n * Inflate#onData(chunk) -> Void\n * - chunk (Uint8Array|Array|String): ouput data. Type of array depends\n * on js engine support. When string output requested, each chunk\n * will be string.\n *\n * By default, stores data blocks in `chunks[]` property and glue\n * those in `onEnd`. Override this handler, if you need another behaviour.\n **/\nInflate.prototype.onData = function (chunk) {\n this.chunks.push(chunk);\n};\n\n\n/**\n * Inflate#onEnd(status) -> Void\n * - status (Number): inflate status. 0 (Z_OK) on success,\n * other if not.\n *\n * Called either after you tell inflate that the input stream is\n * complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH)\n * or if an error happened. By default - join collected chunks,\n * free memory and fill `results` / `err` properties.\n **/\nInflate.prototype.onEnd = function (status) {\n // On success - join\n if (status === c.Z_OK) {\n if (this.options.to === 'string') {\n // Glue & convert here, until we teach pako to send\n // utf8 alligned strings to onData\n this.result = this.chunks.join('');\n } else {\n this.result = utils.flattenChunks(this.chunks);\n }\n }\n this.chunks = [];\n this.err = status;\n this.msg = this.strm.msg;\n};\n\n\n/**\n * inflate(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to decompress.\n * - options (Object): zlib inflate options.\n *\n * Decompress `data` with inflate/ungzip and `options`. Autodetect\n * format via wrapper header by default. That's why we don't provide\n * separate `ungzip` method.\n *\n * Supported options are:\n *\n * - windowBits\n *\n * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)\n * for more information.\n *\n * Sugar (options):\n *\n * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify\n * negative windowBits implicitly.\n * - `to` (String) - if equal to 'string', then result will be converted\n * from utf8 to utf16 (javascript) string. 
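// Sketch of overriding the default handlers documented above (assumed pako ~1.x API):
// collect string chunks yourself instead of relying on the default `result` property.
var pako = require('pako');
var inflator = new pako.Inflate({ to: 'string' });
var pieces = [];
inflator.onData = function (chunk) { pieces.push(chunk); };
inflator.onEnd = function (status) {
  if (status !== 0 /* Z_OK */) { throw new Error(inflator.strm.msg || 'inflate failed'); }
  console.log(pieces.join('')); // 'hello pixpipe'
};
inflator.push(pako.deflate('hello pixpipe'), true);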
When string output requested,\n * chunk length can differ from `chunkSize`, depending on content.\n *\n *\n * ##### Example:\n *\n * ```javascript\n * var pako = require('pako')\n * , input = pako.deflate([1,2,3,4,5,6,7,8,9])\n * , output;\n *\n * try {\n * output = pako.inflate(input);\n * } catch (err)\n * console.log(err);\n * }\n * ```\n **/\nfunction inflate(input, options) {\n var inflator = new Inflate(options);\n\n inflator.push(input, true);\n\n // That will never happens, if you don't cheat with options :)\n if (inflator.err) { throw inflator.msg || msg[inflator.err]; }\n\n return inflator.result;\n}\n\n\n/**\n * inflateRaw(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to decompress.\n * - options (Object): zlib inflate options.\n *\n * The same as [[inflate]], but creates raw data, without wrapper\n * (header and adler32 crc).\n **/\nfunction inflateRaw(input, options) {\n options = options || {};\n options.raw = true;\n return inflate(input, options);\n}\n\n\n/**\n * ungzip(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to decompress.\n * - options (Object): zlib inflate options.\n *\n * Just shortcut to [[inflate]], because it autodetects format\n * by header.content. Done for convenience.\n **/\n\n\nexports.Inflate = Inflate;\nexports.inflate = inflate;\nexports.inflateRaw = inflateRaw;\nexports.ungzip = inflate;\n","// Top level file is just a mixin of submodules & constants\n'use strict';\n\nvar assign = require('./lib/utils/common').assign;\n\nvar deflate = require('./lib/deflate');\nvar inflate = require('./lib/inflate');\nvar constants = require('./lib/zlib/constants');\n\nvar pako = {};\n\nassign(pako, deflate, inflate, constants);\n\nmodule.exports = pako;\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\n\nimport md5 from 'js-md5'\nimport pako from 'pako'\nimport { Filter } from '../core/Filter.js';\n\n\n/**\n* Takes the File inputs from a HTML input of type \"file\" (aka. a file dialog), and reads it as a ArrayBuffer.\n* Every File given in input should be added separately using `addInput( file[i], 'uniqueID' )`.\n* The event \"ready\" must be set up ( using .on(\"ready\", function(){}) ) and will\n* be triggered when all the files given in input are translated into ArrayBuffers.\n* Once ready, all the outputs are accecible using the same uniqueID with the\n* method `getOutput(\"uniqueID\")`.\n* Gzip compressed files will be uncompressed.\n*\n* Once the filter is *updated*, you can query the `filenames` metadata (sorted by categories)\n* and also the `checksums` metadata using `.getMetadata()`. 
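// One-shot sketch matching the inflate()/inflateRaw() helpers above: they throw on
// corrupt input, so wrap the call in try/catch (assumed pako ~1.x API).
var pako = require('pako');
var packed = pako.deflate(new Uint8Array([1, 2, 3, 4, 5]));
var output;
try {
  output = pako.inflate(packed);         // autodetects the zlib/gzip wrapper
} catch (err) {
  console.log(err);
}
console.log(output);                     // Uint8Array [1, 2, 3, 4, 5]
// For headerless streams, pair pako.deflateRaw() with pako.inflateRaw().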
This later metadata \n* give a unique *md5*, very convenient to compare if two files are actually the same.\n* Note that in case the file is *gziped*, the checksum is computed on the raw file,\n* not on the *un-gziped* buffer.\n*\n* **Usage**\n* - [examples/fileToArrayBuffer.html](../examples/fileToArrayBuffer.html)\n*/\nclass FileToArrayBufferReader extends Filter {\n\n constructor(){\n super();\n this._outputCounter = 0;\n \n // filenames by categories\n this.setMetadata(\"filenames\", {});\n \n // md5 checksum by categories\n this.setMetadata(\"checksums\", {});\n }\n\n\n _run(){\n var that = this;\n this._outputCounter = 0;\n var inputCategories = this.getInputCategories();\n\n inputCategories.forEach( function(category){\n that._loadFile( category );\n })\n }\n\n\n /**\n * [PRIVATE]\n * Perform the loading for the input of the given category\n * @param {String} category - input category\n */\n _loadFile( category ){\n var that = this;\n var reader = new FileReader();\n\n reader.onloadend = function(event) {\n var result = event.target.result;\n \n var filename = that._getInput(category).name;\n var basename = filename.split(/[\\\\/]/).pop();\n var extension = basename.split('.').pop();\n var checksum = md5( result );\n \n // few metadata for recognizing files (potentially)\n that._metadata.filenames[ category ] = basename;\n that._metadata.checksums[ category ] = checksum;\n\n if( extension.localeCompare(\"pixp\") ){\n // trying to un-gzip it with Pako\n try {\n result = pako.inflate(result).buffer;\n console.log(\"File was un-gziped successfully\");\n } catch (err) {\n console.log(\"Pako: \" + err + \" (this content is not gziped)\");\n }\n }\n \n that._output[ category ] = result;\n that._fileLoadCount();\n }\n\n reader.onerror = function() {\n this._output[ category ] = null;\n that._fileLoadCount();\n console.warn( \"error reading file from category \" + category );\n //throw new Error(error_message);\n };\n\n reader.readAsArrayBuffer( this._getInput(category) );\n }\n\n\n /**\n * [PRIVATE]\n * Launch the \"ready\" event if all files are loaded\n */\n _fileLoadCount(){\n var that = this;\n this._outputCounter ++;\n\n if( this._outputCounter == this.getNumberOfInputs() ){\n that.triggerEvent(\"ready\");\n }\n }\n\n} /* END of class FileToArrayBufferReader */\n\n\nexport { FileToArrayBufferReader }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\n\nimport pako from 'pako'\nimport md5 from 'js-md5'\nimport { Filter } from '../core/Filter.js';\n\n\n/**\n* Open a files as ArrayBuffer using their URL. You must specify one or several URL\n* (String) using `addInput(\"...\")` and add function to the event \"ready\" using\n* `.on( \"ready\", function(filter){ ... })`.\n* The \"ready\" event will be called only when all input are loaded.\n* Gzip compressed files will be uncompressed.\n* Once the filter is *updated*, you can query the `filenames` metadata (sorted by categories)\n* and also the `checksums` metadata using `.getMetadata()`. 
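// Hedged usage sketch for the FileToArrayBufferReader filter defined above (assumes
// a browser page exposing the pixpipe bundle and an
// <input type="file" id="fileInput" multiple> element):
var f2b = new pixpipe.FileToArrayBufferReader();
document.getElementById('fileInput').addEventListener('change', function (evt) {
  var files = evt.target.files;
  for (var i = 0; i < files.length; i++) {
    f2b.addInput(files[i], 'file_' + i);
  }
  f2b.on('ready', function () {
    console.log(f2b.getMetadata('filenames'), f2b.getMetadata('checksums'));
    var buff = f2b.getOutput('file_0');  // ArrayBuffer, un-gzipped when applicable
  });
  f2b.update();
});
// UrlToArrayBufferReader (defined next) follows the same pattern with URL strings.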
This later metadata \n* give a unique *md5*, very convenient to compare if two files are actually the same.\n* Note that in case the file is *gziped*, the checksum is computed on the raw file,\n* not on the *un-gziped* buffer.\n*\n* **Usage**\n* - [examples/urlFileToArrayBuffer.html](../examples/urlFileToArrayBuffer.html)\n*/\nclass UrlToArrayBufferReader extends Filter {\n\n constructor(){\n super();\n this._outputCounter = 0;\n \n // filenames by categories\n this.setMetadata(\"filenames\", {});\n \n // md5 checksum by categories\n this.setMetadata(\"checksums\", {});\n }\n\n\n _run(){\n var that = this;\n\n if(! this.getNumberOfInputs() ){\n console.warn(\"No input was specified, cannot run this filer.\");\n return;\n }\n\n\n this._forEachInput( function(category, input){\n that._loadUrl(category, input)\n });\n\n }\n\n\n /**\n * [PRIVATE]\n * Perform a XMLHttpRequest with the given url and adds it to the output\n */\n _loadUrl( category, url ){\n var that = this;\n\n var xhr = new XMLHttpRequest();\n xhr.open(\"GET\", url, true);\n xhr.responseType = \"arraybuffer\";\n\n xhr.onload = function(event) {\n var arrayBuff = xhr.response;\n \n var basename = url.split(/[\\\\/]/).pop();\n var extension = basename.split('.').pop();\n var checksum = md5( arrayBuff );\n \n // few metadata for recognizing files (potentially)\n that._metadata.filenames[ category ] = basename;\n that._metadata.checksums[ category ] = checksum;\n\n // trying to un-gzip it with Pako for non pixp files\n if( extension.localeCompare(\"pixp\") ){\n try {\n arrayBuff = pako.inflate(arrayBuff).buffer;\n console.log(\"File was un-gziped successfully\");\n } catch (err) {\n console.log(\"Pako: \" + err + \" (this content is not gziped)\");\n }\n }\n \n that._output[ category ] = arrayBuff\n \n\n that._outputCounter ++;\n\n if( that._outputCounter == that.getNumberOfInputs()){\n that.triggerEvent(\"ready\");\n }\n };\n\n xhr.error = function(){\n console.log(\"here go the error\");\n }\n\n xhr.send();\n }\n\n\n} /* END of class UrlToArrayBufferReader */\n\nexport { UrlToArrayBufferReader }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* Robert D. 
Vincent\n*\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\nimport pako from 'pako'\nimport { Filter } from '../core/Filter.js';\nimport { MniVolume } from '../core/MniVolume.js';\n\n\n/**\n* Decode a HDF5 file, but is most likely to be restricted to the features that are\n* used for Minc2 file format.\n* The metadata \"debug\" can be set to true to\n* enable a verbose mode.\n* Takes an ArrayBuffer as input (0) and output a `MniVolume` (which inherit `Image3D`).\n*\n* **Usage**\n* - [examples/fileToMinc2.html](../examples/fileToMinc2.html)\n*/\nclass Minc2Decoder extends Filter{\n\n constructor(){\n super();\n this.addInputValidator(0, ArrayBuffer);\n\n this.setMetadata(\"debug\", false);\n\n this._type_enum = {\n INT8: 1,\n UINT8: 2,\n INT16: 3,\n UINT16: 4,\n INT32: 5,\n UINT32: 6,\n FLT: 7,\n DBL: 8,\n STR: 9\n };\n\n this._type_matching = [\n \"int8\",\n \"uint8\",\n \"int16\",\n \"uint16\",\n \"int32\",\n \"uint32\",\n \"float32\",\n \"float64\",\n \"undef\" // STR type is not compatible with minc\n // we deal rgb8 manually\n ];\n\n this.type_sizes = [0, 1, 1, 2, 2, 4, 4, 4, 8, 0];\n\n this._dv_offset = 0;\n this._align = 8;\n this._little_endian = true;\n this._continuation_queue = [];\n this._dv = null;//new DataView(abuf);\n this._superblk = {};\n this._start_offset = 0;\n this._huge_id = 0;\n\n }\n\n /**\n * [PRIVATE]\n */\n createLink() {\n var r = {};\n // internal/private\n r.hdr_offset = 0; // offset to object header.\n r.data_offset = 0; // offset to actual data.\n r.data_length = 0; // length of data.\n r.n_filled = 0; // counts elements written to array\n r.chunk_size = 0; // size of chunks\n r.sym_btree = 0; // offset of symbol table btree\n r.sym_lheap = 0; // offset of symbol table local heap\n // permanent/global\n r.name = \"\"; // name of this group or dataset.\n r.attributes = {}; // indexed by attribute name.\n r.children = []; // not associative for now.\n r.array = undefined; // actual data, if dataset.\n r.type = -1; // type of data.\n r.inflate = false; // true if need to inflate (gzip).\n r.dims = []; // dimension sizes.\n return r;\n }\n\n\n /**\n * [PRIVATE]\n *\n * Turns out that alignment of the messages in at least the\n * version 1 object header is actually relative to the start\n * of the header. 
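// Hedged usage sketch for Minc2Decoder (assumes the pixpipe bundle and a hypothetical
// URL 'data/volume.mnc' served next to the page; default input/output category 0):
var url2buf = new pixpipe.UrlToArrayBufferReader();
url2buf.addInput('data/volume.mnc');
url2buf.on('ready', function () {
  var decoder = new pixpipe.Minc2Decoder();
  decoder.addInput(url2buf.getOutput()); // ArrayBuffer goes to input 0
  decoder.setMetadata('debug', false);   // set to true for the verbose mode
  decoder.update();
  var volume = decoder.getOutput();      // MniVolume (inherits Image3D)
  console.log(volume);
});
url2buf.update();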
So we update the start position of the\n * header here, so we can refer to it when calculating the\n * alignment in this.checkAlignment().\n */\n startAlignment() {\n this._start_offset = this._dv_offset;\n }\n\n\n /**\n * [PRIVATE]\n */\n checkAlignment() {\n var tmp = this._dv_offset - this._start_offset;\n if ((tmp % this._align) !== 0) {\n var n = this._align - (tmp % this._align);\n this._dv_offset += n;\n if (this.getMetadata(\"debug\")) {\n console.log('skipping ' + n + ' bytes at ' + tmp + ' for alignmnent');\n }\n }\n }\n\n\n /**\n * [PRIVATE]\n *\n * helper functions to manipulate the current DataView offset.\n */\n skip(n_bytes) {\n this._dv_offset += n_bytes;\n }\n\n\n /**\n * [PRIVATE]\n */\n seek(new_offset) {\n this._dv_offset = new_offset;\n }\n\n\n /**\n * [PRIVATE]\n */\n tell() {\n return this._dv_offset;\n }\n\n\n /**\n * [PRIVATE]\n *\n * helper functions for access to our DataView.\n */\n getU8() {\n var v = this._dv.getUint8(this._dv_offset);\n this._dv_offset += 1;\n return v;\n }\n\n\n /**\n * [PRIVATE]\n */\n getU16() {\n var v = this._dv.getUint16(this._dv_offset, this._little_endian);\n this._dv_offset += 2;\n return v;\n }\n\n\n /**\n * [PRIVATE]\n */\n getU32() {\n var v = this._dv.getUint32(this._dv_offset, this._little_endian);\n this._dv_offset += 4;\n return v;\n }\n\n\n /**\n * [PRIVATE]\n */\n getU64() {\n var v = this._dv.getUint64(this._dv_offset, this._little_endian);\n this._dv_offset += 8;\n return v;\n }\n\n\n /**\n * [PRIVATE]\n */\n getF32() {\n var v = this._dv.getFloat32(this._dv_offset, this._little_endian);\n this._dv_offset += 4;\n return v;\n }\n\n\n /**\n * [PRIVATE]\n */\n getF64() {\n var v = this._dv.getFloat64(this._dv_offset, this._little_endian);\n this._dv_offset += 8;\n return v;\n }\n\n\n /**\n * [PRIVATE]\n */\n getOffset(offsz) {\n var v = 0;\n offsz = offsz || this._superblk.offsz;\n if (offsz === 4) {\n v = this._dv.getUint32(this._dv_offset, this._little_endian);\n } else if (offsz === 8) {\n v = this._dv.getUint64(this._dv_offset, this._little_endian);\n } else {\n throw new Error('Unsupported value for offset size ' + offsz);\n }\n this._dv_offset += offsz;\n return v;\n }\n\n\n /**\n * [PRIVATE]\n */\n getLength() {\n var v = this._dv.getUint64(this._dv_offset, this._little_endian);\n this._dv_offset += this._superblk.lensz;\n return v;\n }\n\n\n /**\n * [PRIVATE]\n */\n getString(length) {\n var r = \"\";\n var i;\n var c;\n for (i = 0; i < length; i += 1) {\n c = this.getU8();\n if (c === 0) {\n this._dv_offset += (length - i - 1);\n break;\n }\n r += String.fromCharCode(c);\n }\n return r;\n }\n\n\n /**\n * [PRIVATE]\n */\n getArray(typ, n_bytes, new_off) {\n var value;\n var n_values;\n var new_abuf;\n var abuf = this._getInput();\n var i;\n var spp = this._dv_offset;\n if (new_off) {\n this._dv_offset = new_off;\n }\n switch (typ) {\n case this._type_enum.INT8:\n value = new Int8Array(abuf, this._dv_offset, n_bytes);\n break;\n case this._type_enum.UINT8:\n value = new Uint8Array(abuf, this._dv_offset, n_bytes);\n break;\n case this._type_enum.INT16:\n if ((this._dv_offset % 2) !== 0) {\n new_abuf = new ArrayBuffer(n_bytes);\n n_values = n_bytes / 2;\n value = new Int16Array(new_abuf);\n for (i = 0; i < n_values; i += 1) {\n value[i] = this.getU16();\n }\n } else {\n value = new Int16Array(abuf, this._dv_offset, n_bytes / 2);\n this._dv_offset += n_bytes;\n }\n break;\n case this._type_enum.UINT16:\n if ((this._dv_offset % 2) !== 0) {\n new_abuf = new ArrayBuffer(n_bytes);\n n_values = n_bytes / 2;\n value = new 
Uint16Array(new_abuf);\n for (i = 0; i < n_values; i += 1) {\n value[i] = this.getU16();\n }\n } else {\n value = new Uint16Array(abuf, this._dv_offset, n_bytes / 2);\n this._dv_offset += n_bytes;\n }\n break;\n case this._type_enum.INT32:\n if ((this._dv_offset % 4) !== 0) {\n new_abuf = new ArrayBuffer(n_bytes);\n n_values = n_bytes / 4;\n value = new Int32Array(new_abuf);\n for (i = 0; i < n_values; i += 1) {\n value[i] = this.getU32();\n }\n } else {\n value = new Int32Array(abuf, this._dv_offset, n_bytes / 4);\n this._dv_offset += n_bytes;\n }\n break;\n case this._type_enum.UINT32:\n if ((this._dv_offset % 4) !== 0) {\n new_abuf = new ArrayBuffer(n_bytes);\n n_values = n_bytes / 4;\n value = new Uint32Array(new_abuf);\n for (i = 0; i < n_values; i += 1) {\n value[i] = this.getU32();\n }\n } else {\n value = new Uint32Array(abuf, this._dv_offset, n_bytes / 4);\n this._dv_offset += n_bytes;\n }\n break;\n case this._type_enum.FLT:\n if ((this._dv_offset % 4) !== 0) {\n new_abuf = new ArrayBuffer(n_bytes);\n n_values = n_bytes / 4;\n value = new Float32Array(new_abuf);\n for (i = 0; i < n_values; i += 1) {\n value[i] = this.getF32();\n }\n } else {\n value = new Float32Array(abuf, this._dv_offset, n_bytes / 4);\n this._dv_offset += n_bytes;\n }\n break;\n case this._type_enum.DBL:\n if ((this._dv_offset % 8) !== 0) {\n new_abuf = new ArrayBuffer(n_bytes);\n n_values = n_bytes / 8;\n value = new Float64Array(new_abuf);\n for (i = 0; i < n_values; i += 1) {\n value[i] = this.getF64();\n }\n } else {\n value = new Float64Array(abuf, this._dv_offset, n_bytes / 8);\n this._dv_offset += n_bytes;\n }\n break;\n default:\n throw new Error('Bad type in this.getArray ' + typ);\n }\n if (new_off) {\n this._dv_offset = spp;\n }\n return value;\n }\n\n\n /**\n * [PRIVATE]\n *\n * Get a variably-sized integer from the DataView.\n */\n getUXX(n) {\n var v;\n var i;\n switch (n) {\n case 1:\n v = this._dv.getUint8(this._dv_offset);\n break;\n case 2:\n v = this._dv.getUint16(this._dv_offset, this._little_endian);\n break;\n case 4:\n v = this._dv.getUint32(this._dv_offset, this._little_endian);\n break;\n case 8:\n v = this._dv.getUint64(this._dv_offset, this._little_endian);\n break;\n default:\n /* Certain hdf5 types can have odd numbers of bytes. 
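// Why getArray() copies element-by-element when the offset is unaligned: a typed
// array view must start at a multiple of its element size, while DataView reads
// work at any offset. Minimal standalone demonstration:
var alignBuf = new ArrayBuffer(16);
try {
  var bad = new Float32Array(alignBuf, 1, 2);   // offset 1 is not a multiple of 4
} catch (e) {
  console.log(e.name);                          // RangeError
}
var alignDv = new DataView(alignBuf);
var copy = new Float32Array(2);
for (var k = 0; k < copy.length; k++) {
  copy[k] = alignDv.getFloat32(1 + k * 4, true); // little-endian, unaligned is fine
}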
We try\n * to deal with that special case here.\n */\n v = 0;\n if (!this._little_endian) {\n for (i = 0; i < n; i++) {\n v = (v << 8) + this._dv.getUint8(this._dv_offset + i);\n }\n }\n else {\n for (i = n - 1; i >= 0; i--) {\n v = (v << 8) + this._dv.getUint8(this._dv_offset + i);\n }\n }\n }\n this._dv_offset += n;\n return v;\n }\n\n\n /**\n * [PRIVATE]\n *\n * Verify that the expected signature is found at this offset.\n */\n checkSignature(str) {\n var i;\n for (i = 0; i < str.length; i += 1) {\n if (this._dv.getUint8(this._dv_offset + i) !== str.charCodeAt(i)) {\n return false;\n }\n }\n this.skip(str.length);\n return true;\n }\n\n\n /**\n * [PRIVATE]\n */\n hdf5Superblock() {\n var sb = {};\n if (!this.checkSignature(\"\\u0089HDF\\r\\n\\u001A\\n\")) {\n throw new Error('Bad magic string in HDF5');\n }\n sb.sbver = this.getU8();\n if (sb.sbver > 2) {\n throw new Error('Unsupported HDF5 superblock version ' + sb.sbver);\n }\n if (sb.sbver <= 1) {\n sb.fsver = this.getU8();\n sb.rgver = this.getU8();\n this.skip(1); // reserved\n sb.shver = this.getU8();\n sb.offsz = this.getU8();\n sb.lensz = this.getU8();\n this.skip(1); // reserved\n sb.gln_k = this.getU16();\n sb.gin_k = this.getU16();\n sb.cflags = this.getU32();\n if (sb.sbver === 1) {\n sb.isin_k = this.getU16();\n this.skip(2); // reserved\n }\n sb.base_addr = this.getOffset(sb.offsz);\n sb.gfsi_addr = this.getOffset(sb.offsz);\n sb.eof_addr = this.getOffset(sb.offsz);\n sb.dib_addr = this.getOffset(sb.offsz);\n sb.root_ln_offs = this.getOffset(sb.offsz);\n sb.root_addr = this.getOffset(sb.offsz);\n sb.root_cache_type = this.getU32();\n this.skip(4);\n this.skip(16);\n } else {\n sb.offsz = this.getU8();\n sb.lensz = this.getU8();\n sb.cflags = this.getU8();\n sb.base_addr = this.getOffset(sb.offsz);\n sb.ext_addr = this.getOffset(sb.offsz);\n sb.eof_addr = this.getOffset(sb.offsz);\n sb.root_addr = this.getOffset(sb.offsz);\n sb.checksum = this.getU32();\n }\n if (this.getMetadata(\"debug\")) {\n console.log(\"HDF5 SB \" + sb.sbver + \" \" + sb.offsz + \" \" + sb.lensz + \" \" + sb.cflags);\n }\n return sb;\n }\n\n\n /**\n * [PRIVATE]\n *\n * read the v2 fractal heap header\n */\n hdf5FractalHeapHeader() {\n var fh = {};\n if (!this.checkSignature(\"FRHP\")) {\n throw new Error('Bad or missing FRHP signature');\n }\n fh.ver = this.getU8(); // Version\n fh.idlen = this.getU16(); // Heap ID length\n fh.iof_el = this.getU16(); // I/O filter's encoded length\n fh.flags = this.getU8(); // Flags\n fh.objmax = this.getU32(); // Maximum size of managed objects.\n fh.objnid = this.getLength(); // Next huge object ID\n fh.objbta = this.getOffset(); // v2 B-tree address of huge objects\n fh.nf_blk = this.getLength(); // Amount of free space in managed blocks\n fh.af_blk = this.getOffset(); // Address of managed block free space manager\n fh.heap_total = this.getLength(); // Amount of managed space in heap\n fh.heap_alloc = this.getLength(); // Amount of allocated managed space in heap\n fh.bai_offset = this.getLength(); // Offset of direct block allocation iterator\n fh.heap_nobj = this.getLength(); // Number of managed objects in heap\n fh.heap_chuge = this.getLength(); // Size of huge objects in heap\n fh.heap_nhuge = this.getLength(); // Number of huge objects in heap\n fh.heap_ctiny = this.getLength(); // Size of tiny objects in heap\n fh.heap_ntiny = this.getLength(); // Number of tiny objects in heap\n fh.table_width = this.getU16(); // Table width\n fh.start_blksz = this.getLength(); // Starting block size\n fh.max_blksz = 
this.getLength(); // Maximum direct block size\n fh.max_heapsz = this.getU16(); // Maximum heap size\n fh.rib_srows = this.getU16(); // Starting # of rows in root indirect block\n fh.root_addr = this.getOffset(); // Address of root block\n fh.rib_crows = this.getU16(); // Current # of rows in root indirect block\n\n var max_dblock_rows = Math.log2(fh.max_blksz) - Math.log2(fh.start_blksz) + 2;\n fh.K = Math.min(fh.rib_crows, max_dblock_rows) * fh.table_width;\n fh.N = (fh.rib_crows < max_dblock_rows) ? 0 : fh.K - (max_dblock_rows * fh.table_width);\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"FRHP V\" + fh.ver + \" F\" + fh.flags + \" \" + fh.objbta + \" Total:\" + fh.heap_total + \" Alloc:\" + fh.heap_alloc + \" #obj:\" + fh.heap_nobj + \" width:\" + fh.table_width + \" start_blksz:\" + fh.start_blksz + \" max_blksz:\" + fh.max_blksz + \" \" + fh.max_heapsz + \" srows:\" + fh.rib_srows + \" crows:\" + fh.rib_crows + \" \" + fh.heap_nhuge);\n console.log(\" K: \" + fh.K + \" N: \" + fh.N);\n }\n\n if (fh.iof_el > 0) {\n throw new Error(\"Filters present in fractal heap.\");\n }\n return fh;\n }\n\n\n /**\n * [PRIVATE]\n *\n * read the v2 btree header\n */\n hdf5V2BtreeHeader() {\n var bh = {};\n if (!this.checkSignature(\"BTHD\")) {\n throw new Error('Bad or missing BTHD signature');\n }\n bh.ver = this.getU8();\n bh.type = this.getU8();\n bh.nodesz = this.getU32();\n bh.recsz = this.getU16();\n bh.depth = this.getU16();\n bh.splitp = this.getU8();\n bh.mergep = this.getU8();\n bh.root_addr = this.getOffset();\n bh.root_nrec = this.getU16();\n bh.total_nrec = this.getLength();\n bh.checksum = this.getU32();\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"BTHD V\" + bh.ver + \" T\" + bh.type + \" \" + bh.nodesz + \" \" + bh.recsz + \" \" + bh.depth + \" \" + bh.root_addr + \" \" + bh.root_nrec + \" \" + bh.total_nrec);\n }\n return bh;\n }\n\n\n\n /**\n * [PRIVATE]\n *\n * Enumerates btree records in a block. Records are found both in direct\n * and indirect v2 btree blocks.\n */\n hdf5V2BtreeRecords(fh, bt_type, nrec, link) {\n var i;\n var spp; // saved position pointer\n var offset;\n var length;\n if (bt_type === 1) {\n for (i = 0; i < nrec; i++) {\n offset = this.getOffset();\n length = this.getLength();\n var id = this.getLength();\n if (this.getMetadata(\"debug\")) {\n console.log(\" -> \" + offset + \" \" + length + \" \" + id + \" \" + this._this._huge_id);\n }\n spp = this.tell();\n if (id === this._this._huge_id) {\n this.seek(offset);\n this.hdf5MsgAttribute(length, link);\n }\n this.seek(spp);\n }\n }\n else if (bt_type === 8) {\n var cb_offs;\n var cb_leng;\n /* maximum heap size is stored in bits! 
*/\n cb_offs = fh.max_heapsz / 8;\n var tmp = Math.min(fh.objmax, fh.max_blksz);\n if (tmp <= 256) {\n cb_leng = 1;\n }\n else if (tmp <= 65536) {\n cb_leng = 2;\n }\n else {\n cb_leng = 4;\n }\n for (i = 0; i < nrec; i++) {\n /* Read managed fractal heap ID.\n */\n var vt = this.getU8();\n if ((vt & 0xc0) !== 0) {\n throw new Error('Bad Fractal Heap ID version ' + vt);\n }\n var id_type = (vt & 0x30);\n var flags;\n if (id_type === 0x10) { // huge!\n this._this._huge_id = this.getUXX(7);\n }\n else if (id_type === 0x00) { // managed.\n offset = this.getUXX(cb_offs);\n length = this.getUXX(cb_leng);\n }\n else {\n throw new Error(\"Can't handle this Heap ID: \" + vt);\n }\n flags = this.getU8();\n\n /* Read the rest of the record.\n */\n this.getU32(); // creation order (IGNORE)\n this.getU32(); // hash (IGNORE)\n if (this.getMetadata(\"debug\")) {\n console.log(\" -> \" + vt + \" \" + offset + \" \" + length + \" \" + flags);\n }\n spp = this.tell();\n if (id_type === 0x10) {\n /* A \"huge\" object is found by indexing through the btree\n * present in the header\n */\n this.seek(fh.objbta);\n var bh = this.hdf5V2BtreeHeader();\n if (bh.type === 1) {\n this.seek(bh.root_addr);\n this.hdf5V2BtreeLeafNode(fh, bh.root_nrec, link);\n }\n else {\n throw new Error(\"Can only handle type-1 btrees\");\n }\n }\n else {\n /*\n * A managed object implies that the attribute message is\n * found in the associated fractal heap at the specified\n * offset in the heap. We get the actual address\n * corresponding to the offset here.\n */\n var location = this.hdf5FractalHeapOffset(fh, offset);\n this.seek(location);\n this.hdf5MsgAttribute(length, link);\n }\n this.seek(spp);\n }\n }\n else {\n throw new Error(\"Unhandled V2 btree type.\");\n }\n }\n\n\n /**\n * [PRIVATE]\n *\n * read a v2 btree leaf node\n */\n hdf5V2BtreeLeafNode(fh, nrec, link) {\n\n if (!this.checkSignature(\"BTLF\")) {\n throw new Error('Bad or missing BTLF signature');\n }\n\n var ver = this.getU8();\n var typ = this.getU8();\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"BTLF V\" + ver + \" T\" + typ + \" \" + this.tell());\n }\n this.hdf5V2BtreeRecords(fh, typ, nrec, link);\n }\n\n\n /**\n * [PRIVATE]\n *\n * read the hdf5 v2 btree internal node\n */\n hdf5V2BtreeInternalNode(fh, nrec, depth, link) {\n\n if (!this.checkSignature(\"BTIN\")) {\n throw new Error('Bad or missing BTIN signature');\n }\n var ver = this.getU8();\n var type = this.getU8();\n var i;\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"BTIN V\" + ver + \" T\" + type);\n }\n this.hdf5V2BtreeRecords(fh, type, nrec, link);\n for (i = 0; i <= nrec; i++) {\n var child_offset = this.getOffset();\n var child_nrec = this.getUXX(1); // TODO: calculate real size!!\n var child_total;\n /* TODO: unfortunately, this field is optional and\n * variably-sized. Calculating the size is non-trivial, as it\n * depends on the total depth and size of the tree. For now\n * we will just assume it is its minimum size, as I've never\n * encountered a file with depth > 1 anyway.\n */\n if (depth > 1) {\n child_total = this.getUXX(1);\n }\n if (this.getMetadata(\"debug\")) {\n console.log(\" child->\" + child_offset + \" \" + child_nrec + \" \" + child_total);\n }\n }\n }\n\n\n /**\n * [PRIVATE]\n */\n hdf5GetMsgName(n) {\n\n // JO: used to be in the global scope.\n /* Names of the various HDF5 messages.\n * Note that MESSAGE23 appears to be illegal. 
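// Sketch of the managed heap-ID sizing used in hdf5V2BtreeRecords() above
// (hypothetical header values): offsets occupy max_heapsz/8 bytes and lengths take
// 1, 2 or 4 bytes depending on the largest object a direct block can hold.
function heapIdSizes(fh) {
  var cb_offs = fh.max_heapsz / 8;
  var tmp = Math.min(fh.objmax, fh.max_blksz);
  var cb_leng = (tmp <= 256) ? 1 : (tmp <= 65536) ? 2 : 4;
  return { offsetBytes: cb_offs, lengthBytes: cb_leng };
}
console.log(heapIdSizes({ max_heapsz: 32, objmax: 4096, max_blksz: 65536 }));
// -> { offsetBytes: 4, lengthBytes: 2 }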
All the rest are defined,\n * although I've never encountered a BOGUS message!\n */\n var msg_names = [\n \"NIL\", \"Dataspace\", \"LinkInfo\", \"Datatype\", \"FillValue 1\", \"FillValue 2\",\n \"Link\", \"ExternalFiles\", \"Layout\", \"BOGUS\", \"GroupInfo\", \"FilterPipeline\",\n \"Attribute\", \"ObjectComment\", \"ObjectModTime 1\", \"SharedMsgTable\",\n \"ObjHdrContinue\", \"SymbolTable\", \"ObjectModTime 2\", \"BtreeKValue\",\n \"DriverInfo\", \"AttrInfo\", \"ObjectRefCnt\", \"MESSAGE23\",\n \"FileSpaceInfo\"\n ];\n\n if (n < msg_names.length) {\n return msg_names[n];\n }\n throw new Error('Unknown message type ' + n + \" \" + this.tell());\n }\n\n\n /**\n * [PRIVATE]\n */\n hdf5V1BtreeNode(link) {\n var abuf = this._getInput();\n var i;\n var bt = {};\n if (!this.checkSignature(\"TREE\")) {\n throw new Error('Bad TREE signature at ' + this.tell());\n }\n\n bt.keys = [];\n\n bt.node_type = this.getU8();\n bt.node_level = this.getU8();\n bt.entries_used = this.getU16();\n bt.left_sibling = this.getOffset();\n bt.right_sibling = this.getOffset();\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"BTREE type \" + bt.node_type + \" lvl \" +\n bt.node_level + \" n_used \" + bt.entries_used + \" \" +\n bt.left_sibling + \" \" + bt.right_sibling);\n }\n\n if (!link) {\n /* If this BTREE is associated with a group (not a dataset),\n * then its keys are single \"length\" value.\n */\n for (i = 0; i < bt.entries_used; i += 1) {\n bt.keys[i] = {};\n bt.keys[i].key_value = this.getLength();\n bt.keys[i].child_address = this.getOffset();\n if (this.getMetadata(\"debug\")) {\n console.log(\" BTREE \" + i + \" key \" +\n bt.keys[i].key_value + \" adr \" +\n bt.keys[i].child_address);\n }\n }\n } else {\n var j;\n\n /* If this BTREE is a \"chunked raw data node\" associated\n * with a dataset, then its keys are complex, consisting\n * of the chunk size in bytes, a filter mask, and a set of\n * offsets matching the dimensionality of the chunk layout.\n * The chunk size stores the actual stored length of the\n * data, so it may not equal the uncompressed chunk size.\n */\n var chunks = [];\n\n for (i = 0; i < bt.entries_used; i += 1) {\n bt.keys[i] = {};\n chunks[i] = {};\n chunks[i].chunk_size = this.getU32();\n chunks[i].filter_mask = this.getU32();\n chunks[i].chunk_offsets = [];\n for (j = 0; j < link.dims.length + 1; j += 1) {\n chunks[i].chunk_offsets.push(this.getU64());\n }\n bt.keys[i].child_address = this.getOffset();\n if (i < bt.entries_used) {\n if (this.getMetadata(\"debug\")) {\n console.log(\" BTREE \" + i +\n \" chunk_size \" + chunks[i].chunk_size +\n \" filter_mask \" + chunks[i].filter_mask +\n \" addr \" + bt.keys[i].child_address);\n }\n }\n }\n chunks[i] = {};\n chunks[i].chunk_size = this.getU32();\n chunks[i].filter_mask = this.getU32();\n chunks[i].chunk_offsets = [];\n for (j = 0; j < link.dims.length + 1; j += 1) {\n chunks[i].chunk_offsets.push(this.getU64());\n }\n\n /* If we're at a leaf node, we have data to deal with.\n * We might have to uncompress!\n */\n if (bt.node_level === 0) {\n var length;\n var offset;\n var sp;\n var dp;\n\n for (i = 0; i < bt.entries_used; i += 1) {\n length = chunks[i].chunk_size;\n offset = bt.keys[i].child_address;\n\n if (link.inflate) {\n sp = new Uint8Array(abuf, offset, length);\n dp = pako.inflate(sp);\n switch (link.type) {\n case this._type_enum.INT8:\n dp = new Int8Array(dp.buffer);\n break;\n case this._type_enum.UINT8:\n dp = new Uint8Array(dp.buffer);\n break;\n case this._type_enum.INT16:\n dp = new Int16Array(dp.buffer);\n 
break;\n case this._type_enum.UINT16:\n dp = new Uint16Array(dp.buffer);\n break;\n case this._type_enum.INT32:\n dp = new Int32Array(dp.buffer);\n break;\n case this._type_enum.UINT32:\n dp = new Uint32Array(dp.buffer);\n break;\n case this._type_enum.FLT:\n dp = new Float32Array(dp.buffer);\n break;\n case this._type_enum.DBL:\n dp = new Float64Array(dp.buffer);\n break;\n default:\n throw new Error('Unknown type code ' + link.type);\n }\n if (link.array.length - link.n_filled < dp.length) {\n dp = dp.subarray(0, link.array.length - link.n_filled);\n }\n link.array.set(dp, link.n_filled);\n link.n_filled += dp.length;\n if (this.getMetadata(\"debug\")) {\n console.log(link.name + \" \" + sp.length + \" \" + dp.length + \" \" + link.n_filled + \"/\" + link.array.length);\n }\n }\n else {\n /* no need to inflate data. */\n dp = this.getArray(link.type, length, offset);\n link.array.set(dp, link.n_filled);\n link.n_filled += dp.length;\n }\n }\n } else {\n for (i = 0; i < bt.entries_used; i += 1) {\n this.seek(bt.keys[i].child_address);\n this.hdf5V1BtreeNode(link);\n }\n }\n }\n return bt;\n }\n\n\n /**\n * [PRIVATE]\n */\n hdf5GroupSymbolTable(lh, link) {\n if (!this.checkSignature(\"SNOD\")) {\n throw new Error('Bad or missing SNOD signature');\n }\n var ver = this.getU8();\n this.skip(1);\n var n_sym = this.getU16();\n if (this.getMetadata(\"debug\")) {\n console.log(\"this.hdf5GroupSymbolTable V\" + ver + \" #\" + n_sym +\n \" '\" + link.name + \"'\");\n }\n var i;\n var link_name_offset;\n var ohdr_address;\n var cache_type;\n var child;\n var spp;\n\n for (i = 0; i < 2 * this._superblk.gln_k; i += 1) {\n link_name_offset = this.getOffset();\n ohdr_address = this.getOffset();\n cache_type = this.getU32();\n this.skip(20);\n\n if (i < n_sym) {\n child = this.createLink();\n child.hdr_offset = ohdr_address;\n if (lh) {\n spp = this.tell();\n /* The link name is a zero-terminated string\n * starting at the link_name_off relative to\n * the beginning of the data segment of the local\n * heap.\n */\n this.seek(lh.lh_dseg_off + link_name_offset);\n child.name = this.getString(lh.lh_dseg_len);\n this.seek(spp);\n }\n if (this.getMetadata(\"debug\")) {\n console.log(\" \" + i + \" O \" + link_name_offset + \" A \" +\n ohdr_address + \" T \" + cache_type + \" '\" +\n child.name + \"'\");\n }\n link.children.push(child);\n }\n }\n }\n\n\n /**\n * [PRIVATE]\n *\n * Read a v1 local heap header. These define relatively small\n * regions used primarily for storing symbol names associated with\n * a symbol table message.\n */\n hdf5LocalHeap() {\n var lh = {};\n if (!this.checkSignature(\"HEAP\")) {\n throw new Error('Bad or missing HEAP signature');\n }\n lh.lh_ver = this.getU8();\n this.skip(3);\n lh.lh_dseg_len = this.getLength();\n lh.lh_flst_len = this.getLength();\n lh.lh_dseg_off = this.getOffset();\n if (this.getMetadata(\"debug\")) {\n console.log(\"LHEAP V\" + lh.lh_ver + \" \" + lh.lh_dseg_len + \" \" +\n lh.lh_flst_len + \" \" + lh.lh_dseg_off);\n }\n return lh;\n }\n\n\n /**\n * [PRIVATE]\n *\n * Process a \"dataspace\" message. Dataspaces define the\n * dimensionality of a dataset or attribute. They define the\n * number of dimensions (rank) and the current length of each\n * dimension. It is possible to specify a \"maximum\" length that is\n * greater than or equal to the current length, but MINC doesn't\n * rely on that feature so these values are ignored. 
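// Sketch of the per-chunk decode path in hdf5V1BtreeNode() above: a deflated chunk
// is inflated with pako and re-viewed through the typed array matching link.type
// (here the FLT case, with made-up data):
var pako = require('pako');
var original = new Float32Array([0.5, 1.5, 2.5]);
var compressedChunk = pako.deflate(new Uint8Array(original.buffer));
var dp = pako.inflate(compressedChunk);  // Uint8Array of the raw chunk bytes
dp = new Float32Array(dp.buffer);        // reinterpret, as done for FLT above
console.log(dp);                         // Float32Array [0.5, 1.5, 2.5]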
Finally it\n * is also possible to specify a \"permutation index\" that alters\n * storage order of the dataset, but again, MINC doesn't rely on\n * this feature, so the values are ignored.\n */\n hdf5MsgDataspace(sz, link) {\n var cb;\n var ver = this.getU8();\n var n_dim = this.getU8();\n var flag = this.getU8();\n if (ver <= 1) {\n this.skip(5);\n } else {\n this.skip(1);\n }\n\n var n_items = 1;\n var dlen = [];\n var i;\n for (i = 0; i < n_dim; i += 1) {\n dlen[i] = this.getLength();\n n_items *= dlen[i];\n }\n\n cb = (n_dim * this._superblk.lensz) + ((ver <= 1) ? 8 : 4);\n\n var dmax = [];\n if ((flag & 1) !== 0) {\n cb += n_dim * this._superblk.lensz;\n for (i = 0; i < n_dim; i += 1) {\n dmax[i] = this.getLength();\n }\n }\n\n var dind = [];\n if ((flag & 2) !== 0) {\n cb += n_dim * this._superblk.lensz;\n for (i = 0; i < n_dim; i += 1) {\n dind[i] = this.getLength();\n }\n }\n var msg = \"this.hdf5MsgDataspace V\" + ver + \" N\" + n_dim + \" F\" + flag;\n if (this.getMetadata(\"debug\")) {\n if (n_dim !== 0) {\n msg += \"[\" + dlen.join(', ') + \"]\";\n }\n console.log(msg);\n }\n if (cb < sz) {\n this.skip(sz - cb);\n }\n if (link) {\n link.dims = dlen;\n }\n return n_items;\n }\n\n\n /**\n * [PRIVATE]\n *\n *\n * link info messages may contain a fractal heap address where we\n * can find additional link messages for this object. This\n * happens, for example, when there are lots of links in a\n * particular group.\n */\n hdf5MsgLinkInfo(link) {\n var that = this;\n\n var ver = this.getU8();\n var flags = this.getU8();\n if ((flags & 1) !== 0) {\n this.getU64(); // max. creation index (IGNORE).\n }\n var fh_address = this.getOffset(); // fractal heap address\n var bt_address = this.getOffset(); // v2 btree for name index\n if ((flags & 2) !== 0) {\n this.getOffset(); // creation order index (IGNORE).\n }\n if (this.getMetadata(\"debug\")) {\n console.log(\"this.hdf5MsgLinkInfo V\" + ver + \" F\" + flags +\n \" FH \" + fh_address + \" BT \" + bt_address);\n }\n var spp = this.tell();\n if (fh_address < this._superblk.eof_addr) {\n this.seek(fh_address);\n /* If there is a valid fractal heap address in the link info message, that\n * means the fractal heap is a collection of link messages. We can ignore\n * the btree address because we can get the names from the link messages.\n */\n var fh = this.hdf5FractalHeapHeader();\n var n_msg = 0;\n this.hdf5FractalHeapEnumerate( fh, function(row, address, block_offset, block_length) {\n var end_address = address + block_length;\n while (n_msg < fh.heap_nobj && that.tell() < end_address) {\n that.hdf5MsgLink(link);\n n_msg += 1;\n }\n return true; // continue with enumeration.\n });\n }\n this.seek(spp);\n }\n\n\n /**\n * [PRIVATE]\n */\n dt_class_name(cls) {\n var names = [\n \"Fixed-Point\", \"Floating-Point\", \"Time\", \"String\",\n \"BitField\", \"Opaque\", \"Compound\", \"Reference\",\n \"Enumerated\", \"Variable-Length\", \"Array\"\n ];\n\n if (cls < names.length) {\n return names[cls];\n }\n throw new Error('Unknown datatype class: ' + cls);\n }\n\n\n /**\n * [PRIVATE]\n *\n * Process a \"datatype\" message. These messages specify the data\n * type of a single element within a dataset or attribute. Data\n * types are extremely flexible, HDF5 supports a range of options\n * for bit widths and organization atomic types. We support only\n * fixed, float, and string atomic types, and those only for\n * certain restricted (but common) cases. 
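// The dataspace message above boils down to a list of dimension lengths; the total
// element count is their product (made-up 3D volume dimensions):
var dlen = [256, 256, 180];
var n_items = dlen.reduce(function (acc, d) { return acc * d; }, 1);
console.log(n_items); // 11796480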
At this point we\n * provide no support for more exotic types such as bit field,\n * enumerated, array, opaque, compound, time, reference,\n * variable-length, etc.\n *\n * TODO: should support enumerated types, possibly a few others.\n */\n hdf5MsgDatatype(sz) {\n var type = {};\n var cb = 8;\n var msg = \"\";\n var bit_offs;\n var bit_prec;\n var exp_loc;\n var exp_sz;\n var mnt_loc;\n var mnt_sz;\n var exp_bias;\n\n var cv = this.getU8();\n var ver = cv >> 4;\n var cls = cv & 15;\n var bf = [];\n var i;\n for (i = 0; i < 3; i += 1) {\n bf[i] = this.getU8();\n }\n var dt_size = this.getU32();\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"this.hdf5MsgDatatype V\" + ver + \" C\" + cls +\n \" \" + this.dt_class_name(cls) +\n \" \" + bf[0] + \".\" + bf[1] + \".\" + bf[2] +\n \" \" + dt_size);\n }\n\n switch (cls) {\n case 0: /* Fixed (integer): bit 0 for byte order, bit 3 for signed */\n bit_offs = this.getU16();\n bit_prec = this.getU16();\n switch (dt_size) {\n case 4:\n type.typ_type = (bf[0] & 8) ? this._type_enum.INT32 : this._type_enum.UINT32;\n break;\n case 2:\n type.typ_type = (bf[0] & 8) ? this._type_enum.INT16 : this._type_enum.UINT16;\n break;\n case 1:\n type.typ_type = (bf[0] & 8) ? this._type_enum.INT8 : this._type_enum.UINT8;\n break;\n default:\n throw new Error('Unknown type size ' + dt_size);\n }\n type.typ_length = dt_size;\n cb += 4;\n if (this.getMetadata(\"debug\")) {\n console.log(' (' + bit_offs + ' ' + bit_prec + ')');\n }\n break;\n case 1: /* Float: uses bits 0,6 for byte order */\n msg = \"\";\n if (this.getMetadata(\"debug\")) {\n switch (bf[0] & 0x41) {\n case 0:\n msg += \"LE \";\n break;\n case 1:\n msg += \"BE \";\n break;\n case 0x41:\n msg += \"VX \";\n break;\n default:\n throw new Error('Reserved fp byte order: ' + bf[0]);\n }\n }\n bit_offs = this.getU16();\n bit_prec = this.getU16();\n exp_loc = this.getU8();\n exp_sz = this.getU8();\n mnt_loc = this.getU8();\n mnt_sz = this.getU8();\n exp_bias = this.getU32();\n if (this.getMetadata(\"debug\")) {\n msg += (bit_offs + \" \" + bit_prec + \" \" + exp_loc + \" \" + exp_sz +\n \" \" + mnt_loc + \" \" + mnt_sz + \" \" + exp_bias);\n }\n /* See if it's one of the formats we recognize.\n IEEE 64-bit or IEEE 32-bit are the only two we handle.\n */\n if (bit_prec === 64 && bit_offs === 0 &&\n exp_loc === 52 && exp_sz === 11 &&\n mnt_loc === 0 && mnt_sz === 52 &&\n exp_bias === 1023 && dt_size === 8) {\n type.typ_type = this._type_enum.DBL;\n } else if (bit_prec === 32 && bit_offs === 0 &&\n exp_loc === 23 && exp_sz === 8 &&\n mnt_loc === 0 && mnt_sz === 23 &&\n exp_bias === 127 && dt_size === 4) {\n type.typ_type = this._type_enum.FLT;\n } else {\n throw new Error(\"Unsupported floating-point type\");\n }\n if (this.getMetadata(\"debug\")) {\n console.log(msg);\n }\n type.typ_length = dt_size;\n cb += 12;\n break;\n\n case 3: // string\n /* bits 0-3 = 0: null terminate, 1: null pad, 2: space pad */\n /* bits 4-7 = 0: ASCII, 1: UTF-8 */\n type.typ_type = this._type_enum.STR;\n type.typ_length = dt_size;\n break;\n\n default:\n throw new Error('Unimplemented HDF5 data class ' + cls);\n }\n if (sz > cb) {\n this.skip(sz - cb);\n }\n return type;\n }\n\n\n /**\n * [PRIVATE]\n *\n * Process a \"layout\" message. These messages specify the location and organization\n * of data in a dataset. The organization can be either compact, contiguous, or\n * chunked. Compact data is stored in the message as a contiguous block. Contiguous\n * data is stored elsewhere in the file in a single chunk. 
Chunked data is stored within\n * a V1 Btree as a series of possibly filtered (e.g. compressed) chunks.\n */\n hdf5MsgLayout(link) {\n var msg = \"\";\n\n var ver = this.getU8();\n var cls;\n var n_dim;\n var cdsz;\n var dim = [];\n var i;\n var dtadr;\n var dtsz;\n var elsz;\n\n var n_items = 1;\n if (ver === 1 || ver === 2) {\n n_dim = this.getU8();\n cls = this.getU8();\n this.skip(5);\n if (this.getMetadata(\"debug\")) {\n msg += \"this.hdf5MsgLayout V\" + ver + \" N\" + n_dim + \" C\" + cls;\n }\n if (cls === 1 || cls === 2) { // contiguous or chunked\n var addr = this.getOffset();\n if (this.getMetadata(\"debug\")) {\n msg += \" A\" + addr;\n }\n link.data_offset = addr;\n }\n\n for (i = 0; i < n_dim; i += 1) {\n dim[i] = this.getU32();\n n_items *= dim[i];\n }\n\n if (this.getMetadata(\"debug\")) {\n msg += \"[\" + dim.join(', ') + \"]\";\n }\n\n if (cls === 2) { // chunked\n elsz = this.getU32();\n link.chunk_size = n_items * elsz;\n if (this.getMetadata(\"debug\")) {\n msg += \" E\" + elsz;\n }\n }\n if (cls === 0) { // compact\n cdsz = this.getU32();\n if (this.getMetadata(\"debug\")) {\n msg += \"(\" + cdsz + \")\";\n }\n link.data_offset = this.tell();\n link.data_length = cdsz;\n } else if (cls === 1) {\n link.data_length = n_items;\n }\n } else if (ver === 3) {\n cls = this.getU8();\n msg = \"this.hdf5MsgLayout V\" + ver + \" C\" + cls;\n\n if (cls === 0) {\n cdsz = this.getU16();\n if (this.getMetadata(\"debug\")) {\n msg += \"(\" + cdsz + \")\";\n }\n link.data_offset = this.tell();\n link.data_length = cdsz;\n } else if (cls === 1) {\n dtadr = this.getOffset();\n dtsz = this.getLength();\n if (this.getMetadata(\"debug\")) {\n msg += \"(\" + dtadr + \", \" + dtsz + \")\";\n }\n link.data_offset = dtadr;\n link.data_length = dtsz;\n } else if (cls === 2) {\n n_dim = this.getU8();\n dtadr = this.getOffset();\n link.data_offset = dtadr;\n link.chunk_size = 1;\n for (i = 0; i < n_dim - 1; i += 1) {\n dim[i] = this.getU32();\n n_items *= dim[i];\n }\n if (this.getMetadata(\"debug\")) {\n msg += \"(N\" + n_dim + \", A\" + dtadr + \" [\" + dim.join(',') + \"]\";\n }\n elsz = this.getU32();\n link.chunk_size = n_items * elsz;\n if (this.getMetadata(\"debug\")) {\n msg += \" E\" + elsz;\n }\n }\n } else {\n throw new Error(\"Illegal layout version \" + ver);\n }\n if (this.getMetadata(\"debug\")) {\n console.log(msg);\n }\n }\n\n\n /**\n * [PRIVATE]\n *\n * Read a \"filter pipeline\" message. At the moment we _only_ handle\n * deflate/inflate. 
Anything else will cause us to throw an exception.\n */\n hdf5MsgPipeline(link) {\n var ver = this.getU8();\n var nflt = this.getU8();\n\n var msg = \"this.hdf5MsgPipeline V\" + ver + \" N\" + nflt;\n if (ver === 1) {\n this.skip(6);\n }\n\n if (this.getMetadata(\"debug\")) {\n console.log(msg);\n }\n\n var i;\n var fiv;\n var nlen;\n var flags;\n var ncdv;\n for (i = 0; i < nflt; i += 1) {\n fiv = this.getU16();\n if (fiv !== 1) { /* deflate */\n throw new Error(\"Unimplemented HDF5 filter \" + fiv);\n }\n else {\n if (typeof pako !== 'object') {\n throw new Error('Need pako to inflate data.');\n }\n link.inflate = true;\n }\n if (ver === 1 || fiv > 256) {\n nlen = this.getU16();\n } else {\n nlen = 0;\n }\n\n flags = this.getU16();\n ncdv = this.getU16();\n if ((ncdv & 1) !== 0) {\n ncdv += 1;\n }\n if (nlen !== 0) {\n this.skip(nlen); // ignore name.\n }\n\n this.skip(ncdv * 4);\n\n if (this.getMetadata(\"debug\")) {\n console.log(\" \" + i + \" ID\" + fiv + \" F\" + flags + \" \" + ncdv);\n }\n }\n }\n\n\n /**\n * [PRIVATE]\n *\n * Process an \"attribute\" message. This actually defines an attribute that is\n * to be associated with a group or dataset (what I generally call a \"link\"\n * in this code. Attributes include a name, a datatype, and a dataspace, followed\n * by the actual data.\n */\n hdf5MsgAttribute(sz, link) {\n var ver = this.getU8();\n var flags = this.getU8();\n var nm_len = this.getU16();\n var dt_len = this.getU16();\n var ds_len = this.getU16();\n var msg = \"this.hdf5MsgAttribute V\" + ver + \" F\" + flags + \" \" + sz + \": \";\n\n if ((flags & 3) !== 0) {\n throw new Error('Shared dataspaces and datatypes are not supported.');\n }\n\n if (ver === 3) {\n var cset = this.getU8();\n if (this.getMetadata(\"debug\")) {\n msg += (cset === 0) ? \"ASCII\" : \"UTF-8\";\n }\n }\n if (this.getMetadata(\"debug\")) {\n msg += \"(\" + nm_len + \" \" + dt_len + \" \" + ds_len + \")\";\n }\n if (ver < 3) {\n nm_len = Math.floor((nm_len + 7) / 8) * 8;\n dt_len = Math.floor((dt_len + 7) / 8) * 8;\n ds_len = Math.floor((ds_len + 7) / 8) * 8;\n\n if (this.getMetadata(\"debug\")) {\n msg += \"/(\" + nm_len + \" \" + dt_len + \" \" + ds_len + \")\";\n }\n }\n\n var att_name = this.getString(nm_len);\n if (this.getMetadata(\"debug\")) {\n msg += \" Name: \" + att_name;\n console.log(msg);\n }\n var val_type = this.hdf5MsgDatatype(dt_len);\n var n_items = this.hdf5MsgDataspace(ds_len);\n var val_len = 0;\n if (sz > 0) {\n if (ver < 3) {\n val_len = sz - (8 + nm_len + dt_len + ds_len);\n } else {\n val_len = sz - (9 + nm_len + dt_len + ds_len);\n }\n } else {\n val_len = val_type.typ_length * n_items;\n }\n if (this.getMetadata(\"debug\")) {\n console.log(\" attribute data size \" + val_len + \" \" + this.tell());\n }\n var att_value;\n if (val_type.typ_type === this._type_enum.STR) {\n att_value = this.getString(val_len);\n } else {\n att_value = this.getArray(val_type.typ_type, val_len);\n }\n link.attributes[att_name] = att_value;\n }\n\n\n /**\n * [PRIVATE]\n *\n * Process a \"group info\" message. 
We don't actually do anything with these.\n */\n hdf5MsgGroupInfo() {\n var n_ent = 4;\n var n_lnl = 8;\n var ver = this.getU8();\n var flags = this.getU8();\n if ((flags & 1) !== 0) {\n this.getU16(); // link phase change: max compact value (IGNORE)\n this.getU16(); // link phase cange: max dense value (IGNORE)\n }\n if ((flags & 2) !== 0) {\n n_ent = this.getU16();\n n_lnl = this.getU16();\n }\n if (this.getMetadata(\"debug\")) {\n console.log(\"this.hdf5MsgGroupInfo V\" + ver + \" F\" + flags + \" ENT \" + n_ent + \" LNL \" + n_lnl);\n }\n }\n\n\n /**\n * [PRIVATE]\n *\n * Process a \"link\" message. This specifies the name and header location of either a\n * group or a dataset within the current group. It is probably also used to implement\n * internal links but we don't really support that.\n */\n hdf5MsgLink(link) {\n var ver = this.getU8();\n var ltype = 0;\n if (ver !== 1) {\n throw new Error(\"Bad link message version \" + ver);\n }\n var flags = this.getU8();\n if ((flags & 8) !== 0) {\n ltype = this.getU8();\n }\n if ((flags & 4) !== 0) {\n this.getU64(); // creation order (IGNORE)\n }\n if ((flags & 16) !== 0) {\n this.getU8(); // link name character set (IGNORE)\n }\n var cb = 1 << (flags & 3);\n var lnsz = this.getUXX(cb);\n\n var child = this.createLink();\n\n child.name = this.getString(lnsz);\n\n if ((flags & 8) === 0) {\n child.hdr_offset = this.getOffset();\n }\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"this.hdf5MsgLink V\" + ver + \" F\" + flags + \" T\" + ltype +\n \" NM \" + child.name + \" OF \" + child.hdr_offset);\n }\n link.children.push(child);\n }\n\n\n /**\n * [PRIVATE]\n *\n * The fractal heap direct block contains:\n * 1. A signature.\n * 2. a byte version.\n * 3. an offset pointing to the header (for integrity checking).\n * 4. A variably-sized block offset that gives (_I think_) the mininum block offset\n * associated with this block.\n * 5. Variably-sized data. Block size varies with row number in a slightly tricky\n * fashion. Each \"row\" consists of \"table_width\" blocks. The first two rows, row 0 and 1,\n * have blocks of the \"starting block size\". Row 2-N have blocks of size 2^(row-1) times\n * the starting block size.\n */\n hdf5FractalHeapDirectBlock(fh, row, address, callback) {\n if (!this.checkSignature(\"FHDB\")) {\n throw new Error(\"Bad or missing FHDB signature\");\n }\n var ver = this.getU8();\n if (ver !== 0) {\n throw new Error('Bad FHDB version: ' + ver);\n }\n this.getOffset(); // heap header address (IGNORE)\n var cb = Math.ceil(fh.max_heapsz / 8.0);\n var block_offset = this.getUXX(cb); // block offset\n if ((fh.flags & 2) !== 0) {\n this.getU32(); // checksum (IGNORE)\n }\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"FHDB V:\" + ver + \" R:\" + row + \" O:\" + block_offset + \" A:\" + address);\n }\n var header_length = 5 + this._superblk.offsz + cb;\n if ((fh.flags & 2) !== 0) {\n header_length += 4;\n }\n var block_length;\n if (row <= 1) {\n block_length = fh.start_blksz;\n }\n else {\n block_length = Math.pow(2, row - 1) * fh.start_blksz;\n }\n if (callback) {\n return callback(row, address, block_offset, block_length);\n }\n else {\n return true; // continue enumeration.\n }\n }\n\n\n /**\n * [PRIVATE]\n *\n * The fractal heap indirect block contains:\n * 1. A signature.\n * 2. a byte version\n * 3. an offset pointing to the header (for integrity checking).\n * 4. a variably-sized block offset that gives (_I think_) the mininum block offset\n * associated with children of this block.\n * 5. 
pointers to K direct blocks\n * 6. pointers to N indirect blocks\n * 7. A checksum. This code completely ignores checksums.\n * See calculations of K and N in this.hdf5FractalHeapHeader(). Note that there can also\n * be additional information in the header if \"filtered\" direct blocks are used. I have\n * made no attempt to support this.\n */\n hdf5FractalHeapIndirectBlock(fh, callback) {\n if (!this.checkSignature(\"FHIB\")) {\n throw new Error(\"Bad or missing FHIB signature\");\n }\n var ver = this.getU8();\n if (ver !== 0) {\n throw new Error('Bad FHIB version: ' + ver);\n }\n this.getOffset(); // heap header address (IGNORE)\n var cb = Math.ceil(fh.max_heapsz / 8.0);\n var block_offset = this.getUXX(cb); // block offset\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"FHIB V:\" + ver + \" O:\" + block_offset);\n }\n var i;\n var address;\n var db_addrs = [];\n for (i = 0; i < fh.K; i += 1) {\n address = this.getOffset();\n if (address < this._superblk.eof_addr) {\n if (this.getMetadata(\"debug\")) {\n console.log(\"direct block at \" + address);\n }\n db_addrs.push(address);\n }\n }\n\n var ib_addrs = [];\n for (i = 0; i < fh.N; i += 1) {\n address = this.getOffset();\n if (address < this._superblk.eof_addr) {\n if (this.getMetadata(\"debug\")) {\n console.log(\"indirect block at \" + address);\n }\n ib_addrs.push(address);\n }\n }\n this.getU32(); // checksum (IGNORE)\n\n /* Finished reading the indirect block, now go read its children.\n */\n for (i = 0; i < db_addrs.length; i++) {\n this.seek(db_addrs[i]);\n /* TODO: check row calculation!\n */\n if (!this.hdf5FractalHeapDirectBlock(fh, i / fh.table_width, db_addrs[i], callback)) {\n return false;\n }\n }\n for (i = 0; i < ib_addrs.length; i++) {\n this.seek(ib_addrs[i]);\n if (!this.hdf5FractalHeapIndirectBlock(fh, callback)) {\n return false;\n }\n }\n return true;\n }\n\n\n /**\n * [PRIVATE]\n *\n * enumerate over all of the direct blocks in the fractal heap.\n */\n hdf5FractalHeapEnumerate(fh, callback) {\n this.seek(fh.root_addr);\n if (fh.K === 0) {\n this.hdf5FractalHeapDirectBlock(fh, 0, fh.root_addr, callback);\n }\n else {\n this.hdf5FractalHeapIndirectBlock(fh, callback);\n }\n }\n\n\n /**\n * [PRIVATE]\n */\n hdf5FractalHeapOffset(fh, offset) {\n var location;\n this.hdf5FractalHeapEnumerate(fh, function(row, address, block_offset, block_length) {\n if (offset >= block_offset && offset < block_offset + block_length) {\n location = address + (offset - block_offset);\n return false; // stop enumeration.\n }\n return true; // continue enumeration.\n });\n return location;\n }\n\n\n /**\n * [PRIVATE]\n *\n * Attribute info messages contain pointers to a fractal heap and a v2 btree.\n * If these pointers are valid, we must follow them to find more attributes.\n * The attributes are indexed by records in the \"type 8\" btree. 
These btree\n * records\n */\n hdf5MsgAttrInfo(link) {\n var ver = this.getU8();\n if (ver !== 0) {\n throw new Error('Bad attribute information message version: ' + ver);\n }\n\n var flags = this.getU8();\n\n if ((flags & 1) !== 0) {\n this.getU16(); // maximum creation index (IGNORE)\n }\n var fh_addr = this.getOffset();\n var bt_addr = this.getOffset();\n if ((flags & 2) !== 0) {\n this.getOffset(); // attribute creation order (IGNORE)\n }\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"this.hdf5MsgAttrInfo V\" + ver + \" F\" + flags + \" HP \" + fh_addr +\n \" AN \" + bt_addr);\n }\n\n var spp = this.tell();\n var fh; // fractal heap header.\n if (fh_addr < this._superblk.eof_addr) {\n this.seek(fh_addr);\n fh = this.hdf5FractalHeapHeader();\n }\n if (bt_addr < this._superblk.eof_addr) {\n this.seek(bt_addr);\n var bh = this.hdf5V2BtreeHeader();\n if (bh.type !== 8) {\n throw new Error(\"Can only handle indexed attributes.\");\n }\n this.seek(bh.root_addr);\n if (bh.depth > 0) {\n this.hdf5V2BtreeInternalNode(fh, bh.root_nrec, bh.depth, link);\n }\n else {\n this.hdf5V2BtreeLeafNode(fh, bh.root_nrec, link);\n }\n }\n this.seek(spp);\n }\n\n\n /**\n * [PRIVATE]\n *\n * Process a single message, given a message header. Assumes that\n * the data view offset is pointing to the remainder of the\n * message.\n *\n * V1 and V2 files use different sets of messages to accomplish\n * similar things. For example, V1 files tend to use \"symbol\n * table\" messages to describe links within a group, whereas V2\n * files use \"link\" and \"linkinfo\" messages.\n */\n hdf5ProcessMessage(msg, link) {\n var cq_new = {};\n var val_type;\n\n switch (msg.hm_type) {\n case 1:\n this.hdf5MsgDataspace(msg.hm_size, link);\n break;\n case 2:\n this.hdf5MsgLinkInfo(link);\n break;\n case 3:\n val_type = this.hdf5MsgDatatype(msg.hm_size);\n if (link) {\n link.type = val_type.typ_type;\n }\n break;\n case 6:\n this.hdf5MsgLink(link);\n break;\n case 8:\n this.hdf5MsgLayout(link);\n break;\n case 10:\n this.hdf5MsgGroupInfo();\n break;\n case 11:\n this.hdf5MsgPipeline(link);\n break;\n case 12:\n this.hdf5MsgAttribute(msg.hm_size, link);\n break;\n case 16:\n /* Process an object header continuation message. These\n * basically just say this header continues with a new segment\n * with a given location and length. They can come before the\n * end of the current message segment, and multiple\n * continuation messages can occur in any particular segment.\n * This means we have to enqueue them and shift them off the\n * queue when we finish processing the current segment.\n */\n cq_new.cq_off = this.getOffset();\n cq_new.cq_len = this.getLength();\n this._continuation_queue.push(cq_new);\n if (this.getMetadata(\"debug\")) {\n console.log(\"hdf5MsgObjHdrContinue \" + cq_new.cq_off + \" \" + cq_new.cq_len);\n }\n break;\n case 17: // SymbolTable\n link.sym_btree = this.getOffset();\n link.sym_lheap = this.getOffset();\n if (this.getMetadata(\"debug\")) {\n console.log(\"hdf5MsgSymbolTable \" + link.sym_btree + \" \" + link.sym_lheap);\n }\n break;\n case 21:\n this.hdf5MsgAttrInfo(link);\n break;\n case 0:\n case 4:\n case 5:\n case 7:\n case 18:\n case 19:\n case 20:\n case 22:\n case 24:\n this.skip(msg.hm_size);\n break;\n default:\n throw new Error('Unknown message type: ' + msg.hm_type);\n }\n }\n\n\n /**\n * [PRIVATE]\n *\n * Read a V2 object header. Object headers contain a series of messages that define\n * an HDF5 object, primarily a group or a dataset. 
V2 object headers, and V2 objects\n * generally, are much less concerned about alignment than V1 objects.\n */\n hdf5V2ObjectHeader(link) {\n if (!this.checkSignature(\"OHDR\")) {\n throw new Error('Bad or missing OHDR signature');\n }\n\n var ver = this.getU8();\n var flags = this.getU8();\n\n if ((flags & 0x20) !== 0) {\n this.getU32(); // access time (IGNORE)\n this.getU32(); // modify time (IGNORE)\n this.getU32(); // change time (IGNORE)\n this.getU32(); // birth time (IGNORE)\n }\n\n if ((flags & 0x10) !== 0) {\n this.getU16(); // maximum number of compact attributes (IGNORE)\n this.getU16(); // maximum number of dense attributes (IGNORE)\n }\n\n var cb = 1 << (flags & 3);\n var ck0_size = this.getUXX(cb);\n\n var msg_num = 0;\n var msg_offs = 0;\n var msg_bytes = ck0_size;\n\n if (this.getMetadata(\"debug\")) {\n console.log(\"this.hdf5V2ObjectHeader V\" + ver + \" F\" + flags + \" HS\" + ck0_size);\n }\n\n var hmsg;\n var cq_head;\n var spp;\n\n while (true) {\n while (msg_bytes - msg_offs >= 8) {\n hmsg = {};\n hmsg.hm_type = this.getU8();\n hmsg.hm_size = this.getU16();\n hmsg.hm_flags = this.getU8();\n if (this.getMetadata(\"debug\")) {\n console.log(\" msg\" + msg_num + \" F\" + hmsg.hm_flags + \" T \" +\n hmsg.hm_type + \" S \" + hmsg.hm_size +\n \" (\" + msg_offs + \"/\" + msg_bytes + \") \" +\n this.hdf5GetMsgName(hmsg.hm_type));\n }\n if ((flags & 0x04) !== 0) {\n hmsg.hm_corder = this.getU16();\n }\n spp = this.tell();\n this.hdf5ProcessMessage(hmsg, link);\n this.seek(spp + hmsg.hm_size); // this.skip past message.\n\n msg_offs += hmsg.hm_size + 4;\n\n msg_num += 1;\n }\n\n if ((msg_bytes - msg_offs) > 4) {\n this.skip(msg_bytes - (msg_offs + 4));\n }\n\n this.getU32(); // checksum (IGNORE)\n\n if (this._continuation_queue.length !== 0) {\n cq_head = this._continuation_queue.shift();\n this.seek(cq_head.cq_off);\n msg_bytes = cq_head.cq_len - 4;\n msg_offs = 0;\n if (this.getMetadata(\"debug\")) {\n console.log('continuing with ' + cq_head.cq_len + ' bytes at ' + this.tell());\n }\n if (!this.checkSignature(\"OCHK\")) {\n throw new Error(\"Bad v2 object continuation\");\n }\n } else {\n break;\n }\n }\n\n link.children.forEach(function (child, link_num) {\n that.seek(child.hdr_offset);\n if (that.getMetadata(\"debug\")) {\n console.log(link_num + \" \" + child.hdr_offset + \" \" + child.name);\n }\n if (this.checkSignature(\"OHDR\")) {\n that.seek(child.hdr_offset);\n that.hdf5V2ObjectHeader(child);\n }\n else {\n that.seek(child.hdr_offset);\n that.hdf5V1ObjectHeader(child);\n }\n });\n }\n\n\n /**\n * [PRIVATE]\n */\n loadData(link) {\n var that = this;\n\n if (link.chunk_size !== 0) {\n this.seek(link.data_offset);\n\n var n_bytes = 1;\n var i;\n for (i = 0; i < link.dims.length; i += 1) {\n n_bytes *= link.dims[i];\n }\n n_bytes *= this.typeSize(link.type);\n if (this.getMetadata(\"debug\")) {\n console.log('allocating ' + n_bytes + ' bytes');\n }\n var ab = new ArrayBuffer(n_bytes);\n link.n_filled = 0;\n switch (link.type) {\n case this._type_enum.INT8:\n link.array = new Int8Array(ab);\n break;\n case this._type_enum.UINT8:\n link.array = new Uint8Array(ab);\n break;\n case this._type_enum.INT16:\n link.array = new Int16Array(ab);\n break;\n case this._type_enum.UINT16:\n link.array = new Uint16Array(ab);\n break;\n case this._type_enum.INT32:\n link.array = new Int32Array(ab);\n break;\n case this._type_enum.UINT32:\n link.array = new Uint32Array(ab);\n break;\n case this._type_enum.FLT:\n link.array = new Float32Array(ab);\n break;\n case this._type_enum.DBL:\n 
link.array = new Float64Array(ab);\n break;\n default:\n throw new Error('Illegal type: ' + link.type);\n }\n this.hdf5V1BtreeNode(link);\n } else {\n if (link.data_offset > 0 && link.data_offset < this._superblk.eof_addr) {\n if (this.getMetadata(\"debug\")) {\n console.log('loading ' + link.data_length + ' bytes from ' + link.data_offset + ' to ' + link.name);\n }\n link.array = this.getArray(link.type, link.data_length,\n link.data_offset);\n } else {\n if (this.getMetadata(\"debug\")) {\n console.log('data not present for /' + link.name + '/');\n }\n }\n }\n\n link.children.forEach(function (child) {\n that.loadData(child);\n });\n }\n\n\n /**\n * [PRIVATE]\n *\n * Read a v1 object header. Object headers contain a series of\n * messages that define an HDF5 object, primarily a group or a\n * dataset. The v1 object header, like most of the v1 format, is\n * very careful about alignment. Every message must be on an\n * 8-byte alignment RELATIVE TO THE START OF THE HEADER. So if the\n * header starts on an odd boundary, messages may start on odd\n * boundaries as well. No, this doesn't make much sense.\n */\n hdf5V1ObjectHeader(link) {\n var that = this;\n var oh = {};\n this.startAlignment();\n oh.oh_ver = this.getU8();\n this.skip(1); // reserved\n oh.oh_n_msgs = this.getU16();\n oh.oh_ref_cnt = this.getU32();\n oh.oh_hdr_sz = this.getU32();\n if (oh.oh_ver !== 1) {\n throw new Error(\"Bad v1 object header version: \" + oh.oh_ver);\n }\n if (this.getMetadata(\"debug\")) {\n console.log(\"this.hdf5V1ObjectHeader V\" + oh.oh_ver +\n \" #M \" + oh.oh_n_msgs +\n \" RC \" + oh.oh_ref_cnt +\n \" HS \" + oh.oh_hdr_sz);\n }\n\n var msg_bytes = oh.oh_hdr_sz;\n var cq_head;\n var msg_num;\n var hmsg;\n var spp;\n\n for (msg_num = 0; msg_num < oh.oh_n_msgs; msg_num += 1) {\n if (msg_bytes <= 8) {\n if (this._continuation_queue.length !== 0) {\n cq_head = this._continuation_queue.shift();\n this.seek(cq_head.cq_off);\n msg_bytes = cq_head.cq_len;\n if (this.getMetadata(\"debug\")) {\n console.log('continuing with ' + msg_bytes + ' bytes at ' + this.tell());\n }\n this.startAlignment();\n } else {\n break;\n }\n }\n\n this.checkAlignment();\n\n hmsg = {};\n hmsg.hm_type = this.getU16();\n hmsg.hm_size = this.getU16();\n hmsg.hm_flags = this.getU8();\n\n if ((hmsg.hm_size % 8) !== 0) {\n throw new Error('Size is not 8-byte aligned: ' + hmsg.hm_size);\n }\n this.skip(3); // this.skip reserved\n msg_bytes -= (8 + hmsg.hm_size);\n if (this.getMetadata(\"debug\")) {\n console.log(\" msg\" + msg_num +\n \" F \" + hmsg.hm_flags +\n \" T \" + hmsg.hm_type +\n \" S \" + hmsg.hm_size +\n \"(\" + msg_bytes + \") \" + this.hdf5GetMsgName(hmsg.hm_type));\n }\n\n spp = this.tell();\n this.hdf5ProcessMessage(hmsg, link);\n this.seek(spp + hmsg.hm_size); // this.skip whole message.\n }\n\n if (link.sym_btree !== 0 && link.sym_lheap !== 0) {\n this.seek(link.sym_btree);\n var bt = this.hdf5V1BtreeNode();\n this.seek(link.sym_lheap);\n var lh = this.hdf5LocalHeap();\n var i;\n for (i = 0; i < bt.entries_used; i += 1) {\n this.seek(bt.keys[i].child_address);\n if (this.checkSignature(\"SNOD\")) {\n this.seek(bt.keys[i].child_address);\n this.hdf5GroupSymbolTable(lh, link);\n } else {\n this.seek(bt.keys[i].child_address);\n this.hdf5V1ObjectHeader(link);\n }\n }\n\n link.children.forEach(function (child) {\n that.seek(child.hdr_offset);\n that.hdf5V1ObjectHeader(child);\n });\n }\n }\n\n\n//------------------------------------------------------------------------------\n// FROM hdf5_tools.js\n\n 
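// ---------------------------------------------------------------------------
// Editor's note: the short block below is an illustrative sketch added for
// clarity; it is not part of the generated bundle above. The fractal-heap
// comments around hdf5FractalHeapDirectBlock() describe how HDF5 sizes its
// direct blocks: rows 0 and 1 use the heap's starting block size, and each
// later row r uses 2^(r-1) times that size. The helper name and the 512-byte
// starting size in the example are assumptions made only for this sketch.
// ---------------------------------------------------------------------------
function fractalHeapBlockLength(row, startBlockSize) {
  // Rows 0 and 1 share the starting block size; from row 2 onward the block
  // size doubles with every additional row: 2^(row - 1) * startBlockSize.
  return (row <= 1) ? startBlockSize : Math.pow(2, row - 1) * startBlockSize;
}
// Example: with startBlockSize = 512, rows 0..4 give 512, 512, 1024, 2048, 4096.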
getTypeMatchMinc(typeEnumVal){\n return this._type_matching[typeEnumVal - 1];\n }\n\n\n\n defined(x) {\n return typeof x !== 'undefined';\n }\n\n\n typeName(x) {\n if (! this.defined(x)) {\n return \"undefined\";\n }\n return x.constructor.name;\n }\n\n\n\n typeSize(typ) {\n if (typ >= this._type_enum.INT8 && typ < this.type_sizes.length) {\n return this.type_sizes[typ];\n }\n throw new Error('Unknown type ' + typ);\n }\n\n\n typeIsFloat(typ) {\n return (typ >= this._type_enum.FLT && typ <=this._type_enum.DBL);\n }\n\n\n /*\n * The remaining code after this point is not truly HDF5 specific -\n * it's mostly about converting the MINC file into the form\n * BrainBrowser is able to use. Therefore it is used for both HDF5\n * and NetCDF files.\n */\n\n /*\n * Join does not seem to be defined on the typed arrays in\n * javascript, so I've re-implemented it here, sadly.\n */\n join(array, string) {\n var result = \"\";\n if (array && array.length) {\n var i;\n for (i = 0; i < array.length - 1; i += 1) {\n result += array[i];\n result += string;\n }\n result += array[i];\n }\n return result;\n }\n\n /*\n * Recursively print out the structure and contents of the file.\n * Primarily useful for debugging.\n */\n printStructure(link, level) {\n var that = this;\n\n var i;\n var msg = \"\";\n for (i = 0; i < level * 2; i += 1) {\n msg += \" \";\n }\n msg += link.name + (link.children.length ? \"/\" : \"\");\n if (link.type > 0) {\n msg += ' ' + this.typeName(link.array);\n if (link.dims.length) {\n msg += '[' + link.dims.join(', ') + ']';\n }\n if (link.array) {\n msg += \":\" + link.array.length;\n } else {\n msg += \" NULL\";\n }\n }\n console.log(msg);\n\n Object.keys(link.attributes).forEach(function (name) {\n var value = link.attributes[name];\n\n msg = \"\";\n for (i = 0; i < level * 2 + 1; i += 1) {\n msg += \" \";\n }\n msg += link.name + ':' + name + \" \" +\n that.typeName(value) + \"[\" + value.length + \"] \";\n if (typeof value === \"string\") {\n msg += JSON.stringify(value);\n } else {\n msg += \"{\" + that.join(value.slice(0, 16), ', ');\n if (value.length > 16) {\n msg += \", ...\";\n }\n msg += \"}\";\n }\n console.log(msg);\n });\n\n link.children.forEach(function (child) {\n that.printStructure(child, level + 1);\n });\n }\n\n /* Find a dataset with a given name, by recursively searching through\n * the links. 
Groups will have 'type' fields of -1, since they contain\n * no data.\n * TODO (maybe): Use associative array for children?\n */\n findDataset(link, name, level) {\n var that = this;\n var result;\n if (link && link.name === name && link.type > 0) {\n result = link;\n } else {\n link.children.find( function( child ) {\n result = that.findDataset(child, name, level + 1);\n return that.defined(result);\n });\n }\n return result;\n }\n\n /* Find an attribute with a given name.\n */\n findAttribute(link, name, level) {\n var that = this;\n var result = link.attributes[name];\n if (result)\n return result;\n\n link.children.find( function (child ) {\n result = that.findAttribute( child, name, level + 1);\n return that.defined(result);\n });\n return result;\n }\n\n /**\n * @doc function\n * @name hdf5.this.scaleVoxels\n * @param {object} image The link object corresponding to the image data.\n * @param {object} image_min The link object corresponding to the image-min\n * data.\n * @param {object} image_max The link object corresponding to the image-max\n * data.\n * @param {object} valid_range An array of exactly two items corresponding\n * to the minimum and maximum valid _raw_ voxel values.\n * @param {boolean} debug True if we should print debugging information.\n * @returns A new ArrayBuffer containing the rescaled data.\n * @description\n * Convert the MINC data from voxel to real range. This returns a\n * new buffer that contains the \"real\" voxel values. It does less\n * work for floating-point volumes, since they don't need scaling.\n *\n * For debugging/testing purposes, also gathers basic voxel statistics,\n * for comparison against mincstats.\n */\n scaleVoxels(image, image_min, image_max, valid_range, debug) {\n /*\n var new_abuf = new ArrayBuffer(image.array.length *\n Float32Array.BYTES_PER_ELEMENT);\n var new_data = new Float32Array(new_abuf);\n\n */\n\n // 1D array to store the voxel data,\n // not initialized yet because it depends on the hdf5 type.\n var new_abuf = null;\n var new_data = null;\n\n // we could simply use image.type, but written types are easier to read...\n switch (this.getTypeMatchMinc(image.type)) {\n case 'int8':\n new_abuf = new ArrayBuffer(image.array.length * Int8Array.BYTES_PER_ELEMENT);\n new_data = new Int8Array(new_abuf);\n break;\n\n case 'int16':\n new_abuf = new ArrayBuffer(image.array.length * Int16Array.BYTES_PER_ELEMENT);\n new_data = new Int16Array(new_abuf);\n break;\n\n case 'int32':\n new_abuf = new ArrayBuffer(image.array.length * Int32Array.BYTES_PER_ELEMENT);\n new_data = new Int32Array(new_abuf);\n break;\n\n case 'float32':\n new_abuf = new ArrayBuffer(image.array.length * Float32Array.BYTES_PER_ELEMENT);\n new_data = new Float32Array(new_abuf);\n break;\n\n case 'float64':\n new_abuf = new ArrayBuffer(image.array.length * Float64Array.BYTES_PER_ELEMENT);\n new_data = new Float64Array(new_abuf);\n break;\n\n case 'uint8':\n new_abuf = new ArrayBuffer(image.array.length * Uint8Array.BYTES_PER_ELEMENT);\n new_data = new Uint8Array(new_abuf);\n break;\n\n case 'uint16':\n new_abuf = new ArrayBuffer(image.array.length * Uint16Array.BYTES_PER_ELEMENT);\n new_data = new Uint16Array(new_abuf);\n break;\n\n case 'uint32':\n new_abuf = new ArrayBuffer(image.array.length * Uint32Array.BYTES_PER_ELEMENT);\n new_data = new Uint32Array(new_abuf);\n break;\n\n default:\n var error_message = \"Unsupported data type: \" + header.datatype;\n console.log({ message: error_message } );\n //BrainBrowser.events.triggerEvent(\"error\", { message: 
error_message } );\n throw new Error(error_message);\n\n }\n\n\n var n_slice_dims = image.dims.length - image_min.dims.length;\n\n if (n_slice_dims < 1) {\n throw new Error(\"Too few slice dimensions: \" + image.dims.length +\n \" \" + image_min.dims.length);\n }\n var n_slice_elements = 1;\n var i;\n for (i = image_min.dims.length; i < image.dims.length; i += 1) {\n n_slice_elements *= image.dims[i];\n }\n if (debug) {\n console.log(n_slice_elements + \" voxels in slice.\");\n }\n var s = 0;\n var c = 0;\n var x = -Number.MAX_VALUE;\n var n = Number.MAX_VALUE;\n var im = image.array;\n var im_max = image_max.array;\n var im_min = image_min.array;\n if (debug) {\n console.log(\"valid range is \" + valid_range[0] + \" to \" + valid_range[1]);\n }\n\n var vrange;\n var rrange;\n var vmin = valid_range[0];\n var rmin;\n var j;\n var v;\n var is_float = this.typeIsFloat(image.type);\n for (i = 0; i < image_min.array.length; i += 1) {\n if (debug) {\n console.log(i + \" \" + im_min[i] + \" \" + im_max[i] + \" \" +\n im[i * n_slice_elements]);\n }\n if (is_float) {\n /* For floating-point volumes there is no scaling to be performed.\n * We do scan the data and make sure voxels are within the valid\n * range, and collect our statistics.\n */\n for (j = 0; j < n_slice_elements; j += 1) {\n v = im[c];\n if (v < valid_range[0] || v > valid_range[1]) {\n new_data[c] = 0.0;\n }\n else {\n new_data[c] = v;\n s += v;\n if (v > x) {\n x = v;\n }\n if (v < n) {\n n = v;\n }\n }\n c += 1;\n }\n }\n else {\n /* For integer volumes we have to scale each slice according to image-min,\n * image-max, and valid_range.\n */\n vrange = (valid_range[1] - valid_range[0]);\n rrange = (im_max[i] - im_min[i]);\n rmin = im_min[i];\n\n /*\n console.log(n_slice_elements);\n console.log(vrange);\n console.log(rrange);\n console.log(rmin);\n console.log(\"-----------------\");\n */\n\n\n for (j = 0; j < n_slice_elements; j += 1) {\n\n // v normalization to avoid \"flickering\".\n // v is scaled to the range [0, im_max[i]]\n // (possibly uint16 if the original per-slice min-max was not scaled up/down)\n v = (im[c] - vmin) / vrange * rrange + rmin;\n\n // we scale up/down to match the type of the target array\n v = v / im_max[i] * valid_range[1];\n\n\n new_data[c] = v;\n s += v;\n c += 1;\n if (v > x) {\n x = v;\n }\n if (v < n) {\n n = v;\n }\n\n }\n\n }\n }\n\n if (debug) {\n console.log(\"Min: \" + n);\n console.log(\"Max: \" + x);\n console.log(\"Sum: \" + s);\n console.log(\"Mean: \" + s / c);\n }\n\n return new_abuf;\n }\n\n /**\n * @doc function\n * @name hdf5.this.isRgbVolume\n * @param {object} header The header object representing the structure\n * of the MINC file.\n * @param {object} image The typed array object used to represent the\n * image data.\n * @returns {boolean} True if this is an RGB volume.\n * @description\n * A MINC volume is an RGB volume if all three are true:\n * 1. The voxel type is unsigned byte.\n * 2. It has a vector_dimension in the last (fastest-varying) position.\n * 3. 
The vector dimension has length 3.\n */\n isRgbVolume(header, image) {\n var order = header.order;\n return (image.array.constructor.name === 'Uint8Array' &&\n order.length > 0 &&\n order[order.length - 1] === \"vector_dimension\" &&\n header.vector_dimension.space_length === 3);\n }\n\n /**\n * @doc function\n * @name hdf5.this.rgbVoxels\n * @param {object} image The 'link' object created using createLink(),\n * that corresponds to the image within the HDF5 or NetCDF file.\n * @returns {object} A new ArrayBuffer that contains the original RGB\n * data augmented with alpha values.\n * @description\n * This function copies the RGB voxels to the destination buffer.\n * Essentially we just convert from 24 to 32 bits per voxel. This\n * is another MINC-specific function.\n */\n rgbVoxels(image) {\n var im = image.array;\n var n = im.length;\n var new_abuf = new ArrayBuffer(n / 3 * 4);\n var new_byte = new Uint8Array(new_abuf);\n var i, j = 0;\n for (i = 0; i < n; i += 3) {\n new_byte[j+0] = im[i+0];\n new_byte[j+1] = im[i+1];\n new_byte[j+2] = im[i+2];\n new_byte[j+3] = 255;\n j += 4;\n }\n return new_abuf;\n }\n\n\n //----------------------------------------------------------------------------\n // FROM minc_reader.js\n parseHeader(header_text) {\n var header;\n var error_message;\n\n try{\n header = JSON.parse(header_text);\n } catch(error) {\n error_message = \"server did not respond with valid JSON\" + \"\\n\" +\n \"Response was: \\n\" + header_text;\n\n console.log( { message: error_message });\n\n // BrainBrowser.events.triggerEvent(\"error\", { message: error_message });\n throw new Error(error_message);\n }\n\n if(header.order.length === 4) {\n header.order = header.order.slice(1);\n }\n\n header.datatype = header.datatype || \"uint8\";\n\n header.xspace.space_length = parseFloat(header.xspace.space_length);\n header.yspace.space_length = parseFloat(header.yspace.space_length);\n header.zspace.space_length = parseFloat(header.zspace.space_length);\n\n header.xspace.start = parseFloat(header.xspace.start);\n header.yspace.start = parseFloat(header.yspace.start);\n header.zspace.start = parseFloat(header.zspace.start);\n\n header.xspace.step = parseFloat(header.xspace.step);\n header.yspace.step = parseFloat(header.yspace.step);\n header.zspace.step = parseFloat(header.zspace.step);\n\n header.xspace.direction_cosines = header.xspace.direction_cosines || [1, 0, 0];\n header.yspace.direction_cosines = header.yspace.direction_cosines || [0, 1, 0];\n header.zspace.direction_cosines = header.zspace.direction_cosines || [0, 0, 1];\n\n header.xspace.direction_cosines = header.xspace.direction_cosines.map(parseFloat);\n header.yspace.direction_cosines = header.yspace.direction_cosines.map(parseFloat);\n header.zspace.direction_cosines = header.zspace.direction_cosines.map(parseFloat);\n\n /* Incrementation offsets for each dimension of the volume.\n * Note that this somewhat format-specific, so it does not\n * belong in the generic \"createVolume()\" code.\n */\n header[header.order[0]].offset = header[header.order[1]].space_length * header[header.order[2]].space_length;\n header[header.order[1]].offset = header[header.order[2]].space_length;\n header[header.order[2]].offset = 1;\n\n if(header.time) {\n header.time.space_length = parseFloat(header.time.space_length);\n header.time.start = parseFloat(header.time.start);\n header.time.step = parseFloat(header.time.step);\n header.time.offset = header.xspace.space_length * header.yspace.space_length * header.zspace.space_length;\n }\n\n return 
header;\n }\n\n\n/*\n createMincVolume(header, raw_data){\n var volume = createVolume(header, this.createMincData(header, raw_data));\n volume.type = \"minc\";\n\n volume.saveOriginAndTransform(header);\n volume.intensity_min = header.voxel_min;\n volume.intensity_max = header.voxel_max;\n\n return volume;\n\n }\n*/\n\n\n /*\n initialize the large 1D array of data depending on the type found.\n Rearange the original ArrayBuffer into a typed array.\n args:\n header: obj - header of the data\n raw_data: ArrayBuffer - sub object given by hdf5Loader\n */\n createMincData(header, raw_data){\n\n var native_data = null;\n\n switch (header.datatype) {\n case 'int8':\n native_data = new Int8Array(raw_data);\n break;\n case 'int16':\n native_data = new Int16Array(raw_data);\n break;\n case 'int32':\n native_data = new Int32Array(raw_data);\n break;\n case 'float32':\n native_data = new Float32Array(raw_data);\n break;\n case 'float64':\n native_data = new Float64Array(raw_data);\n break;\n case 'uint8':\n native_data = new Uint8Array(raw_data);\n break;\n case 'uint16':\n native_data = new Uint16Array(raw_data);\n break;\n case 'uint32':\n case 'rgb8':\n native_data = new Uint32Array(raw_data);\n break;\n default:\n var error_message = \"Unsupported data type: \" + header.datatype;\n console.log({ message: error_message } );\n //BrainBrowser.events.triggerEvent(\"error\", { message: error_message } );\n throw new Error(error_message);\n }\n\n return native_data;\n }\n\n\n\n\n //----------------------------------------------------------------------------\n\n _run(){\n var that = this;\n\n var inputBuffer = this._getInput(0);\n\n if(!inputBuffer){\n console.warn(\"Minc2Decoder requires an ArrayBuffer as input \\\"0\\\". Unable to continue.\");\n return;\n }\n\n this._dv = new DataView(inputBuffer);\n\n\n /* Patch in the missing function to get 64-bit integers.\n * Note: this won't really quite work b/c Javascript doesn't\n * have support for 64-bit integers.\n */\n this._dv.getUint64 = function (off, little_endian) {\n var l4 = that._dv.getUint32(off + 0, little_endian);\n var u4 = that._dv.getUint32(off + 4, little_endian);\n if (little_endian) {\n return (u4 << 32) + l4;\n } else {\n return (l4 << 32) + u4;\n }\n };\n\n\n var root = this.createLink();\n\n try{\n this._superblk = this.hdf5Superblock();\n }catch(e){\n //console.error(e);\n console.warn(\"The input file is not a Minc2 file.\");\n return;\n }\n\n\n this.seek(this._superblk.root_addr);\n\n if (this._superblk.sbver <= 1) {\n this.hdf5V1ObjectHeader(root);\n } else {\n this.hdf5V2ObjectHeader(root);\n }\n\n this.loadData(root);\n\n\n\n\n\n if (this.getMetadata(\"debug\")) {\n this.printStructure(root, 0);\n }\n\n /* The rest of this code is MINC-specific, so like some of the\n * functions above, it can migrate into minc.js once things have\n * stabilized.\n *\n * This code is responsible for collecting up the various pieces\n * of important data and metadata, and reorganizing them into the\n * form the volume viewer can handle.\n */\n var image = this.findDataset(root, \"image\");\n if (!this.defined(image)) {\n throw new Error(\"Can't find image dataset.\");\n }\n\n var valid_range = this.findAttribute(image, \"valid_range\", 0);\n /* If no valid_range is found, we substitute our own. 
*/\n if (!this.defined(valid_range)) {\n var min_val;\n var max_val;\n switch (image.type) {\n case this._type_enum.INT8:\n min_val = -(1 << 7);\n max_val = (1 << 7) - 1;\n break;\n case this._type_enum.UINT8:\n min_val = 0;\n max_val = (1 << 8) - 1;\n break;\n case this._type_enum.INT16:\n min_val = -(1 << 15);\n max_val = (1 << 15) - 1;\n break;\n case this._type_enum.UINT16:\n min_val = 0;\n max_val = (1 << 16) - 1;\n break;\n case this._type_enum.INT32:\n min_val = -(1 << 31);\n max_val = (1 << 31) - 1;\n break;\n case this._type_enum.UINT32:\n min_val = 0;\n max_val = (1 << 32) - 1;\n break;\n }\n valid_range = Float32Array.of(min_val, max_val);\n }\n\n\n var image_min = this.findDataset(root, \"image-min\");\n if (!this.defined(image_min)) {\n image_min = {\n array: Float32Array.of(0),\n dims: []\n };\n }\n\n var image_max = this.findDataset(root, \"image-max\");\n if (!this.defined(image_max)) {\n image_max = {\n array: Float32Array.of(1),\n dims: []\n };\n }\n\n\n /* Create the header expected by the existing brainbrowser code.\n */\n var header = {};\n var tmp = this.findAttribute(image, \"dimorder\", 0);\n if (typeof tmp !== 'string') {\n throw new Error(\"Can't find dimension order.\");\n }\n header.order = tmp.split(',');\n\n header.order.forEach(function(dimname) {\n var dim = that.findDataset(root, dimname);\n if (!that.defined(dim)) {\n throw new Error(\"Can't find dimension variable \" + dimname);\n }\n\n header[dimname] = {};\n\n tmp = that.findAttribute(dim, \"step\", 0);\n if (!that.defined(tmp)) {\n tmp = Float32Array.of(1);\n }\n header[dimname].step = tmp[0];\n\n tmp = that.findAttribute(dim, \"start\", 0);\n if (!that.defined(tmp)) {\n tmp = Float32Array.of(0);\n }\n header[dimname].start = tmp[0];\n\n tmp = that.findAttribute(dim, \"length\", 0);\n if (!that.defined(tmp)) {\n throw new Error(\"Can't find length for \" + dimname);\n }\n header[dimname].space_length = tmp[0];\n\n tmp = that.findAttribute(dim, \"direction_cosines\", 0);\n if (that.defined(tmp)) {\n // why is the bizarre call to slice needed?? it seems to work, though!\n header[dimname].direction_cosines = Array.prototype.slice.call(tmp);\n }\n else {\n if (dimname === \"xspace\") {\n header[dimname].direction_cosines = [1, 0, 0];\n } else if (dimname === \"yspace\") {\n header[dimname].direction_cosines = [0, 1, 0];\n } else if (dimname === \"zspace\") {\n header[dimname].direction_cosines = [0, 0, 1];\n }\n }\n });\n\n var new_abuf;\n\n if (this.isRgbVolume(header, image)) {\n header.order.pop();\n header.datatype = 'rgb8';\n new_abuf = this.rgbVoxels(image);\n }\n else {\n\n //header.datatype = 'float32';\n header.datatype = this.getTypeMatchMinc(image.type)\n\n new_abuf = this.scaleVoxels(image, image_min, image_max, valid_range, this.getMetadata(\"debug\"));\n }\n\n var minc_header = this.parseHeader( JSON.stringify(header) );\n var dataArray = this.createMincData(minc_header, new_abuf)\n\n // add the output to this filter\n this._addOutput(MniVolume);\n var mniVol = this.getOutput();\n mniVol.setData(dataArray, minc_header);\n mniVol.setMetadata(\"format\", \"minc2\");\n }\n\n\n\n} /* END of class Minc2Decoder */\n\nexport { Minc2Decoder }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* Robert D. 
Vincent\n*\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\nimport pako from 'pako';\nimport { Filter } from '../core/Filter.js';\nimport { MniVolume } from '../core/MniVolume.js';\n\n\n/**\n* Decodes a NIfTI file.\n* Takes an ArrayBuffer as input (0) and output a `MniVolume` (which inherit `Image3D`).\n*\n* **Usage**\n* - [examples/fileToNifti.html](../examples/fileToNifti.html)\n*/\nclass NiftiDecoder extends Filter {\n\n constructor(){\n super();\n this.addInputValidator(0, ArrayBuffer);\n this.setMetadata(\"debug\", false);\n }\n\n\n /**\n * [PRIVATE]\n */\n parseNifti1Header(raw_data) {\n var header = {\n order: [\"zspace\", \"yspace\", \"xspace\"],\n xspace: {},\n yspace: {},\n zspace: {}\n };\n var error_message = null;\n var dview = new DataView(raw_data, 0, 348);\n var bytes = new Uint8Array(raw_data, 0, 348);\n var littleEndian = true;\n\n var sizeof_hdr = dview.getUint32(0, true);\n if (sizeof_hdr === 0x0000015c) {\n littleEndian = true;\n } else if (sizeof_hdr === 0x5c010000) {\n littleEndian = false;\n } else {\n error_message = \"This does not look like a NIfTI-1 file.\";\n }\n\n var ndims = dview.getUint16(40, littleEndian);\n if (ndims < 3 || ndims > 4) {\n error_message = \"Cannot handle \" + ndims + \"-dimensional images yet.\";\n }\n\n var magic = String.fromCharCode.apply(null, bytes.subarray(344, 348));\n if (magic !== \"n+1\\0\") {\n error_message = \"Bad magic number: '\" + magic + \"'\";\n }\n\n if (error_message) {\n //throw new Error(error_message);\n console.warn(\"The input file is not a NIfTI file.\");\n return null;\n }\n\n header.xspace.space_length = dview.getUint16(42, littleEndian);\n header.yspace.space_length = dview.getUint16(44, littleEndian);\n header.zspace.space_length = dview.getUint16(46, littleEndian);\n var tlength = dview.getUint16(48, littleEndian);\n\n var datatype = dview.getUint16(70, littleEndian);\n var bitpix = dview.getUint16(72, littleEndian);\n\n var xstep = dview.getFloat32(80, littleEndian);\n var ystep = dview.getFloat32(84, littleEndian);\n var zstep = dview.getFloat32(88, littleEndian);\n var tstep = dview.getFloat32(92, littleEndian);\n\n var vox_offset = dview.getFloat32(108, littleEndian);\n if (vox_offset < 352) {\n vox_offset = 352;\n }\n\n var scl_slope = dview.getFloat32(112, littleEndian);\n var scl_inter = dview.getFloat32(116, littleEndian);\n\n var qform_code = dview.getUint16(252, littleEndian);\n var sform_code = dview.getUint16(254, littleEndian);\n\n var transform = [\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]\n ];\n\n if (tlength >= 1) {\n header.time = {};\n header.time.space_length = tlength;\n header.time.step = tstep;\n header.time.start = 0;\n header.time.name = \"time\";\n header.order = [\"time\", \"zspace\", \"yspace\", \"xspace\"];\n }\n\n /* Record the number of bytes per voxel, and note whether we need\n * to swap bytes in the voxel data.\n */\n header.bytes_per_voxel = bitpix / 8;\n header.must_swap_data = !littleEndian && header.bytes_per_voxel > 1;\n\n if (sform_code > 0) {\n /* The \"Sform\", if present, defines an affine transform which is\n * generally assumed to correspond to some standard coordinate\n * space (e.g. 
Talairach).\n */\n transform[0][0] = dview.getFloat32(280, littleEndian);\n transform[0][1] = dview.getFloat32(284, littleEndian);\n transform[0][2] = dview.getFloat32(288, littleEndian);\n transform[0][3] = dview.getFloat32(292, littleEndian);\n transform[1][0] = dview.getFloat32(296, littleEndian);\n transform[1][1] = dview.getFloat32(300, littleEndian);\n transform[1][2] = dview.getFloat32(304, littleEndian);\n transform[1][3] = dview.getFloat32(308, littleEndian);\n transform[2][0] = dview.getFloat32(312, littleEndian);\n transform[2][1] = dview.getFloat32(316, littleEndian);\n transform[2][2] = dview.getFloat32(320, littleEndian);\n transform[2][3] = dview.getFloat32(324, littleEndian);\n }\n else if (qform_code > 0) {\n /* The \"Qform\", if present, defines a quaternion which specifies\n * a less general transformation, often to scanner space.\n */\n var quatern_b = dview.getFloat32(256, littleEndian);\n var quatern_c = dview.getFloat32(260, littleEndian);\n var quatern_d = dview.getFloat32(264, littleEndian);\n var qoffset_x = dview.getFloat32(268, littleEndian);\n var qoffset_y = dview.getFloat32(272, littleEndian);\n var qoffset_z = dview.getFloat32(276, littleEndian);\n var qfac = (dview.getFloat32(76, littleEndian) < 0) ? -1.0 : 1.0;\n\n transform = this.niftiQuaternToMat44(quatern_b, quatern_c, quatern_d,\n qoffset_x, qoffset_y, qoffset_z,\n xstep, ystep, zstep, qfac);\n }\n else {\n transform[0][0] = xstep;\n transform[1][1] = ystep;\n transform[2][2] = zstep;\n }\n\n MniVolume.transformToMinc(transform, header);\n\n header.datatype = datatype;\n header.vox_offset = vox_offset;\n header.scl_slope = scl_slope;\n header.scl_inter = scl_inter;\n\n return header;\n }\n\n\n /**\n * [PRIVATE]\n * This function is a direct translation of the identical function\n * found in the standard NIfTI-1 library (nifti1_io.c).\n */\n niftiQuaternToMat44( qb, qc, qd,\n qx, qy, qz,\n dx, dy, dz, qfac )\n {\n var m = [ // 4x4 transform\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 1]\n ];\n var b = qb;\n var c = qc;\n var d = qd;\n var a, xd, yd, zd;\n\n // compute a parameter from b,c,d\n\n a = 1.0 - (b * b + c * c + d * d);\n if ( a < 1.e-7 ) { // special case\n a = 1.0 / Math.sqrt(b * b + c * c + d * d);\n b *= a; // normalize (b,c,d) vector\n c *= a;\n d *= a;\n a = 0.0; // a = 0 ==> 180 degree rotation\n } else {\n a = Math.sqrt(a); // angle = 2*arccos(a)\n }\n\n // load rotation matrix, including scaling factors for voxel sizes\n\n xd = (dx > 0.0) ? dx : 1.0; // make sure are positive\n yd = (dy > 0.0) ? dy : 1.0;\n zd = (dz > 0.0) ? 
dz : 1.0;\n\n if ( qfac < 0.0 ) // left handedness?\n zd = -zd;\n\n m[0][0] = (a * a + b * b - c * c - d * d) * xd;\n m[0][1] = 2.0 * (b * c - a * d ) * yd;\n m[0][2] = 2.0 * (b * d + a * c ) * zd;\n m[1][0] = 2.0 * (b * c + a * d ) * xd;\n m[1][1] = (a * a + c * c - b * b - d * d) * yd;\n m[1][2] = 2.0 * (c * d - a * b ) * zd;\n m[2][0] = 2.0 * (b * d - a * c ) * xd;\n m[2][1] = 2.0 * (c * d + a * b ) * yd;\n m[2][2] = (a * a + d * d - c * c - b * b) * zd;\n\n // load offsets\n m[0][3] = qx;\n m[1][3] = qy;\n m[2][3] = qz;\n\n return m;\n }\n\n\n /**\n * [PRIVATE]\n */\n createNifti1Data(header, raw_data) {\n var native_data = null;\n\n if (header.must_swap_data) {\n MniVolume.swapn(\n new Uint8Array(raw_data, header.vox_offset),\n header.bytes_per_voxel\n );\n }\n\n switch (header.datatype) {\n case 2: // DT_UNSIGNED_CHAR\n // no translation necessary; could optimize this out.\n native_data = new Uint8Array(raw_data, header.vox_offset);\n break;\n case 4: // DT_SIGNED_SHORT\n native_data = new Int16Array(raw_data, header.vox_offset);\n break;\n case 8: // DT_SIGNED_INT\n native_data = new Int32Array(raw_data, header.vox_offset);\n break;\n case 16: // DT_FLOAT\n native_data = new Float32Array(raw_data, header.vox_offset);\n break;\n case 64: // DT_DOUBLE\n native_data = new Float64Array(raw_data, header.vox_offset);\n break;\n // Values above 256 are NIfTI-specific, and rarely used.\n case 256: // DT_INT8\n native_data = new Int8Array(raw_data, header.vox_offset);\n break;\n case 512: // DT_UINT16\n native_data = new Uint16Array(raw_data, header.vox_offset);\n break;\n case 768: // DT_UINT32\n native_data = new Uint32Array(raw_data, header.vox_offset);\n break;\n default:\n // We don't yet support 64-bit, complex, RGB, and float 128 types.\n throw new Error(\"Unsupported data type: \" + header.datatype);\n }\n\n var d = 0; // Generic loop counter.\n var slope = header.scl_slope;\n var inter = header.scl_inter;\n\n // According to the NIfTI specification, a slope value of zero means\n // that the data should _not_ be scaled. Otherwise, every voxel is\n // transformed according to value = value * slope + inter\n //\n if (slope !== 0.0) {\n var float_data = new Float32Array(native_data.length);\n\n for (d = 0; d < native_data.length; d++) {\n float_data[d] = native_data[d] * slope + inter;\n }\n native_data = float_data; // Return the new float buffer.\n }\n\n if(header.order.length === 4) {\n header.order = header.order.slice(1);\n }\n\n // Incrementation offsets for each dimension of the volume.\n header[header.order[0]].offset = header[header.order[1]].space_length * header[header.order[2]].space_length;\n header[header.order[1]].offset = header[header.order[2]].space_length;\n header[header.order[2]].offset = 1;\n\n if(header.time) {\n header.time.offset = header.xspace.space_length * header.yspace.space_length * header.zspace.space_length;\n }\n\n return native_data;\n }\n\n\n //----------------------------------------------------------------------------\n\n _run(){\n var that = this;\n var inputBuffer = this._getInput(0);\n\n if(!inputBuffer){\n console.warn(\"NiftiDecoder requires an ArrayBuffer as input \\\"0\\\". 
Unable to continue.\");\n return;\n }\n\n var header = this.parseNifti1Header( inputBuffer );\n\n // abort if header not valid\n if(!header)\n return;\n\n var dataArray = this.createNifti1Data(header, inputBuffer)\n\n // add the output to this filter\n this._addOutput(MniVolume);\n var mniVol = this.getOutput();\n mniVol.setData(dataArray, header);\n mniVol.setMetadata(\"format\", \"nifti\");\n\n }\n\n\n} /* END class NiftiDecoder */\n\nexport { NiftiDecoder }\n","/* FileSaver.js\n * A saveAs() FileSaver implementation.\n * 1.3.2\n * 2016-06-16 18:25:19\n *\n * By Eli Grey, http://eligrey.com\n * License: MIT\n * See https://github.com/eligrey/FileSaver.js/blob/master/LICENSE.md\n */\n\n/*global self */\n/*jslint bitwise: true, indent: 4, laxbreak: true, laxcomma: true, smarttabs: true, plusplus: true */\n\n/*! @source http://purl.eligrey.com/github/FileSaver.js/blob/master/FileSaver.js */\n\nvar saveAs = saveAs || (function(view) {\n\t\"use strict\";\n\t// IE <10 is explicitly unsupported\n\tif (typeof view === \"undefined\" || typeof navigator !== \"undefined\" && /MSIE [1-9]\\./.test(navigator.userAgent)) {\n\t\treturn;\n\t}\n\tvar\n\t\t doc = view.document\n\t\t // only get URL when necessary in case Blob.js hasn't overridden it yet\n\t\t, get_URL = function() {\n\t\t\treturn view.URL || view.webkitURL || view;\n\t\t}\n\t\t, save_link = doc.createElementNS(\"http://www.w3.org/1999/xhtml\", \"a\")\n\t\t, can_use_save_link = \"download\" in save_link\n\t\t, click = function(node) {\n\t\t\tvar event = new MouseEvent(\"click\");\n\t\t\tnode.dispatchEvent(event);\n\t\t}\n\t\t, is_safari = /constructor/i.test(view.HTMLElement) || view.safari\n\t\t, is_chrome_ios =/CriOS\\/[\\d]+/.test(navigator.userAgent)\n\t\t, throw_outside = function(ex) {\n\t\t\t(view.setImmediate || view.setTimeout)(function() {\n\t\t\t\tthrow ex;\n\t\t\t}, 0);\n\t\t}\n\t\t, force_saveable_type = \"application/octet-stream\"\n\t\t// the Blob API is fundamentally broken as there is no \"downloadfinished\" event to subscribe to\n\t\t, arbitrary_revoke_timeout = 1000 * 40 // in ms\n\t\t, revoke = function(file) {\n\t\t\tvar revoker = function() {\n\t\t\t\tif (typeof file === \"string\") { // file is an object URL\n\t\t\t\t\tget_URL().revokeObjectURL(file);\n\t\t\t\t} else { // file is a File\n\t\t\t\t\tfile.remove();\n\t\t\t\t}\n\t\t\t};\n\t\t\tsetTimeout(revoker, arbitrary_revoke_timeout);\n\t\t}\n\t\t, dispatch = function(filesaver, event_types, event) {\n\t\t\tevent_types = [].concat(event_types);\n\t\t\tvar i = event_types.length;\n\t\t\twhile (i--) {\n\t\t\t\tvar listener = filesaver[\"on\" + event_types[i]];\n\t\t\t\tif (typeof listener === \"function\") {\n\t\t\t\t\ttry {\n\t\t\t\t\t\tlistener.call(filesaver, event || filesaver);\n\t\t\t\t\t} catch (ex) {\n\t\t\t\t\t\tthrow_outside(ex);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t, auto_bom = function(blob) {\n\t\t\t// prepend BOM for UTF-8 XML and text/* types (including HTML)\n\t\t\t// note: your browser will automatically convert UTF-16 U+FEFF to EF BB BF\n\t\t\tif (/^\\s*(?:text\\/\\S*|application\\/xml|\\S*\\/\\S*\\+xml)\\s*;.*charset\\s*=\\s*utf-8/i.test(blob.type)) {\n\t\t\t\treturn new Blob([String.fromCharCode(0xFEFF), blob], {type: blob.type});\n\t\t\t}\n\t\t\treturn blob;\n\t\t}\n\t\t, FileSaver = function(blob, name, no_auto_bom) {\n\t\t\tif (!no_auto_bom) {\n\t\t\t\tblob = auto_bom(blob);\n\t\t\t}\n\t\t\t// First try a.download, then web filesystem, then object URLs\n\t\t\tvar\n\t\t\t\t filesaver = this\n\t\t\t\t, type = blob.type\n\t\t\t\t, force = 
type === force_saveable_type\n\t\t\t\t, object_url\n\t\t\t\t, dispatch_all = function() {\n\t\t\t\t\tdispatch(filesaver, \"writestart progress write writeend\".split(\" \"));\n\t\t\t\t}\n\t\t\t\t// on any filesys errors revert to saving with object URLs\n\t\t\t\t, fs_error = function() {\n\t\t\t\t\tif ((is_chrome_ios || (force && is_safari)) && view.FileReader) {\n\t\t\t\t\t\t// Safari doesn't allow downloading of blob urls\n\t\t\t\t\t\tvar reader = new FileReader();\n\t\t\t\t\t\treader.onloadend = function() {\n\t\t\t\t\t\t\tvar url = is_chrome_ios ? reader.result : reader.result.replace(/^data:[^;]*;/, 'data:attachment/file;');\n\t\t\t\t\t\t\tvar popup = view.open(url, '_blank');\n\t\t\t\t\t\t\tif(!popup) view.location.href = url;\n\t\t\t\t\t\t\turl=undefined; // release reference before dispatching\n\t\t\t\t\t\t\tfilesaver.readyState = filesaver.DONE;\n\t\t\t\t\t\t\tdispatch_all();\n\t\t\t\t\t\t};\n\t\t\t\t\t\treader.readAsDataURL(blob);\n\t\t\t\t\t\tfilesaver.readyState = filesaver.INIT;\n\t\t\t\t\t\treturn;\n\t\t\t\t\t}\n\t\t\t\t\t// don't create more object URLs than needed\n\t\t\t\t\tif (!object_url) {\n\t\t\t\t\t\tobject_url = get_URL().createObjectURL(blob);\n\t\t\t\t\t}\n\t\t\t\t\tif (force) {\n\t\t\t\t\t\tview.location.href = object_url;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tvar opened = view.open(object_url, \"_blank\");\n\t\t\t\t\t\tif (!opened) {\n\t\t\t\t\t\t\t// Apple does not allow window.open, see https://developer.apple.com/library/safari/documentation/Tools/Conceptual/SafariExtensionGuide/WorkingwithWindowsandTabs/WorkingwithWindowsandTabs.html\n\t\t\t\t\t\t\tview.location.href = object_url;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tfilesaver.readyState = filesaver.DONE;\n\t\t\t\t\tdispatch_all();\n\t\t\t\t\trevoke(object_url);\n\t\t\t\t}\n\t\t\t;\n\t\t\tfilesaver.readyState = filesaver.INIT;\n\n\t\t\tif (can_use_save_link) {\n\t\t\t\tobject_url = get_URL().createObjectURL(blob);\n\t\t\t\tsetTimeout(function() {\n\t\t\t\t\tsave_link.href = object_url;\n\t\t\t\t\tsave_link.download = name;\n\t\t\t\t\tclick(save_link);\n\t\t\t\t\tdispatch_all();\n\t\t\t\t\trevoke(object_url);\n\t\t\t\t\tfilesaver.readyState = filesaver.DONE;\n\t\t\t\t});\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tfs_error();\n\t\t}\n\t\t, FS_proto = FileSaver.prototype\n\t\t, saveAs = function(blob, name, no_auto_bom) {\n\t\t\treturn new FileSaver(blob, name || blob.name || \"download\", no_auto_bom);\n\t\t}\n\t;\n\t// IE 10+ (native saveAs)\n\tif (typeof navigator !== \"undefined\" && navigator.msSaveOrOpenBlob) {\n\t\treturn function(blob, name, no_auto_bom) {\n\t\t\tname = name || blob.name || \"download\";\n\n\t\t\tif (!no_auto_bom) {\n\t\t\t\tblob = auto_bom(blob);\n\t\t\t}\n\t\t\treturn navigator.msSaveOrOpenBlob(blob, name);\n\t\t};\n\t}\n\n\tFS_proto.abort = function(){};\n\tFS_proto.readyState = FS_proto.INIT = 0;\n\tFS_proto.WRITING = 1;\n\tFS_proto.DONE = 2;\n\n\tFS_proto.error =\n\tFS_proto.onwritestart =\n\tFS_proto.onprogress =\n\tFS_proto.onwrite =\n\tFS_proto.onabort =\n\tFS_proto.onerror =\n\tFS_proto.onwriteend =\n\t\tnull;\n\n\treturn saveAs;\n}(\n\t typeof self !== \"undefined\" && self\n\t|| typeof window !== \"undefined\" && window\n\t|| this.content\n));\n// `self` is undefined in Firefox for Android content script context\n// while `this` is nsIContentFrameMessageManager\n// with an attribute `content` that corresponds to the window\n\nif (typeof module !== \"undefined\" && module.exports) {\n module.exports.saveAs = saveAs;\n} else if ((typeof define !== \"undefined\" && define !== null) && 
(define.amd !== null)) {\n define(\"FileSaver.js\", function() {\n return saveAs;\n });\n}\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n*\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\nimport pako from 'pako';\nimport FileSaver from 'file-saver';\n//import JSZip from \"jszip\";\nimport { Filter } from '../core/Filter.js';\nimport { Image2D } from '../core/Image2D.js';\nimport { Image3D } from '../core/Image3D.js';\n\n\n/**\n* A PixpEncoder instance takes an Image2D or Image3D as input with `addInput(...)`\n* and encode it so that it can be saved as a *.pixp file.\n* An output filename can be specified using `.setMetadata(\"filename\", \"yourName.pixp\");`,\n* by default, the name is \"untitled.pixp\".\n* When `update()` is called, a gzip blog is prepared as output[0] and can then be downloaded\n* when calling the method `.download()`. The gzip blob could also be sent over AJAX\n* using a third party library.\n*\n* **Usage**\n* - [examples/savePixpFile.html](../examples/savePixpFile.html)\n*/\nclass PixpEncoder extends Filter {\n constructor(){\n super();\n this.setMetadata(\"filename\", \"untitled.pixp\");\n\n }\n\n\n /**\n * [PRIVATE]\n * overwrite the original from Filter\n * Only accept Image2D and Image3D\n */\n hasValidInput(){\n var input = this._getInput();\n return input && ( input.isOfType(Image2D.TYPE()) || input.isOfType(Image3D.TYPE()) );\n }\n\n\n _run(){\n\n if(! this.hasValidInput() ){\n console.warn(\"PixpEncoder can only encode Image2D and Image3D.\");\n return;\n }\n\n var input = this._getInput();\n\n var arrayAndMeta = {\n dataType: input.getData().constructor.name, // typed array type\n data: Array.prototype.slice.call( input.getData() ), // data of pixel/voxel\n metadata: input.getMetadataCopy(), // Image2D/Image3D._metadata\n pixpipeType: input.constructor.name // most likely \"Image2D\", \"Image3D\", \"MniVolume\", \"LineString\", etc.\n }\n\n var pixpString = JSON.stringify( arrayAndMeta );\n\n var deflator = new pako.Deflate({\n level: 6,\n //to: 'string',\n gzip: true,\n header: {\n text: true,\n time: + new Date(),\n comment: \"This file was created by Pixpipe.js\"\n }\n });\n\n deflator.push(pixpString, true);\n\n // making a blob to be saved\n this._output[0] = new Blob([deflator.result], {type: \"application/gzip\"} );\n }\n\n\n /**\n * Download the generated file\n */\n download(){\n var output = this.getOutput();\n\n if(output){\n FileSaver.saveAs( this.getOutput(), this.getMetadata(\"filename\"));\n }else{\n console.warn(\"No output computed yet.\");\n }\n }\n\n} /* END of class PixpEncoder */\n\nexport { PixpEncoder }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n*\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\nimport pako from 'pako';\nimport FileSaver from 'file-saver';\nimport { Filter } from '../core/Filter.js';\nimport { Image2D } from '../core/Image2D.js';\nimport { Image3D } from '../core/Image3D.js';\n\n\n/**\n* A PixpDecoder instance decodes a *.pixp file and output an Image2D or Image3D.\n* The input, specified by `.addInput(...)` must be an ArrayBuffer\n* (from an `UrlToArrayBufferFilter`, an `UrlToArrayBufferReader` or anothrer source ).\n*\n* **Usage**\n* - [examples/pixpFileToImage2D.html](../examples/pixpFileToImage2D.html)\n*/\nclass PixpDecoder extends Filter {\n constructor(){\n super();\n this.addInputValidator(0, ArrayBuffer);\n }\n\n\n _run(){\n\n if(! 
this.hasValidInput() ){\n console.warn(\"PixpDecoder can only decode ArrayBuffer.\");\n return;\n }\n\n var input = this._getInput();\n\n //var pixpString2 = pako.inflate(input /*, { to: 'string' }*/);\n //var pixpObject = JSON.parse( pixpString2 );\n\n var inflator = new pako.Inflate({\n level: 6,\n to: 'string'\n });\n\n inflator.push( input, true );\n\n // quit if not a gz file\n if( inflator.err ){\n console.warn(\"This file is not a Pixp file.\");\n return;\n }\n \n var pixpObject = null;\n\n try{\n pixpObject = JSON.parse( inflator.result );\n }catch(e){\n console.warn(\"Could not parse pixp file.\");\n console.error(e);\n return;\n }\n\n if( ! (pixpObject.pixpipeType in pixpipe)){\n console.warn(\"Unknown type pixpipe.\" + pixpObject.pixpipeType + \", cannot create any output.\" );\n return;\n }\n\n var constructorHost = null;\n \n try{\n constructorHost = window;\n }catch( e ){\n try{\n constructorHost = GLOBAL;\n }catch( e ){\n console.warn( \"You are not in a Javascript environment?? Weird.\" );\n return;\n }\n }\n \n if(! constructorHost[ pixpObject.dataType ]){\n console.warn( \"Data array from pixp file is unknown: \" + pixpObject.dataType );\n return;\n }\n\n var outputRawData = new constructorHost[ pixpObject.dataType ]( pixpObject.data );\n var output = new pixpipe[ pixpObject.pixpipeType ];\n output.setRawData( outputRawData );\n output.setRawMetadata( pixpObject.metadata );\n\n this._output[0] = output;\n\n }\n\n\n\n} /* END of class PixpDecoder */\n\nexport { PixpDecoder }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* Robert D. Vincent\n*\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\nimport pako from 'pako';\nimport { Filter } from '../core/Filter.js';\nimport { MniVolume } from '../core/MniVolume.js';\n\n/**\n* Decodes a MGH file.\n* Takes an ArrayBuffer as input (0) and output a `MniVolume` (which inherit `Image3D`).\n*\n* **Usage**\n* - [examples/fileToMgh.html](../examples/fileToMgh.html)\n*/\nclass MghDecoder extends Filter {\n \n constructor() {\n super();\n this.addInputValidator(0, ArrayBuffer);\n this.setMetadata(\"debug\", false);\n }\n \n \n /* Function to parse the basic MGH header. This is a 284-byte binary\n * object that begins at offset zero in the file.\n * The resulting header object will contain the following fields:\n *\n * header.order[] - An array of strings that gives the order of the\n * spatial dimensions.\n * header.xspace - Description of the X axis (patient left to right)\n * header.yspace - Description of the Y axis (patient posterior to anterior)\n * header.zspace - Description of the Z axis (patient inferior to superior)\n * header.time - Description of time axis, if any.\n\n * Non-standard fields used internally only:\n *\n * header.nvoxels - Total number of voxels in the image.\n * header.datatype - MGH data type of image.\n * header.little_endian - True if data is little endian (should be false!)\n */\n _parseMGHHeader(raw_data, callback) {\n var header = {\n order: [\"xspace\", \"yspace\", \"zspace\"],\n xspace: {},\n yspace: {},\n zspace: {}\n };\n var error_message;\n var dview = new DataView(raw_data, 0, 284);\n var little_endian = true;\n\n /* Read the header version, which should always have the value\n * 0x00000001. 
We use this to test the endian-ness of the data,\n * but it should always be big-endian.\n */\n var hdr_version = dview.getUint32(0, true);\n if (hdr_version === 0x00000001) {\n little_endian = true;\n } else if (hdr_version === 0x01000000) {\n little_endian = false; // Generally files are big-endian.\n }\n else {\n console.warn( \"This does not look like an MGH file.\" );\n return null;\n }\n\n /* Now read the dimension lengths. There are at most 4 dimensions\n * in the file. The lengths fields are always present, but they\n * unused dimensions may have the value 0 or 1.\n */\n var ndims = 0;\n var sizes = [0, 0, 0, 0];\n var header_offset = 4;\n var nvoxels = 1;\n for (ndims = 0; ndims < 4; ndims++) {\n sizes[ndims] = dview.getUint32(header_offset, little_endian);\n if (sizes[ndims] <= 1) {\n break;\n }\n nvoxels *= sizes[ndims];\n header_offset += 4;\n }\n\n if (ndims < 3 || ndims > 4) {\n console.warn( \"Cannot handle \" + ndims + \"-dimensional images yet.\" );\n return null;\n }\n\n var datatype = dview.getUint32(20, little_endian);\n // IGNORED var dof = dview.getUint32(24, little_endian);\n var good_transform_flag = dview.getUint16(28, little_endian);\n var spacing = [1.0, 1.0, 1.0];\n var i, j;\n var dircos = [\n [-1.0, 0.0, 0.0],\n [ 0.0, 0.0, -1.0],\n [ 0.0, 1.0, 0.0],\n [ 0.0, 0.0, 0.0]\n ];\n if (good_transform_flag) {\n header_offset = 30;\n for (i = 0; i < 3; i++) {\n spacing[i] = dview.getFloat32(header_offset, little_endian);\n header_offset += 4;\n }\n for (i = 0; i < 4; i++) {\n for (j = 0; j < 3; j++) {\n dircos[i][j] = dview.getFloat32(header_offset, little_endian);\n header_offset += 4;\n }\n }\n }\n\n if ( this._metadata.debug ) {\n // Prints out the transform in a format similar to the output\n // of FreeSurfer's mri_info tool.\n //\n for (i = 0; i < 3; i++) {\n var s1 = \"\";\n for (j = 0; j < 4; j++) {\n s1 += \"xyzc\"[j] + \"_\" + \"ras\"[i] + \" \" + dircos[j][i] + \" \";\n }\n console.log(s1);\n }\n }\n\n var axis_index_from_file = [0, 1, 2];\n\n for ( var axis = 0; axis < 3; axis++) {\n var spatial_axis = 0;\n var c_x = Math.abs(dircos[axis][0]);\n var c_y = Math.abs(dircos[axis][1]);\n var c_z = Math.abs(dircos[axis][2]);\n\n header.order[axis] = \"xspace\";\n if (c_y > c_x && c_y > c_z) {\n spatial_axis = 1;\n header.order[axis] = \"yspace\";\n }\n if (c_z > c_x && c_z > c_y) {\n spatial_axis = 2;\n header.order[axis] = \"zspace\";\n }\n axis_index_from_file[axis] = spatial_axis;\n }\n\n /* If there are four dimensions, assume the last is the time\n * dimension. I use default values for step and start because as\n * far as I know MGH files do not carry any descriptive\n * information about the 4th dimension.\n */\n if (ndims === 4) {\n if (this._metadata.debug) {\n console.log(\"Creating time dimension: \" + sizes[3]);\n }\n header.time = {\n space_length: sizes[3],\n step: 1,\n start: 0,\n name: \"time\"\n };\n header.order.push(\"time\");\n }\n\n /** This is here because there are two different ways of interpreting\n * the origin of an MGH file. One can ignore the offsets in the\n * transform, using the centre of the voxel grid. Or you can correct\n * these naive grid centres using the values stored in the transform.\n * The first approach is what is used by surface files, so to get them\n * to register nicely, we want ignore_offsets to be true. 
However,\n * getting volumetric files to register correctly implies setting\n * ignore_offsets to false.\n */\n var ignore_offsets = false;\n var mgh_xform = [\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]\n ];\n for (i = 0; i < 3; i++) {\n for (j = 0; j < 3; j++) {\n mgh_xform[i][j] = dircos[j][i] * spacing[i];\n }\n }\n\n for (i = 0; i < 3; i++) {\n var temp = 0.0;\n for (j = 0; j < 3; j++) {\n temp += mgh_xform[i][j] * (sizes[j] / 2.0);\n }\n\n if (ignore_offsets) {\n mgh_xform[i][4 - 1] = -temp;\n }\n else {\n mgh_xform[i][4 - 1] = dircos[4 - 1][i] - temp;\n }\n }\n\n var transform = [\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]\n ];\n\n for (i = 0; i < 3; i++) {\n for (j = 0; j < 4; j++) {\n var volume_axis = j;\n if (j < 3) {\n volume_axis = axis_index_from_file[j];\n }\n transform[i][volume_axis] = mgh_xform[i][j];\n }\n }\n\n // Now that we have the transform, need to convert it to MINC-like\n // steps and direction_cosines.\n\n MniVolume.transformToMinc(transform, header);\n\n // Save the datatype so that we can refer to it later.\n header.datatype = datatype;\n header.little_endian = little_endian;\n header.nvoxels = nvoxels;\n\n // Save the voxel dimension lengths.\n for (i = 0; i < 3; i++) {\n header[header.order[i]].space_length = sizes[i];\n }\n\n return header;\n }\n \n \n _createMGHData(header, raw_data) {\n \n var native_data = null;\n var bytes_per_voxel = 1;\n\n switch (header.datatype) {\n case 0: // Unsigned characters.\n bytes_per_voxel = 1;\n break;\n case 1: // 4-byte signed integers.\n case 3: // 4-byte float.\n bytes_per_voxel = 4;\n break;\n case 4: // 2-byte signed integers.\n bytes_per_voxel = 2;\n break;\n default:\n console.warn( \"Unsupported data type: \" + header.datatype );\n return null;\n }\n\n var nbytes = header.nvoxels * bytes_per_voxel;\n\n if (bytes_per_voxel > 1 && !header.little_endian) {\n MniVolume.swapn( new Uint8Array(raw_data, 284, nbytes), bytes_per_voxel );\n }\n\n switch (header.datatype) {\n case 0: // unsigned char\n native_data = new Uint8Array(raw_data, 284, header.nvoxels);\n break;\n case 1: // signed int\n native_data = new Int32Array(raw_data, 284, header.nvoxels);\n break;\n case 3:\n native_data = new Float32Array(raw_data, 284, header.nvoxels);\n break;\n case 4: // signed short\n native_data = new Int16Array(raw_data, 284, header.nvoxels);\n break;\n }\n\n // Incrementation offsets for each dimension of the volume. MGH\n // files store the fastest-varying dimension _first_, so the\n // \"first\" dimension actually has the smallest offset. That is\n // why this calculation is different from that for NIfTI-1.\n //\n var offset = 1;\n for (var d = 0; d < header.order.length; d++) {\n header[header.order[d]].offset = offset;\n offset *= header[header.order[d]].space_length;\n }\n return native_data;\n\n }\n\n \n _run(){\n var inputBuffer = this._getInput(0);\n\n if(!inputBuffer){\n console.warn(\"MghDecoder requires an ArrayBuffer as input \\\"0\\\". 
Unable to continue.\");\n return;\n }\n\n var header = this._parseMGHHeader( inputBuffer );\n\n // abort if header not valid\n if(!header)\n return;\n\n\n var dataArray = this._createMGHData(header, inputBuffer)\n \n if(!dataArray)\n return null;\n\n // add the output to this filter\n this._addOutput(MniVolume);\n var mniVol = this.getOutput();\n mniVol.setData(dataArray, header);\n mniVol.setMetadata(\"format\", \"mgh\");\n \n }\n \n} /* END of class MghDecoder */\n\nexport { MghDecoder };\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n*\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\nimport pako from 'pako';\nimport FileSaver from 'file-saver';\nimport { Filter } from '../core/Filter.js';\nimport { Image2D } from '../core/Image2D.js';\nimport { Image3D } from '../core/Image3D.js';\n\n\n/**\n* A PixBinDecoder instance decodes a *.pixp file and output an Image2D or Image3D.\n* The input, specified by `.addInput(...)` must be an ArrayBuffer\n* (from an `UrlToArrayBufferFilter`, an `UrlToArrayBufferReader` or anothrer source ).\n*\n* **Usage**\n* - [examples/pixpFileToImage2D.html](../examples/pixpFileToImage2D.html)\n*/\nclass PixBinDecoder extends Filter {\n constructor(){\n super();\n this.addInputValidator(0, ArrayBuffer);\n }\n\n\n _run(){\n\n if(! this.hasValidInput() ){\n console.warn(\"PixBinDecoder can only decode ArrayBuffer.\");\n return;\n }\n\n var input = this._getInput();\n var inputByteLength = input.byteLength;\n\n // the view to decode the buffer\n var view = new DataView( input );\n var offsetFromHere = 0;\n \n // fetch the extendedMetadata string length\n var extendedMetadataStringLength = view.getUint32( offsetFromHere );\n offsetFromHere += 4;\n \n // getting extendedMetadata\n var extendedMetadataBytes = new Uint8Array(input, offsetFromHere, extendedMetadataStringLength);\n var extendedMetadata = JSON.parse( String.fromCharCode( ...extendedMetadataBytes ) );\n offsetFromHere += extendedMetadataStringLength;\n \n // getting the data\n var constructorHost = null;\n \n try{\n constructorHost = window; // in a web browser\n }catch( e ){\n try{\n constructorHost = GLOBAL; // in node\n }catch( e ){\n console.warn( \"You are not in a Javascript environment?? Weird.\" );\n return;\n }\n }\n \n if(! constructorHost[ extendedMetadata.dataType ]){\n console.warn( \"Data array from pixb file is unknown: \" + extendedMetadata.dataType );\n return;\n }\n \n /*\n There is a known issues in JS that a TypedArray cannot be created starting at a non-multiple-of-2 start offset \n if the type of data within this array is supposed to take more than one byte (ie. 
Uint16, Float32, etc.).\n The error is stated like that (in Chrome):\n \"Uncaught RangeError: start offset of Uint16Array should be a multiple of 2\"\n When it comes to Float32, Chrome wants an offset that is multiple of 4, and so on.\n \n The workaround is to slice the buffer to take only the data part of it (basically to remove what is before)\n so that this new array starts with an offset 0, no matter what was before.\n */\n \n var data = new constructorHost[ extendedMetadata.dataType ]( input.slice( offsetFromHere ) )\n \n var output = new pixpipe[ extendedMetadata.pixpipeType ];\n output.setRawData( data );\n output.setRawMetadata( extendedMetadata.metadata );\n\n this._output[0] = output;\n }\n\n\n} /* END of class PixBinDecoder */\n\nexport { PixBinDecoder }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n*\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\n\nimport { Filter } from '../core/Filter.js';\nimport { Image3D } from '../core/Image3D.js';\n\n// decoders\nimport { Minc2Decoder } from './Minc2Decoder.js';\nimport { NiftiDecoder } from './NiftiDecoder.js';\nimport { MghDecoder } from './MghDecoder.js';\nimport { PixpDecoder } from './PixpDecoder.js';\nimport { PixBinDecoder } from './PixBinDecoder.js';\n\n\n/**\n* An instance of Image3DGenericDecoder takes a ArrayBuffer \n* as input 0 (`.addInput(myArrayBuffer)`) and output an Image3D.\n* The `update` method will perform several decoding attempts, using the readers\n* specified in the constructor.\n* In case of success (one of the registered decoder was compatible to the data)\n* the metadata `decoderConstructor` and `decoderName` are made accessible and give\n* information about the file format. If no decoder managed to decode the input buffer,\n* this filter will not have any output.\n*\n* Developers: if a new 3D dataset decoder is added, reference it here.\n*/\nclass Image3DGenericDecoder extends Filter {\n \n constructor(){\n super();\n \n this._decoders = [\n Minc2Decoder,\n NiftiDecoder,\n MghDecoder,\n PixpDecoder,\n PixBinDecoder\n ];\n }\n \n \n _run(){\n var inputBuffer = this._getInput(0);\n \n if(!inputBuffer){\n console.warn(\"The input buffer must not be null.\");\n return;\n }\n \n // try with each decoder\n for(var d=0; d\r\n\r\nfunction XMLReader(){\r\n\t\r\n}\r\n\r\nXMLReader.prototype = {\r\n\tparse:function(source,defaultNSMap,entityMap){\r\n\t\tvar domBuilder = this.domBuilder;\r\n\t\tdomBuilder.startDocument();\r\n\t\t_copy(defaultNSMap ,defaultNSMap = {})\r\n\t\tparse(source,defaultNSMap,entityMap,\r\n\t\t\t\tdomBuilder,this.errorHandler);\r\n\t\tdomBuilder.endDocument();\r\n\t}\r\n}\r\nfunction parse(source,defaultNSMapCopy,entityMap,domBuilder,errorHandler){\r\n\tfunction fixedFromCharCode(code) {\r\n\t\t// String.prototype.fromCharCode does not supports\r\n\t\t// > 2 bytes unicode chars directly\r\n\t\tif (code > 0xffff) {\r\n\t\t\tcode -= 0x10000;\r\n\t\t\tvar surrogate1 = 0xd800 + (code >> 10)\r\n\t\t\t\t, surrogate2 = 0xdc00 + (code & 0x3ff);\r\n\r\n\t\t\treturn String.fromCharCode(surrogate1, surrogate2);\r\n\t\t} else {\r\n\t\t\treturn String.fromCharCode(code);\r\n\t\t}\r\n\t}\r\n\tfunction entityReplacer(a){\r\n\t\tvar k = a.slice(1,-1);\r\n\t\tif(k in entityMap){\r\n\t\t\treturn entityMap[k]; \r\n\t\t}else if(k.charAt(0) === '#'){\r\n\t\t\treturn fixedFromCharCode(parseInt(k.substr(1).replace('x','0x')))\r\n\t\t}else{\r\n\t\t\terrorHandler.error('entity not found:'+a);\r\n\t\t\treturn 
a;\r\n\t\t}\r\n\t}\r\n\tfunction appendText(end){//has some bugs\r\n\t\tif(end>start){\r\n\t\t\tvar xt = source.substring(start,end).replace(/&#?\\w+;/g,entityReplacer);\r\n\t\t\tlocator&&position(start);\r\n\t\t\tdomBuilder.characters(xt,0,end-start);\r\n\t\t\tstart = end\r\n\t\t}\r\n\t}\r\n\tfunction position(p,m){\r\n\t\twhile(p>=lineEnd && (m = linePattern.exec(source))){\r\n\t\t\tlineStart = m.index;\r\n\t\t\tlineEnd = lineStart + m[0].length;\r\n\t\t\tlocator.lineNumber++;\r\n\t\t\t//console.log('line++:',locator,startPos,endPos)\r\n\t\t}\r\n\t\tlocator.columnNumber = p-lineStart+1;\r\n\t}\r\n\tvar lineStart = 0;\r\n\tvar lineEnd = 0;\r\n\tvar linePattern = /.*(?:\\r\\n?|\\n)|.*$/g\r\n\tvar locator = domBuilder.locator;\r\n\t\r\n\tvar parseStack = [{currentNSMap:defaultNSMapCopy}]\r\n\tvar closeMap = {};\r\n\tvar start = 0;\r\n\twhile(true){\r\n\t\ttry{\r\n\t\t\tvar tagStart = source.indexOf('<',start);\r\n\t\t\tif(tagStart<0){\r\n\t\t\t\tif(!source.substr(start).match(/^\\s*$/)){\r\n\t\t\t\t\tvar doc = domBuilder.doc;\r\n\t \t\t\tvar text = doc.createTextNode(source.substr(start));\r\n\t \t\t\tdoc.appendChild(text);\r\n\t \t\t\tdomBuilder.currentElement = text;\r\n\t\t\t\t}\r\n\t\t\t\treturn;\r\n\t\t\t}\r\n\t\t\tif(tagStart>start){\r\n\t\t\t\tappendText(tagStart);\r\n\t\t\t}\r\n\t\t\tswitch(source.charAt(tagStart+1)){\r\n\t\t\tcase '/':\r\n\t\t\t\tvar end = source.indexOf('>',tagStart+3);\r\n\t\t\t\tvar tagName = source.substring(tagStart+2,end);\r\n\t\t\t\tvar config = parseStack.pop();\r\n\t\t\t\tif(end<0){\r\n\t\t\t\t\t\r\n\t \t\ttagName = source.substring(tagStart+2).replace(/[\\s<].*/,'');\r\n\t \t\t//console.error('#@@@@@@'+tagName)\r\n\t \t\terrorHandler.error(\"end tag name: \"+tagName+' is not complete:'+config.tagName);\r\n\t \t\tend = tagStart+1+tagName.length;\r\n\t \t}else if(tagName.match(/\\s\r\n\t\t\t\tlocator&&position(tagStart);\r\n\t\t\t\tend = parseInstruction(source,tagStart,domBuilder);\r\n\t\t\t\tbreak;\r\n\t\t\tcase '!':// start){\r\n\t\t\tstart = end;\r\n\t\t}else{\r\n\t\t\t//TODO: 这里有可能sax回退,有位置错误风险\r\n\t\t\tappendText(Math.max(tagStart,start)+1);\r\n\t\t}\r\n\t}\r\n}\r\nfunction copyLocator(f,t){\r\n\tt.lineNumber = f.lineNumber;\r\n\tt.columnNumber = f.columnNumber;\r\n\treturn t;\r\n}\r\n\r\n/**\r\n * @see #appendElement(source,elStartEnd,el,selfClosed,entityReplacer,domBuilder,parseStack);\r\n * @return end of the elementStartPart(end of elementEndPart for selfClosed el)\r\n */\r\nfunction parseElementStartPart(source,start,el,currentNSMap,entityReplacer,errorHandler){\r\n\tvar attrName;\r\n\tvar value;\r\n\tvar p = ++start;\r\n\tvar s = S_TAG;//status\r\n\twhile(true){\r\n\t\tvar c = source.charAt(p);\r\n\t\tswitch(c){\r\n\t\tcase '=':\r\n\t\t\tif(s === S_ATTR){//attrName\r\n\t\t\t\tattrName = source.slice(start,p);\r\n\t\t\t\ts = S_EQ;\r\n\t\t\t}else if(s === S_ATTR_SPACE){\r\n\t\t\t\ts = S_EQ;\r\n\t\t\t}else{\r\n\t\t\t\t//fatalError: equal must after attrName or space after attrName\r\n\t\t\t\tthrow new Error('attribute equal must after attrName');\r\n\t\t\t}\r\n\t\t\tbreak;\r\n\t\tcase '\\'':\r\n\t\tcase '\"':\r\n\t\t\tif(s === S_EQ || s === S_ATTR //|| s == S_ATTR_SPACE\r\n\t\t\t\t){//equal\r\n\t\t\t\tif(s === S_ATTR){\r\n\t\t\t\t\terrorHandler.warning('attribute value must after \"=\"')\r\n\t\t\t\t\tattrName = source.slice(start,p)\r\n\t\t\t\t}\r\n\t\t\t\tstart = p+1;\r\n\t\t\t\tp = source.indexOf(c,start)\r\n\t\t\t\tif(p>0){\r\n\t\t\t\t\tvalue = 
source.slice(start,p).replace(/&#?\\w+;/g,entityReplacer);\r\n\t\t\t\t\tel.add(attrName,value,start-1);\r\n\t\t\t\t\ts = S_ATTR_END;\r\n\t\t\t\t}else{\r\n\t\t\t\t\t//fatalError: no end quot match\r\n\t\t\t\t\tthrow new Error('attribute value no end \\''+c+'\\' match');\r\n\t\t\t\t}\r\n\t\t\t}else if(s == S_ATTR_NOQUOT_VALUE){\r\n\t\t\t\tvalue = source.slice(start,p).replace(/&#?\\w+;/g,entityReplacer);\r\n\t\t\t\t//console.log(attrName,value,start,p)\r\n\t\t\t\tel.add(attrName,value,start);\r\n\t\t\t\t//console.dir(el)\r\n\t\t\t\terrorHandler.warning('attribute \"'+attrName+'\" missed start quot('+c+')!!');\r\n\t\t\t\tstart = p+1;\r\n\t\t\t\ts = S_ATTR_END\r\n\t\t\t}else{\r\n\t\t\t\t//fatalError: no equal before\r\n\t\t\t\tthrow new Error('attribute value must after \"=\"');\r\n\t\t\t}\r\n\t\t\tbreak;\r\n\t\tcase '/':\r\n\t\t\tswitch(s){\r\n\t\t\tcase S_TAG:\r\n\t\t\t\tel.setTagName(source.slice(start,p));\r\n\t\t\tcase S_ATTR_END:\r\n\t\t\tcase S_TAG_SPACE:\r\n\t\t\tcase S_TAG_CLOSE:\r\n\t\t\t\ts =S_TAG_CLOSE;\r\n\t\t\t\tel.closed = true;\r\n\t\t\tcase S_ATTR_NOQUOT_VALUE:\r\n\t\t\tcase S_ATTR:\r\n\t\t\tcase S_ATTR_SPACE:\r\n\t\t\t\tbreak;\r\n\t\t\t//case S_EQ:\r\n\t\t\tdefault:\r\n\t\t\t\tthrow new Error(\"attribute invalid close char('/')\")\r\n\t\t\t}\r\n\t\t\tbreak;\r\n\t\tcase ''://end document\r\n\t\t\t//throw new Error('unexpected end of input')\r\n\t\t\terrorHandler.error('unexpected end of input');\r\n\t\t\tif(s == S_TAG){\r\n\t\t\t\tel.setTagName(source.slice(start,p));\r\n\t\t\t}\r\n\t\t\treturn p;\r\n\t\tcase '>':\r\n\t\t\tswitch(s){\r\n\t\t\tcase S_TAG:\r\n\t\t\t\tel.setTagName(source.slice(start,p));\r\n\t\t\tcase S_ATTR_END:\r\n\t\t\tcase S_TAG_SPACE:\r\n\t\t\tcase S_TAG_CLOSE:\r\n\t\t\t\tbreak;//normal\r\n\t\t\tcase S_ATTR_NOQUOT_VALUE://Compatible state\r\n\t\t\tcase S_ATTR:\r\n\t\t\t\tvalue = source.slice(start,p);\r\n\t\t\t\tif(value.slice(-1) === '/'){\r\n\t\t\t\t\tel.closed = true;\r\n\t\t\t\t\tvalue = value.slice(0,-1)\r\n\t\t\t\t}\r\n\t\t\tcase S_ATTR_SPACE:\r\n\t\t\t\tif(s === S_ATTR_SPACE){\r\n\t\t\t\t\tvalue = attrName;\r\n\t\t\t\t}\r\n\t\t\t\tif(s == S_ATTR_NOQUOT_VALUE){\r\n\t\t\t\t\terrorHandler.warning('attribute \"'+value+'\" missed quot(\")!!');\r\n\t\t\t\t\tel.add(attrName,value.replace(/&#?\\w+;/g,entityReplacer),start)\r\n\t\t\t\t}else{\r\n\t\t\t\t\tif(currentNSMap[''] !== 'http://www.w3.org/1999/xhtml' || !value.match(/^(?:disabled|checked|selected)$/i)){\r\n\t\t\t\t\t\terrorHandler.warning('attribute \"'+value+'\" missed value!! 
\"'+value+'\" instead!!')\r\n\t\t\t\t\t}\r\n\t\t\t\t\tel.add(value,value,start)\r\n\t\t\t\t}\r\n\t\t\t\tbreak;\r\n\t\t\tcase S_EQ:\r\n\t\t\t\tthrow new Error('attribute value missed!!');\r\n\t\t\t}\r\n//\t\t\tconsole.log(tagName,tagNamePattern,tagNamePattern.test(tagName))\r\n\t\t\treturn p;\r\n\t\t/*xml space '\\x20' | #x9 | #xD | #xA; */\r\n\t\tcase '\\u0080':\r\n\t\t\tc = ' ';\r\n\t\tdefault:\r\n\t\t\tif(c<= ' '){//space\r\n\t\t\t\tswitch(s){\r\n\t\t\t\tcase S_TAG:\r\n\t\t\t\t\tel.setTagName(source.slice(start,p));//tagName\r\n\t\t\t\t\ts = S_TAG_SPACE;\r\n\t\t\t\t\tbreak;\r\n\t\t\t\tcase S_ATTR:\r\n\t\t\t\t\tattrName = source.slice(start,p)\r\n\t\t\t\t\ts = S_ATTR_SPACE;\r\n\t\t\t\t\tbreak;\r\n\t\t\t\tcase S_ATTR_NOQUOT_VALUE:\r\n\t\t\t\t\tvar value = source.slice(start,p).replace(/&#?\\w+;/g,entityReplacer);\r\n\t\t\t\t\terrorHandler.warning('attribute \"'+value+'\" missed quot(\")!!');\r\n\t\t\t\t\tel.add(attrName,value,start)\r\n\t\t\t\tcase S_ATTR_END:\r\n\t\t\t\t\ts = S_TAG_SPACE;\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t//case S_TAG_SPACE:\r\n\t\t\t\t//case S_EQ:\r\n\t\t\t\t//case S_ATTR_SPACE:\r\n\t\t\t\t//\tvoid();break;\r\n\t\t\t\t//case S_TAG_CLOSE:\r\n\t\t\t\t\t//ignore warning\r\n\t\t\t\t}\r\n\t\t\t}else{//not space\r\n//S_TAG,\tS_ATTR,\tS_EQ,\tS_ATTR_NOQUOT_VALUE\r\n//S_ATTR_SPACE,\tS_ATTR_END,\tS_TAG_SPACE, S_TAG_CLOSE\r\n\t\t\t\tswitch(s){\r\n\t\t\t\t//case S_TAG:void();break;\r\n\t\t\t\t//case S_ATTR:void();break;\r\n\t\t\t\t//case S_ATTR_NOQUOT_VALUE:void();break;\r\n\t\t\t\tcase S_ATTR_SPACE:\r\n\t\t\t\t\tvar tagName = el.tagName;\r\n\t\t\t\t\tif(currentNSMap[''] !== 'http://www.w3.org/1999/xhtml' || !attrName.match(/^(?:disabled|checked|selected)$/i)){\r\n\t\t\t\t\t\terrorHandler.warning('attribute \"'+attrName+'\" missed value!! \"'+attrName+'\" instead2!!')\r\n\t\t\t\t\t}\r\n\t\t\t\t\tel.add(attrName,attrName,start);\r\n\t\t\t\t\tstart = p;\r\n\t\t\t\t\ts = S_ATTR;\r\n\t\t\t\t\tbreak;\r\n\t\t\t\tcase S_ATTR_END:\r\n\t\t\t\t\terrorHandler.warning('attribute space is required\"'+attrName+'\"!!')\r\n\t\t\t\tcase S_TAG_SPACE:\r\n\t\t\t\t\ts = S_ATTR;\r\n\t\t\t\t\tstart = p;\r\n\t\t\t\t\tbreak;\r\n\t\t\t\tcase S_EQ:\r\n\t\t\t\t\ts = S_ATTR_NOQUOT_VALUE;\r\n\t\t\t\t\tstart = p;\r\n\t\t\t\t\tbreak;\r\n\t\t\t\tcase S_TAG_CLOSE:\r\n\t\t\t\t\tthrow new Error(\"elements closed character '/' and '>' must be connected to\");\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}//end outer switch\r\n\t\t//console.log('p++',p)\r\n\t\tp++;\r\n\t}\r\n}\r\n/**\r\n * @return true if has new namespace define\r\n */\r\nfunction appendElement(el,domBuilder,currentNSMap){\r\n\tvar tagName = el.tagName;\r\n\tvar localNSMap = null;\r\n\t//var currentNSMap = parseStack[parseStack.length-1].currentNSMap;\r\n\tvar i = el.length;\r\n\twhile(i--){\r\n\t\tvar a = el[i];\r\n\t\tvar qName = a.qName;\r\n\t\tvar value = a.value;\r\n\t\tvar nsp = qName.indexOf(':');\r\n\t\tif(nsp>0){\r\n\t\t\tvar prefix = a.prefix = qName.slice(0,nsp);\r\n\t\t\tvar localName = qName.slice(nsp+1);\r\n\t\t\tvar nsPrefix = prefix === 'xmlns' && localName\r\n\t\t}else{\r\n\t\t\tlocalName = qName;\r\n\t\t\tprefix = null\r\n\t\t\tnsPrefix = qName === 'xmlns' && ''\r\n\t\t}\r\n\t\t//can not set prefix,because prefix !== ''\r\n\t\ta.localName = localName ;\r\n\t\t//prefix == null for no ns prefix attribute \r\n\t\tif(nsPrefix !== false){//hack!!\r\n\t\t\tif(localNSMap == null){\r\n\t\t\t\tlocalNSMap = 
{}\r\n\t\t\t\t//console.log(currentNSMap,0)\r\n\t\t\t\t_copy(currentNSMap,currentNSMap={})\r\n\t\t\t\t//console.log(currentNSMap,1)\r\n\t\t\t}\r\n\t\t\tcurrentNSMap[nsPrefix] = localNSMap[nsPrefix] = value;\r\n\t\t\ta.uri = 'http://www.w3.org/2000/xmlns/'\r\n\t\t\tdomBuilder.startPrefixMapping(nsPrefix, value) \r\n\t\t}\r\n\t}\r\n\tvar i = el.length;\r\n\twhile(i--){\r\n\t\ta = el[i];\r\n\t\tvar prefix = a.prefix;\r\n\t\tif(prefix){//no prefix attribute has no namespace\r\n\t\t\tif(prefix === 'xml'){\r\n\t\t\t\ta.uri = 'http://www.w3.org/XML/1998/namespace';\r\n\t\t\t}if(prefix !== 'xmlns'){\r\n\t\t\t\ta.uri = currentNSMap[prefix || '']\r\n\t\t\t\t\r\n\t\t\t\t//{console.log('###'+a.qName,domBuilder.locator.systemId+'',currentNSMap,a.uri)}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\tvar nsp = tagName.indexOf(':');\r\n\tif(nsp>0){\r\n\t\tprefix = el.prefix = tagName.slice(0,nsp);\r\n\t\tlocalName = el.localName = tagName.slice(nsp+1);\r\n\t}else{\r\n\t\tprefix = null;//important!!\r\n\t\tlocalName = el.localName = tagName;\r\n\t}\r\n\t//no prefix element has default namespace\r\n\tvar ns = el.uri = currentNSMap[prefix || ''];\r\n\tdomBuilder.startElement(ns,localName,tagName,el);\r\n\t//endPrefixMapping and startPrefixMapping have not any help for dom builder\r\n\t//localNSMap = null\r\n\tif(el.closed){\r\n\t\tdomBuilder.endElement(ns,localName,tagName);\r\n\t\tif(localNSMap){\r\n\t\t\tfor(prefix in localNSMap){\r\n\t\t\t\tdomBuilder.endPrefixMapping(prefix) \r\n\t\t\t}\r\n\t\t}\r\n\t}else{\r\n\t\tel.currentNSMap = currentNSMap;\r\n\t\tel.localNSMap = localNSMap;\r\n\t\t//parseStack.push(el);\r\n\t\treturn true;\r\n\t}\r\n}\r\nfunction parseHtmlSpecialContent(source,elStartEnd,tagName,entityReplacer,domBuilder){\r\n\tif(/^(?:script|textarea)$/i.test(tagName)){\r\n\t\tvar elEndStart = source.indexOf('',elStartEnd);\r\n\t\tvar text = source.substring(elStartEnd+1,elEndStart);\r\n\t\tif(/[&<]/.test(text)){\r\n\t\t\tif(/^script$/i.test(tagName)){\r\n\t\t\t\t//if(!/\\]\\]>/.test(text)){\r\n\t\t\t\t\t//lexHandler.startCDATA();\r\n\t\t\t\t\tdomBuilder.characters(text,0,text.length);\r\n\t\t\t\t\t//lexHandler.endCDATA();\r\n\t\t\t\t\treturn elEndStart;\r\n\t\t\t\t//}\r\n\t\t\t}//}else{//text area\r\n\t\t\t\ttext = text.replace(/&#?\\w+;/g,entityReplacer);\r\n\t\t\t\tdomBuilder.characters(text,0,text.length);\r\n\t\t\t\treturn elEndStart;\r\n\t\t\t//}\r\n\t\t\t\r\n\t\t}\r\n\t}\r\n\treturn elStartEnd+1;\r\n}\r\nfunction fixSelfClosed(source,elStartEnd,tagName,closeMap){\r\n\t//if(tagName in closeMap){\r\n\tvar pos = closeMap[tagName];\r\n\tif(pos == null){\r\n\t\t//console.log(tagName)\r\n\t\tpos = source.lastIndexOf('')\r\n\t\tif(pos',start+4);\r\n\t\t\t//append comment source.substring(4,end)//\");\n\tcase DOCUMENT_TYPE_NODE:\n\t\tvar pubid = node.publicId;\n\t\tvar sysid = node.systemId;\n\t\tbuf.push('');\n\t\t}else if(sysid && sysid!='.'){\n\t\t\tbuf.push(' SYSTEM \"',sysid,'\">');\n\t\t}else{\n\t\t\tvar sub = node.internalSubset;\n\t\t\tif(sub){\n\t\t\t\tbuf.push(\" [\",sub,\"]\");\n\t\t\t}\n\t\t\tbuf.push(\">\");\n\t\t}\n\t\treturn;\n\tcase PROCESSING_INSTRUCTION_NODE:\n\t\treturn buf.push( \"\");\n\tcase ENTITY_REFERENCE_NODE:\n\t\treturn buf.push( '&',node.nodeName,';');\n\t//case ENTITY_NODE:\n\t//case NOTATION_NODE:\n\tdefault:\n\t\tbuf.push('??',node.nodeName);\n\t}\n}\nfunction importNode(doc,node,deep){\n\tvar node2;\n\tswitch (node.nodeType) {\n\tcase ELEMENT_NODE:\n\t\tnode2 = node.cloneNode(false);\n\t\tnode2.ownerDocument = doc;\n\t\t//var attrs = node2.attributes;\n\t\t//var len = 
attrs.length;\n\t\t//for(var i=0;i','amp':'&','quot':'\"','apos':\"'\"}\r\n\tif(locator){\r\n\t\tdomBuilder.setDocumentLocator(locator)\r\n\t}\r\n\t\r\n\tsax.errorHandler = buildErrorHandler(errorHandler,domBuilder,locator);\r\n\tsax.domBuilder = options.domBuilder || domBuilder;\r\n\tif(/\\/x?html?$/.test(mimeType)){\r\n\t\tentityMap.nbsp = '\\xa0';\r\n\t\tentityMap.copy = '\\xa9';\r\n\t\tdefaultNSMap['']= 'http://www.w3.org/1999/xhtml';\r\n\t}\r\n\tdefaultNSMap.xml = defaultNSMap.xml || 'http://www.w3.org/XML/1998/namespace';\r\n\tif(source){\r\n\t\tsax.parse(source,defaultNSMap,entityMap);\r\n\t}else{\r\n\t\tsax.errorHandler.error(\"invalid doc source\");\r\n\t}\r\n\treturn domBuilder.doc;\r\n}\r\nfunction buildErrorHandler(errorImpl,domBuilder,locator){\r\n\tif(!errorImpl){\r\n\t\tif(domBuilder instanceof DOMHandler){\r\n\t\t\treturn domBuilder;\r\n\t\t}\r\n\t\terrorImpl = domBuilder ;\r\n\t}\r\n\tvar errorHandler = {}\r\n\tvar isCallback = errorImpl instanceof Function;\r\n\tlocator = locator||{}\r\n\tfunction build(key){\r\n\t\tvar fn = errorImpl[key];\r\n\t\tif(!fn && isCallback){\r\n\t\t\tfn = errorImpl.length == 2?function(msg){errorImpl(key,msg)}:errorImpl;\r\n\t\t}\r\n\t\terrorHandler[key] = fn && function(msg){\r\n\t\t\tfn('[xmldom '+key+']\\t'+msg+_locator(locator));\r\n\t\t}||function(){};\r\n\t}\r\n\tbuild('warning');\r\n\tbuild('error');\r\n\tbuild('fatalError');\r\n\treturn errorHandler;\r\n}\r\n\r\n//console.log('#\\n\\n\\n\\n\\n\\n\\n####')\r\n/**\r\n * +ContentHandler+ErrorHandler\r\n * +LexicalHandler+EntityResolver2\r\n * -DeclHandler-DTDHandler \r\n * \r\n * DefaultHandler:EntityResolver, DTDHandler, ContentHandler, ErrorHandler\r\n * DefaultHandler2:DefaultHandler,LexicalHandler, DeclHandler, EntityResolver2\r\n * @link http://www.saxproject.org/apidoc/org/xml/sax/helpers/DefaultHandler.html\r\n */\r\nfunction DOMHandler() {\r\n this.cdata = false;\r\n}\r\nfunction position(locator,node){\r\n\tnode.lineNumber = locator.lineNumber;\r\n\tnode.columnNumber = locator.columnNumber;\r\n}\r\n/**\r\n * @see org.xml.sax.ContentHandler#startDocument\r\n * @link http://www.saxproject.org/apidoc/org/xml/sax/ContentHandler.html\r\n */ \r\nDOMHandler.prototype = {\r\n\tstartDocument : function() {\r\n \tthis.doc = new DOMImplementation().createDocument(null, null, null);\r\n \tif (this.locator) {\r\n \tthis.doc.documentURI = this.locator.systemId;\r\n \t}\r\n\t},\r\n\tstartElement:function(namespaceURI, localName, qName, attrs) {\r\n\t\tvar doc = this.doc;\r\n\t var el = doc.createElementNS(namespaceURI, qName||localName);\r\n\t var len = attrs.length;\r\n\t appendElement(this, el);\r\n\t this.currentElement = el;\r\n\t \r\n\t\tthis.locator && position(this.locator,el)\r\n\t for (var i = 0 ; i < len; i++) {\r\n\t var namespaceURI = attrs.getURI(i);\r\n\t var value = attrs.getValue(i);\r\n\t var qName = attrs.getQName(i);\r\n\t\t\tvar attr = doc.createAttributeNS(namespaceURI, qName);\r\n\t\t\tthis.locator &&position(attrs.getLocator(i),attr);\r\n\t\t\tattr.value = attr.nodeValue = value;\r\n\t\t\tel.setAttributeNode(attr)\r\n\t }\r\n\t},\r\n\tendElement:function(namespaceURI, localName, qName) {\r\n\t\tvar current = this.currentElement\r\n\t\tvar tagName = current.tagName;\r\n\t\tthis.currentElement = current.parentNode;\r\n\t},\r\n\tstartPrefixMapping:function(prefix, uri) {\r\n\t},\r\n\tendPrefixMapping:function(prefix) {\r\n\t},\r\n\tprocessingInstruction:function(target, data) {\r\n\t var ins = this.doc.createProcessingInstruction(target, data);\r\n\t this.locator && 
position(this.locator,ins)\r\n\t appendElement(this, ins);\r\n\t},\r\n\tignorableWhitespace:function(ch, start, length) {\r\n\t},\r\n\tcharacters:function(chars, start, length) {\r\n\t\tchars = _toString.apply(this,arguments)\r\n\t\t//console.log(chars)\r\n\t\tif(chars){\r\n\t\t\tif (this.cdata) {\r\n\t\t\t\tvar charNode = this.doc.createCDATASection(chars);\r\n\t\t\t} else {\r\n\t\t\t\tvar charNode = this.doc.createTextNode(chars);\r\n\t\t\t}\r\n\t\t\tif(this.currentElement){\r\n\t\t\t\tthis.currentElement.appendChild(charNode);\r\n\t\t\t}else if(/^\\s*$/.test(chars)){\r\n\t\t\t\tthis.doc.appendChild(charNode);\r\n\t\t\t\t//process xml\r\n\t\t\t}\r\n\t\t\tthis.locator && position(this.locator,charNode)\r\n\t\t}\r\n\t},\r\n\tskippedEntity:function(name) {\r\n\t},\r\n\tendDocument:function() {\r\n\t\tthis.doc.normalize();\r\n\t},\r\n\tsetDocumentLocator:function (locator) {\r\n\t if(this.locator = locator){// && !('lineNumber' in locator)){\r\n\t \tlocator.lineNumber = 0;\r\n\t }\r\n\t},\r\n\t//LexicalHandler\r\n\tcomment:function(chars, start, length) {\r\n\t\tchars = _toString.apply(this,arguments)\r\n\t var comm = this.doc.createComment(chars);\r\n\t this.locator && position(this.locator,comm)\r\n\t appendElement(this, comm);\r\n\t},\r\n\t\r\n\tstartCDATA:function() {\r\n\t //used in characters() methods\r\n\t this.cdata = true;\r\n\t},\r\n\tendCDATA:function() {\r\n\t this.cdata = false;\r\n\t},\r\n\t\r\n\tstartDTD:function(name, publicId, systemId) {\r\n\t\tvar impl = this.doc.implementation;\r\n\t if (impl && impl.createDocumentType) {\r\n\t var dt = impl.createDocumentType(name, publicId, systemId);\r\n\t this.locator && position(this.locator,dt)\r\n\t appendElement(this, dt);\r\n\t }\r\n\t},\r\n\t/**\r\n\t * @see org.xml.sax.ErrorHandler\r\n\t * @link http://www.saxproject.org/apidoc/org/xml/sax/ErrorHandler.html\r\n\t */\r\n\twarning:function(error) {\r\n\t\tconsole.warn('[xmldom warning]\\t'+error,_locator(this.locator));\r\n\t},\r\n\terror:function(error) {\r\n\t\tconsole.error('[xmldom error]\\t'+error,_locator(this.locator));\r\n\t},\r\n\tfatalError:function(error) {\r\n\t\tconsole.error('[xmldom fatalError]\\t'+error,_locator(this.locator));\r\n\t throw error;\r\n\t}\r\n}\r\nfunction _locator(l){\r\n\tif(l){\r\n\t\treturn '\\n@'+(l.systemId ||'')+'#[line:'+l.lineNumber+',col:'+l.columnNumber+']'\r\n\t}\r\n}\r\nfunction _toString(chars,start,length){\r\n\tif(typeof chars == 'string'){\r\n\t\treturn chars.substr(start,length)\r\n\t}else{//java sax connect width xmldom on rhino(what about: \"? 
&& !(chars instanceof String)\")\r\n\t\tif(chars.length >= start+length || start){\r\n\t\t\treturn new java.lang.String(chars,start,length)+'';\r\n\t\t}\r\n\t\treturn chars;\r\n\t}\r\n}\r\n\r\n/*\r\n * @link http://www.saxproject.org/apidoc/org/xml/sax/ext/LexicalHandler.html\r\n * used method of org.xml.sax.ext.LexicalHandler:\r\n * #comment(chars, start, length)\r\n * #startCDATA()\r\n * #endCDATA()\r\n * #startDTD(name, publicId, systemId)\r\n *\r\n *\r\n * IGNORED method of org.xml.sax.ext.LexicalHandler:\r\n * #endDTD()\r\n * #startEntity(name)\r\n * #endEntity(name)\r\n *\r\n *\r\n * @link http://www.saxproject.org/apidoc/org/xml/sax/ext/DeclHandler.html\r\n * IGNORED method of org.xml.sax.ext.DeclHandler\r\n * \t#attributeDecl(eName, aName, type, mode, value)\r\n * #elementDecl(name, model)\r\n * #externalEntityDecl(name, publicId, systemId)\r\n * #internalEntityDecl(name, value)\r\n * @link http://www.saxproject.org/apidoc/org/xml/sax/ext/EntityResolver2.html\r\n * IGNORED method of org.xml.sax.EntityResolver2\r\n * #resolveEntity(String name,String publicId,String baseURI,String systemId)\r\n * #resolveEntity(publicId, systemId)\r\n * #getExternalSubset(name, baseURI)\r\n * @link http://www.saxproject.org/apidoc/org/xml/sax/DTDHandler.html\r\n * IGNORED method of org.xml.sax.DTDHandler\r\n * #notationDecl(name, publicId, systemId) {};\r\n * #unparsedEntityDecl(name, publicId, systemId, notationName) {};\r\n */\r\n\"endDTD,startEntity,endEntity,attributeDecl,elementDecl,externalEntityDecl,internalEntityDecl,resolveEntity,getExternalSubset,notationDecl,unparsedEntityDecl\".replace(/\\w+/g,function(key){\r\n\tDOMHandler.prototype[key] = function(){return null}\r\n})\r\n\r\n/* Private static helpers treated below as private instance methods, so don't need to add these to the public API; we might use a Relator to also get rid of non-standard public properties */\r\nfunction appendElement (hander,node) {\r\n if (!hander.currentElement) {\r\n hander.doc.appendChild(node);\r\n } else {\r\n hander.currentElement.appendChild(node);\r\n }\r\n}//appendChild and setAttributeNS are preformance key\r\n\r\n//if(typeof require == 'function'){\r\n\tvar XMLReader = require('./sax').XMLReader;\r\n\tvar DOMImplementation = exports.DOMImplementation = require('./dom').DOMImplementation;\r\n\texports.XMLSerializer = require('./dom').XMLSerializer ;\r\n\texports.DOMParser = DOMParser;\r\n//}\r\n","\"use strict\";\n\nvar fieldTagNames = {\n // TIFF Baseline\n 0x013B: 'Artist',\n 0x0102: 'BitsPerSample',\n 0x0109: 'CellLength',\n 0x0108: 'CellWidth',\n 0x0140: 'ColorMap',\n 0x0103: 'Compression',\n 0x8298: 'Copyright',\n 0x0132: 'DateTime',\n 0x0152: 'ExtraSamples',\n 0x010A: 'FillOrder',\n 0x0121: 'FreeByteCounts',\n 0x0120: 'FreeOffsets',\n 0x0123: 'GrayResponseCurve',\n 0x0122: 'GrayResponseUnit',\n 0x013C: 'HostComputer',\n 0x010E: 'ImageDescription',\n 0x0101: 'ImageLength',\n 0x0100: 'ImageWidth',\n 0x010F: 'Make',\n 0x0119: 'MaxSampleValue',\n 0x0118: 'MinSampleValue',\n 0x0110: 'Model',\n 0x00FE: 'NewSubfileType',\n 0x0112: 'Orientation',\n 0x0106: 'PhotometricInterpretation',\n 0x011C: 'PlanarConfiguration',\n 0x0128: 'ResolutionUnit',\n 0x0116: 'RowsPerStrip',\n 0x0115: 'SamplesPerPixel',\n 0x0131: 'Software',\n 0x0117: 'StripByteCounts',\n 0x0111: 'StripOffsets',\n 0x00FF: 'SubfileType',\n 0x0107: 'Threshholding',\n 0x011A: 'XResolution',\n 0x011B: 'YResolution',\n\n // TIFF Extended\n 0x0146: 'BadFaxLines',\n 0x0147: 'CleanFaxData',\n 0x0157: 'ClipPath',\n 0x0148: 'ConsecutiveBadFaxLines',\n 
0x01B1: 'Decode',\n 0x01B2: 'DefaultImageColor',\n 0x010D: 'DocumentName',\n 0x0150: 'DotRange',\n 0x0141: 'HalftoneHints',\n 0x015A: 'Indexed',\n 0x015B: 'JPEGTables',\n 0x011D: 'PageName',\n 0x0129: 'PageNumber',\n 0x013D: 'Predictor',\n 0x013F: 'PrimaryChromaticities',\n 0x0214: 'ReferenceBlackWhite',\n 0x0153: 'SampleFormat',\n 0x0154: 'SMinSampleValue',\n 0x0155: 'SMaxSampleValue',\n 0x022F: 'StripRowCounts',\n 0x014A: 'SubIFDs',\n 0x0124: 'T4Options',\n 0x0125: 'T6Options',\n 0x0145: 'TileByteCounts',\n 0x0143: 'TileLength',\n 0x0144: 'TileOffsets',\n 0x0142: 'TileWidth',\n 0x012D: 'TransferFunction',\n 0x013E: 'WhitePoint',\n 0x0158: 'XClipPathUnits',\n 0x011E: 'XPosition',\n 0x0211: 'YCbCrCoefficients',\n 0x0213: 'YCbCrPositioning',\n 0x0212: 'YCbCrSubSampling',\n 0x0159: 'YClipPathUnits',\n 0x011F: 'YPosition',\n\n // EXIF\n 0x9202: 'ApertureValue',\n 0xA001: 'ColorSpace',\n 0x9004: 'DateTimeDigitized',\n 0x9003: 'DateTimeOriginal',\n 0x8769: 'Exif IFD',\n 0x9000: 'ExifVersion',\n 0x829A: 'ExposureTime',\n 0xA300: 'FileSource',\n 0x9209: 'Flash',\n 0xA000: 'FlashpixVersion',\n 0x829D: 'FNumber',\n 0xA420: 'ImageUniqueID',\n 0x9208: 'LightSource',\n 0x927C: 'MakerNote',\n 0x9201: 'ShutterSpeedValue',\n 0x9286: 'UserComment',\n\n // IPTC\n 0x83BB: 'IPTC',\n\n // ICC\n 0x8773: 'ICC Profile',\n\n // XMP\n 0x02BC: 'XMP',\n\n // GDAL\n 0xA480: 'GDAL_METADATA',\n 0xA481: 'GDAL_NODATA',\n\n // Photoshop\n 0x8649: 'Photoshop',\n\n // GeoTiff\n 0x830E: 'ModelPixelScale',\n 0x8482: 'ModelTiepoint',\n 0x85D8: 'ModelTransformation',\n 0x87AF: 'GeoKeyDirectory',\n 0x87B0: 'GeoDoubleParams',\n 0x87B1: 'GeoAsciiParams'\n};\n\nvar key;\nvar fieldTags = {};\nfor (key in fieldTagNames) {\n fieldTags[fieldTagNames[key]] = parseInt(key);\n}\n\nvar arrayFields = [fieldTags.BitsPerSample, fieldTags.ExtraSamples, fieldTags.SampleFormat, fieldTags.StripByteCounts, fieldTags.StripOffsets, fieldTags.StripRowCounts, fieldTags.TileByteCounts, fieldTags.TileOffsets];\n\nvar fieldTypeNames = {\n 0x0001: 'BYTE',\n 0x0002: 'ASCII',\n 0x0003: 'SHORT',\n 0x0004: 'LONG',\n 0x0005: 'RATIONAL',\n 0x0006: 'SBYTE',\n 0x0007: 'UNDEFINED',\n 0x0008: 'SSHORT',\n 0x0009: 'SLONG',\n 0x000A: 'SRATIONAL',\n 0x000B: 'FLOAT',\n 0x000C: 'DOUBLE',\n // introduced by BigTIFF\n 0x0010: 'LONG8',\n 0x0011: 'SLONG8',\n 0x0012: 'IFD8'\n};\n\nvar fieldTypes = {};\nfor (key in fieldTypeNames) {\n fieldTypes[fieldTypeNames[key]] = parseInt(key);\n}\n\nvar photometricInterpretations = {\n WhiteIsZero: 0,\n BlackIsZero: 1,\n RGB: 2,\n Palette: 3,\n TransparencyMask: 4,\n CMYK: 5,\n YCbCr: 6,\n\n CIELab: 8,\n ICCLab: 9\n};\n\nvar geoKeyNames = {\n 1024: 'GTModelTypeGeoKey',\n 1025: 'GTRasterTypeGeoKey',\n 1026: 'GTCitationGeoKey',\n 2048: 'GeographicTypeGeoKey',\n 2049: 'GeogCitationGeoKey',\n 2050: 'GeogGeodeticDatumGeoKey',\n 2051: 'GeogPrimeMeridianGeoKey',\n 2052: 'GeogLinearUnitsGeoKey',\n 2053: 'GeogLinearUnitSizeGeoKey',\n 2054: 'GeogAngularUnitsGeoKey',\n 2055: 'GeogAngularUnitSizeGeoKey',\n 2056: 'GeogEllipsoidGeoKey',\n 2057: 'GeogSemiMajorAxisGeoKey',\n 2058: 'GeogSemiMinorAxisGeoKey',\n 2059: 'GeogInvFlatteningGeoKey',\n 2060: 'GeogAzimuthUnitsGeoKey',\n 2061: 'GeogPrimeMeridianLongGeoKey',\n 2062: 'GeogTOWGS84GeoKey',\n 3072: 'ProjectedCSTypeGeoKey',\n 3073: 'PCSCitationGeoKey',\n 3074: 'ProjectionGeoKey',\n 3075: 'ProjCoordTransGeoKey',\n 3076: 'ProjLinearUnitsGeoKey',\n 3077: 'ProjLinearUnitSizeGeoKey',\n 3078: 'ProjStdParallel1GeoKey',\n 3079: 'ProjStdParallel2GeoKey',\n 3080: 'ProjNatOriginLongGeoKey',\n 3081: 
'ProjNatOriginLatGeoKey',\n 3082: 'ProjFalseEastingGeoKey',\n 3083: 'ProjFalseNorthingGeoKey',\n 3084: 'ProjFalseOriginLongGeoKey',\n 3085: 'ProjFalseOriginLatGeoKey',\n 3086: 'ProjFalseOriginEastingGeoKey',\n 3087: 'ProjFalseOriginNorthingGeoKey',\n 3088: 'ProjCenterLongGeoKey',\n 3089: 'ProjCenterLatGeoKey',\n 3090: 'ProjCenterEastingGeoKey',\n 3091: 'ProjCenterNorthingGeoKey',\n 3092: 'ProjScaleAtNatOriginGeoKey',\n 3093: 'ProjScaleAtCenterGeoKey',\n 3094: 'ProjAzimuthAngleGeoKey',\n 3095: 'ProjStraightVertPoleLongGeoKey',\n 3096: 'ProjRectifiedGridAngleGeoKey',\n 4096: 'VerticalCSTypeGeoKey',\n 4097: 'VerticalCitationGeoKey',\n 4098: 'VerticalDatumGeoKey',\n 4099: 'VerticalUnitsGeoKey'\n};\n\nvar geoKeys = {};\nfor (key in geoKeyNames) {\n geoKeys[geoKeyNames[key]] = parseInt(key);\n}\n\nvar parseXml;\n// node.js version\nif (typeof window === \"undefined\") {\n parseXml = function parseXml(xmlStr) {\n // requires xmldom module\n var DOMParser = require('xmldom').DOMParser;\n return new DOMParser().parseFromString(xmlStr, \"text/xml\");\n };\n} else if (typeof window.DOMParser !== \"undefined\") {\n parseXml = function parseXml(xmlStr) {\n return new window.DOMParser().parseFromString(xmlStr, \"text/xml\");\n };\n} else if (typeof window.ActiveXObject !== \"undefined\" && new window.ActiveXObject(\"Microsoft.XMLDOM\")) {\n parseXml = function parseXml(xmlStr) {\n var xmlDoc = new window.ActiveXObject(\"Microsoft.XMLDOM\");\n xmlDoc.async = \"false\";\n xmlDoc.loadXML(xmlStr);\n return xmlDoc;\n };\n}\n\nmodule.exports = {\n fieldTags: fieldTags,\n fieldTagNames: fieldTagNames,\n arrayFields: arrayFields,\n fieldTypes: fieldTypes,\n fieldTypeNames: fieldTypeNames,\n photometricInterpretations: photometricInterpretations,\n geoKeys: geoKeys,\n geoKeyNames: geoKeyNames,\n parseXml: parseXml\n};","\"use strict\";\n\nfunction fromWhiteIsZero(raster, max, width, height) {\n var rgbRaster = new Uint8Array(width * height * 3);\n var value;\n for (var i = 0, j = 0; i < raster.length; ++i, j += 3) {\n value = 256 - raster[i] / max * 256;\n rgbRaster[j] = value;\n rgbRaster[j + 1] = value;\n rgbRaster[j + 2] = value;\n }\n return rgbRaster;\n}\n\nfunction fromBlackIsZero(raster, max, width, height) {\n var rgbRaster = new Uint8Array(width * height * 3);\n var value;\n for (var i = 0, j = 0; i < raster.length; ++i, j += 3) {\n value = raster[i] / max * 256;\n rgbRaster[j] = value;\n rgbRaster[j + 1] = value;\n rgbRaster[j + 2] = value;\n }\n return rgbRaster;\n}\n\nfunction fromPalette(raster, colorMap, width, height) {\n var rgbRaster = new Uint8Array(width * height * 3);\n var greenOffset = colorMap.length / 3;\n var blueOffset = colorMap.length / 3 * 2;\n for (var i = 0, j = 0; i < raster.length; ++i, j += 3) {\n var mapIndex = raster[i];\n rgbRaster[j] = colorMap[mapIndex] / 65536 * 256;\n rgbRaster[j + 1] = colorMap[mapIndex + greenOffset] / 65536 * 256;\n rgbRaster[j + 2] = colorMap[mapIndex + blueOffset] / 65536 * 256;\n }\n return rgbRaster;\n}\n\nfunction fromCMYK(cmykRaster, width, height) {\n var rgbRaster = new Uint8Array(width * height * 3);\n var c, m, y, k;\n for (var i = 0, j = 0; i < cmykRaster.length; i += 4, j += 3) {\n c = cmykRaster[i];\n m = cmykRaster[i + 1];\n y = cmykRaster[i + 2];\n k = cmykRaster[i + 3];\n\n rgbRaster[j] = 255 * ((255 - c) / 256) * ((255 - k) / 256);\n rgbRaster[j + 1] = 255 * ((255 - m) / 256) * ((255 - k) / 256);\n rgbRaster[j + 2] = 255 * ((255 - y) / 256) * ((255 - k) / 256);\n }\n return rgbRaster;\n}\n\nfunction fromYCbCr(yCbCrRaster, width, 
height) {\n var rgbRaster = new Uint8Array(width * height * 3);\n var y, cb, cr;\n for (var i = 0, j = 0; i < yCbCrRaster.length; i += 3, j += 3) {\n y = yCbCrRaster[i];\n cb = yCbCrRaster[i + 1];\n cr = yCbCrRaster[i + 2];\n\n rgbRaster[j] = y + 1.40200 * (cr - 0x80);\n rgbRaster[j + 1] = y - 0.34414 * (cb - 0x80) - 0.71414 * (cr - 0x80);\n rgbRaster[j + 2] = y + 1.77200 * (cb - 0x80);\n }\n return rgbRaster;\n}\n\n// converted from here:\n// http://de.mathworks.com/matlabcentral/fileexchange/24010-lab2rgb/content/Lab2RGB.m\n// still buggy\nfunction fromCIELab(cieLabRaster, width, height) {\n var T1 = 0.008856;\n var T2 = 0.206893;\n var MAT = [3.240479, -1.537150, -0.498535, -0.969256, 1.875992, 0.041556, 0.055648, -0.204043, 1.057311];\n var rgbRaster = new Uint8Array(width * height * 3);\n var L, a, b;\n var fX, fY, fZ, XT, YT, ZT, X, Y, Z;\n for (var i = 0, j = 0; i < cieLabRaster.length; i += 3, j += 3) {\n L = cieLabRaster[i];\n a = cieLabRaster[i + 1];\n b = cieLabRaster[i + 2];\n\n // Compute Y\n fY = Math.pow((L + 16) / 116, 3);\n YT = fY > T1;\n fY = (YT !== 0) * (L / 903.3) + YT * fY;\n Y = fY;\n\n fY = YT * Math.pow(fY, 1 / 3) + (YT !== 0) * (7.787 * fY + 16 / 116);\n\n // Compute X\n fX = a / 500 + fY;\n XT = fX > T2;\n X = XT * Math.pow(fX, 3) + (XT !== 0) * ((fX - 16 / 116) / 7.787);\n\n // Compute Z\n fZ = fY - b / 200;\n ZT = fZ > T2;\n Z = ZT * Math.pow(fZ, 3) + (ZT !== 0) * ((fZ - 16 / 116) / 7.787);\n\n // Normalize for D65 white point\n X = X * 0.950456;\n Z = Z * 1.088754;\n\n rgbRaster[j] = X * MAT[0] + Y * MAT[1] + Z * MAT[2];\n rgbRaster[j + 1] = X * MAT[3] + Y * MAT[4] + Z * MAT[5];\n rgbRaster[j + 2] = X * MAT[6] + Y * MAT[7] + Z * MAT[8];\n }\n return rgbRaster;\n}\n\nmodule.exports = {\n fromWhiteIsZero: fromWhiteIsZero,\n fromBlackIsZero: fromBlackIsZero,\n fromPalette: fromPalette,\n fromCMYK: fromCMYK,\n fromYCbCr: fromYCbCr,\n fromCIELab: fromCIELab\n};","\"use strict\";\n\nfunction AbstractDecoder() {}\n\nAbstractDecoder.prototype = {\n isAsync: function isAsync() {\n // TODO: check if async reading func is enabled or not.\n return typeof this.decodeBlock === \"undefined\";\n }\n};\n\nmodule.exports = AbstractDecoder;","\"use strict\";\n\nvar AbstractDecoder = require(\"../abstractdecoder.js\");\n\nfunction RawDecoder() {}\n\nRawDecoder.prototype = Object.create(AbstractDecoder.prototype);\nRawDecoder.prototype.constructor = RawDecoder;\nRawDecoder.prototype.decodeBlock = function (buffer) {\n return buffer;\n};\n\nmodule.exports = RawDecoder;","\"use strict\";\n\n//var lzwCompress = require(\"lzwcompress\");\n\nvar AbstractDecoder = require(\"../abstractdecoder.js\");\n\nvar MIN_BITS = 9;\nvar MAX_BITS = 12;\nvar CLEAR_CODE = 256; // clear code\nvar EOI_CODE = 257; // end of information\n\nfunction LZW() {\n this.littleEndian = false;\n this.position = 0;\n\n this._makeEntryLookup = false;\n this.dictionary = [];\n}\n\nLZW.prototype = {\n constructor: LZW,\n initDictionary: function initDictionary() {\n this.dictionary = new Array(258);\n this.entryLookup = {};\n this.byteLength = MIN_BITS;\n for (var i = 0; i <= 257; i++) {\n // i really feal like i <= 257, but I get strange unknown words that way.\n this.dictionary[i] = [i];\n if (this._makeEntryLookup) {\n this.entryLookup[i] = i;\n }\n }\n },\n\n decompress: function decompress(input) {\n this._makeEntryLookup = false; // for speed\n this.initDictionary();\n this.position = 0;\n this.result = [];\n if (!input.buffer) {\n input = new Uint8Array(input);\n }\n var mydataview = new 
DataView(input.buffer);\n var code = this.getNext(mydataview);\n var oldCode;\n while (code !== EOI_CODE) {\n if (code === CLEAR_CODE) {\n this.initDictionary();\n code = this.getNext(mydataview);\n while (code === CLEAR_CODE) {\n code = this.getNext(mydataview);\n }\n if (code > CLEAR_CODE) {\n throw 'corrupted code at scanline ' + code;\n }\n if (code === EOI_CODE) {\n break;\n } else {\n var val = this.dictionary[code];\n this.appendArray(this.result, val);\n oldCode = code;\n }\n } else {\n if (this.dictionary[code] !== undefined) {\n var _val = this.dictionary[code];\n this.appendArray(this.result, _val);\n var newVal = this.dictionary[oldCode].concat(this.dictionary[code][0]);\n this.addToDictionary(newVal);\n oldCode = code;\n } else {\n var oldVal = this.dictionary[oldCode];\n if (!oldVal) {\n throw \"Bogus entry. Not in dictionary, \" + oldCode + \" / \" + this.dictionary.length + \", position: \" + this.position;\n }\n var _newVal = oldVal.concat(this.dictionary[oldCode][0]);\n this.appendArray(this.result, _newVal);\n this.addToDictionary(_newVal);\n oldCode = code;\n }\n }\n // This is strange. It seems like the\n if (this.dictionary.length >= Math.pow(2, this.byteLength) - 1) {\n this.byteLength++;\n }\n code = this.getNext(mydataview);\n }\n return new Uint8Array(this.result);\n },\n\n appendArray: function appendArray(dest, source) {\n for (var i = 0; i < source.length; i++) {\n dest.push(source[i]);\n }\n return dest;\n },\n\n haveBytesChanged: function haveBytesChanged() {\n if (this.dictionary.length >= Math.pow(2, this.byteLength)) {\n this.byteLength++;\n return true;\n }\n return false;\n },\n\n addToDictionary: function addToDictionary(arr) {\n this.dictionary.push(arr);\n if (this._makeEntryLookup) {\n this.entryLookup[arr] = this.dictionary.length - 1;\n }\n this.haveBytesChanged();\n return this.dictionary.length - 1;\n },\n\n getNext: function getNext(dataview) {\n var byte = this.getByte(dataview, this.position, this.byteLength);\n this.position += this.byteLength;\n return byte;\n },\n\n // This binary representation might actually be as fast as the completely illegible bit shift approach\n //\n getByte: function getByte(dataview, position, length) {\n var d = position % 8;\n var a = Math.floor(position / 8);\n var de = 8 - d;\n var ef = position + length - (a + 1) * 8;\n var fg = 8 * (a + 2) - (position + length);\n var dg = (a + 2) * 8 - position;\n fg = Math.max(0, fg);\n if (a >= dataview.byteLength) {\n console.warn('ran off the end of the buffer before finding EOI_CODE (end on input code)');\n return EOI_CODE;\n }\n var chunk1 = dataview.getUint8(a, this.littleEndian) & Math.pow(2, 8 - d) - 1;\n chunk1 = chunk1 << length - de;\n var chunks = chunk1;\n if (a + 1 < dataview.byteLength) {\n var chunk2 = dataview.getUint8(a + 1, this.littleEndian) >>> fg;\n chunk2 = chunk2 << Math.max(0, length - dg);\n chunks += chunk2;\n }\n if (ef > 8 && a + 2 < dataview.byteLength) {\n var hi = (a + 3) * 8 - (position + length);\n var chunk3 = dataview.getUint8(a + 2, this.littleEndian) >>> hi;\n chunks += chunk3;\n }\n return chunks;\n },\n\n // compress has not been optimized and uses a uint8 array to hold binary values.\n compress: function compress(input) {\n this._makeEntryLookup = true;\n this.initDictionary();\n this.position = 0;\n var resultBits = [];\n var omega = [];\n resultBits = this.appendArray(resultBits, this.binaryFromByte(CLEAR_CODE, this.byteLength)); // resultBits.concat(Array.from(this.binaryFromByte(this.CLEAR_CODE, this.byteLength)))\n for (var i = 
0; i < input.length; i++) {\n var k = [input[i]];\n var omk = omega.concat(k);\n if (this.entryLookup[omk] !== undefined) {\n omega = omk;\n } else {\n var _code = this.entryLookup[omega];\n var _bin = this.binaryFromByte(_code, this.byteLength);\n resultBits = this.appendArray(resultBits, _bin);\n this.addToDictionary(omk);\n omega = k;\n if (this.dictionary.length >= Math.pow(2, MAX_BITS)) {\n resultBits = this.appendArray(resultBits, this.binaryFromByte(CLEAR_CODE, this.byteLength));\n this.initDictionary();\n }\n }\n }\n var code = this.entryLookup[omega];\n var bin = this.binaryFromByte(code, this.byteLength);\n resultBits = this.appendArray(resultBits, bin);\n resultBits = resultBits = this.appendArray(resultBits, this.binaryFromByte(EOI_CODE, this.byteLength));\n this.binary = resultBits;\n this.result = this.binaryToUint8(resultBits);\n return this.result;\n },\n\n byteFromCode: function byteFromCode(code) {\n var res = this.dictionary[code];\n return res;\n },\n\n binaryFromByte: function binaryFromByte(byte) {\n var byteLength = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 8;\n\n var res = new Uint8Array(byteLength);\n for (var i = 0; i < res.length; i++) {\n var mask = Math.pow(2, i);\n var isOne = (byte & mask) > 0;\n res[res.length - 1 - i] = isOne;\n }\n return res;\n },\n\n binaryToNumber: function binaryToNumber(bin) {\n var res = 0;\n for (var i = 0; i < bin.length; i++) {\n res += Math.pow(2, bin.length - i - 1) * bin[i];\n }\n return res;\n },\n\n inputToBinary: function inputToBinary(input) {\n var inputByteLength = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 8;\n\n var res = new Uint8Array(input.length * inputByteLength);\n for (var i = 0; i < input.length; i++) {\n var bin = this.binaryFromByte(input[i], inputByteLength);\n res.set(bin, i * inputByteLength);\n }\n return res;\n },\n\n binaryToUint8: function binaryToUint8(bin) {\n var result = new Uint8Array(Math.ceil(bin.length / 8));\n var index = 0;\n for (var i = 0; i < bin.length; i += 8) {\n var val = 0;\n for (var j = 0; j < 8 && i + j < bin.length; j++) {\n val = val + bin[i + j] * Math.pow(2, 8 - j - 1);\n }\n result[index] = val;\n index++;\n }\n return result;\n }\n};\n\n// the actual decoder interface\n\nfunction LZWDecoder() {\n this.decompressor = new LZW();\n}\n\nLZWDecoder.prototype = Object.create(AbstractDecoder.prototype);\nLZWDecoder.prototype.constructor = LZWDecoder;\nLZWDecoder.prototype.decodeBlock = function (buffer) {\n return this.decompressor.decompress(buffer).buffer;\n};\n\nmodule.exports = LZWDecoder;","\"use strict\";\n\nvar AbstractDecoder = require(\"../abstractdecoder.js\");\nvar pakoInflate = require('pako/lib/inflate').inflate;\n\nfunction DeflateDecoder() {}\n\nDeflateDecoder.prototype = Object.create(AbstractDecoder.prototype);\nDeflateDecoder.prototype.constructor = DeflateDecoder;\nDeflateDecoder.prototype.decodeBlock = function (buffer) {\n return pakoInflate(new Uint8Array(buffer)).buffer;\n};\n\nmodule.exports = DeflateDecoder;","\"use strict\";\n\nvar AbstractDecoder = require(\"../abstractdecoder.js\");\n\nfunction PackbitsDecoder() {}\n\nPackbitsDecoder.prototype = Object.create(AbstractDecoder.prototype);\nPackbitsDecoder.prototype.constructor = PackbitsDecoder;\nPackbitsDecoder.prototype.decodeBlock = function (buffer) {\n var dataView = new DataView(buffer);\n var out = [];\n var i, j;\n\n for (i = 0; i < buffer.byteLength; ++i) {\n var header = dataView.getInt8(i);\n if (header < 0) {\n var next = dataView.getUint8(i + 
1);\n header = -header;\n for (j = 0; j <= header; ++j) {\n out.push(next);\n }\n i += 1;\n } else {\n for (j = 0; j <= header; ++j) {\n out.push(dataView.getUint8(i + j + 1));\n }\n i += header + 1;\n }\n }\n return new Uint8Array(out).buffer;\n};\n\nmodule.exports = PackbitsDecoder;","\"use strict\";\n\nvar globals = require(\"./globals.js\");\nvar RGB = require(\"./rgb.js\");\nvar RawDecoder = require(\"./compression/raw.js\");\nvar LZWDecoder = require(\"./compression/lzw.js\");\nvar DeflateDecoder = require(\"./compression/deflate.js\");\nvar PackbitsDecoder = require(\"./compression/packbits.js\");\n\nvar sum = function sum(array, start, end) {\n var s = 0;\n for (var i = start; i < end; ++i) {\n s += array[i];\n }\n return s;\n};\n\nvar arrayForType = function arrayForType(format, bitsPerSample, size) {\n switch (format) {\n case 1:\n // unsigned integer data\n switch (bitsPerSample) {\n case 8:\n return new Uint8Array(size);\n case 16:\n return new Uint16Array(size);\n case 32:\n return new Uint32Array(size);\n }\n break;\n case 2:\n // twos complement signed integer data\n switch (bitsPerSample) {\n case 8:\n return new Int8Array(size);\n case 16:\n return new Int16Array(size);\n case 32:\n return new Int32Array(size);\n }\n break;\n case 3:\n // floating point data\n switch (bitsPerSample) {\n case 32:\n return new Float32Array(size);\n case 64:\n return new Float64Array(size);\n }\n break;\n }\n throw Error(\"Unsupported data format/bitsPerSample\");\n};\n\n/**\n * GeoTIFF sub-file image.\n * @constructor\n * @param {Object} fileDirectory The parsed file directory\n * @param {Object} geoKeys The parsed geo-keys\n * @param {DataView} dataView The DataView for the underlying file.\n * @param {Boolean} littleEndian Whether the file is encoded in little or big endian\n * @param {Boolean} cache Whether or not decoded tiles shall be cached\n */\nfunction GeoTIFFImage(fileDirectory, geoKeys, dataView, littleEndian, cache) {\n this.fileDirectory = fileDirectory;\n this.geoKeys = geoKeys;\n this.dataView = dataView;\n this.littleEndian = littleEndian;\n this.tiles = cache ? {} : null;\n this.isTiled = fileDirectory.StripOffsets ? false : true;\n var planarConfiguration = fileDirectory.PlanarConfiguration;\n this.planarConfiguration = typeof planarConfiguration === \"undefined\" ? 
1 : planarConfiguration;\n if (this.planarConfiguration !== 1 && this.planarConfiguration !== 2) {\n throw new Error(\"Invalid planar configuration.\");\n }\n\n switch (this.fileDirectory.Compression) {\n case undefined:\n case 1:\n // no compression\n this.decoder = new RawDecoder();\n break;\n case 5:\n // LZW\n this.decoder = new LZWDecoder();\n break;\n case 6:\n // JPEG\n throw new Error(\"JPEG compression not supported.\");\n case 8:\n // Deflate\n this.decoder = new DeflateDecoder();\n break;\n //case 32946: // deflate ??\n // throw new Error(\"Deflate compression not supported.\");\n case 32773:\n // packbits\n this.decoder = new PackbitsDecoder();\n break;\n default:\n throw new Error(\"Unknown compresseion method identifier: \" + this.fileDirectory.Compression);\n }\n}\n\nGeoTIFFImage.prototype = {\n /**\n * Returns the associated parsed file directory.\n * @returns {Object} the parsed file directory\n */\n getFileDirectory: function getFileDirectory() {\n return this.fileDirectory;\n },\n /**\n * Returns the associated parsed geo keys.\n * @returns {Object} the parsed geo keys\n */\n getGeoKeys: function getGeoKeys() {\n return this.geoKeys;\n },\n /**\n * Returns the width of the image.\n * @returns {Number} the width of the image\n */\n getWidth: function getWidth() {\n return this.fileDirectory.ImageWidth;\n },\n /**\n * Returns the height of the image.\n * @returns {Number} the height of the image\n */\n getHeight: function getHeight() {\n return this.fileDirectory.ImageLength;\n },\n /**\n * Returns the number of samples per pixel.\n * @returns {Number} the number of samples per pixel\n */\n getSamplesPerPixel: function getSamplesPerPixel() {\n return this.fileDirectory.SamplesPerPixel;\n },\n /**\n * Returns the width of each tile.\n * @returns {Number} the width of each tile\n */\n getTileWidth: function getTileWidth() {\n return this.isTiled ? this.fileDirectory.TileWidth : this.getWidth();\n },\n /**\n * Returns the height of each tile.\n * @returns {Number} the height of each tile\n */\n getTileHeight: function getTileHeight() {\n return this.isTiled ? this.fileDirectory.TileLength : this.fileDirectory.RowsPerStrip;\n },\n\n /**\n * Calculates the number of bytes for each pixel across all samples. Only full\n * bytes are supported, an exception is thrown when this is not the case.\n * @returns {Number} the bytes per pixel\n */\n getBytesPerPixel: function getBytesPerPixel() {\n var bitsPerSample = 0;\n for (var i = 0; i < this.fileDirectory.BitsPerSample.length; ++i) {\n var bits = this.fileDirectory.BitsPerSample[i];\n if (bits % 8 !== 0) {\n throw new Error(\"Sample bit-width of \" + bits + \" is not supported.\");\n } else if (bits !== this.fileDirectory.BitsPerSample[0]) {\n throw new Error(\"Differing size of samples in a pixel are not supported.\");\n }\n bitsPerSample += bits;\n }\n return bitsPerSample / 8;\n },\n\n getSampleByteSize: function getSampleByteSize(i) {\n if (i >= this.fileDirectory.BitsPerSample.length) {\n throw new RangeError(\"Sample index \" + i + \" is out of range.\");\n }\n var bits = this.fileDirectory.BitsPerSample[i];\n if (bits % 8 !== 0) {\n throw new Error(\"Sample bit-width of \" + bits + \" is not supported.\");\n }\n return bits / 8;\n },\n\n getReaderForSample: function getReaderForSample(sampleIndex) {\n var format = this.fileDirectory.SampleFormat ? 
this.fileDirectory.SampleFormat[sampleIndex] : 1;\n var bitsPerSample = this.fileDirectory.BitsPerSample[sampleIndex];\n switch (format) {\n case 1:\n // unsigned integer data\n switch (bitsPerSample) {\n case 8:\n return DataView.prototype.getUint8;\n case 16:\n return DataView.prototype.getUint16;\n case 32:\n return DataView.prototype.getUint32;\n }\n break;\n case 2:\n // twos complement signed integer data\n switch (bitsPerSample) {\n case 8:\n return DataView.prototype.getInt8;\n case 16:\n return DataView.prototype.getInt16;\n case 32:\n return DataView.prototype.getInt32;\n }\n break;\n case 3:\n switch (bitsPerSample) {\n case 32:\n return DataView.prototype.getFloat32;\n case 64:\n return DataView.prototype.getFloat64;\n }\n break;\n }\n },\n\n getArrayForSample: function getArrayForSample(sampleIndex, size) {\n var format = this.fileDirectory.SampleFormat ? this.fileDirectory.SampleFormat[sampleIndex] : 1;\n var bitsPerSample = this.fileDirectory.BitsPerSample[sampleIndex];\n return arrayForType(format, bitsPerSample, size);\n },\n\n getDecoder: function getDecoder() {\n return this.decoder;\n },\n\n /**\n * Returns the decoded strip or tile.\n * @param {Number} x the strip or tile x-offset\n * @param {Number} y the tile y-offset (0 for stripped images)\n * @param {Number} plane the planar configuration (1: \"chunky\", 2: \"separate samples\")\n * @returns {(Int8Array|Uint8Array|Int16Array|Uint16Array|Int32Array|Uint32Array|Float32Array|Float64Array)}\n */\n getTileOrStrip: function getTileOrStrip(x, y, sample, callback) {\n var numTilesPerRow = Math.ceil(this.getWidth() / this.getTileWidth());\n var numTilesPerCol = Math.ceil(this.getHeight() / this.getTileHeight());\n var index;\n var tiles = this.tiles;\n if (this.planarConfiguration === 1) {\n index = y * numTilesPerRow + x;\n } else if (this.planarConfiguration === 2) {\n index = sample * numTilesPerRow * numTilesPerCol + y * numTilesPerRow + x;\n }\n\n if (tiles !== null && index in tiles) {\n if (callback) {\n return callback(null, { x: x, y: y, sample: sample, data: tiles[index] });\n }\n return tiles[index];\n } else {\n var offset, byteCount;\n if (this.isTiled) {\n offset = this.fileDirectory.TileOffsets[index];\n byteCount = this.fileDirectory.TileByteCounts[index];\n } else {\n offset = this.fileDirectory.StripOffsets[index];\n byteCount = this.fileDirectory.StripByteCounts[index];\n }\n var slice = this.dataView.buffer.slice(offset, offset + byteCount);\n if (callback) {\n return this.getDecoder().decodeBlockAsync(slice, function (error, data) {\n if (!error && tiles !== null) {\n tiles[index] = data;\n }\n callback(error, { x: x, y: y, sample: sample, data: data });\n });\n }\n var block = this.getDecoder().decodeBlock(slice);\n if (tiles !== null) {\n tiles[index] = block;\n }\n return block;\n }\n },\n\n _readRasterAsync: function _readRasterAsync(imageWindow, samples, valueArrays, interleave, callback, callbackError) {\n var tileWidth = this.getTileWidth();\n var tileHeight = this.getTileHeight();\n\n var minXTile = Math.floor(imageWindow[0] / tileWidth);\n var maxXTile = Math.ceil(imageWindow[2] / tileWidth);\n var minYTile = Math.floor(imageWindow[1] / tileHeight);\n var maxYTile = Math.ceil(imageWindow[3] / tileHeight);\n\n var numTilesPerRow = Math.ceil(this.getWidth() / tileWidth);\n\n var windowWidth = imageWindow[2] - imageWindow[0];\n var windowHeight = imageWindow[3] - imageWindow[1];\n\n var bytesPerPixel = this.getBytesPerPixel();\n var imageWidth = this.getWidth();\n\n var predictor = 
this.fileDirectory.Predictor || 1;\n\n var srcSampleOffsets = [];\n var sampleReaders = [];\n for (var i = 0; i < samples.length; ++i) {\n if (this.planarConfiguration === 1) {\n srcSampleOffsets.push(sum(this.fileDirectory.BitsPerSample, 0, samples[i]) / 8);\n } else {\n srcSampleOffsets.push(0);\n }\n sampleReaders.push(this.getReaderForSample(samples[i]));\n }\n\n var allStacked = false;\n var unfinishedTiles = 0;\n var littleEndian = this.littleEndian;\n var globalError = null;\n\n function checkFinished() {\n if (allStacked && unfinishedTiles === 0) {\n if (globalError) {\n callbackError(globalError);\n } else {\n callback(valueArrays);\n }\n }\n }\n\n function onTileGot(error, tile) {\n if (!error) {\n var dataView = new DataView(tile.data);\n\n var firstLine = tile.y * tileHeight;\n var firstCol = tile.x * tileWidth;\n var lastLine = (tile.y + 1) * tileHeight;\n var lastCol = (tile.x + 1) * tileWidth;\n var sampleIndex = tile.sample;\n\n for (var y = Math.max(0, imageWindow[1] - firstLine); y < Math.min(tileHeight, tileHeight - (lastLine - imageWindow[3])); ++y) {\n for (var x = Math.max(0, imageWindow[0] - firstCol); x < Math.min(tileWidth, tileWidth - (lastCol - imageWindow[2])); ++x) {\n var pixelOffset = (y * tileWidth + x) * bytesPerPixel;\n var value = sampleReaders[sampleIndex].call(dataView, pixelOffset + srcSampleOffsets[sampleIndex], littleEndian);\n var windowCoordinate;\n if (interleave) {\n if (predictor !== 1 && x > 0) {\n windowCoordinate = (y + firstLine - imageWindow[1]) * windowWidth * samples.length + (x + firstCol - imageWindow[0] - 1) * samples.length + sampleIndex;\n value += valueArrays[windowCoordinate];\n }\n\n windowCoordinate = (y + firstLine - imageWindow[1]) * windowWidth * samples.length + (x + firstCol - imageWindow[0]) * samples.length + sampleIndex;\n valueArrays[windowCoordinate] = value;\n } else {\n if (predictor !== 1 && x > 0) {\n windowCoordinate = (y + firstLine - imageWindow[1]) * windowWidth + x - 1 + firstCol - imageWindow[0];\n value += valueArrays[sampleIndex][windowCoordinate];\n }\n\n windowCoordinate = (y + firstLine - imageWindow[1]) * windowWidth + x + firstCol - imageWindow[0];\n valueArrays[sampleIndex][windowCoordinate] = value;\n }\n }\n }\n } else {\n globalError = error;\n }\n\n // check end condition and call callbacks\n unfinishedTiles -= 1;\n checkFinished();\n }\n\n for (var yTile = minYTile; yTile <= maxYTile; ++yTile) {\n for (var xTile = minXTile; xTile <= maxXTile; ++xTile) {\n for (var sampleIndex = 0; sampleIndex < samples.length; ++sampleIndex) {\n var sample = samples[sampleIndex];\n if (this.planarConfiguration === 2) {\n bytesPerPixel = this.getSampleByteSize(sample);\n }\n var _sampleIndex = sampleIndex;\n unfinishedTiles += 1;\n this.getTileOrStrip(xTile, yTile, sample, onTileGot);\n }\n }\n }\n allStacked = true;\n checkFinished();\n },\n\n _readRaster: function _readRaster(imageWindow, samples, valueArrays, interleave, callback, callbackError) {\n try {\n var tileWidth = this.getTileWidth();\n var tileHeight = this.getTileHeight();\n\n var minXTile = Math.floor(imageWindow[0] / tileWidth);\n var maxXTile = Math.ceil(imageWindow[2] / tileWidth);\n var minYTile = Math.floor(imageWindow[1] / tileHeight);\n var maxYTile = Math.ceil(imageWindow[3] / tileHeight);\n\n var numTilesPerRow = Math.ceil(this.getWidth() / tileWidth);\n\n var windowWidth = imageWindow[2] - imageWindow[0];\n var windowHeight = imageWindow[3] - imageWindow[1];\n\n var bytesPerPixel = this.getBytesPerPixel();\n var imageWidth = 
this.getWidth();\n\n var predictor = this.fileDirectory.Predictor || 1;\n\n var srcSampleOffsets = [];\n var sampleReaders = [];\n for (var i = 0; i < samples.length; ++i) {\n if (this.planarConfiguration === 1) {\n srcSampleOffsets.push(sum(this.fileDirectory.BitsPerSample, 0, samples[i]) / 8);\n } else {\n srcSampleOffsets.push(0);\n }\n sampleReaders.push(this.getReaderForSample(samples[i]));\n }\n\n for (var yTile = minYTile; yTile < maxYTile; ++yTile) {\n for (var xTile = minXTile; xTile < maxXTile; ++xTile) {\n var firstLine = yTile * tileHeight;\n var firstCol = xTile * tileWidth;\n var lastLine = (yTile + 1) * tileHeight;\n var lastCol = (xTile + 1) * tileWidth;\n\n for (var sampleIndex = 0; sampleIndex < samples.length; ++sampleIndex) {\n var sample = samples[sampleIndex];\n if (this.planarConfiguration === 2) {\n bytesPerPixel = this.getSampleByteSize(sample);\n }\n var tile = new DataView(this.getTileOrStrip(xTile, yTile, sample));\n\n var reader = sampleReaders[sampleIndex];\n var ymax = Math.min(tileHeight, tileHeight - (lastLine - imageWindow[3]));\n var xmax = Math.min(tileWidth, tileWidth - (lastCol - imageWindow[2]));\n var totalbytes = (ymax * tileWidth + xmax) * bytesPerPixel;\n var tileLength = new Uint8Array(tile.buffer).length;\n if (2 * tileLength !== totalbytes && this._debugMessages) {\n console.warn('dimension mismatch', tileLength, totalbytes);\n }\n for (var y = Math.max(0, imageWindow[1] - firstLine); y < ymax; ++y) {\n for (var x = Math.max(0, imageWindow[0] - firstCol); x < xmax; ++x) {\n var pixelOffset = (y * tileWidth + x) * bytesPerPixel;\n var value = 0;\n if (pixelOffset < tileLength - 1) {\n value = reader.call(tile, pixelOffset + srcSampleOffsets[sampleIndex], this.littleEndian);\n }\n\n var windowCoordinate;\n if (interleave) {\n if (predictor !== 1 && x > 0) {\n windowCoordinate = (y + firstLine - imageWindow[1]) * windowWidth * samples.length + (x + firstCol - imageWindow[0] - 1) * samples.length + sampleIndex;\n value += valueArrays[windowCoordinate];\n }\n\n windowCoordinate = (y + firstLine - imageWindow[1]) * windowWidth * samples.length + (x + firstCol - imageWindow[0]) * samples.length + sampleIndex;\n valueArrays[windowCoordinate] = value;\n } else {\n if (predictor !== 1 && x > 0) {\n windowCoordinate = (y + firstLine - imageWindow[1]) * windowWidth + x - 1 + firstCol - imageWindow[0];\n value += valueArrays[sampleIndex][windowCoordinate];\n }\n\n windowCoordinate = (y + firstLine - imageWindow[1]) * windowWidth + x + firstCol - imageWindow[0];\n valueArrays[sampleIndex][windowCoordinate] = value;\n }\n }\n }\n }\n }\n }\n callback(valueArrays);\n return valueArrays;\n } catch (error) {\n return callbackError(error);\n }\n },\n\n /**\n * This callback is called upon successful reading of a GeoTIFF image. The\n * resulting arrays are passed as a single argument.\n * @callback GeoTIFFImage~readCallback\n * @param {(TypedArray|TypedArray[])} array the requested data as a either a\n * single typed array or a list of\n * typed arrays, depending on the\n * 'interleave' option.\n */\n\n /**\n * This callback is called upon encountering an error while reading of a\n * GeoTIFF image\n * @callback GeoTIFFImage~readErrorCallback\n * @param {Error} error the encountered error\n */\n\n /**\n * Reads raster data from the image. This function reads all selected samples\n * into separate arrays of the correct type for that sample. 
When provided,\n * only a subset of the raster is read for each sample.\n *\n * @param {Object} [options] optional parameters\n * @param {Array} [options.window=whole image] the subset to read data from.\n * @param {Array} [options.samples=all samples] the selection of samples to read from.\n * @param {Boolean} [options.interleave=false] whether the data shall be read\n * in one single array or separate\n * arrays.\n * @param {GeoTIFFImage~readCallback} [callback] the success callback. this\n * parameter is mandatory for\n * asynchronous decoders (some\n * compression mechanisms).\n * @param {GeoTIFFImage~readErrorCallback} [callbackError] the error callback\n * @returns {(TypedArray|TypedArray[]|null)} in synchonous cases, the decoded\n * array(s) is/are returned. In\n * asynchronous cases, nothing is\n * returned.\n */\n readRasters: function readRasters() /* arguments are read via the 'arguments' object */{\n // parse the arguments\n var options, callback, callbackError;\n switch (arguments.length) {\n case 0:\n break;\n case 1:\n if (typeof arguments[0] === \"function\") {\n callback = arguments[0];\n } else {\n options = arguments[0];\n }\n break;\n case 2:\n if (typeof arguments[0] === \"function\") {\n callback = arguments[0];\n callbackError = arguments[1];\n } else {\n options = arguments[0];\n callback = arguments[1];\n }\n break;\n case 3:\n options = arguments[0];\n callback = arguments[1];\n callbackError = arguments[2];\n break;\n default:\n throw new Error(\"Invalid number of arguments passed.\");\n }\n\n // set up default arguments\n options = options || {};\n callbackError = callbackError || function (error) {\n console.error(error);\n };\n\n var imageWindow = options.window || [0, 0, this.getWidth(), this.getHeight()],\n samples = options.samples,\n interleave = options.interleave;\n\n // check parameters\n if (imageWindow[0] < 0 || imageWindow[1] < 0 || imageWindow[2] > this.getWidth() || imageWindow[3] > this.getHeight()) {\n throw new Error(\"Select window is out of image bounds.\");\n } else if (imageWindow[0] > imageWindow[2] || imageWindow[1] > imageWindow[3]) {\n throw new Error(\"Invalid subsets\");\n }\n\n var imageWindowWidth = imageWindow[2] - imageWindow[0];\n var imageWindowHeight = imageWindow[3] - imageWindow[1];\n var numPixels = imageWindowWidth * imageWindowHeight;\n var i;\n\n if (!samples) {\n samples = [];\n for (i = 0; i < this.fileDirectory.SamplesPerPixel; ++i) {\n samples.push(i);\n }\n } else {\n for (i = 0; i < samples.length; ++i) {\n if (samples[i] >= this.fileDirectory.SamplesPerPixel) {\n throw new RangeError(\"Invalid sample index '\" + samples[i] + \"'.\");\n }\n }\n }\n var valueArrays;\n if (interleave) {\n var format = this.fileDirectory.SampleFormat ? 
Math.max.apply(null, this.fileDirectory.SampleFormat) : 1,\n bitsPerSample = Math.max.apply(null, this.fileDirectory.BitsPerSample);\n valueArrays = arrayForType(format, bitsPerSample, numPixels * samples.length);\n } else {\n valueArrays = [];\n for (i = 0; i < samples.length; ++i) {\n valueArrays.push(this.getArrayForSample(samples[i], numPixels));\n }\n }\n\n // start reading data, sync or async\n var decoder = this.getDecoder();\n if (decoder.isAsync()) {\n if (!callback) {\n throw new Error(\"No callback specified for asynchronous raster reading.\");\n }\n return this._readRasterAsync(imageWindow, samples, valueArrays, interleave, callback, callbackError);\n } else {\n callback = callback || function () {};\n return this._readRaster(imageWindow, samples, valueArrays, interleave, callback, callbackError);\n }\n },\n\n /**\n * Reads raster data from the image as RGB. The result is always an\n * interleaved typed array.\n * Colorspaces other than RGB will be transformed to RGB, color maps expanded.\n * When no other method is applicable, the first sample is used to produce a\n * greayscale image.\n * When provided, only a subset of the raster is read for each sample.\n *\n * @param {Object} [options] optional parameters\n * @param {Array} [options.window=whole image] the subset to read data from.\n * @param {GeoTIFFImage~readCallback} callback the success callback. this\n * parameter is mandatory.\n * @param {GeoTIFFImage~readErrorCallback} [callbackError] the error callback\n */\n readRGB: function readRGB() {\n // parse the arguments\n var options = null,\n callback = null,\n callbackError = null;\n switch (arguments.length) {\n case 0:\n break;\n case 1:\n if (typeof arguments[0] === \"function\") {\n callback = arguments[0];\n } else {\n options = arguments[0];\n }\n break;\n case 2:\n if (typeof arguments[0] === \"function\") {\n callback = arguments[0];\n callbackError = arguments[1];\n } else {\n options = arguments[0];\n callback = arguments[1];\n }\n break;\n case 3:\n options = arguments[0];\n callback = arguments[1];\n callbackError = arguments[2];\n break;\n default:\n throw new Error(\"Invalid number of arguments passed.\");\n }\n\n // set up default arguments\n options = options || {};\n callbackError = callbackError || function (error) {\n console.error(error);\n };\n\n var imageWindow = options.window || [0, 0, this.getWidth(), this.getHeight()];\n\n // check parameters\n if (imageWindow[0] < 0 || imageWindow[1] < 0 || imageWindow[2] > this.getWidth() || imageWindow[3] > this.getHeight()) {\n throw new Error(\"Select window is out of image bounds.\");\n } else if (imageWindow[0] > imageWindow[2] || imageWindow[1] > imageWindow[3]) {\n throw new Error(\"Invalid subsets\");\n }\n\n var width = imageWindow[2] - imageWindow[0];\n var height = imageWindow[3] - imageWindow[1];\n\n var pi = this.fileDirectory.PhotometricInterpretation;\n\n var bits = this.fileDirectory.BitsPerSample[0];\n var max = Math.pow(2, bits);\n\n if (pi === globals.photometricInterpretations.RGB) {\n return this.readRasters({\n window: options.window,\n interleave: true\n }, callback, callbackError);\n }\n\n var samples;\n switch (pi) {\n case globals.photometricInterpretations.WhiteIsZero:\n case globals.photometricInterpretations.BlackIsZero:\n case globals.photometricInterpretations.Palette:\n samples = [0];\n break;\n case globals.photometricInterpretations.CMYK:\n samples = [0, 1, 2, 3];\n break;\n case globals.photometricInterpretations.YCbCr:\n case globals.photometricInterpretations.CIELab:\n 
samples = [0, 1, 2];\n break;\n default:\n throw new Error(\"Invalid or unsupported photometric interpretation.\");\n }\n\n var subOptions = {\n window: options.window,\n interleave: true,\n samples: samples\n };\n var fileDirectory = this.fileDirectory;\n return this.readRasters(subOptions, function (raster) {\n switch (pi) {\n case globals.photometricInterpretations.WhiteIsZero:\n return callback(RGB.fromWhiteIsZero(raster, max, width, height));\n case globals.photometricInterpretations.BlackIsZero:\n return callback(RGB.fromBlackIsZero(raster, max, width, height));\n case globals.photometricInterpretations.Palette:\n return callback(RGB.fromPalette(raster, fileDirectory.ColorMap, width, height));\n case globals.photometricInterpretations.CMYK:\n return callback(RGB.fromCMYK(raster, width, height));\n case globals.photometricInterpretations.YCbCr:\n return callback(RGB.fromYCbCr(raster, width, height));\n case globals.photometricInterpretations.CIELab:\n return callback(RGB.fromCIELab(raster, width, height));\n }\n }, callbackError);\n },\n\n /**\n * Returns an array of tiepoints.\n * @returns {Object[]}\n */\n getTiePoints: function getTiePoints() {\n if (!this.fileDirectory.ModelTiepoint) {\n return [];\n }\n\n var tiePoints = [];\n for (var i = 0; i < this.fileDirectory.ModelTiepoint.length; i += 6) {\n tiePoints.push({\n i: this.fileDirectory.ModelTiepoint[i],\n j: this.fileDirectory.ModelTiepoint[i + 1],\n k: this.fileDirectory.ModelTiepoint[i + 2],\n x: this.fileDirectory.ModelTiepoint[i + 3],\n y: this.fileDirectory.ModelTiepoint[i + 4],\n z: this.fileDirectory.ModelTiepoint[i + 5]\n });\n }\n return tiePoints;\n },\n\n /**\n * Returns the parsed GDAL metadata items.\n * @returns {Object}\n */\n getGDALMetadata: function getGDALMetadata() {\n var metadata = {};\n if (!this.fileDirectory.GDAL_METADATA) {\n return null;\n }\n var string = this.fileDirectory.GDAL_METADATA;\n var xmlDom = globals.parseXml(string.substring(0, string.length - 1));\n var result = xmlDom.evaluate(\"GDALMetadata/Item\", xmlDom, null, XPathResult.UNORDERED_NODE_SNAPSHOT_TYPE, null);\n for (var i = 0; i < result.snapshotLength; ++i) {\n var node = result.snapshotItem(i);\n metadata[node.getAttribute(\"name\")] = node.textContent;\n }\n return metadata;\n },\n\n /**\n * Returns the image origin as a XYZ-vector. When the image has no affine\n * transformation, then an exception is thrown.\n * @returns {Array} The origin as a vector\n */\n getOrigin: function getOrigin() {\n var tiePoints = this.fileDirectory.ModelTiepoint;\n if (!tiePoints || tiePoints.length !== 6) {\n throw new Error(\"The image does not have an affine transformation.\");\n }\n\n return [tiePoints[3], tiePoints[4], tiePoints[5]];\n },\n\n /**\n * Returns the image resolution as a XYZ-vector. 
When the image has no affine\n * transformation, then an exception is thrown.\n * @returns {Array} The resolution as a vector\n */\n getResolution: function getResolution() {\n if (!this.fileDirectory.ModelPixelScale) {\n throw new Error(\"The image does not have an affine transformation.\");\n }\n\n return [this.fileDirectory.ModelPixelScale[0], this.fileDirectory.ModelPixelScale[1], this.fileDirectory.ModelPixelScale[2]];\n },\n\n /**\n * Returns whether or not the pixels of the image depict an area (or point).\n * @returns {Boolean} Whether the pixels are a point\n */\n pixelIsArea: function pixelIsArea() {\n return this.geoKeys.GTRasterTypeGeoKey === 1;\n },\n\n /**\n * Returns the image bounding box as an array of 4 values: min-x, min-y,\n * max-x and max-y. When the image has no affine transformation, then an\n * exception is thrown.\n * @returns {Array} The bounding box\n */\n getBoundingBox: function getBoundingBox() {\n var origin = this.getOrigin();\n var resolution = this.getResolution();\n\n var x1 = origin[0];\n var y1 = origin[1];\n\n var x2 = x1 + resolution[0] * this.getWidth();\n var y2 = y1 + resolution[1] * this.getHeight();\n\n return [Math.min(x1, x2), Math.min(y1, y2), Math.max(x1, x2), Math.max(y1, y2)];\n }\n};\n\nmodule.exports = GeoTIFFImage;","\"use strict\";\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nvar DataView64 = function () {\n function DataView64(arrayBuffer) {\n _classCallCheck(this, DataView64);\n\n this._dataView = new DataView(arrayBuffer);\n }\n\n _createClass(DataView64, [{\n key: \"getUint64\",\n value: function getUint64(offset, littleEndian) {\n var left = this.getUint32(offset, littleEndian);\n var right = this.getUint32(offset + 4, littleEndian);\n if (littleEndian) {\n return left << 32 | right;\n }\n return right << 32 | left;\n }\n }, {\n key: \"getInt64\",\n value: function getInt64(offset, littleEndian) {\n var left, right;\n if (littleEndian) {\n left = this.getInt32(offset, littleEndian);\n right = this.getUint32(offset + 4, littleEndian);\n\n return left << 32 | right;\n }\n left = this.getUint32(offset, littleEndian);\n right = this.getInt32(offset + 4, littleEndian);\n return right << 32 | left;\n }\n }, {\n key: \"getUint8\",\n value: function getUint8(offset, littleEndian) {\n return this._dataView.getUint8(offset, littleEndian);\n }\n }, {\n key: \"getInt8\",\n value: function getInt8(offset, littleEndian) {\n return this._dataView.getInt8(offset, littleEndian);\n }\n }, {\n key: \"getUint16\",\n value: function getUint16(offset, littleEndian) {\n return this._dataView.getUint16(offset, littleEndian);\n }\n }, {\n key: \"getInt16\",\n value: function getInt16(offset, littleEndian) {\n return this._dataView.getInt16(offset, littleEndian);\n }\n }, {\n key: \"getUint32\",\n value: function getUint32(offset, littleEndian) {\n return 
this._dataView.getUint32(offset, littleEndian);\n }\n }, {\n key: \"getInt32\",\n value: function getInt32(offset, littleEndian) {\n return this._dataView.getInt32(offset, littleEndian);\n }\n }, {\n key: \"getFloat32\",\n value: function getFloat32(offset, littleEndian) {\n return this._dataView.getFloat32(offset, littleEndian);\n }\n }, {\n key: \"getFloat64\",\n value: function getFloat64(offset, littleEndian) {\n return this._dataView.getFloat64(offset, littleEndian);\n }\n }, {\n key: \"buffer\",\n get: function get() {\n return this._dataView.buffer;\n }\n }]);\n\n return DataView64;\n}();\n\nmodule.exports = DataView64;","\"use strict\";\n\nvar globals = require(\"./globals.js\");\nvar GeoTIFFImage = require(\"./geotiffimage.js\");\nvar DataView64 = require(\"./dataview64.js\");\n\nvar fieldTypes = globals.fieldTypes,\n fieldTagNames = globals.fieldTagNames,\n arrayFields = globals.arrayFields,\n geoKeyNames = globals.geoKeyNames;\n\n/**\n * The abstraction for a whole GeoTIFF file.\n * @constructor\n * @param {ArrayBuffer} rawData the raw data stream of the file as an ArrayBuffer.\n * @param {Object} [options] further options.\n * @param {Boolean} [options.cache=false] whether or not decoded tiles shall be cached.\n */\nfunction GeoTIFF(rawData, options) {\n this.dataView = new DataView64(rawData);\n options = options || {};\n this.cache = options.cache || false;\n\n var BOM = this.dataView.getUint16(0, 0);\n if (BOM === 0x4949) {\n this.littleEndian = true;\n } else if (BOM === 0x4D4D) {\n this.littleEndian = false;\n } else {\n throw new TypeError(\"Invalid byte order value.\");\n }\n\n var magicNumber = this.dataView.getUint16(2, this.littleEndian);\n if (this.dataView.getUint16(2, this.littleEndian) === 42) {\n this.bigTiff = false;\n } else if (magicNumber === 43) {\n this.bigTiff = true;\n var offsetBytesize = this.dataView.getUint16(4, this.littleEndian);\n if (offsetBytesize !== 8) {\n throw new Error(\"Unsupported offset byte-size.\");\n }\n } else {\n throw new TypeError(\"Invalid magic number.\");\n }\n\n this.fileDirectories = this.parseFileDirectories(this.getOffset(this.bigTiff ? 
8 : 4));\n}\n\nGeoTIFF.prototype = {\n getOffset: function getOffset(offset) {\n if (this.bigTiff) {\n return this.dataView.getUint64(offset, this.littleEndian);\n }\n return this.dataView.getUint32(offset, this.littleEndian);\n },\n\n getFieldTypeLength: function getFieldTypeLength(fieldType) {\n switch (fieldType) {\n case fieldTypes.BYTE:case fieldTypes.ASCII:case fieldTypes.SBYTE:case fieldTypes.UNDEFINED:\n return 1;\n case fieldTypes.SHORT:case fieldTypes.SSHORT:\n return 2;\n case fieldTypes.LONG:case fieldTypes.SLONG:case fieldTypes.FLOAT:\n return 4;\n case fieldTypes.RATIONAL:case fieldTypes.SRATIONAL:case fieldTypes.DOUBLE:\n case fieldTypes.LONG8:case fieldTypes.SLONG8:case fieldTypes.IFD8:\n return 8;\n default:\n throw new RangeError(\"Invalid field type: \" + fieldType);\n }\n },\n\n getValues: function getValues(fieldType, count, offset) {\n var values = null;\n var readMethod = null;\n var fieldTypeLength = this.getFieldTypeLength(fieldType);\n var i;\n\n switch (fieldType) {\n case fieldTypes.BYTE:case fieldTypes.ASCII:case fieldTypes.UNDEFINED:\n values = new Uint8Array(count);readMethod = this.dataView.getUint8;\n break;\n case fieldTypes.SBYTE:\n values = new Int8Array(count);readMethod = this.dataView.getInt8;\n break;\n case fieldTypes.SHORT:\n values = new Uint16Array(count);readMethod = this.dataView.getUint16;\n break;\n case fieldTypes.SSHORT:\n values = new Int16Array(count);readMethod = this.dataView.getInt16;\n break;\n case fieldTypes.LONG:\n values = new Uint32Array(count);readMethod = this.dataView.getUint32;\n break;\n case fieldTypes.SLONG:\n values = new Int32Array(count);readMethod = this.dataView.getInt32;\n break;\n case fieldTypes.LONG8:case fieldTypes.IFD8:\n values = new Array(count);readMethod = this.dataView.getUint64;\n break;\n case fieldTypes.SLONG8:\n values = new Array(count);readMethod = this.dataView.getInt64;\n break;\n case fieldTypes.RATIONAL:\n values = new Uint32Array(count * 2);readMethod = this.dataView.getUint32;\n break;\n case fieldTypes.SRATIONAL:\n values = new Int32Array(count * 2);readMethod = this.dataView.getInt32;\n break;\n case fieldTypes.FLOAT:\n values = new Float32Array(count);readMethod = this.dataView.getFloat32;\n break;\n case fieldTypes.DOUBLE:\n values = new Float64Array(count);readMethod = this.dataView.getFloat64;\n break;\n default:\n throw new RangeError(\"Invalid field type: \" + fieldType);\n }\n\n // normal fields\n if (!(fieldType === fieldTypes.RATIONAL || fieldType === fieldTypes.SRATIONAL)) {\n for (i = 0; i < count; ++i) {\n values[i] = readMethod.call(this.dataView, offset + i * fieldTypeLength, this.littleEndian);\n }\n }\n // RATIONAL or SRATIONAL\n else {\n for (i = 0; i < count; i += 2) {\n values[i] = readMethod.call(this.dataView, offset + i * fieldTypeLength, this.littleEndian);\n values[i + 1] = readMethod.call(this.dataView, offset + (i * fieldTypeLength + 4), this.littleEndian);\n }\n }\n\n if (fieldType === fieldTypes.ASCII) {\n return String.fromCharCode.apply(null, values);\n }\n return values;\n },\n\n getFieldValues: function getFieldValues(fieldTag, fieldType, typeCount, valueOffset) {\n var fieldValues;\n var fieldTypeLength = this.getFieldTypeLength(fieldType);\n\n if (fieldTypeLength * typeCount <= (this.bigTiff ? 
8 : 4)) {\n fieldValues = this.getValues(fieldType, typeCount, valueOffset);\n } else {\n var actualOffset = this.getOffset(valueOffset);\n fieldValues = this.getValues(fieldType, typeCount, actualOffset);\n }\n\n if (typeCount === 1 && arrayFields.indexOf(fieldTag) === -1 && !(fieldType === fieldTypes.RATIONAL || fieldType === fieldTypes.SRATIONAL)) {\n return fieldValues[0];\n }\n\n return fieldValues;\n },\n\n parseGeoKeyDirectory: function parseGeoKeyDirectory(fileDirectory) {\n var rawGeoKeyDirectory = fileDirectory.GeoKeyDirectory;\n if (!rawGeoKeyDirectory) {\n return null;\n }\n\n var geoKeyDirectory = {};\n for (var i = 4; i < rawGeoKeyDirectory[3] * 4; i += 4) {\n var key = geoKeyNames[rawGeoKeyDirectory[i]],\n location = rawGeoKeyDirectory[i + 1] ? fieldTagNames[rawGeoKeyDirectory[i + 1]] : null,\n count = rawGeoKeyDirectory[i + 2],\n offset = rawGeoKeyDirectory[i + 3];\n\n var value = null;\n if (!location) {\n value = offset;\n } else {\n value = fileDirectory[location];\n if (typeof value === \"undefined\" || value === null) {\n throw new Error(\"Could not get value of geoKey '\" + key + \"'.\");\n } else if (typeof value === \"string\") {\n value = value.substring(offset, offset + count - 1);\n } else if (value.subarray) {\n value = value.subarray(offset, offset + count - 1);\n }\n }\n geoKeyDirectory[key] = value;\n }\n return geoKeyDirectory;\n },\n\n parseFileDirectories: function parseFileDirectories(byteOffset) {\n var nextIFDByteOffset = byteOffset;\n var fileDirectories = [];\n\n while (nextIFDByteOffset !== 0x00000000) {\n var numDirEntries = this.bigTiff ? this.dataView.getUint64(nextIFDByteOffset, this.littleEndian) : this.dataView.getUint16(nextIFDByteOffset, this.littleEndian);\n\n var fileDirectory = {};\n var i = nextIFDByteOffset + (this.bigTiff ? 8 : 2);\n for (var entryCount = 0; entryCount < numDirEntries; i += this.bigTiff ? 20 : 12, ++entryCount) {\n var fieldTag = this.dataView.getUint16(i, this.littleEndian);\n var fieldType = this.dataView.getUint16(i + 2, this.littleEndian);\n var typeCount = this.bigTiff ? this.dataView.getUint64(i + 4, this.littleEndian) : this.dataView.getUint32(i + 4, this.littleEndian);\n\n fileDirectory[fieldTagNames[fieldTag]] = this.getFieldValues(fieldTag, fieldType, typeCount, i + (this.bigTiff ? 12 : 8));\n }\n fileDirectories.push([fileDirectory, this.parseGeoKeyDirectory(fileDirectory)]);\n\n nextIFDByteOffset = this.getOffset(i);\n }\n return fileDirectories;\n },\n\n /**\n * Get the n-th internal subfile a an image. 
By default, the first is returned.\n *\n * @param {Number} [index=0] the index of the image to return.\n * @returns {GeoTIFFImage} the image at the given index\n */\n getImage: function getImage(index) {\n index = index || 0;\n var fileDirectoryAndGeoKey = this.fileDirectories[index];\n if (!fileDirectoryAndGeoKey) {\n throw new RangeError(\"Invalid image index\");\n }\n return new GeoTIFFImage(fileDirectoryAndGeoKey[0], fileDirectoryAndGeoKey[1], this.dataView, this.littleEndian, this.cache);\n },\n\n /**\n * Returns the count of the internal subfiles.\n *\n * @returns {Number} the number of internal subfile images\n */\n getImageCount: function getImageCount() {\n return this.fileDirectories.length;\n }\n};\n\nmodule.exports = GeoTIFF;","\"use strict\";\n\nvar GeoTIFF = require(\"./geotiff.js\");\n\n/**\n * Main parsing function for GeoTIFF files.\n * @param {(string|ArrayBuffer)} data Raw data to parse the GeoTIFF from.\n * @param {Object} [options] further options.\n * @param {Boolean} [options.cache=false] whether or not decoded tiles shall be cached.\n * @returns {GeoTIFF} the parsed geotiff file.\n */\nvar parse = function parse(data, options) {\n var rawData, i, strLen, view;\n if (typeof data === \"string\" || data instanceof String) {\n rawData = new ArrayBuffer(data.length * 2); // 2 bytes for each char\n view = new Uint16Array(rawData);\n for (i = 0, strLen = data.length; i < strLen; ++i) {\n view[i] = data.charCodeAt(i);\n }\n } else if (data instanceof ArrayBuffer) {\n rawData = data;\n } else {\n throw new Error(\"Invalid input data given.\");\n }\n return new GeoTIFF(rawData, options);\n};\n\nif (typeof module !== \"undefined\" && typeof module.exports !== \"undefined\") {\n module.exports.parse = parse;\n}\nif (typeof window !== \"undefined\") {\n window[\"GeoTIFF\"] = { parse: parse };\n}","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\nimport geotiff from 'geotiff';\nimport { Filter } from '../core/Filter.js';\nimport { Image2D } from '../core/Image2D.js';\n\n\n/**\n* Read and decode Tiff format. The decoder for BigTiff is experimental.\n* Takes an ArrayBuffer of a tiff file as input and the TiffDecoder outputs an Image2D.\n* Tiff format is very broad and this decoder, thanks to the Geotiff npm package,\n* is compatible with single or multiband images, with or without compression, using\n* various bit depths and types (8bits, 32bits, etc.)\n*\n* Info: Tiff 6.0 specification http://www.npes.org/pdf/TIFF-v6.pdf\n*\n* **Usage**\n* - [examples/savePixpFile.html](../examples/fileToTiff.html)\n*\n*/\nclass TiffDecoder extends Filter {\n constructor() {\n super();\n this.addInputValidator(0, ArrayBuffer);\n }\n \n _run(){\n\n var inputBuffer = this._getInput(0);\n\n if(!inputBuffer){\n console.warn(\"TiffDecoder requires an ArrayBuffer as input \\\"0\\\". 
Unable to continue.\");\n return;\n }\n \n var success = false;\n \n var tiffData = geotiff.parse(inputBuffer);\n var tiffImage = tiffData.getImage();\n \n var data = tiffImage.readRasters( {interleave: true} );\n var width = tiffImage.getWidth();\n var height = tiffImage.getHeight();\n var ncpp = tiffImage.getSamplesPerPixel();\n \n if(ncpp == (data.length / (width*height))){\n success = true;\n }\n \n if( success ){\n var outputImg = this._addOutput( Image2D );\n outputImg.setData( data, width, height, ncpp);\n }else{\n console.warn(\"Tiff support is experimental and this file is not compatible.\");\n }\n \n }\n \n \n} /* END of class TiffDecoder */\n\nexport { TiffDecoder }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* License MIT\n* Link https://github.com/jonathanlurie/es6module\n* Lab MCIN - http://mcin.ca/ - Montreal Neurological Institute\n*/\n\n\n/**\n* An instance of QeegModFileParser can be used to parse several files\n* (you don't need to create a QeegModFileParser instance per file to parse).\n* The Qeeg MOD file usually has the .MOD extension, though this parser does not\n* need the filename or the extension.\n*\n*/\nclass QeegModFileParser {\n constructor(){\n this._rawData = null;\n }\n \n /**\n * Feed the parser with raw data to be parsed\n * @param {ArrayBuffer} data - the raw data\n */\n setRawData( data ){\n this._rawData = data;\n }\n \n \n /**\n * Launch the parsing of the ArrayBuffer that was given with the method setRawData\n * @return {Object} - the MOD file data in a readable format\n */\n parse(){\n var qeegData = null;\n try{\n qeegData = this._parseNoException();\n }catch(e){\n //console.error( e );\n console.warn(\"This file is not compatible.\");\n }\n \n return qeegData;\n }\n \n \n /**\n * [PRIVATE]\n * This method parses the data without caring about potential exceptions being raised,\n * it does not handle them. Thus, this method is unsafe to use as is and should\n * not be used directly.\n * @return {Object} - the MOD file data in a readable format\n */\n _parseNoException(){\n if( !this._rawData ){\n console.warn(\"The input buffer is null. 
Nothing to be parsed here.\");\n return null;\n }\n \n var inputBuffer = this._rawData\n \n var view = new DataView( inputBuffer );\n var littleEndian = true;\n \n // ------------- DECODING HEADER -------------------\n \n var header = {};\n \n // Protection Mask\n // Offset: 0, length: 2\n header.protectionMask = view.getUint16(0, littleEndian);\n \n // Comment (first byte is the real length)\n // Offset: 2, length: 81\n var commentRealLength = view.getUint8(2);\n var commentBytes = new Uint8Array(inputBuffer, 3, commentRealLength);\n header.comment = String.fromCharCode.apply(String, commentBytes);\n \n // Measure (M) Size\n // Offset: 83, length: 2\n header.measureSize = view.getUint16(83, littleEndian);\n \n // Duration (D) Size\n // Offset: 85, length: 2\n header.durationSize = view.getUint16(85, littleEndian);\n \n // First space (F) Size\n // Offset: 87, length: 2\n header.firstSpaceSize = view.getUint16(87, littleEndian);\n \n // Second space (S) Size\n // Offset: 89, length: 2\n header.secondSpaceSize = view.getUint16(89, littleEndian);\n \n // Reserved bytes\n // Offset: 91, length: 2\n header.reservedBytes = view.getUint16(91, littleEndian);\n \n // Data size\n // Offset: 93, length: 2\n header.dataSize = view.getUint16(93, littleEndian);\n \n // ------------- DECODING MATRIX -------------------\n var matrixOffset = 95;\n \n var matrixSizeElements = header.measureSize * \n header.durationSize * \n header.firstSpaceSize * \n header.secondSpaceSize;\n \n var matrixSizeBytes = matrixSizeElements * header.dataSize;\n \n var matrixData = new Float32Array(matrixSizeElements);\n \n for(var i=0; i \n * @license MIT\n */\n\n// The _isBuffer check is for Safari 5-7 support, because it's missing\n// Object.prototype.constructor. Remove this eventually\nmodule.exports = function (obj) {\n return obj != null && (isBuffer(obj) || isSlowBuffer(obj) || !!obj._isBuffer)\n}\n\nfunction isBuffer (obj) {\n return !!obj.constructor && typeof obj.constructor.isBuffer === 'function' && obj.constructor.isBuffer(obj)\n}\n\n// For Node v0.10 support. 
Remove this eventually.\nfunction isSlowBuffer (obj) {\n return typeof obj.readFloatLE === 'function' && typeof obj.slice === 'function' && isBuffer(obj.slice(0, 0))\n}\n","var iota = require(\"iota-array\")\nvar isBuffer = require(\"is-buffer\")\n\nvar hasTypedArrays = ((typeof Float64Array) !== \"undefined\")\n\nfunction compare1st(a, b) {\n return a[0] - b[0]\n}\n\nfunction order() {\n var stride = this.stride\n var terms = new Array(stride.length)\n var i\n for(i=0; iMath.abs(this.stride[1]))?[1,0]:[0,1]}})\")\n } else if(dimension === 3) {\n code.push(\n\"var s0=Math.abs(this.stride[0]),s1=Math.abs(this.stride[1]),s2=Math.abs(this.stride[2]);\\\nif(s0>s1){\\\nif(s1>s2){\\\nreturn [2,1,0];\\\n}else if(s0>s2){\\\nreturn [1,2,0];\\\n}else{\\\nreturn [1,0,2];\\\n}\\\n}else if(s0>s2){\\\nreturn [2,0,1];\\\n}else if(s2>s1){\\\nreturn [0,1,2];\\\n}else{\\\nreturn [0,2,1];\\\n}}})\")\n }\n } else {\n code.push(\"ORDER})\")\n }\n }\n\n //view.set(i0, ..., v):\n code.push(\n\"proto.set=function \"+className+\"_set(\"+args.join(\",\")+\",v){\")\n if(useGetters) {\n code.push(\"return this.data.set(\"+index_str+\",v)}\")\n } else {\n code.push(\"return this.data[\"+index_str+\"]=v}\")\n }\n\n //view.get(i0, ...):\n code.push(\"proto.get=function \"+className+\"_get(\"+args.join(\",\")+\"){\")\n if(useGetters) {\n code.push(\"return this.data.get(\"+index_str+\")}\")\n } else {\n code.push(\"return this.data[\"+index_str+\"]}\")\n }\n\n //view.index:\n code.push(\n \"proto.index=function \"+className+\"_index(\", args.join(), \"){return \"+index_str+\"}\")\n\n //view.hi():\n code.push(\"proto.hi=function \"+className+\"_hi(\"+args.join(\",\")+\"){return new \"+className+\"(this.data,\"+\n indices.map(function(i) {\n return [\"(typeof i\",i,\"!=='number'||i\",i,\"<0)?this.shape[\", i, \"]:i\", i,\"|0\"].join(\"\")\n }).join(\",\")+\",\"+\n indices.map(function(i) {\n return \"this.stride[\"+i + \"]\"\n }).join(\",\")+\",this.offset)}\")\n\n //view.lo():\n var a_vars = indices.map(function(i) { return \"a\"+i+\"=this.shape[\"+i+\"]\" })\n var c_vars = indices.map(function(i) { return \"c\"+i+\"=this.stride[\"+i+\"]\" })\n code.push(\"proto.lo=function \"+className+\"_lo(\"+args.join(\",\")+\"){var b=this.offset,d=0,\"+a_vars.join(\",\")+\",\"+c_vars.join(\",\"))\n for(var i=0; i=0){\\\nd=i\"+i+\"|0;\\\nb+=c\"+i+\"*d;\\\na\"+i+\"-=d}\")\n }\n code.push(\"return new \"+className+\"(this.data,\"+\n indices.map(function(i) {\n return \"a\"+i\n }).join(\",\")+\",\"+\n indices.map(function(i) {\n return \"c\"+i\n }).join(\",\")+\",b)}\")\n\n //view.step():\n code.push(\"proto.step=function \"+className+\"_step(\"+args.join(\",\")+\"){var \"+\n indices.map(function(i) {\n return \"a\"+i+\"=this.shape[\"+i+\"]\"\n }).join(\",\")+\",\"+\n indices.map(function(i) {\n return \"b\"+i+\"=this.stride[\"+i+\"]\"\n }).join(\",\")+\",c=this.offset,d=0,ceil=Math.ceil\")\n for(var i=0; i=0){c=(c+this.stride[\"+i+\"]*i\"+i+\")|0}else{a.push(this.shape[\"+i+\"]);b.push(this.stride[\"+i+\"])}\")\n }\n code.push(\"var ctor=CTOR_LIST[a.length+1];return ctor(this.data,a,b,c)}\")\n\n //Add return statement\n code.push(\"return function construct_\"+className+\"(data,shape,stride,offset){return new \"+className+\"(data,\"+\n indices.map(function(i) {\n return \"shape[\"+i+\"]\"\n }).join(\",\")+\",\"+\n indices.map(function(i) {\n return \"stride[\"+i+\"]\"\n }).join(\",\")+\",offset)}\")\n\n //Compile procedure\n var procedure = new Function(\"CTOR_LIST\", \"ORDER\", code.join(\"\\n\"))\n return 
procedure(CACHED_CONSTRUCTORS[dtype], order)\n}\n\nfunction arrayDType(data) {\n if(isBuffer(data)) {\n return \"buffer\"\n }\n if(hasTypedArrays) {\n switch(Object.prototype.toString.call(data)) {\n case \"[object Float64Array]\":\n return \"float64\"\n case \"[object Float32Array]\":\n return \"float32\"\n case \"[object Int8Array]\":\n return \"int8\"\n case \"[object Int16Array]\":\n return \"int16\"\n case \"[object Int32Array]\":\n return \"int32\"\n case \"[object Uint8Array]\":\n return \"uint8\"\n case \"[object Uint16Array]\":\n return \"uint16\"\n case \"[object Uint32Array]\":\n return \"uint32\"\n case \"[object Uint8ClampedArray]\":\n return \"uint8_clamped\"\n }\n }\n if(Array.isArray(data)) {\n return \"array\"\n }\n return \"generic\"\n}\n\nvar CACHED_CONSTRUCTORS = {\n \"float32\":[],\n \"float64\":[],\n \"int8\":[],\n \"int16\":[],\n \"int32\":[],\n \"uint8\":[],\n \"uint16\":[],\n \"uint32\":[],\n \"array\":[],\n \"uint8_clamped\":[],\n \"buffer\":[],\n \"generic\":[]\n}\n\n;(function() {\n for(var id in CACHED_CONSTRUCTORS) {\n CACHED_CONSTRUCTORS[id].push(compileConstructor(id, -1))\n }\n});\n\nfunction wrappedNDArrayCtor(data, shape, stride, offset) {\n if(data === undefined) {\n var ctor = CACHED_CONSTRUCTORS.array[0]\n return ctor([])\n } else if(typeof data === \"number\") {\n data = [data]\n }\n if(shape === undefined) {\n shape = [ data.length ]\n }\n var d = shape.length\n if(stride === undefined) {\n stride = new Array(d)\n for(var i=d-1, sz=1; i>=0; --i) {\n stride[i] = sz\n sz *= shape[i]\n }\n }\n if(offset === undefined) {\n offset = 0\n for(var i=0; i0\n , code = []\n , vars = []\n , idx=0, pidx=0, i, j\n for(i=0; i 0) {\n code.push(\"var \" + vars.join(\",\"))\n } \n //Scan loop\n for(i=dimension-1; i>=0; --i) { // Start at largest stride and work your way inwards\n idx = order[i]\n code.push([\"for(i\",i,\"=0;i\",i,\" 0) {\n code.push([\"index[\",pidx,\"]-=s\",pidx].join(\"\"))\n }\n code.push([\"++index[\",idx,\"]\"].join(\"\"))\n }\n code.push(\"}\")\n }\n return code.join(\"\\n\")\n}\n\n// Generate \"outer\" loops that loop over blocks of data, applying \"inner\" loops to the blocks by manipulating the local variables in such a way that the inner loop only \"sees\" the current block.\n// TODO: If this is used, then the previous declaration (done by generateCwiseOp) of s* is essentially unnecessary.\n// I believe the s* are not used elsewhere (in particular, I don't think they're used in the pre/post parts and \"shape\" is defined independently), so it would be possible to make defining the s* dependent on what loop method is being used.\nfunction outerFill(matched, order, proc, body) {\n var dimension = order.length\n , nargs = proc.arrayArgs.length\n , blockSize = proc.blockSize\n , has_index = proc.indexArgs.length > 0\n , code = []\n for(var i=0; i0;){\"].join(\"\")) // Iterate back to front\n code.push([\"if(j\",i,\"<\",blockSize,\"){\"].join(\"\")) // Either decrease j by blockSize (s = blockSize), or set it to zero (after setting s = j).\n code.push([\"s\",order[i],\"=j\",i].join(\"\"))\n code.push([\"j\",i,\"=0\"].join(\"\"))\n code.push([\"}else{s\",order[i],\"=\",blockSize].join(\"\"))\n code.push([\"j\",i,\"-=\",blockSize,\"}\"].join(\"\"))\n if(has_index) {\n code.push([\"index[\",order[i],\"]=j\",i].join(\"\"))\n }\n }\n for(var i=0; i 0) {\n allEqual = allEqual && summary[i] === summary[i-1]\n }\n }\n if(allEqual) {\n return summary[0]\n }\n return summary.join(\"\")\n}\n\n//Generates a cwise operator\nfunction generateCWiseOp(proc, 
typesig) {\n\n //Compute dimension\n // Arrays get put first in typesig, and there are two entries per array (dtype and order), so this gets the number of dimensions in the first array arg.\n var dimension = (typesig[1].length - Math.abs(proc.arrayBlockIndices[0]))|0\n var orders = new Array(proc.arrayArgs.length)\n var dtypes = new Array(proc.arrayArgs.length)\n for(var i=0; i 0) {\n vars.push(\"shape=SS.slice(0)\") // Makes the shape over which we iterate available to the user defined functions (so you can use width/height for example)\n }\n if(proc.indexArgs.length > 0) {\n // Prepare an array to keep track of the (logical) indices, initialized to dimension zeroes.\n var zeros = new Array(dimension)\n for(var i=0; i 0) {\n code.push(\"var \" + vars.join(\",\"))\n }\n for(var i=0; i 3) {\n code.push(processBlock(proc.pre, proc, dtypes))\n }\n\n //Process body\n var body = processBlock(proc.body, proc, dtypes)\n var matched = countMatches(loopOrders)\n if(matched < dimension) {\n code.push(outerFill(matched, loopOrders[0], proc, body)) // TODO: Rather than passing loopOrders[0], it might be interesting to look at passing an order that represents the majority of the arguments for example.\n } else {\n code.push(innerFill(loopOrders[0], proc, body))\n }\n\n //Inline epilog\n if(proc.post.body.length > 3) {\n code.push(processBlock(proc.post, proc, dtypes))\n }\n \n if(proc.debug) {\n console.log(\"-----Generated cwise routine for \", typesig, \":\\n\" + code.join(\"\\n\") + \"\\n----------\")\n }\n \n var loopName = [(proc.funcName||\"unnamed\"), \"_cwise_loop_\", orders[0].join(\"s\"),\"m\",matched,typeSummary(dtypes)].join(\"\")\n var f = new Function([\"function \",loopName,\"(\", arglist.join(\",\"),\"){\", code.join(\"\\n\"),\"} return \", loopName].join(\"\"))\n return f()\n}\nmodule.exports = generateCWiseOp\n","\"use strict\"\n\n// The function below is called when constructing a cwise function object, and does the following:\n// A function object is constructed which accepts as argument a compilation function and returns another function.\n// It is this other function that is eventually returned by createThunk, and this function is the one that actually\n// checks whether a certain pattern of arguments has already been used before and compiles new loops as needed.\n// The compilation passed to the first function object is used for compiling new functions.\n// Once this function object is created, it is called with compile as argument, where the first argument of compile\n// is bound to \"proc\" (essentially containing a preprocessed version of the user arguments to cwise).\n// So createThunk roughly works like this:\n// function createThunk(proc) {\n// var thunk = function(compileBound) {\n// var CACHED = {}\n// return function(arrays and scalars) {\n// if (dtype and order of arrays in CACHED) {\n// var func = CACHED[dtype and order of arrays]\n// } else {\n// var func = CACHED[dtype and order of arrays] = compileBound(dtype and order of arrays)\n// }\n// return func(arrays and scalars)\n// }\n// }\n// return thunk(compile.bind1(proc))\n// }\n\nvar compile = require(\"./compile.js\")\n\nfunction createThunk(proc) {\n var code = [\"'use strict'\", \"var CACHED={}\"]\n var vars = []\n var thunkName = proc.funcName + \"_cwise_thunk\"\n \n //Build thunk\n code.push([\"return function \", thunkName, \"(\", proc.shimArgs.join(\",\"), \"){\"].join(\"\"))\n var typesig = []\n var string_typesig = []\n var proc_args = [[\"array\",proc.arrayArgs[0],\".shape.slice(\", // Slice shape so that 
we only retain the shape over which we iterate (which gets passed to the cwise operator as SS).\n Math.max(0,proc.arrayBlockIndices[0]),proc.arrayBlockIndices[0]<0?(\",\"+proc.arrayBlockIndices[0]+\")\"):\")\"].join(\"\")]\n var shapeLengthConditions = [], shapeConditions = []\n // Process array arguments\n for(var i=0; i0) { // Gather conditions to check for shape equality (ignoring block indices)\n shapeLengthConditions.push(\"array\" + proc.arrayArgs[0] + \".shape.length===array\" + j + \".shape.length+\" + (Math.abs(proc.arrayBlockIndices[0])-Math.abs(proc.arrayBlockIndices[i])))\n shapeConditions.push(\"array\" + proc.arrayArgs[0] + \".shape[shapeIndex+\" + Math.max(0,proc.arrayBlockIndices[0]) + \"]===array\" + j + \".shape[shapeIndex+\" + Math.max(0,proc.arrayBlockIndices[i]) + \"]\")\n }\n }\n // Check for shape equality\n if (proc.arrayArgs.length > 1) {\n code.push(\"if (!(\" + shapeLengthConditions.join(\" && \") + \")) throw new Error('cwise: Arrays do not all have the same dimensionality!')\")\n code.push(\"for(var shapeIndex=array\" + proc.arrayArgs[0] + \".shape.length-\" + Math.abs(proc.arrayBlockIndices[0]) + \"; shapeIndex-->0;) {\")\n code.push(\"if (!(\" + shapeConditions.join(\" && \") + \")) throw new Error('cwise: Arrays do not all have the same shape!')\")\n code.push(\"}\")\n }\n // Process scalar arguments\n for(var i=0; i0) {\n throw new Error(\"cwise: pre() block may not reference array args\")\n }\n if(i < proc.post.args.length && proc.post.args[i].count>0) {\n throw new Error(\"cwise: post() block may not reference array args\")\n }\n } else if(arg_type === \"scalar\") {\n proc.scalarArgs.push(i)\n proc.shimArgs.push(\"scalar\" + i)\n } else if(arg_type === \"index\") {\n proc.indexArgs.push(i)\n if(i < proc.pre.args.length && proc.pre.args[i].count > 0) {\n throw new Error(\"cwise: pre() block may not reference array index\")\n }\n if(i < proc.body.args.length && proc.body.args[i].lvalue) {\n throw new Error(\"cwise: body() block may not write to array index\")\n }\n if(i < proc.post.args.length && proc.post.args[i].count > 0) {\n throw new Error(\"cwise: post() block may not reference array index\")\n }\n } else if(arg_type === \"shape\") {\n proc.shapeArgs.push(i)\n if(i < proc.pre.args.length && proc.pre.args[i].lvalue) {\n throw new Error(\"cwise: pre() block may not write to array shape\")\n }\n if(i < proc.body.args.length && proc.body.args[i].lvalue) {\n throw new Error(\"cwise: body() block may not write to array shape\")\n }\n if(i < proc.post.args.length && proc.post.args[i].lvalue) {\n throw new Error(\"cwise: post() block may not write to array shape\")\n }\n } else if(typeof arg_type === \"object\" && arg_type.offset) {\n proc.argTypes[i] = \"offset\"\n proc.offsetArgs.push({ array: arg_type.array, offset:arg_type.offset })\n proc.offsetArgIndex.push(i)\n } else {\n throw new Error(\"cwise: Unknown argument type \" + proc_args[i])\n }\n }\n \n //Make sure at least one array argument was specified\n if(proc.arrayArgs.length <= 0) {\n throw new Error(\"cwise: No array arguments specified\")\n }\n \n //Make sure arguments are correct\n if(proc.pre.args.length > proc_args.length) {\n throw new Error(\"cwise: Too many arguments in pre() block\")\n }\n if(proc.body.args.length > proc_args.length) {\n throw new Error(\"cwise: Too many arguments in body() block\")\n }\n if(proc.post.args.length > proc_args.length) {\n throw new Error(\"cwise: Too many arguments in post() block\")\n }\n\n //Check debug flag\n proc.debug = !!user_args.printCode || 
!!user_args.debug\n \n //Retrieve name\n proc.funcName = user_args.funcName || \"cwise\"\n \n //Read in block size\n proc.blockSize = user_args.blockSize || 64\n\n return createThunk(proc)\n}\n\nmodule.exports = compileCwise\n","\"use strict\"\n\nvar compile = require(\"cwise-compiler\")\n\nvar EmptyProc = {\n body: \"\",\n args: [],\n thisVars: [],\n localVars: []\n}\n\nfunction fixup(x) {\n if(!x) {\n return EmptyProc\n }\n for(var i=0; i>\",\n rrshift: \">>>\"\n}\n;(function(){\n for(var id in assign_ops) {\n var op = assign_ops[id]\n exports[id] = makeOp({\n args: [\"array\",\"array\",\"array\"],\n body: {args:[\"a\",\"b\",\"c\"],\n body: \"a=b\"+op+\"c\"},\n funcName: id\n })\n exports[id+\"eq\"] = makeOp({\n args: [\"array\",\"array\"],\n body: {args:[\"a\",\"b\"],\n body:\"a\"+op+\"=b\"},\n rvalue: true,\n funcName: id+\"eq\"\n })\n exports[id+\"s\"] = makeOp({\n args: [\"array\", \"array\", \"scalar\"],\n body: {args:[\"a\",\"b\",\"s\"],\n body:\"a=b\"+op+\"s\"},\n funcName: id+\"s\"\n })\n exports[id+\"seq\"] = makeOp({\n args: [\"array\",\"scalar\"],\n body: {args:[\"a\",\"s\"],\n body:\"a\"+op+\"=s\"},\n rvalue: true,\n funcName: id+\"seq\"\n })\n }\n})();\n\nvar unary_ops = {\n not: \"!\",\n bnot: \"~\",\n neg: \"-\",\n recip: \"1.0/\"\n}\n;(function(){\n for(var id in unary_ops) {\n var op = unary_ops[id]\n exports[id] = makeOp({\n args: [\"array\", \"array\"],\n body: {args:[\"a\",\"b\"],\n body:\"a=\"+op+\"b\"},\n funcName: id\n })\n exports[id+\"eq\"] = makeOp({\n args: [\"array\"],\n body: {args:[\"a\"],\n body:\"a=\"+op+\"a\"},\n rvalue: true,\n count: 2,\n funcName: id+\"eq\"\n })\n }\n})();\n\nvar binary_ops = {\n and: \"&&\",\n or: \"||\",\n eq: \"===\",\n neq: \"!==\",\n lt: \"<\",\n gt: \">\",\n leq: \"<=\",\n geq: \">=\"\n}\n;(function() {\n for(var id in binary_ops) {\n var op = binary_ops[id]\n exports[id] = makeOp({\n args: [\"array\",\"array\",\"array\"],\n body: {args:[\"a\", \"b\", \"c\"],\n body:\"a=b\"+op+\"c\"},\n funcName: id\n })\n exports[id+\"s\"] = makeOp({\n args: [\"array\",\"array\",\"scalar\"],\n body: {args:[\"a\", \"b\", \"s\"],\n body:\"a=b\"+op+\"s\"},\n funcName: id+\"s\"\n })\n exports[id+\"eq\"] = makeOp({\n args: [\"array\", \"array\"],\n body: {args:[\"a\", \"b\"],\n body:\"a=a\"+op+\"b\"},\n rvalue:true,\n count:2,\n funcName: id+\"eq\"\n })\n exports[id+\"seq\"] = makeOp({\n args: [\"array\", \"scalar\"],\n body: {args:[\"a\",\"s\"],\n body:\"a=a\"+op+\"s\"},\n rvalue:true,\n count:2,\n funcName: id+\"seq\"\n })\n }\n})();\n\nvar math_unary = [\n \"abs\",\n \"acos\",\n \"asin\",\n \"atan\",\n \"ceil\",\n \"cos\",\n \"exp\",\n \"floor\",\n \"log\",\n \"round\",\n \"sin\",\n \"sqrt\",\n \"tan\"\n]\n;(function() {\n for(var i=0; ithis_s){this_s=-a}else if(a>this_s){this_s=a}\", localVars: [], thisVars: [\"this_s\"]},\n post: {args:[], localVars:[], thisVars:[\"this_s\"], body:\"return this_s\"},\n funcName: \"norminf\"\n})\n\nexports.norm1 = compile({\n args:[\"array\"],\n pre: {args:[], localVars:[], thisVars:[\"this_s\"], body:\"this_s=0\"},\n body: {args:[{name:\"a\", lvalue:false, rvalue:true, count:3}], body: \"this_s+=a<0?-a:a\", localVars: [], thisVars: [\"this_s\"]},\n post: {args:[], localVars:[], thisVars:[\"this_s\"], body:\"return this_s\"},\n funcName: \"norm1\"\n})\n\nexports.sup = compile({\n args: [ \"array\" ],\n pre:\n { body: \"this_h=-Infinity\",\n args: [],\n thisVars: [ \"this_h\" ],\n localVars: [] },\n body:\n { body: \"if(_inline_1_arg0_>this_h)this_h=_inline_1_arg0_\",\n args: 
[{\"name\":\"_inline_1_arg0_\",\"lvalue\":false,\"rvalue\":true,\"count\":2} ],\n thisVars: [ \"this_h\" ],\n localVars: [] },\n post:\n { body: \"return this_h\",\n args: [],\n thisVars: [ \"this_h\" ],\n localVars: [] }\n })\n\nexports.inf = compile({\n args: [ \"array\" ],\n pre:\n { body: \"this_h=Infinity\",\n args: [],\n thisVars: [ \"this_h\" ],\n localVars: [] },\n body:\n { body: \"if(_inline_1_arg0_this_v){this_v=_inline_1_arg1_;for(var _inline_1_k=0;_inline_1_k<_inline_1_arg0_.length;++_inline_1_k){this_i[_inline_1_k]=_inline_1_arg0_[_inline_1_k]}}}\",\n args:[\n {name:\"_inline_1_arg0_\",lvalue:false,rvalue:true,count:2},\n {name:\"_inline_1_arg1_\",lvalue:false,rvalue:true,count:2}],\n thisVars:[\"this_i\",\"this_v\"],\n localVars:[\"_inline_1_k\"]},\n post:{\n body:\"{return this_i}\",\n args:[],\n thisVars:[\"this_i\"],\n localVars:[]}\n}) \n\nexports.random = makeOp({\n args: [\"array\"],\n pre: {args:[], body:\"this_f=Math.random\", thisVars:[\"this_f\"]},\n body: {args: [\"a\"], body:\"a=this_f()\", thisVars:[\"this_f\"]},\n funcName: \"random\"\n})\n\nexports.assign = makeOp({\n args:[\"array\", \"array\"],\n body: {args:[\"a\", \"b\"], body:\"a=b\"},\n funcName: \"assign\" })\n\nexports.assigns = makeOp({\n args:[\"array\", \"scalar\"],\n body: {args:[\"a\", \"b\"], body:\"a=b\"},\n funcName: \"assigns\" })\n\n\nexports.equals = compile({\n args:[\"array\", \"array\"],\n pre: EmptyProc,\n body: {args:[{name:\"x\", lvalue:false, rvalue:true, count:1},\n {name:\"y\", lvalue:false, rvalue:true, count:1}], \n body: \"if(x!==y){return false}\", \n localVars: [], \n thisVars: []},\n post: {args:[], localVars:[], thisVars:[], body:\"return true\"},\n funcName: \"equals\"\n})\n\n\n","/**\n * Bit twiddling hacks for JavaScript.\n *\n * Author: Mikola Lysenko\n *\n * Ported from Stanford bit twiddling hack library:\n * http://graphics.stanford.edu/~seander/bithacks.html\n */\n\n\"use strict\"; \"use restrict\";\n\n//Number of bits in an integer\nvar INT_BITS = 32;\n\n//Constants\nexports.INT_BITS = INT_BITS;\nexports.INT_MAX = 0x7fffffff;\nexports.INT_MIN = -1<<(INT_BITS-1);\n\n//Returns -1, 0, +1 depending on sign of x\nexports.sign = function(v) {\n return (v > 0) - (v < 0);\n}\n\n//Computes absolute value of integer\nexports.abs = function(v) {\n var mask = v >> (INT_BITS-1);\n return (v ^ mask) - mask;\n}\n\n//Computes minimum of integers x and y\nexports.min = function(x, y) {\n return y ^ ((x ^ y) & -(x < y));\n}\n\n//Computes maximum of integers x and y\nexports.max = function(x, y) {\n return x ^ ((x ^ y) & -(x < y));\n}\n\n//Checks if a number is a power of two\nexports.isPow2 = function(v) {\n return !(v & (v-1)) && (!!v);\n}\n\n//Computes log base 2 of v\nexports.log2 = function(v) {\n var r, shift;\n r = (v > 0xFFFF) << 4; v >>>= r;\n shift = (v > 0xFF ) << 3; v >>>= shift; r |= shift;\n shift = (v > 0xF ) << 2; v >>>= shift; r |= shift;\n shift = (v > 0x3 ) << 1; v >>>= shift; r |= shift;\n return r | (v >> 1);\n}\n\n//Computes log base 10 of v\nexports.log10 = function(v) {\n return (v >= 1000000000) ? 9 : (v >= 100000000) ? 8 : (v >= 10000000) ? 7 :\n (v >= 1000000) ? 6 : (v >= 100000) ? 5 : (v >= 10000) ? 4 :\n (v >= 1000) ? 3 : (v >= 100) ? 2 : (v >= 10) ? 
1 : 0;\n}\n\n//Counts number of bits\nexports.popCount = function(v) {\n v = v - ((v >>> 1) & 0x55555555);\n v = (v & 0x33333333) + ((v >>> 2) & 0x33333333);\n return ((v + (v >>> 4) & 0xF0F0F0F) * 0x1010101) >>> 24;\n}\n\n//Counts number of trailing zeros\nfunction countTrailingZeros(v) {\n var c = 32;\n v &= -v;\n if (v) c--;\n if (v & 0x0000FFFF) c -= 16;\n if (v & 0x00FF00FF) c -= 8;\n if (v & 0x0F0F0F0F) c -= 4;\n if (v & 0x33333333) c -= 2;\n if (v & 0x55555555) c -= 1;\n return c;\n}\nexports.countTrailingZeros = countTrailingZeros;\n\n//Rounds to next power of 2\nexports.nextPow2 = function(v) {\n v += v === 0;\n --v;\n v |= v >>> 1;\n v |= v >>> 2;\n v |= v >>> 4;\n v |= v >>> 8;\n v |= v >>> 16;\n return v + 1;\n}\n\n//Rounds down to previous power of 2\nexports.prevPow2 = function(v) {\n v |= v >>> 1;\n v |= v >>> 2;\n v |= v >>> 4;\n v |= v >>> 8;\n v |= v >>> 16;\n return v - (v>>>1);\n}\n\n//Computes parity of word\nexports.parity = function(v) {\n v ^= v >>> 16;\n v ^= v >>> 8;\n v ^= v >>> 4;\n v &= 0xf;\n return (0x6996 >>> v) & 1;\n}\n\nvar REVERSE_TABLE = new Array(256);\n\n(function(tab) {\n for(var i=0; i<256; ++i) {\n var v = i, r = i, s = 7;\n for (v >>>= 1; v; v >>>= 1) {\n r <<= 1;\n r |= v & 1;\n --s;\n }\n tab[i] = (r << s) & 0xff;\n }\n})(REVERSE_TABLE);\n\n//Reverse bits in a 32 bit word\nexports.reverse = function(v) {\n return (REVERSE_TABLE[ v & 0xff] << 24) |\n (REVERSE_TABLE[(v >>> 8) & 0xff] << 16) |\n (REVERSE_TABLE[(v >>> 16) & 0xff] << 8) |\n REVERSE_TABLE[(v >>> 24) & 0xff];\n}\n\n//Interleave bits of 2 coordinates with 16 bits. Useful for fast quadtree codes\nexports.interleave2 = function(x, y) {\n x &= 0xFFFF;\n x = (x | (x << 8)) & 0x00FF00FF;\n x = (x | (x << 4)) & 0x0F0F0F0F;\n x = (x | (x << 2)) & 0x33333333;\n x = (x | (x << 1)) & 0x55555555;\n\n y &= 0xFFFF;\n y = (y | (y << 8)) & 0x00FF00FF;\n y = (y | (y << 4)) & 0x0F0F0F0F;\n y = (y | (y << 2)) & 0x33333333;\n y = (y | (y << 1)) & 0x55555555;\n\n return x | (y << 1);\n}\n\n//Extracts the nth interleaved component\nexports.deinterleave2 = function(v, n) {\n v = (v >>> n) & 0x55555555;\n v = (v | (v >>> 1)) & 0x33333333;\n v = (v | (v >>> 2)) & 0x0F0F0F0F;\n v = (v | (v >>> 4)) & 0x00FF00FF;\n v = (v | (v >>> 16)) & 0x000FFFF;\n return (v << 16) >> 16;\n}\n\n\n//Interleave bits of 3 coordinates, each with 10 bits. 
Useful for fast octree codes\nexports.interleave3 = function(x, y, z) {\n x &= 0x3FF;\n x = (x | (x<<16)) & 4278190335;\n x = (x | (x<<8)) & 251719695;\n x = (x | (x<<4)) & 3272356035;\n x = (x | (x<<2)) & 1227133513;\n\n y &= 0x3FF;\n y = (y | (y<<16)) & 4278190335;\n y = (y | (y<<8)) & 251719695;\n y = (y | (y<<4)) & 3272356035;\n y = (y | (y<<2)) & 1227133513;\n x |= (y << 1);\n \n z &= 0x3FF;\n z = (z | (z<<16)) & 4278190335;\n z = (z | (z<<8)) & 251719695;\n z = (z | (z<<4)) & 3272356035;\n z = (z | (z<<2)) & 1227133513;\n \n return x | (z << 2);\n}\n\n//Extracts nth interleaved component of a 3-tuple\nexports.deinterleave3 = function(v, n) {\n v = (v >>> n) & 1227133513;\n v = (v | (v>>>2)) & 3272356035;\n v = (v | (v>>>4)) & 251719695;\n v = (v | (v>>>8)) & 4278190335;\n v = (v | (v>>>16)) & 0x3FF;\n return (v<<22)>>22;\n}\n\n//Computes next combination in colexicographic order (this is mistakenly called nextPermutation on the bit twiddling hacks page)\nexports.nextCombination = function(v) {\n var t = v | (v - 1);\n return (t + 1) | (((~t & -~t) - 1) >>> (countTrailingZeros(v) + 1));\n}\n\n","\"use strict\"\n\nfunction dupe_array(count, value, i) {\n var c = count[i]|0\n if(c <= 0) {\n return []\n }\n var result = new Array(c), j\n if(i === count.length-1) {\n for(j=0; j 0) {\n return dupe_number(count|0, value)\n }\n break\n case \"object\":\n if(typeof (count.length) === \"number\") {\n return dupe_array(count, value, 0)\n }\n break\n }\n return []\n}\n\nmodule.exports = dupe","'use strict'\n\nvar bits = require('bit-twiddle')\nvar dup = require('dup')\n\n//Legacy pool support\nif(!global.__TYPEDARRAY_POOL) {\n global.__TYPEDARRAY_POOL = {\n UINT8 : dup([32, 0])\n , UINT16 : dup([32, 0])\n , UINT32 : dup([32, 0])\n , INT8 : dup([32, 0])\n , INT16 : dup([32, 0])\n , INT32 : dup([32, 0])\n , FLOAT : dup([32, 0])\n , DOUBLE : dup([32, 0])\n , DATA : dup([32, 0])\n , UINT8C : dup([32, 0])\n , BUFFER : dup([32, 0])\n }\n}\n\nvar hasUint8C = (typeof Uint8ClampedArray) !== 'undefined'\nvar POOL = global.__TYPEDARRAY_POOL\n\n//Upgrade pool\nif(!POOL.UINT8C) {\n POOL.UINT8C = dup([32, 0])\n}\nif(!POOL.BUFFER) {\n POOL.BUFFER = dup([32, 0])\n}\n\n//New technique: Only allocate from ArrayBufferView and Buffer\nvar DATA = POOL.DATA\n , BUFFER = POOL.BUFFER\n\nexports.free = function free(array) {\n if(Buffer.isBuffer(array)) {\n BUFFER[bits.log2(array.length)].push(array)\n } else {\n if(Object.prototype.toString.call(array) !== '[object ArrayBuffer]') {\n array = array.buffer\n }\n if(!array) {\n return\n }\n var n = array.length || array.byteLength\n var log_n = bits.log2(n)|0\n DATA[log_n].push(array)\n }\n}\n\nfunction freeArrayBuffer(buffer) {\n if(!buffer) {\n return\n }\n var n = buffer.length || buffer.byteLength\n var log_n = bits.log2(n)\n DATA[log_n].push(buffer)\n}\n\nfunction freeTypedArray(array) {\n freeArrayBuffer(array.buffer)\n}\n\nexports.freeUint8 =\nexports.freeUint16 =\nexports.freeUint32 =\nexports.freeInt8 =\nexports.freeInt16 =\nexports.freeInt32 =\nexports.freeFloat32 = \nexports.freeFloat =\nexports.freeFloat64 = \nexports.freeDouble = \nexports.freeUint8Clamped = \nexports.freeDataView = freeTypedArray\n\nexports.freeArrayBuffer = freeArrayBuffer\n\nexports.freeBuffer = function freeBuffer(array) {\n BUFFER[bits.log2(array.length)].push(array)\n}\n\nexports.malloc = function malloc(n, dtype) {\n if(dtype === undefined || dtype === 'arraybuffer') {\n return mallocArrayBuffer(n)\n } else {\n switch(dtype) {\n case 'uint8':\n return mallocUint8(n)\n case 'uint16':\n 
return mallocUint16(n)\n case 'uint32':\n return mallocUint32(n)\n case 'int8':\n return mallocInt8(n)\n case 'int16':\n return mallocInt16(n)\n case 'int32':\n return mallocInt32(n)\n case 'float':\n case 'float32':\n return mallocFloat(n)\n case 'double':\n case 'float64':\n return mallocDouble(n)\n case 'uint8_clamped':\n return mallocUint8Clamped(n)\n case 'buffer':\n return mallocBuffer(n)\n case 'data':\n case 'dataview':\n return mallocDataView(n)\n\n default:\n return null\n }\n }\n return null\n}\n\nfunction mallocArrayBuffer(n) {\n var n = bits.nextPow2(n)\n var log_n = bits.log2(n)\n var d = DATA[log_n]\n if(d.length > 0) {\n return d.pop()\n }\n return new ArrayBuffer(n)\n}\nexports.mallocArrayBuffer = mallocArrayBuffer\n\nfunction mallocUint8(n) {\n return new Uint8Array(mallocArrayBuffer(n), 0, n)\n}\nexports.mallocUint8 = mallocUint8\n\nfunction mallocUint16(n) {\n return new Uint16Array(mallocArrayBuffer(2*n), 0, n)\n}\nexports.mallocUint16 = mallocUint16\n\nfunction mallocUint32(n) {\n return new Uint32Array(mallocArrayBuffer(4*n), 0, n)\n}\nexports.mallocUint32 = mallocUint32\n\nfunction mallocInt8(n) {\n return new Int8Array(mallocArrayBuffer(n), 0, n)\n}\nexports.mallocInt8 = mallocInt8\n\nfunction mallocInt16(n) {\n return new Int16Array(mallocArrayBuffer(2*n), 0, n)\n}\nexports.mallocInt16 = mallocInt16\n\nfunction mallocInt32(n) {\n return new Int32Array(mallocArrayBuffer(4*n), 0, n)\n}\nexports.mallocInt32 = mallocInt32\n\nfunction mallocFloat(n) {\n return new Float32Array(mallocArrayBuffer(4*n), 0, n)\n}\nexports.mallocFloat32 = exports.mallocFloat = mallocFloat\n\nfunction mallocDouble(n) {\n return new Float64Array(mallocArrayBuffer(8*n), 0, n)\n}\nexports.mallocFloat64 = exports.mallocDouble = mallocDouble\n\nfunction mallocUint8Clamped(n) {\n if(hasUint8C) {\n return new Uint8ClampedArray(mallocArrayBuffer(n), 0, n)\n } else {\n return mallocUint8(n)\n }\n}\nexports.mallocUint8Clamped = mallocUint8Clamped\n\nfunction mallocDataView(n) {\n return new DataView(mallocArrayBuffer(n), 0, n)\n}\nexports.mallocDataView = mallocDataView\n\nfunction mallocBuffer(n) {\n n = bits.nextPow2(n)\n var log_n = bits.log2(n)\n var cache = BUFFER[log_n]\n if(cache.length > 0) {\n return cache.pop()\n }\n return new Buffer(n)\n}\nexports.mallocBuffer = mallocBuffer\n\nexports.clearCache = function clearCache() {\n for(var i=0; i<32; ++i) {\n POOL.UINT8[i].length = 0\n POOL.UINT16[i].length = 0\n POOL.UINT32[i].length = 0\n POOL.INT8[i].length = 0\n POOL.INT16[i].length = 0\n POOL.INT32[i].length = 0\n POOL.FLOAT[i].length = 0\n POOL.DOUBLE[i].length = 0\n POOL.UINT8C[i].length = 0\n DATA[i].length = 0\n BUFFER[i].length = 0\n }\n}","var bits = require('bit-twiddle')\r\n\r\nfunction fft(dir, nrows, ncols, buffer, x_ptr, y_ptr, scratch_ptr) {\r\n dir |= 0\r\n nrows |= 0\r\n ncols |= 0\r\n x_ptr |= 0\r\n y_ptr |= 0\r\n if(bits.isPow2(ncols)) {\r\n fftRadix2(dir, nrows, ncols, buffer, x_ptr, y_ptr)\r\n } else {\r\n fftBluestein(dir, nrows, ncols, buffer, x_ptr, y_ptr, scratch_ptr)\r\n }\r\n}\r\nmodule.exports = fft\r\n\r\nfunction scratchMemory(n) {\r\n if(bits.isPow2(n)) {\r\n return 0\r\n }\r\n return 2 * n + 4 * bits.nextPow2(2*n + 1)\r\n}\r\nmodule.exports.scratchMemory = scratchMemory\r\n\r\n\r\n//Radix 2 FFT Adapted from Paul Bourke's C Implementation\r\nfunction fftRadix2(dir, nrows, ncols, buffer, x_ptr, y_ptr) {\r\n dir |= 0\r\n nrows |= 0\r\n ncols |= 0\r\n x_ptr |= 0\r\n y_ptr |= 0\r\n var nn,m,i,i1,j,k,i2,l,l1,l2\r\n var c1,c2,t,t1,t2,u1,u2,z,row,a,b,c,d,k1,k2,k3\r\n \r\n 
// Calculate the number of points\r\n nn = ncols\r\n m = bits.log2(nn)\r\n \r\n for(row=0; row> 1;\r\n j = 0;\r\n for(i=0;i>= 1\r\n }\r\n j += k\r\n }\r\n \r\n // Compute the FFT\r\n c1 = -1.0\r\n c2 = 0.0\r\n l2 = 1\r\n for(l=0;l=0; --i) {\r\n stride[i] = size\r\n size *= shape[i]\r\n pad = Math.max(pad, fftm.scratchMemory(shape[i]))\r\n if(x.shape[i] !== y.shape[i]) {\r\n throw new Error('Shape mismatch, real and imaginary arrays must have same size')\r\n }\r\n }\r\n var buf_size = 4 * size + pad\r\n var buffer\r\n if( x.dtype === 'array' ||\r\n x.dtype === 'float64' ||\r\n x.dtype === 'custom' ) {\r\n buffer = pool.mallocDouble(buf_size)\r\n } else {\r\n buffer = pool.mallocFloat(buf_size)\r\n }\r\n var x1 = ndarray(buffer, shape.slice(0), stride, 0)\r\n , y1 = ndarray(buffer, shape.slice(0), stride.slice(0), size)\r\n , x2 = ndarray(buffer, shape.slice(0), stride.slice(0), 2*size)\r\n , y2 = ndarray(buffer, shape.slice(0), stride.slice(0), 3*size)\r\n , tmp, n, s1, s2\r\n , scratch_ptr = 4 * size\r\n \r\n //Copy into x1/y1\r\n ops.assign(x1, x)\r\n ops.assign(y1, y)\r\n \r\n for(i=d-1; i>=0; --i) {\r\n fftm(dir, size/shape[i], shape[i], buffer, x1.offset, y1.offset, scratch_ptr)\r\n if(i === 0) {\r\n break\r\n }\r\n \r\n //Compute new stride for x2/y2\r\n n = 1\r\n s1 = x2.stride\r\n s2 = y2.stride\r\n for(j=i-1; j=0; --j) {\r\n s2[j] = s1[j] = n\r\n n *= shape[j]\r\n }\r\n \r\n //Transpose\r\n ops.assign(x2, x1)\r\n ops.assign(y2, y1)\r\n \r\n //Swap buffers\r\n tmp = x1\r\n x1 = x2\r\n x2 = tmp\r\n tmp = y1\r\n y1 = y2\r\n y2 = tmp\r\n }\r\n \r\n //Copy result back into x\r\n ops.assign(x, x1)\r\n ops.assign(y, y1)\r\n \r\n pool.free(buffer)\r\n}\r\n\r\nmodule.exports = ndfft","import ndarray from 'ndarray';\nimport ft from 'ndarray-fft';\n\nimport { Filter } from '../core/Filter';\nimport { Signal1D } from '../core/Signal1D';\n\nconst DIRECTIONS = {\n 'FORWARD': 1,\n 'INVERSE': -1,\n};\n\nclass BaseFourierSignalFilter extends Filter {\n constructor(direction) {\n super();\n this.direction = direction;\n if (DIRECTIONS[this.direction] === undefined) {\n throw new Error(`${this.direction} is not a valid fourier transform direction. Please try one of: ${Object.keys(DIRECTIONS)}`);\n }\n this.addInputValidator(0, Signal1D);\n }\n _run() {\n if( ! 
this.hasValidInput()){\n console.warn(\"A filter of type BaseFourierSignalFilter requires 1 input of Signal1D.\");\n return;\n }\n const inputSignal = this._getInput(0);\n const length = inputSignal.getMetadata('length');\n const real = ndarray(inputSignal.clone().getData(), [length]);\n const img = ndarray(inputSignal.hollowClone().getData(), [length]);\n this.setMetadata('direction', this.direction);\n\n ft(DIRECTIONS[this.direction], real, img);\n this._output[0] = new Signal1D();\n this._output[0].setData(real.data);\n this._output[1] = new Signal1D();\n this._output[1].setData(img.data);\n }\n}\n\nclass ForwardFourierSignalFilter extends BaseFourierSignalFilter {\n constructor() {\n super('FORWARD');\n }\n}\n\nclass InverseFourerSignalFilter extends BaseFourierSignalFilter {\n constructor() {\n super('INVERSE');\n }\n}\n\nexport { ForwardFourierSignalFilter, InverseFourerSignalFilter }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\nimport { Image2D } from '../core/Image2D.js';\nimport { ImageToImageFilter } from '../core/ImageToImageFilter.js';\n\n/**\n* A filter of type ForEachPixelImageFilter can perform a operation on evey pixel\n* of an Image2D with a simple interface. For this purpose, a per-pixel-callback\n* must be specified using method\n* .on( \"pixel\" , function( coord, color ){ ... })\n* where coord is of form {x, y} and color is of form [r, g, b, a] (with possibly)\n* a different number of components per pixel.\n* This callback must return, or null (original color not modified),\n* or a array of color (same dimension as the one in arguments).\n*\n* **Usage**\n* - [examples/forEachPixel.html](../examples/forEachPixel.html)\n*\n* @example\n* var forEachPixelFilter = new pixpipe.ForEachPixelImageFilter();\n* forEachPixelFilter.on( \"pixel\", function(position, color){\n*\n* return [\n* color[1], // red (takes the values from green)\n* color[0], // green (takes the values from red)\n* color[2] * 0.5, // blue get 50% darker\n* 255 // alpha, at max\n* ]\n*\n* }\n* );\n*\n*/\nclass ForEachPixelImageFilter extends ImageToImageFilter {\n\n constructor(){\n super();\n this.addInputValidator(0, Image2D);\n }\n\n\n /**\n * Run the filter\n */\n _run(){\n if( ! this.hasValidInput() )\n return;\n\n var inputImage2D = this._getInput();\n var firstPixel = 0;\n var lastPixel = inputImage2D.getWidth() * inputImage2D.getHeight();\n var increment = 1;\n\n var bufferCopy = inputImage2D.getDataCopy();\n\n this._forEachPixelOfSuch(bufferCopy, firstPixel, lastPixel, increment );\n\n // 1 - init the output\n var outputImg = this._addOutput( Image2D );\n\n // 2 - tune the output\n outputImg.setData(\n bufferCopy,\n inputImage2D.getWidth(),\n inputImage2D.getHeight(),\n inputImage2D.getComponentsPerPixel()\n );\n\n }\n\n\n /**\n * [PRIVATE]\n * generic function for painting row, colum or whole\n * @param {Number} firstPixel - Index of the first pixel in 1D array\n * @param {Number} lastPixel - Index of the last pixel in 1D array\n * @param {Number} increment - jump gap from a pixel to another (in a 1D style)\n */\n _forEachPixelOfSuch(buffer, firstPixel, lastPixel, increment ){\n // abort if no callback per pixel\n //if( ! (\"pixel\" in this._events)){\n if( ! 
( this.hasEvent(\"pixel\"))){\n console.warn(\"No function to apply per pixel was specified.\");\n return;\n }\n\n var inputImage2D = this._getInput();\n var inputBuffer = inputImage2D.getData();\n var componentPerPixel = inputImage2D.getComponentsPerPixel();\n\n var currentColor = null;\n\n for(var p=firstPixel; p 1) {\n n2 = nstack.pop();\n n1 = nstack.pop();\n f = binaryOps[item.value];\n item = new Instruction(INUMBER, f(n1.value, n2.value));\n nstack.push(item);\n } else if (type === IOP3 && nstack.length > 2) {\n n3 = nstack.pop();\n n2 = nstack.pop();\n n1 = nstack.pop();\n if (item.value === '?') {\n nstack.push(n1.value ? n2.value : n3.value);\n } else {\n f = ternaryOps[item.value];\n item = new Instruction(INUMBER, f(n1.value, n2.value, n3.value));\n nstack.push(item);\n }\n } else if (type === IOP1 && nstack.length > 0) {\n n1 = nstack.pop();\n f = unaryOps[item.value];\n item = new Instruction(INUMBER, f(n1.value));\n nstack.push(item);\n } else if (type === IEXPR) {\n while (nstack.length > 0) {\n newexpression.push(nstack.shift());\n }\n newexpression.push(new Instruction(IEXPR, simplify(item.value, unaryOps, binaryOps, ternaryOps, values)));\n } else if (type === IMEMBER && nstack.length > 0) {\n n1 = nstack.pop();\n nstack.push(new Instruction(INUMBER, n1.value[item.value]));\n } else {\n while (nstack.length > 0) {\n newexpression.push(nstack.shift());\n }\n newexpression.push(item);\n }\n }\n while (nstack.length > 0) {\n newexpression.push(nstack.shift());\n }\n return newexpression;\n}\n\nExpression.prototype.simplify = function (values) {\n values = values || {};\n return new Expression(simplify(this.tokens, this.unaryOps, this.binaryOps, this.ternaryOps, values), this.parser);\n};\n\nfunction substitute(tokens, variable, expr) {\n var newexpression = [];\n for (var i = 0, L = tokens.length; i < L; i++) {\n var item = tokens[i];\n var type = item.type;\n if (type === IVAR && item.value === variable) {\n for (var j = 0; j < expr.tokens.length; j++) {\n var expritem = expr.tokens[j];\n var replitem;\n if (expritem.type === IOP1) {\n replitem = unaryInstruction(expritem.value);\n } else if (expritem.type === IOP2) {\n replitem = binaryInstruction(expritem.value);\n } else if (expritem.type === IOP3) {\n replitem = ternaryInstruction(expritem.value);\n } else {\n replitem = new Instruction(expritem.type, expritem.value);\n }\n newexpression.push(replitem);\n }\n } else if (type === IEXPR) {\n newexpression.push(new Instruction(IEXPR, substitute(item.value, variable, expr)));\n } else {\n newexpression.push(item);\n }\n }\n return newexpression;\n}\n\nExpression.prototype.substitute = function (variable, expr) {\n if (!(expr instanceof Expression)) {\n expr = this.parser.parse(String(expr));\n }\n\n return new Expression(substitute(this.tokens, variable, expr), this.parser);\n};\n\nfunction evaluate(tokens, expr, values) {\n var nstack = [];\n var n1, n2, n3;\n var f;\n for (var i = 0, L = tokens.length; i < L; i++) {\n var item = tokens[i];\n var type = item.type;\n if (type === INUMBER) {\n nstack.push(item.value);\n } else if (type === IOP2) {\n n2 = nstack.pop();\n n1 = nstack.pop();\n f = expr.binaryOps[item.value];\n nstack.push(f(n1, n2));\n } else if (type === IOP3) {\n n3 = nstack.pop();\n n2 = nstack.pop();\n n1 = nstack.pop();\n if (item.value === '?') {\n nstack.push(evaluate(n1 ? 
n2 : n3, expr, values));\n } else {\n f = expr.ternaryOps[item.value];\n nstack.push(f(n1, n2, n3));\n }\n } else if (type === IVAR) {\n if (item.value in expr.functions) {\n nstack.push(expr.functions[item.value]);\n } else {\n var v = values[item.value];\n if (v !== undefined) {\n nstack.push(v);\n } else {\n throw new Error('undefined variable: ' + item.value);\n }\n }\n } else if (type === IOP1) {\n n1 = nstack.pop();\n f = expr.unaryOps[item.value];\n nstack.push(f(n1));\n } else if (type === IFUNCALL) {\n var argCount = item.value;\n var args = [];\n while (argCount-- > 0) {\n args.unshift(nstack.pop());\n }\n f = nstack.pop();\n if (f.apply && f.call) {\n nstack.push(f.apply(undefined, args));\n } else {\n throw new Error(f + ' is not a function');\n }\n } else if (type === IEXPR) {\n nstack.push(item.value);\n } else if (type === IMEMBER) {\n n1 = nstack.pop();\n nstack.push(n1[item.value]);\n } else {\n throw new Error('invalid Expression');\n }\n }\n if (nstack.length > 1) {\n throw new Error('invalid Expression (parity)');\n }\n return nstack[0];\n}\n\nExpression.prototype.evaluate = function (values) {\n values = values || {};\n return evaluate(this.tokens, this, values);\n};\n\nfunction expressionToString(tokens, toJS) {\n var nstack = [];\n var n1, n2, n3;\n var f;\n for (var i = 0, L = tokens.length; i < L; i++) {\n var item = tokens[i];\n var type = item.type;\n if (type === INUMBER) {\n if (typeof item.value === 'number' && item.value < 0) {\n nstack.push('(' + item.value + ')');\n } else {\n nstack.push(escapeValue(item.value));\n }\n } else if (type === IOP2) {\n n2 = nstack.pop();\n n1 = nstack.pop();\n f = item.value;\n if (toJS) {\n if (f === '^') {\n nstack.push('Math.pow(' + n1 + ', ' + n2 + ')');\n } else if (f === 'and') {\n nstack.push('(!!' + n1 + ' && !!' + n2 + ')');\n } else if (f === 'or') {\n nstack.push('(!!' + n1 + ' || !!' + n2 + ')');\n } else if (f === '||') {\n nstack.push('(String(' + n1 + ') + String(' + n2 + '))');\n } else if (f === '==') {\n nstack.push('(' + n1 + ' === ' + n2 + ')');\n } else if (f === '!=') {\n nstack.push('(' + n1 + ' !== ' + n2 + ')');\n } else {\n nstack.push('(' + n1 + ' ' + f + ' ' + n2 + ')');\n }\n } else {\n nstack.push('(' + n1 + ' ' + f + ' ' + n2 + ')');\n }\n } else if (type === IOP3) {\n n3 = nstack.pop();\n n2 = nstack.pop();\n n1 = nstack.pop();\n f = item.value;\n if (f === '?') {\n nstack.push('(' + n1 + ' ? ' + n2 + ' : ' + n3 + ')');\n } else {\n throw new Error('invalid Expression');\n }\n } else if (type === IVAR) {\n nstack.push(item.value);\n } else if (type === IOP1) {\n n1 = nstack.pop();\n f = item.value;\n if (f === '-' || f === '+') {\n nstack.push('(' + f + n1 + ')');\n } else if (toJS) {\n if (f === 'not') {\n nstack.push('(' + '!' + n1 + ')');\n } else if (f === '!') {\n nstack.push('fac(' + n1 + ')');\n } else {\n nstack.push(f + '(' + n1 + ')');\n }\n } else if (f === '!') {\n nstack.push('(' + n1 + '!)');\n } else {\n nstack.push('(' + f + ' ' + n1 + ')');\n }\n } else if (type === IFUNCALL) {\n var argCount = item.value;\n var args = [];\n while (argCount-- > 0) {\n args.unshift(nstack.pop());\n }\n f = nstack.pop();\n nstack.push(f + '(' + args.join(', ') + ')');\n } else if (type === IMEMBER) {\n n1 = nstack.pop();\n nstack.push(n1 + '.' 
+ item.value);\n } else if (type === IEXPR) {\n nstack.push('(' + expressionToString(item.value, toJS) + ')');\n } else {\n throw new Error('invalid Expression');\n }\n }\n if (nstack.length > 1) {\n throw new Error('invalid Expression (parity)');\n }\n return nstack[0];\n}\n\nExpression.prototype.toString = function () {\n return expressionToString(this.tokens, false);\n};\n\nfunction getSymbols(tokens, symbols) {\n for (var i = 0, L = tokens.length; i < L; i++) {\n var item = tokens[i];\n if (item.type === IVAR && (indexOf(symbols, item.value) === -1)) {\n symbols.push(item.value);\n } else if (item.type === IEXPR) {\n getSymbols(item.value, symbols);\n }\n }\n}\n\nExpression.prototype.symbols = function () {\n var vars = [];\n getSymbols(this.tokens, vars);\n return vars;\n};\n\nExpression.prototype.variables = function () {\n var vars = [];\n getSymbols(this.tokens, vars);\n var functions = this.functions;\n return vars.filter(function (name) {\n return !(name in functions);\n });\n};\n\nExpression.prototype.toJSFunction = function (param, variables) {\n var expr = this;\n var f = new Function(param, 'with(this.functions) with (this.ternaryOps) with (this.binaryOps) with (this.unaryOps) { return ' + expressionToString(this.simplify(variables).tokens, true) + '; }'); // eslint-disable-line no-new-func\n return function () {\n return f.apply(expr, arguments);\n };\n};\n\nfunction add(a, b) {\n return Number(a) + Number(b);\n}\nfunction sub(a, b) {\n return a - b;\n}\nfunction mul(a, b) {\n return a * b;\n}\nfunction div(a, b) {\n return a / b;\n}\nfunction mod(a, b) {\n return a % b;\n}\nfunction concat(a, b) {\n return '' + a + b;\n}\nfunction equal(a, b) {\n return a === b;\n}\nfunction notEqual(a, b) {\n return a !== b;\n}\nfunction greaterThan(a, b) {\n return a > b;\n}\nfunction lessThan(a, b) {\n return a < b;\n}\nfunction greaterThanEqual(a, b) {\n return a >= b;\n}\nfunction lessThanEqual(a, b) {\n return a <= b;\n}\nfunction andOperator(a, b) {\n return Boolean(a && b);\n}\nfunction orOperator(a, b) {\n return Boolean(a || b);\n}\nfunction sinh(a) {\n return ((Math.exp(a) - Math.exp(-a)) / 2);\n}\nfunction cosh(a) {\n return ((Math.exp(a) + Math.exp(-a)) / 2);\n}\nfunction tanh(a) {\n if (a === Infinity) return 1;\n if (a === -Infinity) return -1;\n return (Math.exp(a) - Math.exp(-a)) / (Math.exp(a) + Math.exp(-a));\n}\nfunction asinh(a) {\n if (a === -Infinity) return a;\n return Math.log(a + Math.sqrt(a * a + 1));\n}\nfunction acosh(a) {\n return Math.log(a + Math.sqrt(a * a - 1));\n}\nfunction atanh(a) {\n return (Math.log((1 + a) / (1 - a)) / 2);\n}\nfunction log10(a) {\n return Math.log(a) * Math.LOG10E;\n}\nfunction neg(a) {\n return -a;\n}\nfunction not(a) {\n return !a;\n}\nfunction trunc(a) {\n return a < 0 ? Math.ceil(a) : Math.floor(a);\n}\nfunction random(a) {\n return Math.random() * (a || 1);\n}\nfunction factorial(a) { // a!\n return gamma(a + 1);\n}\nfunction stringLength(s) {\n return String(s).length;\n}\n\nfunction hypot() {\n var sum = 0;\n var larg = 0;\n for (var i = 0, L = arguments.length; i < L; i++) {\n var arg = Math.abs(arguments[i]);\n var div;\n if (larg < arg) {\n div = larg / arg;\n sum = sum * div * div + 1;\n larg = arg;\n } else if (arg > 0) {\n div = arg / larg;\n sum += div * div;\n } else {\n sum += arg;\n }\n }\n return larg === Infinity ? Infinity : larg * Math.sqrt(sum);\n}\n\nfunction condition(cond, yep, nope) {\n return cond ? 
yep : nope;\n}\n\nfunction isInteger(value) {\n return isFinite(value) && (value === Math.round(value));\n}\n\nvar GAMMA_G = 4.7421875;\nvar GAMMA_P = [\n 0.99999999999999709182,\n 57.156235665862923517, -59.597960355475491248,\n 14.136097974741747174, -0.49191381609762019978,\n 0.33994649984811888699e-4,\n 0.46523628927048575665e-4, -0.98374475304879564677e-4,\n 0.15808870322491248884e-3, -0.21026444172410488319e-3,\n 0.21743961811521264320e-3, -0.16431810653676389022e-3,\n 0.84418223983852743293e-4, -0.26190838401581408670e-4,\n 0.36899182659531622704e-5\n];\n\n// Gamma function from math.js\nfunction gamma(n) {\n var t, x;\n\n if (isInteger(n)) {\n if (n <= 0) {\n return isFinite(n) ? Infinity : NaN;\n }\n\n if (n > 171) {\n return Infinity; // Will overflow\n }\n\n var value = n - 2;\n var res = n - 1;\n while (value > 1) {\n res *= value;\n value--;\n }\n\n if (res === 0) {\n res = 1; // 0! is per definition 1\n }\n\n return res;\n }\n\n if (n < 0.5) {\n return Math.PI / (Math.sin(Math.PI * n) * gamma(1 - n));\n }\n\n if (n >= 171.35) {\n return Infinity; // will overflow\n }\n\n if (n > 85.0) { // Extended Stirling Approx\n var twoN = n * n;\n var threeN = twoN * n;\n var fourN = threeN * n;\n var fiveN = fourN * n;\n return Math.sqrt(2 * Math.PI / n) * Math.pow((n / Math.E), n) *\n (1 + 1 / (12 * n) + 1 / (288 * twoN) - 139 / (51840 * threeN) -\n 571 / (2488320 * fourN) + 163879 / (209018880 * fiveN) +\n 5246819 / (75246796800 * fiveN * n));\n }\n\n --n;\n x = GAMMA_P[0];\n for (var i = 1; i < GAMMA_P.length; ++i) {\n x += GAMMA_P[i] / (n + i);\n }\n\n t = n + GAMMA_G + 0.5;\n return Math.sqrt(2 * Math.PI) * Math.pow(t, n + 0.5) * Math.exp(-t) * x;\n}\n\nvar TEOF = 'TEOF';\nvar TOP = 'TOP';\nvar TNUMBER = 'TNUMBER';\nvar TSTRING = 'TSTRING';\nvar TPAREN = 'TPAREN';\nvar TCOMMA = 'TCOMMA';\nvar TNAME = 'TNAME';\n\nfunction Token(type, value, line, column) {\n this.type = type;\n this.value = value;\n this.line = line;\n this.column = column;\n}\n\nToken.prototype.toString = function () {\n return this.type + ': ' + this.value;\n};\n\nfunction TokenStream(expression, unaryOps, binaryOps, ternaryOps, consts) {\n this.pos = 0;\n this.line = 0;\n this.column = 0;\n this.current = null;\n this.unaryOps = unaryOps;\n this.binaryOps = binaryOps;\n this.ternaryOps = ternaryOps;\n this.consts = consts;\n this.expression = expression;\n this.savedPosition = 0;\n this.savedCurrent = null;\n this.savedLine = 0;\n this.savedColumn = 0;\n}\n\nTokenStream.prototype.newToken = function (type, value, line, column) {\n return new Token(type, value, line != null ? line : this.line, column != null ? 
column : this.column);\n};\n\nTokenStream.prototype.save = function () {\n this.savedPosition = this.pos;\n this.savedCurrent = this.current;\n this.savedLine = this.line;\n this.savedColumn = this.column;\n};\n\nTokenStream.prototype.restore = function () {\n this.pos = this.savedPosition;\n this.current = this.savedCurrent;\n this.line = this.savedLine;\n this.column = this.savedColumn;\n};\n\nTokenStream.prototype.next = function () {\n if (this.pos >= this.expression.length) {\n return this.newToken(TEOF, 'EOF');\n }\n\n if (this.isWhitespace() || this.isComment()) {\n return this.next();\n } else if (this.isNumber() ||\n this.isOperator() ||\n this.isString() ||\n this.isParen() ||\n this.isComma() ||\n this.isNamedOp() ||\n this.isConst() ||\n this.isName()) {\n return this.current;\n } else {\n this.parseError('Unknown character \"' + this.expression.charAt(this.pos) + '\"');\n }\n};\n\nTokenStream.prototype.isString = function () {\n var r = false;\n var startLine = this.line;\n var startColumn = this.column;\n var startPos = this.pos;\n var quote = this.expression.charAt(startPos);\n\n if (quote === '\\'' || quote === '\"') {\n this.pos++;\n this.column++;\n var index = this.expression.indexOf(quote, startPos + 1);\n while (index >= 0 && this.pos < this.expression.length) {\n this.pos = index + 1;\n if (this.expression.charAt(index - 1) !== '\\\\') {\n var rawString = this.expression.substring(startPos + 1, index);\n this.current = this.newToken(TSTRING, this.unescape(rawString), startLine, startColumn);\n var newLine = rawString.indexOf('\\n');\n var lastNewline = -1;\n while (newLine >= 0) {\n this.line++;\n this.column = 0;\n lastNewline = newLine;\n newLine = rawString.indexOf('\\n', newLine + 1);\n }\n this.column += rawString.length - lastNewline;\n r = true;\n break;\n }\n index = this.expression.indexOf(quote, index + 1);\n }\n }\n return r;\n};\n\nTokenStream.prototype.isParen = function () {\n var char = this.expression.charAt(this.pos);\n if (char === '(' || char === ')') {\n this.current = this.newToken(TPAREN, char);\n this.pos++;\n this.column++;\n return true;\n }\n return false;\n};\n\nTokenStream.prototype.isComma = function () {\n var char = this.expression.charAt(this.pos);\n if (char === ',') {\n this.current = this.newToken(TCOMMA, ',');\n this.pos++;\n this.column++;\n return true;\n }\n return false;\n};\n\nTokenStream.prototype.isConst = function () {\n var startPos = this.pos;\n var i = startPos;\n for (; i < this.expression.length; i++) {\n var c = this.expression.charAt(i);\n if (c.toUpperCase() === c.toLowerCase()) {\n if (i === this.pos || (c !== '_' && c !== '.' 
&& (c < '0' || c > '9'))) {\n break;\n }\n }\n }\n if (i > startPos) {\n var str = this.expression.substring(startPos, i);\n if (str in this.consts) {\n this.current = this.newToken(TNUMBER, this.consts[str]);\n this.pos += str.length;\n this.column += str.length;\n return true;\n }\n }\n return false;\n};\n\nTokenStream.prototype.isNamedOp = function () {\n var startPos = this.pos;\n var i = startPos;\n for (; i < this.expression.length; i++) {\n var c = this.expression.charAt(i);\n if (c.toUpperCase() === c.toLowerCase()) {\n if (i === this.pos || (c !== '_' && (c < '0' || c > '9'))) {\n break;\n }\n }\n }\n if (i > startPos) {\n var str = this.expression.substring(startPos, i);\n if (str in this.binaryOps || str in this.unaryOps || str in this.ternaryOps) {\n this.current = this.newToken(TOP, str);\n this.pos += str.length;\n this.column += str.length;\n return true;\n }\n }\n return false;\n};\n\nTokenStream.prototype.isName = function () {\n var startPos = this.pos;\n var i = startPos;\n for (; i < this.expression.length; i++) {\n var c = this.expression.charAt(i);\n if (c.toUpperCase() === c.toLowerCase()) {\n if (i === this.pos || (c !== '_' && (c < '0' || c > '9'))) {\n break;\n }\n }\n }\n if (i > startPos) {\n var str = this.expression.substring(startPos, i);\n this.current = this.newToken(TNAME, str);\n this.pos += str.length;\n this.column += str.length;\n return true;\n }\n return false;\n};\n\nTokenStream.prototype.isWhitespace = function () {\n var r = false;\n var char = this.expression.charAt(this.pos);\n while (char === ' ' || char === '\\t' || char === '\\n' || char === '\\r') {\n r = true;\n this.pos++;\n this.column++;\n if (char === '\\n') {\n this.line++;\n this.column = 0;\n }\n if (this.pos >= this.expression.length) {\n break;\n }\n char = this.expression.charAt(this.pos);\n }\n return r;\n};\n\nvar codePointPattern = /^[0-9a-f]{4}$/i;\n\nTokenStream.prototype.unescape = function (v) {\n var index = v.indexOf('\\\\');\n if (index < 0) {\n return v;\n }\n\n var buffer = v.substring(0, index);\n while (index >= 0) {\n var c = v.charAt(++index);\n switch (c) {\n case '\\'':\n buffer += '\\'';\n break;\n case '\"':\n buffer += '\"';\n break;\n case '\\\\':\n buffer += '\\\\';\n break;\n case '/':\n buffer += '/';\n break;\n case 'b':\n buffer += '\\b';\n break;\n case 'f':\n buffer += '\\f';\n break;\n case 'n':\n buffer += '\\n';\n break;\n case 'r':\n buffer += '\\r';\n break;\n case 't':\n buffer += '\\t';\n break;\n case 'u':\n // interpret the following 4 characters as the hex of the unicode code point\n var codePoint = v.substring(index + 1, index + 5);\n if (!codePointPattern.test(codePoint)) {\n this.parseError('Illegal escape sequence: \\\\u' + codePoint);\n }\n buffer += String.fromCharCode(parseInt(codePoint, 16));\n index += 4;\n break;\n default:\n throw this.parseError('Illegal escape sequence: \"\\\\' + c + '\"');\n }\n ++index;\n var backslash = v.indexOf('\\\\', index);\n buffer += v.substring(index, backslash < 0 ? 
v.length : backslash);\n index = backslash;\n }\n\n return buffer;\n};\n\nTokenStream.prototype.isComment = function () {\n var char = this.expression.charAt(this.pos);\n if (char === '/' && this.expression.charAt(this.pos + 1) === '*') {\n var startPos = this.pos;\n this.pos = this.expression.indexOf('*/', this.pos) + 2;\n if (this.pos === 1) {\n this.pos = this.expression.length;\n }\n var comment = this.expression.substring(startPos, this.pos);\n var newline = comment.indexOf('\\n');\n while (newline >= 0) {\n this.line++;\n this.column = comment.length - newline;\n newline = comment.indexOf('\\n', newline + 1);\n }\n return true;\n }\n return false;\n};\n\nTokenStream.prototype.isNumber = function () {\n var valid = false;\n var pos = this.pos;\n var startPos = pos;\n var resetPos = pos;\n var column = this.column;\n var resetColumn = column;\n var foundDot = false;\n var foundDigits = false;\n var char;\n\n while (pos < this.expression.length) {\n char = this.expression.charAt(pos);\n if ((char >= '0' && char <= '9') || (!foundDot && char === '.')) {\n if (char === '.') {\n foundDot = true;\n } else {\n foundDigits = true;\n }\n pos++;\n column++;\n valid = foundDigits;\n } else {\n break;\n }\n }\n\n if (valid) {\n resetPos = pos;\n resetColumn = column;\n }\n\n if (char === 'e' || char === 'E') {\n pos++;\n column++;\n var acceptSign = true;\n var validExponent = false;\n while (pos < this.expression.length) {\n char = this.expression.charAt(pos);\n if (acceptSign && (char === '+' || char === '-')) {\n acceptSign = false;\n } else if (char >= '0' && char <= '9') {\n validExponent = true;\n acceptSign = false;\n } else {\n break;\n }\n pos++;\n column++;\n }\n\n if (!validExponent) {\n pos = resetPos;\n column = resetColumn;\n }\n }\n\n if (valid) {\n this.current = this.newToken(TNUMBER, parseFloat(this.expression.substring(startPos, pos)));\n this.pos = pos;\n this.column = column;\n } else {\n this.pos = resetPos;\n this.column = resetColumn;\n }\n return valid;\n};\n\nTokenStream.prototype.isOperator = function () {\n var char = this.expression.charAt(this.pos);\n\n if (char === '+' || char === '-' || char === '*' || char === '/' || char === '%' || char === '^' || char === '?' 
|| char === ':' || char === '.') {\n this.current = this.newToken(TOP, char);\n } else if (char === '∙' || char === '•') {\n this.current = this.newToken(TOP, '*');\n } else if (char === '>') {\n if (this.expression.charAt(this.pos + 1) === '=') {\n this.current = this.newToken(TOP, '>=');\n this.pos++;\n this.column++;\n } else {\n this.current = this.newToken(TOP, '>');\n }\n } else if (char === '<') {\n if (this.expression.charAt(this.pos + 1) === '=') {\n this.current = this.newToken(TOP, '<=');\n this.pos++;\n this.column++;\n } else {\n this.current = this.newToken(TOP, '<');\n }\n } else if (char === '|') {\n if (this.expression.charAt(this.pos + 1) === '|') {\n this.current = this.newToken(TOP, '||');\n this.pos++;\n this.column++;\n } else {\n return false;\n }\n } else if (char === '=') {\n if (this.expression.charAt(this.pos + 1) === '=') {\n this.current = this.newToken(TOP, '==');\n this.pos++;\n this.column++;\n } else {\n return false;\n }\n } else if (char === '!') {\n if (this.expression.charAt(this.pos + 1) === '=') {\n this.current = this.newToken(TOP, '!=');\n this.pos++;\n this.column++;\n } else {\n this.current = this.newToken(TOP, char);\n }\n } else {\n return false;\n }\n this.pos++;\n this.column++;\n return true;\n};\n\nTokenStream.prototype.parseError = function (msg) {\n throw new Error('parse error [' + (this.line + 1) + ':' + (this.column + 1) + ']: ' + msg);\n};\n\nfunction unaryInstruction(value) {\n return new Instruction(IOP1, value);\n}\n\nfunction binaryInstruction(value) {\n return new Instruction(IOP2, value);\n}\n\nfunction ternaryInstruction(value) {\n return new Instruction(IOP3, value);\n}\n\nfunction ParserState(parser, tokenStream) {\n this.parser = parser;\n this.tokens = tokenStream;\n this.current = null;\n this.nextToken = null;\n this.next();\n this.savedCurrent = null;\n this.savedNextToken = null;\n}\n\nParserState.prototype.next = function () {\n this.current = this.nextToken;\n return (this.nextToken = this.tokens.next());\n};\n\nParserState.prototype.tokenMatches = function (token, value) {\n if (typeof value === 'undefined') {\n return true;\n } else if (Array.isArray(value)) {\n return indexOf(value, token.value) >= 0;\n } else if (typeof value === 'function') {\n return value(token);\n } else {\n return token.value === value;\n }\n};\n\nParserState.prototype.save = function () {\n this.savedCurrent = this.current;\n this.savedNextToken = this.nextToken;\n this.tokens.save();\n};\n\nParserState.prototype.restore = function () {\n this.tokens.restore();\n this.current = this.savedCurrent;\n this.nextToken = this.savedNextToken;\n};\n\nParserState.prototype.accept = function (type, value) {\n if (this.nextToken.type === type && this.tokenMatches(this.nextToken, value)) {\n this.next();\n return true;\n }\n return false;\n};\n\nParserState.prototype.expect = function (type, value) {\n if (!this.accept(type, value)) {\n throw new Error('parse error [' + this.tokens.line + ':' + this.tokens.column + ']: Expected ' + (value || type));\n }\n};\n\nParserState.prototype.parseAtom = function (instr) {\n if (this.accept(TNAME)) {\n instr.push(new Instruction(IVAR, this.current.value));\n } else if (this.accept(TNUMBER)) {\n instr.push(new Instruction(INUMBER, this.current.value));\n } else if (this.accept(TSTRING)) {\n instr.push(new Instruction(INUMBER, this.current.value));\n } else if (this.accept(TPAREN, '(')) {\n this.parseExpression(instr);\n this.expect(TPAREN, ')');\n } else {\n throw new Error('unexpected ' + this.nextToken);\n 
}\n};\n\nParserState.prototype.parseExpression = function (instr) {\n this.parseConditionalExpression(instr);\n};\n\nParserState.prototype.parseConditionalExpression = function (instr) {\n this.parseOrExpression(instr);\n while (this.accept(TOP, '?')) {\n var trueBranch = [];\n var falseBranch = [];\n this.parseConditionalExpression(trueBranch);\n this.expect(TOP, ':');\n this.parseConditionalExpression(falseBranch);\n instr.push(new Instruction(IEXPR, trueBranch));\n instr.push(new Instruction(IEXPR, falseBranch));\n instr.push(ternaryInstruction('?'));\n }\n};\n\nParserState.prototype.parseOrExpression = function (instr) {\n this.parseAndExpression(instr);\n while (this.accept(TOP, 'or')) {\n this.parseAndExpression(instr);\n instr.push(binaryInstruction('or'));\n }\n};\n\nParserState.prototype.parseAndExpression = function (instr) {\n this.parseComparison(instr);\n while (this.accept(TOP, 'and')) {\n this.parseComparison(instr);\n instr.push(binaryInstruction('and'));\n }\n};\n\nParserState.prototype.parseComparison = function (instr) {\n this.parseAddSub(instr);\n while (this.accept(TOP, ['==', '!=', '<', '<=', '>=', '>'])) {\n var op = this.current;\n this.parseAddSub(instr);\n instr.push(binaryInstruction(op.value));\n }\n};\n\nParserState.prototype.parseAddSub = function (instr) {\n this.parseTerm(instr);\n while (this.accept(TOP, ['+', '-', '||'])) {\n var op = this.current;\n this.parseTerm(instr);\n instr.push(binaryInstruction(op.value));\n }\n};\n\nParserState.prototype.parseTerm = function (instr) {\n this.parseFactor(instr);\n while (this.accept(TOP, ['*', '/', '%'])) {\n var op = this.current;\n this.parseFactor(instr);\n instr.push(binaryInstruction(op.value));\n }\n};\n\nParserState.prototype.parseFactor = function (instr) {\n var unaryOps = this.tokens.unaryOps;\n function isPrefixOperator(token) {\n return token.value in unaryOps;\n }\n\n this.save();\n if (this.accept(TOP, isPrefixOperator)) {\n if ((this.current.value !== '-' && this.current.value !== '+' && this.nextToken.type === TPAREN && this.nextToken.value === '(')) {\n this.restore();\n this.parseExponential(instr);\n } else {\n var op = this.current;\n this.parseFactor(instr);\n instr.push(unaryInstruction(op.value));\n }\n } else {\n this.parseExponential(instr);\n }\n};\n\nParserState.prototype.parseExponential = function (instr) {\n this.parsePostfixExpression(instr);\n while (this.accept(TOP, '^')) {\n this.parseFactor(instr);\n instr.push(binaryInstruction('^'));\n }\n};\n\nParserState.prototype.parsePostfixExpression = function (instr) {\n this.parseFunctionCall(instr);\n while (this.accept(TOP, '!')) {\n instr.push(unaryInstruction('!'));\n }\n};\n\nParserState.prototype.parseFunctionCall = function (instr) {\n var unaryOps = this.tokens.unaryOps;\n function isPrefixOperator(token) {\n return token.value in unaryOps;\n }\n\n if (this.accept(TOP, isPrefixOperator)) {\n var op = this.current;\n this.parseAtom(instr);\n instr.push(unaryInstruction(op.value));\n } else {\n this.parseMemberExpression(instr);\n while (this.accept(TPAREN, '(')) {\n if (this.accept(TPAREN, ')')) {\n instr.push(new Instruction(IFUNCALL, 0));\n } else {\n var argCount = this.parseArgumentList(instr);\n instr.push(new Instruction(IFUNCALL, argCount));\n }\n }\n }\n};\n\nParserState.prototype.parseArgumentList = function (instr) {\n var argCount = 0;\n\n while (!this.accept(TPAREN, ')')) {\n this.parseExpression(instr);\n ++argCount;\n while (this.accept(TCOMMA)) {\n this.parseExpression(instr);\n ++argCount;\n }\n }\n\n return 
argCount;\n};\n\nParserState.prototype.parseMemberExpression = function (instr) {\n this.parseAtom(instr);\n while (this.accept(TOP, '.')) {\n this.expect(TNAME);\n instr.push(new Instruction(IMEMBER, this.current.value));\n }\n};\n\nfunction Parser() {\n this.unaryOps = {\n sin: Math.sin,\n cos: Math.cos,\n tan: Math.tan,\n asin: Math.asin,\n acos: Math.acos,\n atan: Math.atan,\n sinh: Math.sinh || sinh,\n cosh: Math.cosh || cosh,\n tanh: Math.tanh || tanh,\n asinh: Math.asinh || asinh,\n acosh: Math.acosh || acosh,\n atanh: Math.atanh || atanh,\n sqrt: Math.sqrt,\n log: Math.log,\n ln: Math.log,\n lg: Math.log10 || log10,\n log10: Math.log10 || log10,\n abs: Math.abs,\n ceil: Math.ceil,\n floor: Math.floor,\n round: Math.round,\n trunc: Math.trunc || trunc,\n '-': neg,\n '+': Number,\n exp: Math.exp,\n not: not,\n length: stringLength,\n '!': factorial\n };\n\n this.binaryOps = {\n '+': add,\n '-': sub,\n '*': mul,\n '/': div,\n '%': mod,\n '^': Math.pow,\n '||': concat,\n '==': equal,\n '!=': notEqual,\n '>': greaterThan,\n '<': lessThan,\n '>=': greaterThanEqual,\n '<=': lessThanEqual,\n and: andOperator,\n or: orOperator\n };\n\n this.ternaryOps = {\n '?': condition\n };\n\n this.functions = {\n random: random,\n fac: factorial,\n min: Math.min,\n max: Math.max,\n hypot: Math.hypot || hypot,\n pyt: Math.hypot || hypot, // backward compat\n pow: Math.pow,\n atan2: Math.atan2,\n 'if': condition,\n gamma: gamma\n };\n\n this.consts = {\n E: Math.E,\n PI: Math.PI,\n 'true': true,\n 'false': false\n };\n}\n\nParser.parse = function (expr) {\n return new Parser().parse(expr);\n};\n\nParser.evaluate = function (expr, variables) {\n return Parser.parse(expr).evaluate(variables);\n};\n\nParser.prototype = {\n parse: function (expr) {\n var instr = [];\n var parserState = new ParserState(this, new TokenStream(expr, this.unaryOps, this.binaryOps, this.ternaryOps, this.consts));\n parserState.parseExpression(instr);\n parserState.expect(TEOF, 'EOF');\n\n return new Expression(instr, this);\n },\n\n evaluate: function (expr, variables) {\n return this.parse(expr).evaluate(variables);\n }\n};\n\nvar parser = {\n Parser: Parser,\n Expression: Expression\n};\n\nreturn parser;\n\n})));\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\n\nimport Parser from 'expr-eval'\nimport { Image2D } from '../core/Image2D.js';\nimport { ImageToImageFilter } from '../core/ImageToImageFilter.js';\n\n\n/**\n* An instance of ImageBlendExpressionFilter takes Image2D inputs, as many as\n* we need as long as they have the same size and the same number of components\n* per pixel.\n* This filter blends images pixel values using a literal expression. 
This expression\n* should be set using `setMetadata( \"expresssion\", \"A * B\" )` , where `A` and `B`\n* are the categories set in input.\n*\n* Using a blending expression is the aesiest way to test a blending but it is a\n* pretty slow process since the expresion has to be evaluated for every process.\n* To speed-up your process, it is recomended to develop a new filter that does\n* exactly (and only) the blending method you want.\n*\n* **usage** \n* - [examples/imageBlending.html](../examples/imageBlending.html)\n* - [examples/imageBlending2.html](../examples/imageBlending2.html)\n* - [examples/forEachPixelGradientBlend.html](../examples/forEachPixelGradientBlend.html)\n*\n*/\nclass ImageBlendExpressionFilter extends ImageToImageFilter {\n\n constructor(){\n super();\n }\n\n\n\n _run(){\n\n // the metadata was not set\n if(!this.hasMetadata(\"expression\")){\n console.warn(\"A filter of type ImageBlendExpressionFilter requires a blending expression.\\nUse 'setMetadata(\\\"expression\\\", \\\"...\\\")' to set it.\" );\n return;\n }\n\n if( !this.hasSameNcppInput() || !this.hasSameSizeInput() ){\n return;\n }\n\n if(!this.getNumberOfInputs()){\n console.warn(\"A filter of type ImageBlendExpressionFilter requires at least one input.\");\n return;\n }\n\n var inputCategories = this.getInputCategories();\n var firstInput = this._getInput( inputCategories[0] );\n var outputBuffer = firstInput.getDataCopy();\n var parser = new Parser.Parser();\n var expr = parser.parse( this.getMetadata(\"expression\") );\n\n for(var i=0; i N\n [-1 , 0], // [1] => W\n [ 0 , 1], // [2] => S\n [ 1 , 0] // [3] => E\n ];\n \n this._directionListConnexity8 = [\n [ 0 , -1], // [0] => N\n [-1 , -1], // [1] => NW\n [-1 , 0], // [2] => W\n [-1 , 1], // [3] => SW\n [ 0 , 1], // [4] => S\n [ 1 , 1], // [5] => SE\n [ 1 , 0], // [6] => E\n [ 1 , -1] // [7] => NE\n ];\n \n }\n \n \n _run(){\n // the input checking\n if( ! 
this.hasValidInput()){\n console.warn(\"A filter of type AngleToHueWheelHelper requires 1 input of category '0'.\");\n return;\n }\n \n var imageIn = this._getInput(0);\n var ncpp = imageIn.getNcpp();\n var width = imageIn.getWidth();\n var height = imageIn.getHeight();\n var directionList = null\n \n if( this.getMetadata(\"connexity\") == 8){\n directionList = this._directionListConnexity8;\n }else{\n directionList = this._directionListConnexity4;\n }\n \n // handy color comparison\n function isSameColor(c1, c2){\n if(c1.length != c2.length)\n return false;\n \n for(var i=0; i=width || newSeed[1]>= height){\n console.warn(\"The seed is out of image range.\");\n return;\n }\n \n var clusterColor = imageIn.getPixel( {x: newSeed[0], y: newSeed[1]} );\n var newColor = clusterColor;\n var atNorth = newSeed.slice();\n \n \n var canStartFromOriginalSeed = false;\n \n \n // test the local surrounding and avoid going North\n for(var i=0; i= width || potentialPosition[1] >= height)\n {\n return 2;\n }\n \n var potentialPositionColor = imageIn.getPixel( {x: potentialPosition[0], y: potentialPosition[1]} );\n \n // test if the new direction goes with the same color\n if( isSameColor(potentialPositionColor, clusterColor) ){\n \n if( potentialPosition[0]==listOfValidPoints[0] && // the point just found is the\n potentialPosition[1]==listOfValidPoints[1] ) // same as the very first\n {\n return 0; // break the loop\n }else{\n // we validate the point and keep moving\n movingPoint[0] = potentialPosition[0];\n movingPoint[1] = potentialPosition[1];\n listOfValidPoints.push( movingPoint[0] );\n listOfValidPoints.push( movingPoint[1] );\n }\n return 1; // continue the loop\n }\n return 2; // try directions\n }\n \n \n // start the real navigation, starting from movingPoint\n main_loop:\n while( true ){\n \n // go the previous direction on the list\n direction -= directionIncrement;\n if(direction<0)\n direction = directionList.length - directionIncrement;\n \n var score = tryPotientialPosition();\n \n if( score == 0){ \n break main_loop;\n }else if(score == 1){\n continue;\n }else{ // score == 2\n \n var nbTrials = 0;\n \n // we try the other directions\n direction_loop:\n for(var i=0; i N\n [-1 , 0], // [1] => W\n [ 0 , 1], // [2] => S\n [ 1 , 0] // [3] => E\n ];\n \n this._directionListConnexity8 = [\n [ 0 , -1], // [0] => N\n [-1 , -1], // [1] => NW\n [-1 , 0], // [2] => W\n [-1 , 1], // [3] => SW\n [ 0 , 1], // [4] => S\n [ 1 , 1], // [5] => SE\n [ 1 , 0], // [6] => E\n [ 1 , -1] // [7] => NE\n ];\n \n }\n \n \n _run(){\n \n // the input checking\n if( ! this.hasValidInput()){\n console.warn(\"A filter of type FloodFillImageFilter requires 1 input of category '0'.\");\n return;\n }\n \n var imageIn = this._getInput(0);\n var ncpp = imageIn.getNcpp();\n var width = imageIn.getWidth();\n var height = imageIn.getHeight();\n var directionList = null;\n \n if( this.getMetadata(\"connexity\") == 8){\n directionList = this._directionListConnexity8;\n }else{\n directionList = this._directionListConnexity4;\n }\n \n var replacementColor = new Array(ncpp); // red\n replacementColor[0] = 255;\n \n var paintColor = this.getMetadata(\"color\") || replacementColor;\n \n // checking color validity\n if(paintColor.length != ncpp){\n if(!(paintColor.length == 3 && ncpp ==4)){\n console.warn(\"The color to fill must have the same number of components as the input image. 
(RGB color for RGBA image is accepted)\");\n return;\n }\n }\n \n \n \n // to mark the place we've been in the filling\n var markerImage = new Image2D({width: width, height: height, color: [0]});\n var seed = this.getMetadata(\"seed\");\n var seedColor = imageIn.getPixel({x: seed[0], y: seed[1]});\n var tolerance = this.getMetadata(\"tolerance\");\n var onlyHits = this.getMetadata(\"onlyHits\");\n \n var imageOut = null;\n if(!onlyHits){\n imageOut = imageIn.clone();\n }\n \n \n // the points in this list are points at the edge, except the edge of the image\n var edgePointList = [];\n \n var pixelStack = [];\n pixelStack.push( seed );\n \n while(pixelStack.length > 0){\n \n var currentPixel = pixelStack.pop();\n var x = currentPixel[0];\n var y = currentPixel[1];\n \n if(x<0 || x>=width || y<0 || y>=height){\n continue;\n }\n \n // if the image was not filled here...\n if(markerImage.getPixel({x: x, y: y})[0] == 0){\n \n // mark as visited\n markerImage.setPixel({x: x, y: y}, [1]);\n \n // paint the image\n if(!onlyHits){\n imageOut.setPixel({x: x, y: y}, paintColor);\n }\n \n // check neighbours upon connexity degree\n var potentialPosition = [0, 0];\n var isOnEdge = false;\n \n for(var i=0; i=width || \n potentialPosition[1]<0 || potentialPosition[1]>=height ) \n { \n continue;\n }\n \n var targetColor = imageIn.getPixel({x:potentialPosition[0], y: potentialPosition[1] });\n \n var isWithinTolerance = true;\n for(var c=0; c tolerance ){\n isWithinTolerance = false;\n isOnEdge = true;\n break;\n }\n } /* END for loop color channels */\n \n if(isWithinTolerance ){\n var newCandidate = [potentialPosition[0], potentialPosition[1]];\n pixelStack.push( newCandidate );\n }\n \n } /* END for loop direction*/\n \n if(isOnEdge){\n if(x!=0 && x!=(width-1) && y!=0 && y!=(height-1)){ // we dont want the edge of the image\n edgePointList.push( currentPixel );\n }\n }\n \n } /* END if image was not filled at this position */\n \n } /* END while loop unstacking the points */\n \n if(!onlyHits){\n this._output[0] = imageOut;\n }\n \n this._output[\"edgePoints\"] = edgePointList;\n \n } /* END of _run() */\n \n \n \n \n} /* END of class FloodFillImageFilter */\n\nexport { FloodFillImageFilter }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\n\nimport { Filter } from '../core/Filter.js';\nimport { Image2D } from '../core/Image2D.js';\nimport { LineString } from '../core/LineString.js';\nimport { ContourImage2DFilter } from './ContourImage2DFilter.js';\nimport { FloodFillImageFilter } from './FloodFillImageFilter.js';\n\n\n/**\n*\n*/\nclass ContourHolesImage2DFilter extends Filter {\n \n constructor() {\n super();\n this.addInputValidator(0, Image2D);\n this.setMetadata(\"connexity\", 4);\n this.setMetadata(\"seed\", [0, 0]);\n }\n \n \n _run(){\n \n // the input checking\n if( ! 
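/*
* Minimal standalone sketch of the stack-based (non-recursive) flood fill that
* FloodFillImageFilter above implements, here on a single-component image
* stored as a flat array. The names (data, width, height, seed, tolerance) are
* assumptions for the example only, not part of the pixpipe API.
*/
function floodFill( data, width, height, seedX, seedY, fillValue, tolerance ){
  var seedValue = data[ seedY * width + seedX ];
  var visited = new Uint8Array( width * height );  // plays the role of markerImage
  var stack = [ [seedX, seedY] ];
  var out = data.slice();

  while( stack.length > 0 ){
    var p = stack.pop();
    var x = p[0], y = p[1];

    // out of range: skip
    if( x < 0 || x >= width || y < 0 || y >= height ) continue;

    var index = y * width + x;
    if( visited[ index ] ) continue;               // already processed

    // only accept pixels whose value is within `tolerance` of the seed value
    if( Math.abs( data[ index ] - seedValue ) > tolerance ) continue;

    visited[ index ] = 1;
    out[ index ] = fillValue;

    // 4-connexity: push the N, W, S and E neighbours
    stack.push( [x, y - 1], [x - 1, y], [x, y + 1], [x + 1, y] );
  }
  return out;
}
// e.g. floodFill(gray, w, h, 10, 10, 255, 0) paints the connected region containing (10, 10)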
this.hasValidInput()){\n console.warn(\"A filter of type ContourHolesImage2DFilter requires 1 input of category '0'.\");\n return;\n }\n \n var imageIn = this._getInput(0);\n var ncpp = imageIn.getNcpp();\n var width = imageIn.getWidth();\n var height = imageIn.getHeight();\n var directionList = null;\n var contours = [];\n \n var connexity = this.getMetadata(\"connexity\");\n var seed = this.getMetadata(\"seed\");\n \n // finding the 1st contour\n var contourDetector = new ContourImage2DFilter();\n contourDetector.addInput( imageIn );\n contourDetector.setMetadata(\"connexity\", connexity);\n contourDetector.setMetadata(\"seed\", seed);\n contourDetector.update();\n \n contours.push( contourDetector.getOutput() );\n \n // From the same seed, flood fill - we dont care about the filled image, but\n // we want the hit points from it\n var filler = new FloodFillImageFilter();\n filler.addInput( imageIn );\n filler.setMetadata('onlyHits', false); // if we are not interested in the image but just want the hit points, this must be true.\n filler.setMetadata(\"connexity\", 4); // could be 4\n filler.setMetadata(\"tolerance\", 0); // in pixel value, applied to each component\n filler.setMetadata(\"seed\", seed);\n filler.update();\n \n var fillingEdgePoints = filler.getOutput(\"edgePoints\");\n \n var flyContourDetector = new ContourImage2DFilter(); // will be reused several times\n flyContourDetector.setMetadata(\"time\", false); // prevent every little contour finding to print their time\n \n // for each point found while filling, we check if already in one of the contours.\n // if not already, we launch a new contour extraction from this point (as a seed)\n // and add a new contour.\n for(var i=0; i xmax) xmax = vertices[i][0];\n if(vertices[i][1] < ymin) ymin = vertices[i][1];\n if(vertices[i][1] > ymax) ymax = vertices[i][1];\n }\n\n dx = xmax - xmin;\n dy = ymax - ymin;\n dmax = Math.max(dx, dy);\n xmid = xmin + dx * 0.5;\n ymid = ymin + dy * 0.5;\n\n return [\n [xmid - 20 * dmax, ymid - dmax],\n [xmid , ymid + 20 * dmax],\n [xmid + 20 * dmax, ymid - dmax]\n ];\n }\n\n function circumcircle(vertices, i, j, k) {\n var x1 = vertices[i][0],\n y1 = vertices[i][1],\n x2 = vertices[j][0],\n y2 = vertices[j][1],\n x3 = vertices[k][0],\n y3 = vertices[k][1],\n fabsy1y2 = Math.abs(y1 - y2),\n fabsy2y3 = Math.abs(y2 - y3),\n xc, yc, m1, m2, mx1, mx2, my1, my2, dx, dy;\n\n /* Check for coincident points */\n if(fabsy1y2 < EPSILON && fabsy2y3 < EPSILON)\n throw new Error(\"Eek! 
Coincident points!\");\n\n if(fabsy1y2 < EPSILON) {\n m2 = -((x3 - x2) / (y3 - y2));\n mx2 = (x2 + x3) / 2.0;\n my2 = (y2 + y3) / 2.0;\n xc = (x2 + x1) / 2.0;\n yc = m2 * (xc - mx2) + my2;\n }\n\n else if(fabsy2y3 < EPSILON) {\n m1 = -((x2 - x1) / (y2 - y1));\n mx1 = (x1 + x2) / 2.0;\n my1 = (y1 + y2) / 2.0;\n xc = (x3 + x2) / 2.0;\n yc = m1 * (xc - mx1) + my1;\n }\n\n else {\n m1 = -((x2 - x1) / (y2 - y1));\n m2 = -((x3 - x2) / (y3 - y2));\n mx1 = (x1 + x2) / 2.0;\n mx2 = (x2 + x3) / 2.0;\n my1 = (y1 + y2) / 2.0;\n my2 = (y2 + y3) / 2.0;\n xc = (m1 * mx1 - m2 * mx2 + my2 - my1) / (m1 - m2);\n yc = (fabsy1y2 > fabsy2y3) ?\n m1 * (xc - mx1) + my1 :\n m2 * (xc - mx2) + my2;\n }\n\n dx = x2 - xc;\n dy = y2 - yc;\n return {i: i, j: j, k: k, x: xc, y: yc, r: dx * dx + dy * dy};\n }\n\n function dedup(edges) {\n var i, j, a, b, m, n;\n\n for(j = edges.length; j; ) {\n b = edges[--j];\n a = edges[--j];\n\n for(i = j; i; ) {\n n = edges[--i];\n m = edges[--i];\n\n if((a === m && b === n) || (a === n && b === m)) {\n edges.splice(j, 2);\n edges.splice(i, 2);\n break;\n }\n }\n }\n }\n\n Delaunay = {\n triangulate: function(vertices, key) {\n var n = vertices.length,\n i, j, indices, st, open, closed, edges, dx, dy, a, b, c;\n\n /* Bail if there aren't enough vertices to form any triangles. */\n if(n < 3)\n return [];\n\n /* Slice out the actual vertices from the passed objects. (Duplicate the\n * array even if we don't, though, since we need to make a supertriangle\n * later on!) */\n vertices = vertices.slice(0);\n\n if(key)\n for(i = n; i--; )\n vertices[i] = vertices[i][key];\n\n /* Make an array of indices into the vertex array, sorted by the\n * vertices' x-position. Force stable sorting by comparing indices if\n * the x-positions are equal. */\n indices = new Array(n);\n\n for(i = n; i--; )\n indices[i] = i;\n\n indices.sort(function(i, j) {\n var diff = vertices[j][0] - vertices[i][0];\n return diff !== 0 ? diff : i - j;\n });\n\n /* Next, find the vertices of the supertriangle (which contains all other\n * triangles), and append them onto the end of a (copy of) the vertex\n * array. */\n st = supertriangle(vertices);\n vertices.push(st[0], st[1], st[2]);\n \n /* Initialize the open list (containing the supertriangle and nothing\n * else) and the closed list (which is empty since we havn't processed\n * any triangles yet). */\n open = [circumcircle(vertices, n + 0, n + 1, n + 2)];\n closed = [];\n edges = [];\n\n /* Incrementally add each vertex to the mesh. */\n for(i = indices.length; i--; edges.length = 0) {\n c = indices[i];\n\n /* For each open triangle, check to see if the current point is\n * inside it's circumcircle. If it is, remove the triangle and add\n * it's edges to an edge list. */\n for(j = open.length; j--; ) {\n /* If this point is to the right of this triangle's circumcircle,\n * then this triangle should never get checked again. Remove it\n * from the open list, add it to the closed list, and skip. */\n dx = vertices[c][0] - open[j].x;\n if(dx > 0.0 && dx * dx > open[j].r) {\n closed.push(open[j]);\n open.splice(j, 1);\n continue;\n }\n\n /* If we're outside the circumcircle, skip this triangle. */\n dy = vertices[c][1] - open[j].y;\n if(dx * dx + dy * dy - open[j].r > EPSILON)\n continue;\n\n /* Remove the triangle and add it's edges to the edge list. */\n edges.push(\n open[j].i, open[j].j,\n open[j].j, open[j].k,\n open[j].k, open[j].i\n );\n open.splice(j, 1);\n }\n\n /* Remove any doubled edges. */\n dedup(edges);\n\n /* Add a new triangle for each edge. 
*/\n for(j = edges.length; j; ) {\n b = edges[--j];\n a = edges[--j];\n open.push(circumcircle(vertices, a, b, c));\n }\n }\n\n /* Copy any remaining open triangles to the closed list, and then\n * remove any triangles that share a vertex with the supertriangle,\n * building a list of triplets that represent triangles. */\n for(i = open.length; i--; )\n closed.push(open[i]);\n open.length = 0;\n\n for(i = closed.length; i--; )\n if(closed[i].i < n && closed[i].j < n && closed[i].k < n)\n open.push(closed[i].i, closed[i].j, closed[i].k);\n\n /* Yay, we're done! */\n return open;\n },\n contains: function(tri, p) {\n /* Bounding box test first, for quick rejections. */\n if((p[0] < tri[0][0] && p[0] < tri[1][0] && p[0] < tri[2][0]) ||\n (p[0] > tri[0][0] && p[0] > tri[1][0] && p[0] > tri[2][0]) ||\n (p[1] < tri[0][1] && p[1] < tri[1][1] && p[1] < tri[2][1]) ||\n (p[1] > tri[0][1] && p[1] > tri[1][1] && p[1] > tri[2][1]))\n return null;\n\n var a = tri[1][0] - tri[0][0],\n b = tri[2][0] - tri[0][0],\n c = tri[1][1] - tri[0][1],\n d = tri[2][1] - tri[0][1],\n i = a * d - b * c;\n\n /* Degenerate tri. */\n if(i === 0.0)\n return null;\n\n var u = (d * (p[0] - tri[0][0]) - b * (p[1] - tri[0][1])) / i,\n v = (a * (p[1] - tri[0][1]) - c * (p[0] - tri[0][0])) / i;\n\n /* If we're outside the tri, fail. */\n if(u < 0.0 || v < 0.0 || (u + v) > 1.0)\n return null;\n\n return [u, v];\n }\n };\n\n if(typeof module !== \"undefined\")\n module.exports = Delaunay;\n})();\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n*\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\n\nimport { Filter } from '../core/Filter.js';\nimport { Image2D } from '../core/Image2D.js';\n\nimport Delaunay from 'delaunay-fast';\n\n/**\n* An instance of TriangulationSparseInterpolationImageFilter performs a triangulation\n* of an original dataset followed by a barycentric 2D interpolation. It is used to\n* perform a 2D linear interpolation of a sparse dataset.\n* The original dataset is specified using the method `.addInput( points )`, where\n* `points` is an `Array` of `{x: Number, y: Number, value: Number}`.\n* The triangulation is the result of a Delaunay triangulation.\n* This filter outputs an `Image2D` with interpolated values only within the boundaries\n* of the convex hull created by the triangulation. 
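/*
* Illustrative sketch of calling the Delaunay module above (delaunay-fast):
* triangulate() returns a flat array of vertex indices, three per triangle.
* The point set below is only an example.
*/
var points = [ [0, 0], [10, 0], [10, 10], [0, 10], [5, 5] ];
var tri = Delaunay.triangulate( points );
// tri holds indices into `points`, grouped by 3: one group per triangle
for( var t = 0; t < tri.length; t += 3 ){
  console.log( points[ tri[t] ], points[ tri[t + 1] ], points[ tri[t + 2] ] );
}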
The size of the output must be\n* specified using the method `.setMetadata( \"outputSize\", {width: Number, height: Number})`.\n*\n* Note 1: at least 3 unaligned points are required to perform a triangulation\n* Note 2: points can be outside the boundaries of the original image\n* Note 3: interpolated values are floating point\n*\n* Note that only single-component images are outputed from this filter.\n* \n* **Usage**\n* - [examples/TriangleSparseInterpolation.html](../examples/TriangleSparseInterpolation.html)\n*/ \n\nclass TriangulationSparseInterpolationImageFilter extends Filter {\n \n constructor(){\n super()\n this.setMetadata( \"outputSize\", {width: 0, height: 0})\n }\n \n _run(){\n \n var origPoints = null;\n \n // getting the input\n if( \"0\" in this._input ){\n origPoints = this._input[ 0 ];\n }else{\n console.warn(\"No input point set were given.\");\n return;\n }\n \n var outputSize = this.getMetadata( \"outputSize\" );\n \n // checking output size\n if( outputSize.width == 0 || outputSize.height == 0 ){\n console.warn(\"The output size cannot be 0.\");\n return;\n }\n \n // remapping the point as an array of ArrayBuffer\n var points = origPoints.map( function(p){\n return [p.x, p.y];\n })\n \n // computing the list of triangles\n var triangleVertices = Delaunay.triangulate( points );\n\n // rearranging the triangles in a propper array that group by 3 the index of vertices used\n var triangles = [];\n for(var i=0; i<=triangleVertices.length-3; i+=3){\n triangles.push( [\n triangleVertices[i],\n triangleVertices[i+1],\n triangleVertices[i+2],\n ] );\n }\n\n console.log( points );\n console.log( triangles );\n \n // return the area of a triangle using Heron's formula\n // Each point A, B and C is a couple of 2D coords like [Number, Number] \n function getTriangleArea(A, B, C){\n // manhattan distances\n var _AB = [ A[0] - B[0], A[1] - B[1]];\n var _BC = [ B[0] - C[0], B[1] - C[1]];\n var _CA = [ C[0] - A[0], C[1] - A[1]];\n \n // Euclidian distances - Pythagore\n var a = Math.sqrt( _BC[0]*_BC[0] + _BC[1]*_BC[1] );\n var b = Math.sqrt( _CA[0]*_CA[0] + _CA[1]*_CA[1] );\n var c = Math.sqrt( _AB[0]*_AB[0] + _AB[1]*_AB[1] );\n \n // semiperimeter\n var s = (a + b + c) / 2;\n \n var area = Math.sqrt( s*(s-a)*(s-b)*(s-c) );\n return area;\n }\n \n // creating the output image\n var out = new pixpipe.Image2D({width: Math.round(outputSize.width), height: Math.round(outputSize.height), color: [0]})\n \n // each line of the output image...\n for(var i=0; i= ncpp ){\n console.warn(\"The component to filter must be valid.\");\n return;\n }\n \n var imageIn = this._getInput();\n var width = imageIn.getWidth();\n var height = imageIn.getHeight();\n var ncpp = imageIn.getNcpp();\n \n var minAngle = this.getMetadata(\"minAngle\");\n var maxAngle = this.getMetadata(\"maxAngle\");\n \n if(minAngle === \"auto\" || maxAngle === \"auto\"){\n minAngle = imageIn.getMin();\n maxAngle = imageIn.getMax();\n }\n \n var imageOut = new Image2D( {width: width, height: height, color: [0, 0, 0, 255] } );\n var forEachPixelFilter = new pixpipe.ForEachPixelImageFilter();\n \n // add the input input\n forEachPixelFilter.addInput( imageOut );\n\n forEachPixelFilter.on( \"pixel\", function(position, color){\n var angle = imageIn.getPixel( position )[component];\n var angle360 = ( (angle - minAngle) / (maxAngle - minAngle) ) * 360;\n var colorRGB = that._hsl2Rgba( angle360, 100, 50 );\n return colorRGB;\n });\n \n // run the filter to create a gradient image\n forEachPixelFilter.update();\n \n if( 
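/*
* Sketch of the barycentric step behind the sparse-interpolation filter above,
* using the standard area-ratio formulation: the weight of each triangle
* vertex at a point P is the area of the sub-triangle formed by P and the two
* other vertices, divided by the area of the whole triangle, with areas from
* Heron's formula. Function and variable names are illustrative assumptions.
*/
function triangleArea( A, B, C ){
  var a = Math.hypot( B[0] - C[0], B[1] - C[1] );
  var b = Math.hypot( C[0] - A[0], C[1] - A[1] );
  var c = Math.hypot( A[0] - B[0], A[1] - B[1] );
  var s = (a + b + c) / 2;                       // semiperimeter
  return Math.sqrt( s * (s - a) * (s - b) * (s - c) );
}

function interpolateInTriangle( P, A, B, C, valueA, valueB, valueC ){
  var total = triangleArea( A, B, C );
  var wA = triangleArea( P, B, C ) / total;      // weight of vertex A
  var wB = triangleArea( P, A, C ) / total;      // weight of vertex B
  var wC = triangleArea( P, A, B ) / total;      // weight of vertex C
  return valueA * wA + valueB * wB + valueC * wC;
}
// for P inside the triangle, wA + wB + wC == 1 and the result is the linear interpolation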
forEachPixelFilter.getNumberOfOutputs() == 0 ){\n console.warn(\"No output of ForEachPixelImageFilter.\");\n return;\n }\n \n // mapping the output\n this._output[ 0 ] = forEachPixelFilter.getOutput();\n \n }\n \n \n /**\n * \n * A part of this code was borrowed from github.com/netbeast/colorsys and modified.\n */\n _hsl2Rgba( h, s=100, l=100 ){\n // pseudo constants\n var HUE_MAX = 360;\n var SV_MAX = 100;\n var RGB_MAX = 255;\n \n // ouputs\n var r, g, b\n\n h = (h === HUE_MAX) ? 1 : (h % HUE_MAX / HUE_MAX)\n s = (s === SV_MAX) ? 1 : (s % SV_MAX / SV_MAX)\n l = (l === SV_MAX) ? 1 : (l % SV_MAX / SV_MAX)\n\n if (s === 0) {\n r = g = b = l // achromatic\n } else {\n var hue2rgb = function hue2rgb (p, q, t) {\n if (t < 0) t += 1\n if (t > 1) t -= 1\n if (t < 1 / 6) return p + (q - p) * 6 * t\n if (t < 1 / 2) return q\n if (t < 2 / 3) return p + (q - p) * (2 / 3 - t) * 6\n return p\n }\n\n var q = l < 0.5 ? l * (1 + s) : l + s - l * s\n var p = 2 * l - q\n r = hue2rgb(p, q, h + 1 / 3)\n g = hue2rgb(p, q, h)\n b = hue2rgb(p, q, h - 1 / 3)\n }\n \n return [ Math.round(r * RGB_MAX), \n Math.round(g * RGB_MAX), \n Math.round(b * RGB_MAX),\n 255 ];\n }\n \n \n} /* END of class AngleToHueWheelFilter */\n\nexport { AngleToHueWheelHelper }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\n\nimport { Image2D } from '../core/Image2D.js';\nimport { LineString } from '../core/LineString.js';\nimport { ImageToImageFilter } from '../core/ImageToImageFilter.js';\n\n\n/**\n* A instance of LineStringPrinterOnImage2DHelper prints a list of LineStrings on\n* an Image2D. To add the Image2D input, use `.addInput(myImage2D)`.\n* To add a LineString, use `.addLineString(ls, c );` where `ls` is a LineString \n* instance and `c` is an Array representing a color (i.e. [255, 0, 0] for red).\n*\n* **Usage**\n* - [examples/contourImage2D.html](../examples/contourImage2D.html)\n*\n*/\nclass LineStringPrinterOnImage2DHelper extends ImageToImageFilter {\n \n constructor() {\n super();\n this.addInputValidator(0, Image2D);\n this.setMetadata(\"lineStrings\", []);\n this.setMetadata(\"lineStringsColors\", []);\n }\n \n \n /**\n * Add a LineString instance to be printed on the image\n * @param {LineString} ls - a linestring to add\n * @param {Array} color - of for [R, G, B] or [R, G, B, A] \n */\n addLineString(ls, color){\n this._metadata.lineStrings.push( ls ) ;\n this._metadata.lineStringsColors.push( color ) ;\n }\n \n \n _run(){\n \n // the input checking\n if( ! 
this.hasValidInput()){\n console.warn(\"A filter of type LineStringPrinterOnImage2DHelper requires 1 input of category '0'.\");\n return;\n }\n \n var imageIn = this._getInput();\n var imageOut = imageIn.clone();\n var width = imageIn.getWidth();\n var height = imageIn.getHeight();\n var ncpp = imageIn.getNcpp();\n \n var printed = false;\n \n var lineStrings = this._metadata.lineStrings;\n var colors = this._metadata.lineStringsColors;\n \n for(var i=0; i1 ){\n console.warn(\"Each colormap segment 'index' property should be in [0, 1]\");\n return false;\n }\n }else{\n console.warn(\"Each colormap segment 'index' property should be a number.\");\n return false;\n }\n\n // the rgb property has to be an array\n if( Array.isArray( d[i].rgb ) ){\n if(d[i].rgb.length == 3){\n for(var j=0; j 255 ){\n console.warn(\"The colormap must have only values in [0, 255]\");\n return false;\n }\n }\n }else{\n console.warn(\"Each colormap segment 'rgb' should contain 3 values\");\n return false;\n }\n }\n }else{\n console.warn(\"Each colormap segment must have a 'index' property and a 'rgb' property.\");\n return false;\n }\n }else{\n console.warn(\"Each colormap segment must be a non-null object\");\n return false;\n }\n }\n return true;\n }\n\n\n /**\n * Get the color at the colormap position\n * @param {Number} position - position within the colormap in [0, 1]\n * @return {Array} color array as [r, g, b] , values being in [0, 255]\n */\n getValueAt( position ){\n if( !this._colormapDescription ){\n console.warn(\"The colormap description is not defined.\");\n return null;\n }\n\n // case 1: before the first \"index\" position\n if(position <= this._colormapDescription[0].index){\n return this._colormapDescription[0].rgb.slice()\n }\n\n // case 2: after the last \"index\" position\n if(position >= this._colormapDescription[this._colormapDescription.length - 1].index){\n return this._colormapDescription[this._colormapDescription.length - 1].rgb.slice()\n }\n\n // case 3: between 2 values of the descrition (most likely to happen)\n for(var i=0; i= this._colormapDescription[i].index && position < this._colormapDescription[i+1].index ){\n\n var unitDistanceToFirst = (position - this._colormapDescription[i].index) / (this._colormapDescription[i+1].index - this._colormapDescription[i].index);\n var unitDistanceToSecond = 1 - unitDistanceToFirst;\n\n var color = [\n Math.round(this._colormapDescription[i].rgb[0] * unitDistanceToSecond + this._colormapDescription[i+1].rgb[0] * unitDistanceToFirst), // R\n Math.round(this._colormapDescription[i].rgb[1] * unitDistanceToSecond + this._colormapDescription[i+1].rgb[1] * unitDistanceToFirst), // G\n Math.round(this._colormapDescription[i].rgb[2] * unitDistanceToSecond + this._colormapDescription[i+1].rgb[2] * unitDistanceToFirst), // B\n ]\n\n return color;\n }\n }\n }\n\n\n /**\n * Build a LUT from the colormap description\n * @param {Number} size - number of samples in the LUT\n */\n buildLut( size ){\n if( !this._colormapDescription ){\n console.warn(\"The colormap description is not defined, the LUT cannot be created\");\n return null;\n }\n\n if( size < 0 ){\n console.warn(\"Size of the colormap can not be negative.\");\n return;\n }\n\n this._LUT = new Array( size );\n\n for(var i=0; i this._LUT.length )\n return null;\n\n return this._LUT[ index ];\n }\n\n\n /**\n * Creates a horizontal Image2D of the colormap. 
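/*
* Illustrative usage sketch of the LineStringPrinterOnImage2DHelper documented
* above. `myImage2D` is an assumed pixpipe.Image2D and `contour` an assumed
* LineString (for instance the output of a ContourImage2DFilter).
*/
var printer = new pixpipe.LineStringPrinterOnImage2DHelper();
printer.addInput( myImage2D );
printer.addLineString( contour, [255, 0, 0] );   // draw this linestring in red
printer.update();
var annotated = printer.getOutput();             // copy of the input with the contour drawn on it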
The height is 1px and\n * the width is the size of the LUT currently in use.\n * @param {Boolean} flip - flips the colormap image\n * @return {Image2D} the result image\n */\n createHorizontalLutImage( flip=false ){\n if(! this._LUT ){\n console.warn(\"The LUT must be built before creating a LUT image.\");\n return;\n }\n\n var LutSize = this._LUT.length;\n var colorStrip = new Image2D({width: LutSize, height: 1, color: [0, 0, 0]});\n\n for(var i=0; i1){\n console.warn(\"The color cannot be added because its index is out of range [0, 1]\");\n return false;\n }\n\n // checking if a color is already present at the given index\n var indexAlreadyPresent = this._colormapDescription.find(function(indexAndColor){\n return indexAndColor.index == index;\n })\n\n if( indexAlreadyPresent ){\n console.warn(\"A color is already present at index \" + index);\n return false;\n }\n\n if( rgb && Array.isArray(rgb) && rgb.length == 3){\n for(var i=0; i 255){\n console.warn(\"The rgb colors must be in [0, 255]\");\n return false;\n }\n }\n }else{\n console.warn(\"The color cannot be added because its rgb array is the wrong size.\");\n return false;\n }\n\n // data integrity is ok\n this._colormapDescription.push({\"index\":index,\"rgb\":rgb.slice()})\n this._colormapDescription.sort(function(a, b) {\n return a.index - b.index;\n });\n\n return true;\n }\n\n\n /**\n * Remove the color at the given index\n * @param {Number} index - the [0, 1] index of the color to remove\n * @return {Boolean} true if successfully remove, false if not\n */\n removeColor( index ){\n if( !this._colormapDescription ){\n console.warn(\"The colormap description is empty.\");\n return false;\n }\n\n var indexAlreadyIn = this._colormapDescription.findIndex(function(element){\n return (element.index == index);\n })\n\n if( indexAlreadyIn == -1 ){\n console.warn(\"Such index does not exist.\");\n return false;\n }\n\n this._colormapDescription.splice(indexAlreadyIn, 1);\n return true;\n }\n\n\n /**\n * Get a json version of the colormap description\n * @return {String} the json string\n */\n toJson(){\n return JSON.stringify(this._colormapDescription);\n }\n\n\n} /* END of class Colormap */\n\nexport { Colormap }\n","/*\n* Author Jonathan Lurie - http://me.jonahanlurie.fr\n* License MIT\n* Link https://github.com/jonathanlurie/pixpipejs\n* Lab MCIN - Montreal Neurological Institute\n*/\n\n\nimport { Filter } from '../core/Filter.js';\nimport { Image2D } from '../core/Image2D.js';\nimport { Image3D } from '../core/Image3D.js';\n\n\n/**\n* An instance of Image3DToMosaicFilter takes an Image3D as Input and output a\n* mosaic composed of each slice. 
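/*
* Standalone sketch of the piecewise-linear lookup performed by
* Colormap.getValueAt() above: between two neighbouring description entries
* the colour is a weighted average of their rgb values, the weight being the
* normalised distance of the query position to each index. Names are
* illustrative only.
*/
function colormapValueAt( description, position ){
  // description: array of {index: Number in [0, 1], rgb: [r, g, b]} sorted by index
  if( position <= description[0].index )
    return description[0].rgb.slice();
  var last = description[ description.length - 1 ];
  if( position >= last.index )
    return last.rgb.slice();

  for( var i = 0; i < description.length - 1; i++ ){
    var lo = description[i], hi = description[i + 1];
    if( position >= lo.index && position < hi.index ){
      var w = (position - lo.index) / (hi.index - lo.index);  // 0 at lo, 1 at hi
      return [
        Math.round( lo.rgb[0] * (1 - w) + hi.rgb[0] * w ),
        Math.round( lo.rgb[1] * (1 - w) + hi.rgb[1] * w ),
        Math.round( lo.rgb[2] * (1 - w) + hi.rgb[2] * w )
      ];
    }
  }
}
// e.g. colormapValueAt([{index: 0, rgb: [0, 0, 0]}, {index: 1, rgb: [255, 255, 255]}], 0.5) -> [128, 128, 128]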
The axis: \"xspace\", \"yspace\" or \"zspace\" can be\n* specified with `setMetadata(\"axis\", \"xspace\")`, the default being xspace.\n* The default output image is 4096x4096 but these boundaries can be changed using\n* `setMetadata(\"maxWidth\", n)` and `setMetadata(\"maxHeight\", m)`.\n* These are boundaries so the size of the output image(s) will possibly be lower\n* to not contain unused space.\n* If mosaicing the whole given Image3D does not fit in maxWidth*maxHeight, more\n* Image2D will be created and accessible through `getOutput(n)`.\n* All output image have the same size so that the last one may have dead space.\n* To know precisely the size of the output mosaic use `getMetadata(\"gridWidth\")`\n* and `getMetadata(\"gridHeight\")`, this will give the number of slices used in\n* horizontal and vertical respectively.\n*\n* **Usage**\n* - [examples/niftiToMosaic.html](../examples/niftiToMosaic.html)\n*/\nclass Image3DToMosaicFilter extends Filter{\n\n constructor(){\n super();\n this.addInputValidator(0, Image3D);\n\n // default settings\n this.setMetadata(\"maxWidth\", 4096);\n this.setMetadata(\"maxHeight\", 4096);\n this.setMetadata(\"axis\", \"xspace\");\n this.setMetadata(\"time\", 0);\n }\n\n\n _run(){\n if(! this.hasValidInput() ){\n return;\n }\n\n var inputImage3D = this._getInput(0);\n var spaceInfo = inputImage3D.getMetadata( this.getMetadata(\"axis\") );\n\n if(!spaceInfo){\n console.warn(\"Sampling axis for mosaicing was not poperly set. Has to be 'xspace', 'yspace' or 'zspace'.\");\n return;\n }\n\n var numOfSlices = spaceInfo.space_length;\n var width = spaceInfo.width;\n var height = spaceInfo.height;\n \n // dealing with time series\n var startTime = 0;\n var endTime = 1;\n \n if( inputImage3D.hasMetadata(\"time\") ){\n var timeInfo = inputImage3D.getMetadata(\"time\");\n var timeLength = timeInfo.space_length;\n \n if(this._metadata.time == -1 ){\n startTime = 0;\n endTime = timeLength;\n }else if( this._metadata.time < timeLength){\n startTime = this._metadata.time;\n endTime = startTime + 1;\n }\n }\n \n var numberOfSlicesWithTime = numOfSlices * (endTime-startTime);\n\n // number of image we can fit in the with and heigth of an output image\n var widthFit = Math.floor( this.getMetadata(\"maxWidth\") / width );\n var heightFit = Math.floor( this.getMetadata(\"maxHeight\") / height );\n\n // size of the ouput image(s)\n var outputWidth = widthFit * width;\n var outputHeight = heightFit * height;\n var slicePerOutputIm = widthFit * heightFit;\n\n // Number of output image(s) necessary to cover the whole Image3D dataset\n //var outputNecessary = Math.ceil( numOfSlices / slicePerOutputIm ); // does not work for time series\n var outputNecessary = Math.ceil( numberOfSlicesWithTime / slicePerOutputIm );\n\n // if only one output, maybe it's not filled entirely, so we can make it a bit smaller\n if( outputNecessary == 1){\n outputHeight = Math.ceil( numberOfSlicesWithTime / widthFit ) * height;\n }\n\n this.setMetadata(\"gridWidth\", outputWidth / width);\n this.setMetadata(\"gridHeight\", outputHeight / height);\n\n var outputCounter = 0;\n var sliceCounter = 0;\n var sliceIndexCurrentOutput = 0;\n\n var outImage = null;\n\n // the 3 following functions are a work around to fetch voxel along the right axis\n function fetchAlongXspace(i, j, sliceIndex, time){\n return inputImage3D.getIntensity_xyz(sliceIndex, i, j, time)\n }\n\n function fetchAlongYspace(i, j, sliceIndex, time){\n return inputImage3D.getIntensity_xyz(i, sliceIndex, j, time)\n }\n\n function 
fetchAlongZspace(i, j, sliceIndex, time){\n return inputImage3D.getIntensity_xyz(i, j, sliceIndex, time)\n }\n\n var fetchAlongAxis = null;\n\n if( this._metadata.axis === \"xspace\")\n fetchAlongAxis = fetchAlongXspace;\n else if( this._metadata.axis === \"yspace\")\n fetchAlongAxis = fetchAlongYspace;\n else if( this._metadata.axis === \"zspace\")\n fetchAlongAxis = fetchAlongZspace;\n \n if( !fetchAlongAxis ){\n console.warn(\"The axis to sample along for the mosaic was not properly set.\");\n return;\n }\n\n // to make it works no matter the ncpp\n var initPixel = new Array( inputImage3D.getMetadata(\"ncpp\") ).fill(0);\n \n for(var t=startTime; t 1 ){ - + // a-la-mano slicing argument array to comply with V8 JS engine optimization... var argToSend = []; for(var i=1; i
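/*
* Illustrative usage sketch of Image3DToMosaicFilter as documented above.
* `myImage3D` is an assumed pixpipe.Image3D (for example decoded from a NIfTI
* file); the metadata names come from the filter's own documentation. Each
* output image packs floor(maxWidth / sliceWidth) x floor(maxHeight / sliceHeight)
* slices, and extra outputs are created when the volume does not fit in one.
*/
var mosaicer = new pixpipe.Image3DToMosaicFilter();
mosaicer.addInput( myImage3D );
mosaicer.setMetadata( "axis", "zspace" );     // slice along z instead of the default xspace
mosaicer.setMetadata( "maxWidth", 2048 );     // upper bound on each output mosaic
mosaicer.setMetadata( "maxHeight", 2048 );
mosaicer.update();

for( var n = 0; n < mosaicer.getNumberOfOutputs(); n++ ){
  var mosaic = mosaicer.getOutput( n );       // one Image2D per mosaic sheet
  console.log( "grid:", mosaicer.getMetadata("gridWidth"), "x", mosaicer.getMetadata("gridHeight") );
}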