// ==UserScript==
// @name video-shot2emoji
// @namespace http://tampermonkey.net/
// @version 0.3
// @description Automatically convert screenshots of in-page videos into character (emoji) art
// @author asteryk
// @match https://*.bilibili.com/*
// @match https://*.youtube.com/*
// @grant none
// ==/UserScript==
(function(){function r(e,n,t){function o(i,f){if(!n[i]){if(!e[i]){var c="function"==typeof require&&require;if(!f&&c)return c(i,!0);if(u)return u(i,!0);var a=new Error("Cannot find module '"+i+"'");throw a.code="MODULE_NOT_FOUND",a}var p=n[i]={exports:{}};e[i][0].call(p.exports,function(r){var n=e[i][1][r];return o(n||r)},p,p.exports,r,e,n,t)}return n[i].exports}for(var u="function"==typeof require&&require,i=0;i<t.length;i++)o(t[i]);return o}return r})()({1:[function(require,module,exports){
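// Module 1: the userscript body. The remaining numbered modules below are the
// browserify-bundled dependencies: get-pixels (module 10) and its transitive
// dependencies such as base64-js, buffer, ieee754 and cwise.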
var getPixels = require("get-pixels");
// Reference palette:
// ⬛ rgb(0,0,0)
// ⬜ rgb(255,255,255)
// 🟥 rgb(232,0,0)
// 🟧 rgb(255,132,0)
// 🟨 rgb(255,194,0)
// 🟩 rgb(0,194,0)
// 🟦 rgb(0,100,251)
// 🟪 rgb(203,35,255)
// 🟫 rgb(128,64,21)
class Pixel2Emoji {
constructor(file){
this.colorMap = {
'⬛': [0, 0, 0],
'🟫': [128,64,21],
'🟩': [0,150,0],
'💠': [168,255,255],
'🟦': [0,100,251],
'🟥': [232,0,0],
'🟧': [255,132,0],
'🟨': [255,194,0],
'🟪': [203,35,255],
'⬜': [255,255,255]
}
this.file = file;
}
compareColor = (r, g, b) => {
// R*0.299 + G*0.587 + B*0.114
let min = Number.MAX_SAFE_INTEGER;
let result = '⬜';
// Weighted squared-channel distance using the luma coefficients above:
// abs(r1*r1 - r2*r2) * 0.299 + abs(g1*g1 - g2*g2) * 0.587 + abs(b1*b1 - b2*b2) * 0.114
Object.entries(this.colorMap).forEach(([emoji, rgb]) => {
const distance = Math.abs(r * r - rgb[0] * rgb[0]) * 0.299 +
Math.abs(g * g - rgb[1] * rgb[1]) * 0.587 +
Math.abs(b * b - rgb[2] * rgb[2]) * 0.114;
// console.log(distance);
if(distance < min){
min = distance;
result = emoji;
}
})
return result;
}
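// getEmoji: decode this.file (a data URI or image URL) with get-pixels into an
// ndarray of shape [width, height, channels], sample roughly a 10x10 grid of
// pixels, and map each sample to its closest emoji; rows are joined with '\n'.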
getEmoji = () => {
return new Promise((resolve, reject) => {
getPixels(this.file, (err, pixels) => {
if(!err) {
// console.log(pixels.shape)
let str = '';
for(let i = 0; i < pixels.shape[1]; i += Math.floor(pixels.shape[1] / 10)){
for(let j = 0; j < pixels.shape[0]; j += Math.floor(pixels.shape[0] / 10)){
const r = pixels.get(j,i,0);
const g = pixels.get(j,i,1);
const b = pixels.get(j,i,2);
str += this.compareColor(r, g, b);
}
str += '\n';
}
resolve(str);
} else {
reject(err);
}
});
})
}
}
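// Usage sketch (not executed here; assumes any image source that get-pixels can
// decode, e.g. a data URI or same-origin URL):
//   new Pixel2Emoji('data:image/png;base64,...').getEmoji()
//     .then(art => console.log(art))
//     .catch(err => console.error(err));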
const createFlowPic = async () => {
const videoEle = window.document.getElementsByTagName('video')[0];
let canvas = document.createElement('canvas');
canvas.width = 640;
canvas.height = 480;
var ctx = canvas.getContext('2d');
ctx.drawImage(videoEle, 0, 0, canvas.width, canvas.height);
const dataURI = canvas.toDataURL('image/jpeg'); // can also use 'image/png'
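// Note: toDataURL() throws a SecurityError when the drawn video is cross-origin
// without CORS (tainted canvas), in which case no emoji frame can be produced.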
const test = new Pixel2Emoji(dataURI);
canvas.remove();
return await test.getEmoji();
}
const video2shotinit = () => {
const createElementFromHTML = (htmlString) => {
let div = document.createElement('div');
div.innerHTML = htmlString.trim();
return div.firstChild;
}
const css1 = `background: #c0d4f3;
color: #2d7cf3;
overflow: hidden;
z-index: 998;
position: fixed;
padding: 5px;
text-align: left;
width: 200px;
height: 500px;
border-radius: 5px;
left: 2px;
top: 10%;`;
const div1Str = `<div class="videoshot2emoji" id="videoshot2emoji" style='${css1}'>
<div class="videoshot2emoji__head" style="
display: flex;
align-items: center;
margin-bottom: 10px;
">
<h2 style="font-size:14px; font-weight: bold;">视频转色块画</h2>
<div class="videoshot2emoji__btnlist">
<button id="videoshot2emoji__btnlist-start" style="
color: black !important;
padding: 2px;
width: 40px;
background: #fafafa;
border: 1px solid #ddd;
border-radius: 4px;
cursor: pointer;
">开始</button>
<button id="videoshot2emoji__btnlist-stop" style="
color: black !important;
padding: 2px;
width: 40px;
background: #fafafa;
border: 1px solid #ddd;
border-radius: 4px;
cursor: pointer;
">停止</button>
<a id="videoshot2emoji__btnlist-copy" href="javascript:void(0);" style="color:blue">Copy</a>
</div>
</div>
<div id="videoshot2emoji__body" style="
width: 100%;
height: calc(100% - 67px);
overflow-y: scroll;
overflow-x: scroll;
background: #ffffff;
border: 1px solid steelblue;
">
</div>
</div>`;
const div1 = createElementFromHTML(div1Str)
document.body.append(div1);
// drag & drop: let the panel be repositioned by dragging
const Box = div1;
Box.onmousedown = (ev) => {
let oLeft = Box.offsetLeft;
let oTop = Box.offsetTop;
let a = ev.clientX - oLeft;
let b = ev.clientY - oTop;
Box.onmousemove = (ev) => {
Box.style.left = ev.clientX - a + 'px';
Box.style.top = ev.clientY - b + 'px';
}
}
document.onmouseup = () => {
Box.onmousemove = () => {
return null
}
}
// create content
const listContainer = div1.querySelector('#videoshot2emoji__body');
const startBtn = div1.querySelector('#videoshot2emoji__btnlist-start');
const endBtn = div1.querySelector('#videoshot2emoji__btnlist-stop');
const copyBtn = div1.querySelector('#videoshot2emoji__btnlist-copy')
window.__ALL_VIDEO_2_SHOTS__ = [];
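// Global buffer of the rendered frames, kept in sync with the on-screen list and
// trimmed to the most recent entries; the Copy link joins it with blank lines.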
if(listContainer) {
let intervalHandler = null;
let intervalCount = 0;
let intervalArr = [];
startBtn.addEventListener('click', () => {
startBtn.innerText = '生成中';
startBtn.style.cursor = 'not-allowed';
if(intervalHandler) {
console.log('already running!');
return;
}
intervalHandler = setInterval(async () => {
const picText = await createFlowPic();
window.__ALL_VIDEO_2_SHOTS__.push(picText.replaceAll('\n', ''));
const htmlStr = `<div>${intervalCount}
<p
class="videoshot2emoji__content-single"
id="videoshot2emoji__content-${intervalCount}"
style="margin: 10px 0; background: #fafafa"
>
${picText.replaceAll('\n', '<br />')}
</p></div>`;
const child = createElementFromHTML(htmlStr);
intervalArr.push(child);
intervalCount ++;
listContainer.append(child);
listContainer.scrollTop = listContainer.scrollHeight
if(intervalArr.length > 9){
intervalArr.shift().remove();
window.__ALL_VIDEO_2_SHOTS__.shift();
}
}, 4000);
});
endBtn.addEventListener('click', () => {
startBtn.innerText = '开始';
startBtn.style.cursor = 'pointer';
clearInterval(intervalHandler);
intervalHandler = null;
intervalCount = 0;
intervalArr = [];
listContainer.innerHTML = '';
console.log('clear!');
});
copyBtn.addEventListener('click', () => {
const copyValue = (e, value) => {
e.clipboardData.setData('text/plain', value);
e.preventDefault();
}
// One-shot listener so repeated clicks don't stack copy handlers on the document
document.addEventListener('copy', (e) => copyValue(e, window.__ALL_VIDEO_2_SHOTS__.join('\n\n')), { once: true });
document.execCommand('copy');
})
}
}
setTimeout(() => {
const videoEleTest = window.document.getElementsByTagName('video')[0];
if(videoEleTest) {
video2shotinit();
} else {
console.log('no video');
}
}, 4000);
},{"get-pixels":10}],2:[function(require,module,exports){
'use strict'
exports.byteLength = byteLength
exports.toByteArray = toByteArray
exports.fromByteArray = fromByteArray
var lookup = []
var revLookup = []
var Arr = typeof Uint8Array !== 'undefined' ? Uint8Array : Array
var code = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
for (var i = 0, len = code.length; i < len; ++i) {
lookup[i] = code[i]
revLookup[code.charCodeAt(i)] = i
}
// Support decoding URL-safe base64 strings, as Node.js does.
// See: https://en.wikipedia.org/wiki/Base64#URL_applications
revLookup['-'.charCodeAt(0)] = 62
revLookup['_'.charCodeAt(0)] = 63
function getLens (b64) {
var len = b64.length
if (len % 4 > 0) {
throw new Error('Invalid string. Length must be a multiple of 4')
}
// Trim off extra bytes after placeholder bytes are found
// See: https://github.com/beatgammit/base64-js/issues/42
var validLen = b64.indexOf('=')
if (validLen === -1) validLen = len
var placeHoldersLen = validLen === len
? 0
: 4 - (validLen % 4)
return [validLen, placeHoldersLen]
}
// base64 is 4/3 + up to two characters of the original data
function byteLength (b64) {
var lens = getLens(b64)
var validLen = lens[0]
var placeHoldersLen = lens[1]
return ((validLen + placeHoldersLen) * 3 / 4) - placeHoldersLen
}
function _byteLength (b64, validLen, placeHoldersLen) {
return ((validLen + placeHoldersLen) * 3 / 4) - placeHoldersLen
}
function toByteArray (b64) {
var tmp
var lens = getLens(b64)
var validLen = lens[0]
var placeHoldersLen = lens[1]
var arr = new Arr(_byteLength(b64, validLen, placeHoldersLen))
var curByte = 0
// if there are placeholders, only get up to the last complete 4 chars
var len = placeHoldersLen > 0
? validLen - 4
: validLen
var i
for (i = 0; i < len; i += 4) {
tmp =
(revLookup[b64.charCodeAt(i)] << 18) |
(revLookup[b64.charCodeAt(i + 1)] << 12) |
(revLookup[b64.charCodeAt(i + 2)] << 6) |
revLookup[b64.charCodeAt(i + 3)]
arr[curByte++] = (tmp >> 16) & 0xFF
arr[curByte++] = (tmp >> 8) & 0xFF
arr[curByte++] = tmp & 0xFF
}
if (placeHoldersLen === 2) {
tmp =
(revLookup[b64.charCodeAt(i)] << 2) |
(revLookup[b64.charCodeAt(i + 1)] >> 4)
arr[curByte++] = tmp & 0xFF
}
if (placeHoldersLen === 1) {
tmp =
(revLookup[b64.charCodeAt(i)] << 10) |
(revLookup[b64.charCodeAt(i + 1)] << 4) |
(revLookup[b64.charCodeAt(i + 2)] >> 2)
arr[curByte++] = (tmp >> 8) & 0xFF
arr[curByte++] = tmp & 0xFF
}
return arr
}
function tripletToBase64 (num) {
return lookup[num >> 18 & 0x3F] +
lookup[num >> 12 & 0x3F] +
lookup[num >> 6 & 0x3F] +
lookup[num & 0x3F]
}
function encodeChunk (uint8, start, end) {
var tmp
var output = []
for (var i = start; i < end; i += 3) {
tmp =
((uint8[i] << 16) & 0xFF0000) +
((uint8[i + 1] << 8) & 0xFF00) +
(uint8[i + 2] & 0xFF)
output.push(tripletToBase64(tmp))
}
return output.join('')
}
function fromByteArray (uint8) {
var tmp
var len = uint8.length
var extraBytes = len % 3 // if we have 1 byte left, pad 2 bytes
var parts = []
var maxChunkLength = 16383 // must be multiple of 3
// go through the array every three bytes, we'll deal with trailing stuff later
for (var i = 0, len2 = len - extraBytes; i < len2; i += maxChunkLength) {
parts.push(encodeChunk(
uint8, i, (i + maxChunkLength) > len2 ? len2 : (i + maxChunkLength)
))
}
// pad the end with zeros, but make sure to not forget the extra bytes
if (extraBytes === 1) {
tmp = uint8[len - 1]
parts.push(
lookup[tmp >> 2] +
lookup[(tmp << 4) & 0x3F] +
'=='
)
} else if (extraBytes === 2) {
tmp = (uint8[len - 2] << 8) + uint8[len - 1]
parts.push(
lookup[tmp >> 10] +
lookup[(tmp >> 4) & 0x3F] +
lookup[(tmp << 2) & 0x3F] +
'='
)
}
return parts.join('')
}
},{}],3:[function(require,module,exports){
},{}],4:[function(require,module,exports){
(function (Buffer){(function (){
/*!
* The buffer module from node.js, for the browser.
*
* @author Feross Aboukhadijeh <https://feross.org>
* @license MIT
*/
/* eslint-disable no-proto */
'use strict'
var base64 = require('base64-js')
var ieee754 = require('ieee754')
exports.Buffer = Buffer
exports.SlowBuffer = SlowBuffer
exports.INSPECT_MAX_BYTES = 50
var K_MAX_LENGTH = 0x7fffffff
exports.kMaxLength = K_MAX_LENGTH
/**
* If `Buffer.TYPED_ARRAY_SUPPORT`:
* === true Use Uint8Array implementation (fastest)
* === false Print warning and recommend using `buffer` v4.x which has an Object
* implementation (most compatible, even IE6)
*
* Browsers that support typed arrays are IE 10+, Firefox 4+, Chrome 7+, Safari 5.1+,
* Opera 11.6+, iOS 4.2+.
*
* We report that the browser does not support typed arrays if they are not subclassable
* using __proto__. Firefox 4-29 lacks support for adding new properties to `Uint8Array`
* (See: https://bugzilla.mozilla.org/show_bug.cgi?id=695438). IE 10 lacks support
* for __proto__ and has a buggy typed array implementation.
*/
Buffer.TYPED_ARRAY_SUPPORT = typedArraySupport()
if (!Buffer.TYPED_ARRAY_SUPPORT && typeof console !== 'undefined' &&
typeof console.error === 'function') {
console.error(
'This browser lacks typed array (Uint8Array) support which is required by ' +
'`buffer` v5.x. Use `buffer` v4.x if you require old browser support.'
)
}
function typedArraySupport () {
// Can typed array instances be augmented?
try {
var arr = new Uint8Array(1)
arr.__proto__ = { __proto__: Uint8Array.prototype, foo: function () { return 42 } }
return arr.foo() === 42
} catch (e) {
return false
}
}
Object.defineProperty(Buffer.prototype, 'parent', {
enumerable: true,
get: function () {
if (!Buffer.isBuffer(this)) return undefined
return this.buffer
}
})
Object.defineProperty(Buffer.prototype, 'offset', {
enumerable: true,
get: function () {
if (!Buffer.isBuffer(this)) return undefined
return this.byteOffset
}
})
function createBuffer (length) {
if (length > K_MAX_LENGTH) {
throw new RangeError('The value "' + length + '" is invalid for option "size"')
}
// Return an augmented `Uint8Array` instance
var buf = new Uint8Array(length)
buf.__proto__ = Buffer.prototype
return buf
}
/**
* The Buffer constructor returns instances of `Uint8Array` that have their
* prototype changed to `Buffer.prototype`. Furthermore, `Buffer` is a subclass of
* `Uint8Array`, so the returned instances will have all the node `Buffer` methods
* and the `Uint8Array` methods. Square bracket notation works as expected -- it
* returns a single octet.
*
* The `Uint8Array` prototype remains unmodified.
*/
function Buffer (arg, encodingOrOffset, length) {
// Common case.
if (typeof arg === 'number') {
if (typeof encodingOrOffset === 'string') {
throw new TypeError(
'The "string" argument must be of type string. Received type number'
)
}
return allocUnsafe(arg)
}
return from(arg, encodingOrOffset, length)
}
// Fix subarray() in ES2016. See: https://github.com/feross/buffer/pull/97
if (typeof Symbol !== 'undefined' && Symbol.species != null &&
Buffer[Symbol.species] === Buffer) {
Object.defineProperty(Buffer, Symbol.species, {
value: null,
configurable: true,
enumerable: false,
writable: false
})
}
Buffer.poolSize = 8192 // not used by this implementation
function from (value, encodingOrOffset, length) {
if (typeof value === 'string') {
return fromString(value, encodingOrOffset)
}
if (ArrayBuffer.isView(value)) {
return fromArrayLike(value)
}
if (value == null) {
throw TypeError(
'The first argument must be one of type string, Buffer, ArrayBuffer, Array, ' +
'or Array-like Object. Received type ' + (typeof value)
)
}
if (isInstance(value, ArrayBuffer) ||
(value && isInstance(value.buffer, ArrayBuffer))) {
return fromArrayBuffer(value, encodingOrOffset, length)
}
if (typeof value === 'number') {
throw new TypeError(
'The "value" argument must not be of type number. Received type number'
)
}
var valueOf = value.valueOf && value.valueOf()
if (valueOf != null && valueOf !== value) {
return Buffer.from(valueOf, encodingOrOffset, length)
}
var b = fromObject(value)
if (b) return b
if (typeof Symbol !== 'undefined' && Symbol.toPrimitive != null &&
typeof value[Symbol.toPrimitive] === 'function') {
return Buffer.from(
value[Symbol.toPrimitive]('string'), encodingOrOffset, length
)
}
throw new TypeError(
'The first argument must be one of type string, Buffer, ArrayBuffer, Array, ' +
'or Array-like Object. Received type ' + (typeof value)
)
}
/**
* Functionally equivalent to Buffer(arg, encoding) but throws a TypeError
* if value is a number.
* Buffer.from(str[, encoding])
* Buffer.from(array)
* Buffer.from(buffer)
* Buffer.from(arrayBuffer[, byteOffset[, length]])
**/
Buffer.from = function (value, encodingOrOffset, length) {
return from(value, encodingOrOffset, length)
}
// Note: Change prototype *after* Buffer.from is defined to workaround Chrome bug:
// https://github.com/feross/buffer/pull/148
Buffer.prototype.__proto__ = Uint8Array.prototype
Buffer.__proto__ = Uint8Array
function assertSize (size) {
if (typeof size !== 'number') {
throw new TypeError('"size" argument must be of type number')
} else if (size < 0) {
throw new RangeError('The value "' + size + '" is invalid for option "size"')
}
}
function alloc (size, fill, encoding) {
assertSize(size)
if (size <= 0) {
return createBuffer(size)
}
if (fill !== undefined) {
// Only pay attention to encoding if it's a string. This
// prevents accidentally sending in a number that would
// be interpreted as a start offset.
return typeof encoding === 'string'
? createBuffer(size).fill(fill, encoding)
: createBuffer(size).fill(fill)
}
return createBuffer(size)
}
/**
* Creates a new filled Buffer instance.
* alloc(size[, fill[, encoding]])
**/
Buffer.alloc = function (size, fill, encoding) {
return alloc(size, fill, encoding)
}
function allocUnsafe (size) {
assertSize(size)
return createBuffer(size < 0 ? 0 : checked(size) | 0)
}
/**
* Equivalent to Buffer(num), by default creates a non-zero-filled Buffer instance.
* */
Buffer.allocUnsafe = function (size) {
return allocUnsafe(size)
}
/**
* Equivalent to SlowBuffer(num), by default creates a non-zero-filled Buffer instance.
*/
Buffer.allocUnsafeSlow = function (size) {
return allocUnsafe(size)
}
function fromString (string, encoding) {
if (typeof encoding !== 'string' || encoding === '') {
encoding = 'utf8'
}
if (!Buffer.isEncoding(encoding)) {
throw new TypeError('Unknown encoding: ' + encoding)
}
var length = byteLength(string, encoding) | 0
var buf = createBuffer(length)
var actual = buf.write(string, encoding)
if (actual !== length) {
// Writing a hex string, for example, that contains invalid characters will
// cause everything after the first invalid character to be ignored. (e.g.
// 'abxxcd' will be treated as 'ab')
buf = buf.slice(0, actual)
}
return buf
}
function fromArrayLike (array) {
var length = array.length < 0 ? 0 : checked(array.length) | 0
var buf = createBuffer(length)
for (var i = 0; i < length; i += 1) {
buf[i] = array[i] & 255
}
return buf
}
function fromArrayBuffer (array, byteOffset, length) {
if (byteOffset < 0 || array.byteLength < byteOffset) {
throw new RangeError('"offset" is outside of buffer bounds')
}
if (array.byteLength < byteOffset + (length || 0)) {
throw new RangeError('"length" is outside of buffer bounds')
}
var buf
if (byteOffset === undefined && length === undefined) {
buf = new Uint8Array(array)
} else if (length === undefined) {
buf = new Uint8Array(array, byteOffset)
} else {
buf = new Uint8Array(array, byteOffset, length)
}
// Return an augmented `Uint8Array` instance
buf.__proto__ = Buffer.prototype
return buf
}
function fromObject (obj) {
if (Buffer.isBuffer(obj)) {
var len = checked(obj.length) | 0
var buf = createBuffer(len)
if (buf.length === 0) {
return buf
}
obj.copy(buf, 0, 0, len)
return buf
}
if (obj.length !== undefined) {
if (typeof obj.length !== 'number' || numberIsNaN(obj.length)) {
return createBuffer(0)
}
return fromArrayLike(obj)
}
if (obj.type === 'Buffer' && Array.isArray(obj.data)) {
return fromArrayLike(obj.data)
}
}
function checked (length) {
// Note: cannot use `length < K_MAX_LENGTH` here because that fails when
// length is NaN (which is otherwise coerced to zero.)
if (length >= K_MAX_LENGTH) {
throw new RangeError('Attempt to allocate Buffer larger than maximum ' +
'size: 0x' + K_MAX_LENGTH.toString(16) + ' bytes')
}
return length | 0
}
function SlowBuffer (length) {
if (+length != length) { // eslint-disable-line eqeqeq
length = 0
}
return Buffer.alloc(+length)
}
Buffer.isBuffer = function isBuffer (b) {
return b != null && b._isBuffer === true &&
b !== Buffer.prototype // so Buffer.isBuffer(Buffer.prototype) will be false
}
Buffer.compare = function compare (a, b) {
if (isInstance(a, Uint8Array)) a = Buffer.from(a, a.offset, a.byteLength)
if (isInstance(b, Uint8Array)) b = Buffer.from(b, b.offset, b.byteLength)
if (!Buffer.isBuffer(a) || !Buffer.isBuffer(b)) {
throw new TypeError(
'The "buf1", "buf2" arguments must be one of type Buffer or Uint8Array'
)
}
if (a === b) return 0
var x = a.length
var y = b.length
for (var i = 0, len = Math.min(x, y); i < len; ++i) {
if (a[i] !== b[i]) {
x = a[i]
y = b[i]
break
}
}
if (x < y) return -1
if (y < x) return 1
return 0
}
Buffer.isEncoding = function isEncoding (encoding) {
switch (String(encoding).toLowerCase()) {
case 'hex':
case 'utf8':
case 'utf-8':
case 'ascii':
case 'latin1':
case 'binary':
case 'base64':
case 'ucs2':
case 'ucs-2':
case 'utf16le':
case 'utf-16le':
return true
default:
return false
}
}
Buffer.concat = function concat (list, length) {
if (!Array.isArray(list)) {
throw new TypeError('"list" argument must be an Array of Buffers')
}
if (list.length === 0) {
return Buffer.alloc(0)
}
var i
if (length === undefined) {
length = 0
for (i = 0; i < list.length; ++i) {
length += list[i].length
}
}
var buffer = Buffer.allocUnsafe(length)
var pos = 0
for (i = 0; i < list.length; ++i) {
var buf = list[i]
if (isInstance(buf, Uint8Array)) {
buf = Buffer.from(buf)
}
if (!Buffer.isBuffer(buf)) {
throw new TypeError('"list" argument must be an Array of Buffers')
}
buf.copy(buffer, pos)
pos += buf.length
}
return buffer
}
function byteLength (string, encoding) {
if (Buffer.isBuffer(string)) {
return string.length
}
if (ArrayBuffer.isView(string) || isInstance(string, ArrayBuffer)) {
return string.byteLength
}
if (typeof string !== 'string') {
throw new TypeError(
'The "string" argument must be one of type string, Buffer, or ArrayBuffer. ' +
'Received type ' + typeof string
)
}
var len = string.length
var mustMatch = (arguments.length > 2 && arguments[2] === true)
if (!mustMatch && len === 0) return 0
// Use a for loop to avoid recursion
var loweredCase = false
for (;;) {
switch (encoding) {
case 'ascii':
case 'latin1':
case 'binary':
return len
case 'utf8':
case 'utf-8':
return utf8ToBytes(string).length
case 'ucs2':
case 'ucs-2':
case 'utf16le':
case 'utf-16le':
return len * 2
case 'hex':
return len >>> 1
case 'base64':
return base64ToBytes(string).length
default:
if (loweredCase) {
return mustMatch ? -1 : utf8ToBytes(string).length // assume utf8
}
encoding = ('' + encoding).toLowerCase()
loweredCase = true
}
}
}
Buffer.byteLength = byteLength
function slowToString (encoding, start, end) {
var loweredCase = false
// No need to verify that "this.length <= MAX_UINT32" since it's a read-only
// property of a typed array.
// This behaves neither like String nor Uint8Array in that we set start/end
// to their upper/lower bounds if the value passed is out of range.
// undefined is handled specially as per ECMA-262 6th Edition,
// Section 13.3.3.7 Runtime Semantics: KeyedBindingInitialization.
if (start === undefined || start < 0) {
start = 0
}
// Return early if start > this.length. Done here to prevent potential uint32
// coercion fail below.
if (start > this.length) {
return ''
}
if (end === undefined || end > this.length) {
end = this.length
}
if (end <= 0) {
return ''
}
// Force coercion to uint32. This will also coerce falsey/NaN values to 0.
end >>>= 0
start >>>= 0
if (end <= start) {
return ''
}
if (!encoding) encoding = 'utf8'
while (true) {
switch (encoding) {
case 'hex':
return hexSlice(this, start, end)
case 'utf8':
case 'utf-8':
return utf8Slice(this, start, end)
case 'ascii':
return asciiSlice(this, start, end)
case 'latin1':
case 'binary':
return latin1Slice(this, start, end)
case 'base64':
return base64Slice(this, start, end)
case 'ucs2':
case 'ucs-2':
case 'utf16le':
case 'utf-16le':
return utf16leSlice(this, start, end)
default:
if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding)
encoding = (encoding + '').toLowerCase()
loweredCase = true
}
}
}
// This property is used by `Buffer.isBuffer` (and the `is-buffer` npm package)
// to detect a Buffer instance. It's not possible to use `instanceof Buffer`
// reliably in a browserify context because there could be multiple different
// copies of the 'buffer' package in use. This method works even for Buffer
// instances that were created from another copy of the `buffer` package.
// See: https://github.com/feross/buffer/issues/154
Buffer.prototype._isBuffer = true
function swap (b, n, m) {
var i = b[n]
b[n] = b[m]
b[m] = i
}
Buffer.prototype.swap16 = function swap16 () {
var len = this.length
if (len % 2 !== 0) {
throw new RangeError('Buffer size must be a multiple of 16-bits')
}
for (var i = 0; i < len; i += 2) {
swap(this, i, i + 1)
}
return this
}
Buffer.prototype.swap32 = function swap32 () {
var len = this.length
if (len % 4 !== 0) {
throw new RangeError('Buffer size must be a multiple of 32-bits')
}
for (var i = 0; i < len; i += 4) {
swap(this, i, i + 3)
swap(this, i + 1, i + 2)
}
return this
}
Buffer.prototype.swap64 = function swap64 () {
var len = this.length
if (len % 8 !== 0) {
throw new RangeError('Buffer size must be a multiple of 64-bits')
}
for (var i = 0; i < len; i += 8) {
swap(this, i, i + 7)
swap(this, i + 1, i + 6)
swap(this, i + 2, i + 5)
swap(this, i + 3, i + 4)
}
return this
}
Buffer.prototype.toString = function toString () {
var length = this.length
if (length === 0) return ''
if (arguments.length === 0) return utf8Slice(this, 0, length)
return slowToString.apply(this, arguments)
}
Buffer.prototype.toLocaleString = Buffer.prototype.toString
Buffer.prototype.equals = function equals (b) {
if (!Buffer.isBuffer(b)) throw new TypeError('Argument must be a Buffer')
if (this === b) return true
return Buffer.compare(this, b) === 0
}
Buffer.prototype.inspect = function inspect () {
var str = ''
var max = exports.INSPECT_MAX_BYTES
str = this.toString('hex', 0, max).replace(/(.{2})/g, '$1 ').trim()
if (this.length > max) str += ' ... '
return '<Buffer ' + str + '>'
}
Buffer.prototype.compare = function compare (target, start, end, thisStart, thisEnd) {
if (isInstance(target, Uint8Array)) {
target = Buffer.from(target, target.offset, target.byteLength)
}
if (!Buffer.isBuffer(target)) {
throw new TypeError(
'The "target" argument must be one of type Buffer or Uint8Array. ' +
'Received type ' + (typeof target)
)
}
if (start === undefined) {
start = 0
}
if (end === undefined) {
end = target ? target.length : 0
}
if (thisStart === undefined) {
thisStart = 0
}
if (thisEnd === undefined) {
thisEnd = this.length
}
if (start < 0 || end > target.length || thisStart < 0 || thisEnd > this.length) {
throw new RangeError('out of range index')
}
if (thisStart >= thisEnd && start >= end) {
return 0
}
if (thisStart >= thisEnd) {
return -1
}
if (start >= end) {
return 1
}
start >>>= 0
end >>>= 0
thisStart >>>= 0
thisEnd >>>= 0
if (this === target) return 0
var x = thisEnd - thisStart
var y = end - start
var len = Math.min(x, y)
var thisCopy = this.slice(thisStart, thisEnd)
var targetCopy = target.slice(start, end)
for (var i = 0; i < len; ++i) {
if (thisCopy[i] !== targetCopy[i]) {
x = thisCopy[i]
y = targetCopy[i]
break
}
}
if (x < y) return -1
if (y < x) return 1
return 0
}
// Finds either the first index of `val` in `buffer` at offset >= `byteOffset`,
// OR the last index of `val` in `buffer` at offset <= `byteOffset`.
//
// Arguments:
// - buffer - a Buffer to search
// - val - a string, Buffer, or number
// - byteOffset - an index into `buffer`; will be clamped to an int32
// - encoding - an optional encoding, relevant if val is a string
// - dir - true for indexOf, false for lastIndexOf
function bidirectionalIndexOf (buffer, val, byteOffset, encoding, dir) {
// Empty buffer means no match
if (buffer.length === 0) return -1
// Normalize byteOffset
if (typeof byteOffset === 'string') {
encoding = byteOffset
byteOffset = 0
} else if (byteOffset > 0x7fffffff) {
byteOffset = 0x7fffffff
} else if (byteOffset < -0x80000000) {
byteOffset = -0x80000000
}
byteOffset = +byteOffset // Coerce to Number.
if (numberIsNaN(byteOffset)) {
// byteOffset: if it's undefined, null, NaN, "foo", etc, search whole buffer
byteOffset = dir ? 0 : (buffer.length - 1)
}
// Normalize byteOffset: negative offsets start from the end of the buffer
if (byteOffset < 0) byteOffset = buffer.length + byteOffset
if (byteOffset >= buffer.length) {
if (dir) return -1
else byteOffset = buffer.length - 1
} else if (byteOffset < 0) {
if (dir) byteOffset = 0
else return -1
}
// Normalize val
if (typeof val === 'string') {
val = Buffer.from(val, encoding)
}
// Finally, search either indexOf (if dir is true) or lastIndexOf
if (Buffer.isBuffer(val)) {
// Special case: looking for empty string/buffer always fails
if (val.length === 0) {
return -1
}
return arrayIndexOf(buffer, val, byteOffset, encoding, dir)
} else if (typeof val === 'number') {
val = val & 0xFF // Search for a byte value [0-255]
if (typeof Uint8Array.prototype.indexOf === 'function') {
if (dir) {
return Uint8Array.prototype.indexOf.call(buffer, val, byteOffset)
} else {
return Uint8Array.prototype.lastIndexOf.call(buffer, val, byteOffset)
}
}
return arrayIndexOf(buffer, [ val ], byteOffset, encoding, dir)
}
throw new TypeError('val must be string, number or Buffer')
}
function arrayIndexOf (arr, val, byteOffset, encoding, dir) {
var indexSize = 1
var arrLength = arr.length
var valLength = val.length
if (encoding !== undefined) {
encoding = String(encoding).toLowerCase()
if (encoding === 'ucs2' || encoding === 'ucs-2' ||
encoding === 'utf16le' || encoding === 'utf-16le') {
if (arr.length < 2 || val.length < 2) {
return -1
}
indexSize = 2
arrLength /= 2
valLength /= 2
byteOffset /= 2
}
}
function read (buf, i) {
if (indexSize === 1) {
return buf[i]
} else {
return buf.readUInt16BE(i * indexSize)
}
}
var i
if (dir) {
var foundIndex = -1
for (i = byteOffset; i < arrLength; i++) {
if (read(arr, i) === read(val, foundIndex === -1 ? 0 : i - foundIndex)) {
if (foundIndex === -1) foundIndex = i
if (i - foundIndex + 1 === valLength) return foundIndex * indexSize
} else {
if (foundIndex !== -1) i -= i - foundIndex
foundIndex = -1
}
}
} else {
if (byteOffset + valLength > arrLength) byteOffset = arrLength - valLength
for (i = byteOffset; i >= 0; i--) {
var found = true
for (var j = 0; j < valLength; j++) {
if (read(arr, i + j) !== read(val, j)) {
found = false
break
}
}
if (found) return i
}
}
return -1
}
Buffer.prototype.includes = function includes (val, byteOffset, encoding) {
return this.indexOf(val, byteOffset, encoding) !== -1
}
Buffer.prototype.indexOf = function indexOf (val, byteOffset, encoding) {
return bidirectionalIndexOf(this, val, byteOffset, encoding, true)
}
Buffer.prototype.lastIndexOf = function lastIndexOf (val, byteOffset, encoding) {
return bidirectionalIndexOf(this, val, byteOffset, encoding, false)
}
function hexWrite (buf, string, offset, length) {
offset = Number(offset) || 0
var remaining = buf.length - offset
if (!length) {
length = remaining
} else {
length = Number(length)
if (length > remaining) {
length = remaining
}
}
var strLen = string.length
if (length > strLen / 2) {
length = strLen / 2
}
for (var i = 0; i < length; ++i) {
var parsed = parseInt(string.substr(i * 2, 2), 16)
if (numberIsNaN(parsed)) return i
buf[offset + i] = parsed
}
return i
}
function utf8Write (buf, string, offset, length) {
return blitBuffer(utf8ToBytes(string, buf.length - offset), buf, offset, length)
}
function asciiWrite (buf, string, offset, length) {
return blitBuffer(asciiToBytes(string), buf, offset, length)
}
function latin1Write (buf, string, offset, length) {
return asciiWrite(buf, string, offset, length)
}
function base64Write (buf, string, offset, length) {
return blitBuffer(base64ToBytes(string), buf, offset, length)
}
function ucs2Write (buf, string, offset, length) {
return blitBuffer(utf16leToBytes(string, buf.length - offset), buf, offset, length)
}
Buffer.prototype.write = function write (string, offset, length, encoding) {
// Buffer#write(string)
if (offset === undefined) {
encoding = 'utf8'
length = this.length
offset = 0
// Buffer#write(string, encoding)
} else if (length === undefined && typeof offset === 'string') {
encoding = offset
length = this.length
offset = 0
// Buffer#write(string, offset[, length][, encoding])
} else if (isFinite(offset)) {
offset = offset >>> 0
if (isFinite(length)) {
length = length >>> 0
if (encoding === undefined) encoding = 'utf8'
} else {
encoding = length
length = undefined
}
} else {
throw new Error(
'Buffer.write(string, encoding, offset[, length]) is no longer supported'
)
}
var remaining = this.length - offset
if (length === undefined || length > remaining) length = remaining
if ((string.length > 0 && (length < 0 || offset < 0)) || offset > this.length) {
throw new RangeError('Attempt to write outside buffer bounds')
}
if (!encoding) encoding = 'utf8'
var loweredCase = false
for (;;) {
switch (encoding) {
case 'hex':
return hexWrite(this, string, offset, length)
case 'utf8':
case 'utf-8':
return utf8Write(this, string, offset, length)
case 'ascii':
return asciiWrite(this, string, offset, length)
case 'latin1':
case 'binary':
return latin1Write(this, string, offset, length)
case 'base64':
// Warning: maxLength not taken into account in base64Write
return base64Write(this, string, offset, length)
case 'ucs2':
case 'ucs-2':
case 'utf16le':
case 'utf-16le':
return ucs2Write(this, string, offset, length)
default:
if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding)
encoding = ('' + encoding).toLowerCase()
loweredCase = true
}
}
}
Buffer.prototype.toJSON = function toJSON () {
return {
type: 'Buffer',
data: Array.prototype.slice.call(this._arr || this, 0)
}
}
function base64Slice (buf, start, end) {
if (start === 0 && end === buf.length) {
return base64.fromByteArray(buf)
} else {
return base64.fromByteArray(buf.slice(start, end))
}
}
function utf8Slice (buf, start, end) {
end = Math.min(buf.length, end)
var res = []
var i = start
while (i < end) {
var firstByte = buf[i]
var codePoint = null
var bytesPerSequence = (firstByte > 0xEF) ? 4
: (firstByte > 0xDF) ? 3
: (firstByte > 0xBF) ? 2
: 1
if (i + bytesPerSequence <= end) {
var secondByte, thirdByte, fourthByte, tempCodePoint
switch (bytesPerSequence) {
case 1:
if (firstByte < 0x80) {
codePoint = firstByte
}
break
case 2:
secondByte = buf[i + 1]
if ((secondByte & 0xC0) === 0x80) {
tempCodePoint = (firstByte & 0x1F) << 0x6 | (secondByte & 0x3F)
if (tempCodePoint > 0x7F) {
codePoint = tempCodePoint
}
}
break
case 3:
secondByte = buf[i + 1]
thirdByte = buf[i + 2]
if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80) {
tempCodePoint = (firstByte & 0xF) << 0xC | (secondByte & 0x3F) << 0x6 | (thirdByte & 0x3F)
if (tempCodePoint > 0x7FF && (tempCodePoint < 0xD800 || tempCodePoint > 0xDFFF)) {
codePoint = tempCodePoint
}
}
break
case 4:
secondByte = buf[i + 1]
thirdByte = buf[i + 2]
fourthByte = buf[i + 3]
if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80 && (fourthByte & 0xC0) === 0x80) {
tempCodePoint = (firstByte & 0xF) << 0x12 | (secondByte & 0x3F) << 0xC | (thirdByte & 0x3F) << 0x6 | (fourthByte & 0x3F)
if (tempCodePoint > 0xFFFF && tempCodePoint < 0x110000) {
codePoint = tempCodePoint
}
}
}
}
if (codePoint === null) {
// we did not generate a valid codePoint so insert a
// replacement char (U+FFFD) and advance only 1 byte
codePoint = 0xFFFD
bytesPerSequence = 1
} else if (codePoint > 0xFFFF) {
// encode to utf16 (surrogate pair dance)
codePoint -= 0x10000
res.push(codePoint >>> 10 & 0x3FF | 0xD800)
codePoint = 0xDC00 | codePoint & 0x3FF
}
res.push(codePoint)
i += bytesPerSequence
}
return decodeCodePointsArray(res)
}
// Based on http://stackoverflow.com/a/22747272/680742, the browser with
// the lowest limit is Chrome, with 0x10000 args.
// We go 1 magnitude less, for safety
var MAX_ARGUMENTS_LENGTH = 0x1000
function decodeCodePointsArray (codePoints) {
var len = codePoints.length
if (len <= MAX_ARGUMENTS_LENGTH) {
return String.fromCharCode.apply(String, codePoints) // avoid extra slice()
}
// Decode in chunks to avoid "call stack size exceeded".
var res = ''
var i = 0
while (i < len) {
res += String.fromCharCode.apply(
String,
codePoints.slice(i, i += MAX_ARGUMENTS_LENGTH)
)
}
return res
}
function asciiSlice (buf, start, end) {
var ret = ''
end = Math.min(buf.length, end)
for (var i = start; i < end; ++i) {
ret += String.fromCharCode(buf[i] & 0x7F)
}
return ret
}
function latin1Slice (buf, start, end) {
var ret = ''
end = Math.min(buf.length, end)
for (var i = start; i < end; ++i) {
ret += String.fromCharCode(buf[i])
}
return ret
}
function hexSlice (buf, start, end) {
var len = buf.length
if (!start || start < 0) start = 0
if (!end || end < 0 || end > len) end = len
var out = ''
for (var i = start; i < end; ++i) {
out += toHex(buf[i])
}
return out
}
function utf16leSlice (buf, start, end) {
var bytes = buf.slice(start, end)
var res = ''
for (var i = 0; i < bytes.length; i += 2) {
res += String.fromCharCode(bytes[i] + (bytes[i + 1] * 256))
}
return res
}
Buffer.prototype.slice = function slice (start, end) {
var len = this.length
start = ~~start
end = end === undefined ? len : ~~end
if (start < 0) {
start += len
if (start < 0) start = 0
} else if (start > len) {
start = len
}
if (end < 0) {
end += len
if (end < 0) end = 0
} else if (end > len) {
end = len
}
if (end < start) end = start
var newBuf = this.subarray(start, end)
// Return an augmented `Uint8Array` instance
newBuf.__proto__ = Buffer.prototype
return newBuf
}
/*
* Need to make sure that buffer isn't trying to write out of bounds.
*/
function checkOffset (offset, ext, length) {
if ((offset % 1) !== 0 || offset < 0) throw new RangeError('offset is not uint')
if (offset + ext > length) throw new RangeError('Trying to access beyond buffer length')
}
Buffer.prototype.readUIntLE = function readUIntLE (offset, byteLength, noAssert) {
offset = offset >>> 0
byteLength = byteLength >>> 0
if (!noAssert) checkOffset(offset, byteLength, this.length)
var val = this[offset]
var mul = 1
var i = 0
while (++i < byteLength && (mul *= 0x100)) {
val += this[offset + i] * mul
}
return val
}
Buffer.prototype.readUIntBE = function readUIntBE (offset, byteLength, noAssert) {
offset = offset >>> 0
byteLength = byteLength >>> 0
if (!noAssert) {
checkOffset(offset, byteLength, this.length)
}
var val = this[offset + --byteLength]
var mul = 1
while (byteLength > 0 && (mul *= 0x100)) {
val += this[offset + --byteLength] * mul
}
return val
}
Buffer.prototype.readUInt8 = function readUInt8 (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 1, this.length)
return this[offset]
}
Buffer.prototype.readUInt16LE = function readUInt16LE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 2, this.length)
return this[offset] | (this[offset + 1] << 8)
}
Buffer.prototype.readUInt16BE = function readUInt16BE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 2, this.length)
return (this[offset] << 8) | this[offset + 1]
}
Buffer.prototype.readUInt32LE = function readUInt32LE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 4, this.length)
return ((this[offset]) |
(this[offset + 1] << 8) |
(this[offset + 2] << 16)) +
(this[offset + 3] * 0x1000000)
}
Buffer.prototype.readUInt32BE = function readUInt32BE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 4, this.length)
return (this[offset] * 0x1000000) +
((this[offset + 1] << 16) |
(this[offset + 2] << 8) |
this[offset + 3])
}
Buffer.prototype.readIntLE = function readIntLE (offset, byteLength, noAssert) {
offset = offset >>> 0
byteLength = byteLength >>> 0
if (!noAssert) checkOffset(offset, byteLength, this.length)
var val = this[offset]
var mul = 1
var i = 0
while (++i < byteLength && (mul *= 0x100)) {
val += this[offset + i] * mul
}
mul *= 0x80
if (val >= mul) val -= Math.pow(2, 8 * byteLength)
return val
}
Buffer.prototype.readIntBE = function readIntBE (offset, byteLength, noAssert) {
offset = offset >>> 0
byteLength = byteLength >>> 0
if (!noAssert) checkOffset(offset, byteLength, this.length)
var i = byteLength
var mul = 1
var val = this[offset + --i]
while (i > 0 && (mul *= 0x100)) {
val += this[offset + --i] * mul
}
mul *= 0x80
if (val >= mul) val -= Math.pow(2, 8 * byteLength)
return val
}
Buffer.prototype.readInt8 = function readInt8 (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 1, this.length)
if (!(this[offset] & 0x80)) return (this[offset])
return ((0xff - this[offset] + 1) * -1)
}
Buffer.prototype.readInt16LE = function readInt16LE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 2, this.length)
var val = this[offset] | (this[offset + 1] << 8)
return (val & 0x8000) ? val | 0xFFFF0000 : val
}
Buffer.prototype.readInt16BE = function readInt16BE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 2, this.length)
var val = this[offset + 1] | (this[offset] << 8)
return (val & 0x8000) ? val | 0xFFFF0000 : val
}
Buffer.prototype.readInt32LE = function readInt32LE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 4, this.length)
return (this[offset]) |
(this[offset + 1] << 8) |
(this[offset + 2] << 16) |
(this[offset + 3] << 24)
}
Buffer.prototype.readInt32BE = function readInt32BE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 4, this.length)
return (this[offset] << 24) |
(this[offset + 1] << 16) |
(this[offset + 2] << 8) |
(this[offset + 3])
}
Buffer.prototype.readFloatLE = function readFloatLE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 4, this.length)
return ieee754.read(this, offset, true, 23, 4)
}
Buffer.prototype.readFloatBE = function readFloatBE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 4, this.length)
return ieee754.read(this, offset, false, 23, 4)
}
Buffer.prototype.readDoubleLE = function readDoubleLE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 8, this.length)
return ieee754.read(this, offset, true, 52, 8)
}
Buffer.prototype.readDoubleBE = function readDoubleBE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 8, this.length)
return ieee754.read(this, offset, false, 52, 8)
}
function checkInt (buf, value, offset, ext, max, min) {
if (!Buffer.isBuffer(buf)) throw new TypeError('"buffer" argument must be a Buffer instance')
if (value > max || value < min) throw new RangeError('"value" argument is out of bounds')
if (offset + ext > buf.length) throw new RangeError('Index out of range')
}
Buffer.prototype.writeUIntLE = function writeUIntLE (value, offset, byteLength, noAssert) {
value = +value
offset = offset >>> 0
byteLength = byteLength >>> 0
if (!noAssert) {
var maxBytes = Math.pow(2, 8 * byteLength) - 1
checkInt(this, value, offset, byteLength, maxBytes, 0)
}
var mul = 1
var i = 0
this[offset] = value & 0xFF
while (++i < byteLength && (mul *= 0x100)) {
this[offset + i] = (value / mul) & 0xFF
}
return offset + byteLength
}
Buffer.prototype.writeUIntBE = function writeUIntBE (value, offset, byteLength, noAssert) {
value = +value
offset = offset >>> 0
byteLength = byteLength >>> 0
if (!noAssert) {
var maxBytes = Math.pow(2, 8 * byteLength) - 1
checkInt(this, value, offset, byteLength, maxBytes, 0)
}
var i = byteLength - 1
var mul = 1
this[offset + i] = value & 0xFF
while (--i >= 0 && (mul *= 0x100)) {
this[offset + i] = (value / mul) & 0xFF
}
return offset + byteLength
}
Buffer.prototype.writeUInt8 = function writeUInt8 (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) checkInt(this, value, offset, 1, 0xff, 0)
this[offset] = (value & 0xff)
return offset + 1
}
Buffer.prototype.writeUInt16LE = function writeUInt16LE (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0)
this[offset] = (value & 0xff)
this[offset + 1] = (value >>> 8)
return offset + 2
}
Buffer.prototype.writeUInt16BE = function writeUInt16BE (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0)
this[offset] = (value >>> 8)
this[offset + 1] = (value & 0xff)
return offset + 2
}
Buffer.prototype.writeUInt32LE = function writeUInt32LE (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0)
this[offset + 3] = (value >>> 24)
this[offset + 2] = (value >>> 16)
this[offset + 1] = (value >>> 8)
this[offset] = (value & 0xff)
return offset + 4
}
Buffer.prototype.writeUInt32BE = function writeUInt32BE (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0)
this[offset] = (value >>> 24)
this[offset + 1] = (value >>> 16)
this[offset + 2] = (value >>> 8)
this[offset + 3] = (value & 0xff)
return offset + 4
}
Buffer.prototype.writeIntLE = function writeIntLE (value, offset, byteLength, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) {
var limit = Math.pow(2, (8 * byteLength) - 1)
checkInt(this, value, offset, byteLength, limit - 1, -limit)
}
var i = 0
var mul = 1
var sub = 0
this[offset] = value & 0xFF
while (++i < byteLength && (mul *= 0x100)) {
if (value < 0 && sub === 0 && this[offset + i - 1] !== 0) {
sub = 1
}
this[offset + i] = ((value / mul) >> 0) - sub & 0xFF
}
return offset + byteLength
}
Buffer.prototype.writeIntBE = function writeIntBE (value, offset, byteLength, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) {
var limit = Math.pow(2, (8 * byteLength) - 1)
checkInt(this, value, offset, byteLength, limit - 1, -limit)
}
var i = byteLength - 1
var mul = 1
var sub = 0
this[offset + i] = value & 0xFF
while (--i >= 0 && (mul *= 0x100)) {
if (value < 0 && sub === 0 && this[offset + i + 1] !== 0) {
sub = 1
}
this[offset + i] = ((value / mul) >> 0) - sub & 0xFF
}
return offset + byteLength
}
Buffer.prototype.writeInt8 = function writeInt8 (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) checkInt(this, value, offset, 1, 0x7f, -0x80)
if (value < 0) value = 0xff + value + 1
this[offset] = (value & 0xff)
return offset + 1
}
Buffer.prototype.writeInt16LE = function writeInt16LE (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000)
this[offset] = (value & 0xff)
this[offset + 1] = (value >>> 8)
return offset + 2
}
Buffer.prototype.writeInt16BE = function writeInt16BE (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000)
this[offset] = (value >>> 8)
this[offset + 1] = (value & 0xff)
return offset + 2
}
Buffer.prototype.writeInt32LE = function writeInt32LE (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000)
this[offset] = (value & 0xff)
this[offset + 1] = (value >>> 8)
this[offset + 2] = (value >>> 16)
this[offset + 3] = (value >>> 24)
return offset + 4
}
Buffer.prototype.writeInt32BE = function writeInt32BE (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000)
if (value < 0) value = 0xffffffff + value + 1
this[offset] = (value >>> 24)
this[offset + 1] = (value >>> 16)
this[offset + 2] = (value >>> 8)
this[offset + 3] = (value & 0xff)
return offset + 4
}
function checkIEEE754 (buf, value, offset, ext, max, min) {
if (offset + ext > buf.length) throw new RangeError('Index out of range')
if (offset < 0) throw new RangeError('Index out of range')
}
function writeFloat (buf, value, offset, littleEndian, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) {
checkIEEE754(buf, value, offset, 4, 3.4028234663852886e+38, -3.4028234663852886e+38)
}
ieee754.write(buf, value, offset, littleEndian, 23, 4)
return offset + 4
}
Buffer.prototype.writeFloatLE = function writeFloatLE (value, offset, noAssert) {
return writeFloat(this, value, offset, true, noAssert)
}
Buffer.prototype.writeFloatBE = function writeFloatBE (value, offset, noAssert) {
return writeFloat(this, value, offset, false, noAssert)
}
function writeDouble (buf, value, offset, littleEndian, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) {
checkIEEE754(buf, value, offset, 8, 1.7976931348623157E+308, -1.7976931348623157E+308)
}
ieee754.write(buf, value, offset, littleEndian, 52, 8)
return offset + 8
}
Buffer.prototype.writeDoubleLE = function writeDoubleLE (value, offset, noAssert) {
return writeDouble(this, value, offset, true, noAssert)
}
Buffer.prototype.writeDoubleBE = function writeDoubleBE (value, offset, noAssert) {
return writeDouble(this, value, offset, false, noAssert)
}
// copy(targetBuffer, targetStart=0, sourceStart=0, sourceEnd=buffer.length)
Buffer.prototype.copy = function copy (target, targetStart, start, end) {
if (!Buffer.isBuffer(target)) throw new TypeError('argument should be a Buffer')
if (!start) start = 0
if (!end && end !== 0) end = this.length
if (targetStart >= target.length) targetStart = target.length
if (!targetStart) targetStart = 0
if (end > 0 && end < start) end = start
// Copy 0 bytes; we're done
if (end === start) return 0
if (target.length === 0 || this.length === 0) return 0
// Fatal error conditions
if (targetStart < 0) {
throw new RangeError('targetStart out of bounds')
}
if (start < 0 || start >= this.length) throw new RangeError('Index out of range')
if (end < 0) throw new RangeError('sourceEnd out of bounds')
// Are we oob?
if (end > this.length) end = this.length
if (target.length - targetStart < end - start) {
end = target.length - targetStart + start
}
var len = end - start
if (this === target && typeof Uint8Array.prototype.copyWithin === 'function') {
// Use built-in when available, missing from IE11
this.copyWithin(targetStart, start, end)
} else if (this === target && start < targetStart && targetStart < end) {
// descending copy from end
for (var i = len - 1; i >= 0; --i) {
target[i + targetStart] = this[i + start]
}
} else {
Uint8Array.prototype.set.call(
target,
this.subarray(start, end),
targetStart
)
}
return len
}
// Usage:
// buffer.fill(number[, offset[, end]])
// buffer.fill(buffer[, offset[, end]])
// buffer.fill(string[, offset[, end]][, encoding])
Buffer.prototype.fill = function fill (val, start, end, encoding) {
// Handle string cases:
if (typeof val === 'string') {
if (typeof start === 'string') {
encoding = start
start = 0
end = this.length
} else if (typeof end === 'string') {
encoding = end
end = this.length
}
if (encoding !== undefined && typeof encoding !== 'string') {
throw new TypeError('encoding must be a string')
}
if (typeof encoding === 'string' && !Buffer.isEncoding(encoding)) {
throw new TypeError('Unknown encoding: ' + encoding)
}
if (val.length === 1) {
var code = val.charCodeAt(0)
if ((encoding === 'utf8' && code < 128) ||
encoding === 'latin1') {
// Fast path: If `val` fits into a single byte, use that numeric value.
val = code
}
}
} else if (typeof val === 'number') {
val = val & 255
}
// Invalid ranges are not set to a default, so can range check early.
if (start < 0 || this.length < start || this.length < end) {
throw new RangeError('Out of range index')
}
if (end <= start) {
return this
}
start = start >>> 0
end = end === undefined ? this.length : end >>> 0
if (!val) val = 0
var i
if (typeof val === 'number') {
for (i = start; i < end; ++i) {
this[i] = val
}
} else {
var bytes = Buffer.isBuffer(val)
? val
: Buffer.from(val, encoding)
var len = bytes.length
if (len === 0) {
throw new TypeError('The value "' + val +
'" is invalid for argument "value"')
}
for (i = 0; i < end - start; ++i) {
this[i + start] = bytes[i % len]
}
}
return this
}
// HELPER FUNCTIONS
// ================
var INVALID_BASE64_RE = /[^+/0-9A-Za-z-_]/g
function base64clean (str) {
// Node takes equal signs as end of the Base64 encoding
str = str.split('=')[0]
// Node strips out invalid characters like \n and \t from the string, base64-js does not
str = str.trim().replace(INVALID_BASE64_RE, '')
// Node converts strings with length < 2 to ''
if (str.length < 2) return ''
// Node allows for non-padded base64 strings (missing trailing ===), base64-js does not
while (str.length % 4 !== 0) {
str = str + '='
}
return str
}
function toHex (n) {
if (n < 16) return '0' + n.toString(16)
return n.toString(16)
}
function utf8ToBytes (string, units) {
units = units || Infinity
var codePoint
var length = string.length
var leadSurrogate = null
var bytes = []
for (var i = 0; i < length; ++i) {
codePoint = string.charCodeAt(i)
// is surrogate component
if (codePoint > 0xD7FF && codePoint < 0xE000) {
// last char was a lead
if (!leadSurrogate) {
// no lead yet
if (codePoint > 0xDBFF) {
// unexpected trail
if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)
continue
} else if (i + 1 === length) {
// unpaired lead
if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)
continue
}
// valid lead
leadSurrogate = codePoint
continue
}
// 2 leads in a row
if (codePoint < 0xDC00) {
if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)
leadSurrogate = codePoint
continue
}
// valid surrogate pair
codePoint = (leadSurrogate - 0xD800 << 10 | codePoint - 0xDC00) + 0x10000
} else if (leadSurrogate) {
// valid bmp char, but last char was a lead
if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)
}
leadSurrogate = null
// encode utf8
if (codePoint < 0x80) {
if ((units -= 1) < 0) break
bytes.push(codePoint)
} else if (codePoint < 0x800) {
if ((units -= 2) < 0) break
bytes.push(
codePoint >> 0x6 | 0xC0,
codePoint & 0x3F | 0x80
)
} else if (codePoint < 0x10000) {
if ((units -= 3) < 0) break
bytes.push(
codePoint >> 0xC | 0xE0,
codePoint >> 0x6 & 0x3F | 0x80,
codePoint & 0x3F | 0x80
)
} else if (codePoint < 0x110000) {
if ((units -= 4) < 0) break
bytes.push(
codePoint >> 0x12 | 0xF0,
codePoint >> 0xC & 0x3F | 0x80,
codePoint >> 0x6 & 0x3F | 0x80,
codePoint & 0x3F | 0x80
)
} else {
throw new Error('Invalid code point')
}
}
return bytes
}
function asciiToBytes (str) {
var byteArray = []
for (var i = 0; i < str.length; ++i) {
// Node's code seems to be doing this and not & 0x7F..
byteArray.push(str.charCodeAt(i) & 0xFF)
}
return byteArray
}
function utf16leToBytes (str, units) {
var c, hi, lo
var byteArray = []
for (var i = 0; i < str.length; ++i) {
if ((units -= 2) < 0) break
c = str.charCodeAt(i)
hi = c >> 8
lo = c % 256
byteArray.push(lo)
byteArray.push(hi)
}
return byteArray
}
function base64ToBytes (str) {
return base64.toByteArray(base64clean(str))
}
function blitBuffer (src, dst, offset, length) {
for (var i = 0; i < length; ++i) {
if ((i + offset >= dst.length) || (i >= src.length)) break
dst[i + offset] = src[i]
}
return i
}
// ArrayBuffer or Uint8Array objects from other contexts (i.e. iframes) do not pass
// the `instanceof` check but they should be treated as of that type.
// See: https://github.com/feross/buffer/issues/166
function isInstance (obj, type) {
return obj instanceof type ||
(obj != null && obj.constructor != null && obj.constructor.name != null &&
obj.constructor.name === type.name)
}
function numberIsNaN (obj) {
// For IE11 support
return obj !== obj // eslint-disable-line no-self-compare
}
}).call(this)}).call(this,require("buffer").Buffer)
},{"base64-js":2,"buffer":4,"ieee754":11}],5:[function(require,module,exports){
"use strict"
var createThunk = require("./lib/thunk.js")
function Procedure() {
this.argTypes = []
this.shimArgs = []
this.arrayArgs = []
this.arrayBlockIndices = []
this.scalarArgs = []
this.offsetArgs = []
this.offsetArgIndex = []
this.indexArgs = []
this.shapeArgs = []
this.funcName = ""
this.pre = null
this.body = null
this.post = null
this.debug = false
}
function compileCwise(user_args) {
//Create procedure
var proc = new Procedure()
//Parse blocks
proc.pre = user_args.pre
proc.body = user_args.body
proc.post = user_args.post
//Parse arguments
var proc_args = user_args.args.slice(0)
proc.argTypes = proc_args
for(var i=0; i<proc_args.length; ++i) {
var arg_type = proc_args[i]
if(arg_type === "array" || (typeof arg_type === "object" && arg_type.blockIndices)) {
proc.argTypes[i] = "array"
proc.arrayArgs.push(i)
proc.arrayBlockIndices.push(arg_type.blockIndices ? arg_type.blockIndices : 0)
proc.shimArgs.push("array" + i)
if(i < proc.pre.args.length && proc.pre.args[i].count>0) {
throw new Error("cwise: pre() block may not reference array args")
}
if(i < proc.post.args.length && proc.post.args[i].count>0) {
throw new Error("cwise: post() block may not reference array args")
}
} else if(arg_type === "scalar") {
proc.scalarArgs.push(i)
proc.shimArgs.push("scalar" + i)
} else if(arg_type === "index") {
proc.indexArgs.push(i)
if(i < proc.pre.args.length && proc.pre.args[i].count > 0) {
throw new Error("cwise: pre() block may not reference array index")
}
if(i < proc.body.args.length && proc.body.args[i].lvalue) {
throw new Error("cwise: body() block may not write to array index")
}
if(i < proc.post.args.length && proc.post.args[i].count > 0) {
throw new Error("cwise: post() block may not reference array index")
}
} else if(arg_type === "shape") {
proc.shapeArgs.push(i)
if(i < proc.pre.args.length && proc.pre.args[i].lvalue) {
throw new Error("cwise: pre() block may not write to array shape")
}
if(i < proc.body.args.length && proc.body.args[i].lvalue) {
throw new Error("cwise: body() block may not write to array shape")
}
if(i < proc.post.args.length && proc.post.args[i].lvalue) {
throw new Error("cwise: post() block may not write to array shape")
}
} else if(typeof arg_type === "object" && arg_type.offset) {
proc.argTypes[i] = "offset"
proc.offsetArgs.push({ array: arg_type.array, offset:arg_type.offset })
proc.offsetArgIndex.push(i)
} else {
throw new Error("cwise: Unknown argument type " + proc_args[i])
}
}
//Make sure at least one array argument was specified
if(proc.arrayArgs.length <= 0) {
throw new Error("cwise: No array arguments specified")
}
//Make sure arguments are correct
if(proc.pre.args.length > proc_args.length) {
throw new Error("cwise: Too many arguments in pre() block")
}
if(proc.body.args.length > proc_args.length) {
throw new Error("cwise: Too many arguments in body() block")
}
if(proc.post.args.length > proc_args.length) {
throw new Error("cwise: Too many arguments in post() block")
}
//Check debug flag
proc.debug = !!user_args.printCode || !!user_args.debug
//Retrieve name
proc.funcName = user_args.funcName || "cwise"
//Read in block size
proc.blockSize = user_args.blockSize || 64
return createThunk(proc)
}
module.exports = compileCwise
},{"./lib/thunk.js":7}],6:[function(require,module,exports){
"use strict"
var uniq = require("uniq")
// This function generates very simple loops analogous to how you typically traverse arrays (the outermost loop corresponds to the slowest changing index, the innermost loop to the fastest changing index)
// TODO: If two arrays have the same strides (and offsets) there is potential for decreasing the number of "pointers" and related variables. The drawback is that the type signature would become more specific and that there would thus be less potential for caching, but it might still be worth it, especially when dealing with large numbers of arguments.
function innerFill(order, proc, body) {
var dimension = order.length
, nargs = proc.arrayArgs.length
, has_index = proc.indexArgs.length>0
, code = []
, vars = []
, idx=0, pidx=0, i, j
for(i=0; i<dimension; ++i) { // Iteration variables
vars.push(["i",i,"=0"].join(""))
}
//Compute scan deltas
for(j=0; j<nargs; ++j) {
for(i=0; i<dimension; ++i) {
pidx = idx
idx = order[i]
if(i === 0) { // The innermost/fastest dimension's delta is simply its stride
vars.push(["d",j,"s",i,"=t",j,"p",idx].join(""))
} else { // For other dimensions the delta is basically the stride minus something which essentially "rewinds" the previous (more inner) dimension
vars.push(["d",j,"s",i,"=(t",j,"p",idx,"-s",pidx,"*t",j,"p",pidx,")"].join(""))
}
}
}
if (vars.length > 0) {
code.push("var " + vars.join(","))
}
//Scan loop
for(i=dimension-1; i>=0; --i) { // Start at largest stride and work your way inwards
idx = order[i]
code.push(["for(i",i,"=0;i",i,"<s",idx,";++i",i,"){"].join(""))
}
//Push body of inner loop
code.push(body)
//Advance scan pointers
for(i=0; i<dimension; ++i) {
pidx = idx
idx = order[i]
for(j=0; j<nargs; ++j) {
code.push(["p",j,"+=d",j,"s",i].join(""))
}
if(has_index) {
if(i > 0) {
code.push(["index[",pidx,"]-=s",pidx].join(""))
}
code.push(["++index[",idx,"]"].join(""))
}
code.push("}")
}
return code.join("\n")
}
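// Illustration (a sketch, not emitted verbatim): for a single 2-d array argument with
// order [0,1], innerFill produces code along these lines (s0/s1 are the shape limits,
// t0p0/t0p1 the unpacked strides, p0 the running data pointer):
//   var i0=0,i1=0,d0s0=t0p0,d0s1=(t0p1-s0*t0p0)
//   for(i1=0;i1<s1;++i1){
//   for(i0=0;i0<s0;++i0){
//   <body>
//   p0+=d0s0
//   }
//   p0+=d0s1
//   }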
// Generate "outer" loops that loop over blocks of data, applying "inner" loops to the blocks by manipulating the local variables in such a way that the inner loop only "sees" the current block.
// TODO: If this is used, then the previous declaration (done by generateCwiseOp) of s* is essentially unnecessary.
// I believe the s* are not used elsewhere (in particular, I don't think they're used in the pre/post parts and "shape" is defined independently), so it would be possible to make defining the s* dependent on what loop method is being used.
function outerFill(matched, order, proc, body) {
var dimension = order.length
, nargs = proc.arrayArgs.length
, blockSize = proc.blockSize
, has_index = proc.indexArgs.length > 0
, code = []
for(var i=0; i<nargs; ++i) {
code.push(["var offset",i,"=p",i].join(""))
}
//Generate loops for unmatched dimensions
// The order in which these dimensions are traversed is fairly arbitrary (from small stride to large stride, for the first argument)
// TODO: It would be nice if the order in which these loops are placed would also be somehow "optimal" (at the very least we should check that it really doesn't hurt us if they're not).
for(var i=matched; i<dimension; ++i) {
code.push(["for(var j"+i+"=SS[", order[i], "]|0;j", i, ">0;){"].join("")) // Iterate back to front
code.push(["if(j",i,"<",blockSize,"){"].join("")) // Either decrease j by blockSize (s = blockSize), or set it to zero (after setting s = j).
code.push(["s",order[i],"=j",i].join(""))
code.push(["j",i,"=0"].join(""))
code.push(["}else{s",order[i],"=",blockSize].join(""))
code.push(["j",i,"-=",blockSize,"}"].join(""))
if(has_index) {
code.push(["index[",order[i],"]=j",i].join(""))
}
}
for(var i=0; i<nargs; ++i) {
var indexStr = ["offset"+i]
for(var j=matched; j<dimension; ++j) {
indexStr.push(["j",j,"*t",i,"p",order[j]].join(""))
}
code.push(["p",i,"=(",indexStr.join("+"),")"].join(""))
}
code.push(innerFill(order, proc, body))
for(var i=matched; i<dimension; ++i) {
code.push("}")
}
return code.join("\n")
}
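// Illustration (rough sketch, not emitted verbatim): with one array argument, blockSize 64
// and a single unmatched dimension k = order[matched], the generated skeleton is roughly:
//   var offset0=p0
//   for(var j1=SS[k]|0;j1>0;){
//   if(j1<64){ s_k=j1; j1=0 }else{ s_k=64; j1-=64 }
//   p0=(offset0+j1*t0p_k)
//   ...innerFill(...) output goes here, seeing only the current block via the clamped s_k...
//   }
// i.e. the shape variable of each unmatched dimension is temporarily clamped to the block
// size so the inner loops never cross a block boundary.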
//Count the number of compatible inner orders
// This is the length of the longest common prefix of the arrays in orders.
// Each array in orders lists the dimensions of the corresponding ndarray in order of increasing stride.
// This is thus the maximum number of dimensions that can be efficiently traversed by simple nested loops for all arrays.
function countMatches(orders) {
var matched = 0, dimension = orders[0].length
while(matched < dimension) {
for(var j=1; j<orders.length; ++j) {
if(orders[j][matched] !== orders[0][matched]) {
return matched
}
}
++matched
}
return matched
}
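// Quick illustration: countMatches([[0,1,2],[0,1,2]]) is 3 (identical orders), while
// countMatches([[0,1,2],[0,2,1]]) is 1 and countMatches([[1,0],[0,1]]) is 0.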
//Processes a block according to the given data types
// Replaces variable names by different ones, either "local" ones (that are then ferried in and out of the given array) or ones matching the arguments that the function performing the ultimate loop will accept.
function processBlock(block, proc, dtypes) {
var code = block.body
var pre = []
var post = []
for(var i=0; i<block.args.length; ++i) {
var carg = block.args[i]
if(carg.count <= 0) {
continue
}
var re = new RegExp(carg.name, "g")
var ptrStr = ""
var arrNum = proc.arrayArgs.indexOf(i)
switch(proc.argTypes[i]) {
case "offset":
var offArgIndex = proc.offsetArgIndex.indexOf(i)
var offArg = proc.offsetArgs[offArgIndex]
arrNum = offArg.array
ptrStr = "+q" + offArgIndex // Adds offset to the "pointer" in the array
case "array":
ptrStr = "p" + arrNum + ptrStr
var localStr = "l" + i
var arrStr = "a" + arrNum
if (proc.arrayBlockIndices[arrNum] === 0) { // Argument to body is just a single value from this array
if(carg.count === 1) { // Argument/array used only once(?)
if(dtypes[arrNum] === "generic") {
if(carg.lvalue) {
pre.push(["var ", localStr, "=", arrStr, ".get(", ptrStr, ")"].join("")) // Is this necessary if the argument is ONLY used as an lvalue? (keep in mind that we can have a += something, so we would actually need to check carg.rvalue)
code = code.replace(re, localStr)
post.push([arrStr, ".set(", ptrStr, ",", localStr,")"].join(""))
} else {
code = code.replace(re, [arrStr, ".get(", ptrStr, ")"].join(""))
}
} else {
code = code.replace(re, [arrStr, "[", ptrStr, "]"].join(""))
}
} else if(dtypes[arrNum] === "generic") {
pre.push(["var ", localStr, "=", arrStr, ".get(", ptrStr, ")"].join("")) // TODO: Could we optimize by checking for carg.rvalue?
code = code.replace(re, localStr)
if(carg.lvalue) {
post.push([arrStr, ".set(", ptrStr, ",", localStr,")"].join(""))
}
} else {
pre.push(["var ", localStr, "=", arrStr, "[", ptrStr, "]"].join("")) // TODO: Could we optimize by checking for carg.rvalue?
code = code.replace(re, localStr)
if(carg.lvalue) {
post.push([arrStr, "[", ptrStr, "]=", localStr].join(""))
}
}
} else { // Argument to body is a "block"
var reStrArr = [carg.name], ptrStrArr = [ptrStr]
for(var j=0; j<Math.abs(proc.arrayBlockIndices[arrNum]); j++) {
reStrArr.push("\\s*\\[([^\\]]+)\\]")
ptrStrArr.push("$" + (j+1) + "*t" + arrNum + "b" + j) // Matched index times stride
}
re = new RegExp(reStrArr.join(""), "g")
ptrStr = ptrStrArr.join("+")
if(dtypes[arrNum] === "generic") {
/*if(carg.lvalue) {
pre.push(["var ", localStr, "=", arrStr, ".get(", ptrStr, ")"].join("")) // Is this necessary if the argument is ONLY used as an lvalue? (keep in mind that we can have a += something, so we would actually need to check carg.rvalue)
code = code.replace(re, localStr)
post.push([arrStr, ".set(", ptrStr, ",", localStr,")"].join(""))
} else {
code = code.replace(re, [arrStr, ".get(", ptrStr, ")"].join(""))
}*/
throw new Error("cwise: Generic arrays not supported in combination with blocks!")
} else {
// This does not produce any local variables, even if variables are used multiple times. It would be possible to do so, but it would complicate things quite a bit.
code = code.replace(re, [arrStr, "[", ptrStr, "]"].join(""))
}
}
break
case "scalar":
code = code.replace(re, "Y" + proc.scalarArgs.indexOf(i))
break
case "index":
code = code.replace(re, "index")
break
case "shape":
code = code.replace(re, "shape")
break
}
}
return [pre.join("\n"), code, post.join("\n")].join("\n").trim()
}
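// Quick illustration (hypothetical block; dtype "uint8", blockIndices 0): given a body
//   { _f_arg0_ = 255 - _f_arg1_ }
// where arg0 is an lvalue and arg1 an rvalue, each used once, both array arguments are
// rewritten to direct indexing, yielding roughly
//   { a0[p0] = 255 - a1[p1] }
// with empty pre/post sections. Arguments that are reused, or stored in "generic" ndarrays,
// may instead be routed through a local l<i> temporary (or .get()/.set() calls) with
// matching pre/post code.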
function typeSummary(dtypes) {
var summary = new Array(dtypes.length)
var allEqual = true
for(var i=0; i<dtypes.length; ++i) {
var t = dtypes[i]
var digits = t.match(/\d+/)
if(!digits) {
digits = ""
} else {
digits = digits[0]
}
if(t.charAt(0) === 0) {
summary[i] = "u" + t.charAt(1) + digits
} else {
summary[i] = t.charAt(0) + digits
}
if(i > 0) {
allEqual = allEqual && summary[i] === summary[i-1]
}
}
if(allEqual) {
return summary[0]
}
return summary.join("")
}
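// For example, typeSummary(["uint8","uint8"]) collapses to "u8" (all equal), while
// typeSummary(["float32","uint8"]) becomes "f32u8". Note the charAt(0) === 0 branch above
// never fires (charAt returns a string), so the else branch handles every dtype; the
// summary is only used to build the generated loop function's name, so this is cosmetic.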
//Generates a cwise operator
function generateCWiseOp(proc, typesig) {
//Compute dimension
// Arrays get put first in typesig, and there are two entries per array (dtype and order), so this gets the number of dimensions in the first array arg.
var dimension = (typesig[1].length - Math.abs(proc.arrayBlockIndices[0]))|0
var orders = new Array(proc.arrayArgs.length)
var dtypes = new Array(proc.arrayArgs.length)
for(var i=0; i<proc.arrayArgs.length; ++i) {
dtypes[i] = typesig[2*i]
orders[i] = typesig[2*i+1]
}
//Determine where block and loop indices start and end
var blockBegin = [], blockEnd = [] // These indices are exposed as blocks
var loopBegin = [], loopEnd = [] // These indices are iterated over
var loopOrders = [] // orders restricted to the loop indices
for(var i=0; i<proc.arrayArgs.length; ++i) {
if (proc.arrayBlockIndices[i]<0) {
loopBegin.push(0)
loopEnd.push(dimension)
blockBegin.push(dimension)
blockEnd.push(dimension+proc.arrayBlockIndices[i])
} else {
loopBegin.push(proc.arrayBlockIndices[i]) // Non-negative
loopEnd.push(proc.arrayBlockIndices[i]+dimension)
blockBegin.push(0)
blockEnd.push(proc.arrayBlockIndices[i])
}
var newOrder = []
for(var j=0; j<orders[i].length; j++) {
if (loopBegin[i]<=orders[i][j] && orders[i][j]<loopEnd[i]) {
newOrder.push(orders[i][j]-loopBegin[i]) // If this is a loop index, put it in newOrder, subtracting loopBegin, to make sure that all loopOrders are using a common set of indices.
}
}
loopOrders.push(newOrder)
}
//First create arguments for procedure
var arglist = ["SS"] // SS is the overall shape over which we iterate
var code = ["'use strict'"]
var vars = []
for(var j=0; j<dimension; ++j) {
vars.push(["s", j, "=SS[", j, "]"].join("")) // The limits for each dimension.
}
for(var i=0; i<proc.arrayArgs.length; ++i) {
arglist.push("a"+i) // Actual data array
arglist.push("t"+i) // Strides
arglist.push("p"+i) // Offset in the array at which the data starts (also used for iterating over the data)
for(var j=0; j<dimension; ++j) { // Unpack the strides into vars for looping
vars.push(["t",i,"p",j,"=t",i,"[",loopBegin[i]+j,"]"].join(""))
}
for(var j=0; j<Math.abs(proc.arrayBlockIndices[i]); ++j) { // Unpack the strides into vars for block iteration
vars.push(["t",i,"b",j,"=t",i,"[",blockBegin[i]+j,"]"].join(""))
}
}
for(var i=0; i<proc.scalarArgs.length; ++i) {
arglist.push("Y" + i)
}
if(proc.shapeArgs.length > 0) {
vars.push("shape=SS.slice(0)") // Makes the shape over which we iterate available to the user defined functions (so you can use width/height for example)
}
if(proc.indexArgs.length > 0) {
// Prepare an array to keep track of the (logical) indices, initialized with `dimension` zeros.
var zeros = new Array(dimension)
for(var i=0; i<dimension; ++i) {
zeros[i] = "0"
}
vars.push(["index=[", zeros.join(","), "]"].join(""))
}
for(var i=0; i<proc.offsetArgs.length; ++i) { // Offset arguments used for stencil operations
var off_arg = proc.offsetArgs[i]
var init_string = []
for(var j=0; j<off_arg.offset.length; ++j) {
if(off_arg.offset[j] === 0) {
continue
} else if(off_arg.offset[j] === 1) {
init_string.push(["t", off_arg.array, "p", j].join(""))
} else {
init_string.push([off_arg.offset[j], "*t", off_arg.array, "p", j].join(""))
}
}
if(init_string.length === 0) {
vars.push("q" + i + "=0")
} else {
vars.push(["q", i, "=", init_string.join("+")].join(""))
}
}
//Prepare the `this` variables
var thisVars = uniq([].concat(proc.pre.thisVars)
.concat(proc.body.thisVars)
.concat(proc.post.thisVars))
vars = vars.concat(thisVars)
if (vars.length > 0) {
code.push("var " + vars.join(","))
}
for(var i=0; i<proc.arrayArgs.length; ++i) {
code.push("p"+i+"|=0")
}
//Inline prelude
if(proc.pre.body.length > 3) {
code.push(processBlock(proc.pre, proc, dtypes))
}
//Process body
var body = processBlock(proc.body, proc, dtypes)
var matched = countMatches(loopOrders)
if(matched < dimension) {
code.push(outerFill(matched, loopOrders[0], proc, body)) // TODO: Rather than passing loopOrders[0], it might be interesting to look at passing an order that represents the majority of the arguments for example.
} else {
code.push(innerFill(loopOrders[0], proc, body))
}
//Inline epilog
if(proc.post.body.length > 3) {
code.push(processBlock(proc.post, proc, dtypes))
}
if(proc.debug) {
console.log("-----Generated cwise routine for ", typesig, ":\n" + code.join("\n") + "\n----------")
}
var loopName = [(proc.funcName||"unnamed"), "_cwise_loop_", orders[0].join("s"),"m",matched,typeSummary(dtypes)].join("")
var f = new Function(["function ",loopName,"(", arglist.join(","),"){", code.join("\n"),"} return ", loopName].join(""))
return f()
}
module.exports = generateCWiseOp
},{"uniq":39}],7:[function(require,module,exports){
"use strict"
// The function below is called when constructing a cwise function object, and does the following:
// A function object is constructed which accepts as argument a compilation function and returns another function.
// It is this other function that is eventually returned by createThunk, and this function is the one that actually
// checks whether a certain pattern of arguments has already been used before and compiles new loops as needed.
// The compilation function passed to the first function object is used for compiling new functions.
// Once this function object is created, it is called with compile as argument, where the first argument of compile
// is bound to "proc" (essentially containing a preprocessed version of the user arguments to cwise).
// So createThunk roughly works like this:
// function createThunk(proc) {
// var thunk = function(compileBound) {
// var CACHED = {}
// return function(arrays and scalars) {
// if (dtype and order of arrays in CACHED) {
// var func = CACHED[dtype and order of arrays]
// } else {
// var func = CACHED[dtype and order of arrays] = compileBound(dtype and order of arrays)
// }
// return func(arrays and scalars)
// }
// }
// return thunk(compile.bind1(proc))
// }
var compile = require("./compile.js")
function createThunk(proc) {
var code = ["'use strict'", "var CACHED={}"]
var vars = []
var thunkName = proc.funcName + "_cwise_thunk"
//Build thunk
code.push(["return function ", thunkName, "(", proc.shimArgs.join(","), "){"].join(""))
var typesig = []
var string_typesig = []
var proc_args = [["array",proc.arrayArgs[0],".shape.slice(", // Slice shape so that we only retain the shape over which we iterate (which gets passed to the cwise operator as SS).
Math.max(0,proc.arrayBlockIndices[0]),proc.arrayBlockIndices[0]<0?(","+proc.arrayBlockIndices[0]+")"):")"].join("")]
var shapeLengthConditions = [], shapeConditions = []
// Process array arguments
for(var i=0; i<proc.arrayArgs.length; ++i) {
var j = proc.arrayArgs[i]
vars.push(["t", j, "=array", j, ".dtype,",
"r", j, "=array", j, ".order"].join(""))
typesig.push("t" + j)
typesig.push("r" + j)
string_typesig.push("t"+j)
string_typesig.push("r"+j+".join()")
proc_args.push("array" + j + ".data")
proc_args.push("array" + j + ".stride")
proc_args.push("array" + j + ".offset|0")
if (i>0) { // Gather conditions to check for shape equality (ignoring block indices)
shapeLengthConditions.push("array" + proc.arrayArgs[0] + ".shape.length===array" + j + ".shape.length+" + (Math.abs(proc.arrayBlockIndices[0])-Math.abs(proc.arrayBlockIndices[i])))
shapeConditions.push("array" + proc.arrayArgs[0] + ".shape[shapeIndex+" + Math.max(0,proc.arrayBlockIndices[0]) + "]===array" + j + ".shape[shapeIndex+" + Math.max(0,proc.arrayBlockIndices[i]) + "]")
}
}
// Check for shape equality
if (proc.arrayArgs.length > 1) {
code.push("if (!(" + shapeLengthConditions.join(" && ") + ")) throw new Error('cwise: Arrays do not all have the same dimensionality!')")
code.push("for(var shapeIndex=array" + proc.arrayArgs[0] + ".shape.length-" + Math.abs(proc.arrayBlockIndices[0]) + "; shapeIndex-->0;) {")
code.push("if (!(" + shapeConditions.join(" && ") + ")) throw new Error('cwise: Arrays do not all have the same shape!')")
code.push("}")
}
// Process scalar arguments
for(var i=0; i<proc.scalarArgs.length; ++i) {
proc_args.push("scalar" + proc.scalarArgs[i])
}
// Check for cached function (and if not present, generate it)
vars.push(["type=[", string_typesig.join(","), "].join()"].join(""))
vars.push("proc=CACHED[type]")
code.push("var " + vars.join(","))
code.push(["if(!proc){",
"CACHED[type]=proc=compile([", typesig.join(","), "])}",
"return proc(", proc_args.join(","), ")}"].join(""))
if(proc.debug) {
console.log("-----Generated thunk:\n" + code.join("\n") + "\n----------")
}
//Compile thunk
var thunk = new Function("compile", code.join("\n"))
return thunk(compile.bind(undefined, proc))
}
module.exports = createThunk
},{"./compile.js":6}],8:[function(require,module,exports){
(function (Buffer){(function (){
/**
* Module exports.
*/
module.exports = dataUriToBuffer;
/**
* Returns a `Buffer` instance from the given data URI `uri`.
*
* @param {String} uri Data URI to turn into a Buffer instance
* @return {Buffer} Buffer instance from Data URI
* @api public
*/
function dataUriToBuffer (uri) {
if (!/^data\:/i.test(uri)) {
throw new TypeError('`uri` does not appear to be a Data URI (must begin with "data:")');
}
// strip newlines
uri = uri.replace(/\r?\n/g, '');
// split the URI up into the "metadata" and the "data" portions
var firstComma = uri.indexOf(',');
if (-1 === firstComma || firstComma <= 4) throw new TypeError('malformed data: URI');
// remove the "data:" scheme and parse the metadata
var meta = uri.substring(5, firstComma).split(';');
var base64 = false;
var charset = 'US-ASCII';
for (var i = 0; i < meta.length; i++) {
if ('base64' == meta[i]) {
base64 = true;
} else if (0 == meta[i].indexOf('charset=')) {
charset = meta[i].substring(8);
}
}
// get the encoded data portion and decode URI-encoded chars
var data = unescape(uri.substring(firstComma + 1));
var encoding = base64 ? 'base64' : 'ascii';
var buffer = new Buffer(data, encoding);
// set `.type` property to MIME type
buffer.type = meta[0] || 'text/plain';
// set the `.charset` property
buffer.charset = charset;
return buffer;
}
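// Illustrative usage (a sketch, not part of the upstream source):
//   var buf = dataUriToBuffer('data:text/plain;base64,SGVsbG8=')
//   buf.toString()  // 'Hello'
//   buf.type        // 'text/plain'
//   buf.charset     // 'US-ASCII' (the default, since no charset= field is present)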
}).call(this)}).call(this,require("buffer").Buffer)
},{"buffer":4}],9:[function(require,module,exports){
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
'use strict';
var R = typeof Reflect === 'object' ? Reflect : null
var ReflectApply = R && typeof R.apply === 'function'
? R.apply
: function ReflectApply(target, receiver, args) {
return Function.prototype.apply.call(target, receiver, args);
}
var ReflectOwnKeys
if (R && typeof R.ownKeys === 'function') {
ReflectOwnKeys = R.ownKeys
} else if (Object.getOwnPropertySymbols) {
ReflectOwnKeys = function ReflectOwnKeys(target) {
return Object.getOwnPropertyNames(target)
.concat(Object.getOwnPropertySymbols(target));
};
} else {
ReflectOwnKeys = function ReflectOwnKeys(target) {
return Object.getOwnPropertyNames(target);
};
}
function ProcessEmitWarning(warning) {
if (console && console.warn) console.warn(warning);
}
var NumberIsNaN = Number.isNaN || function NumberIsNaN(value) {
return value !== value;
}
function EventEmitter() {
EventEmitter.init.call(this);
}
module.exports = EventEmitter;
module.exports.once = once;
// Backwards-compat with node 0.10.x
EventEmitter.EventEmitter = EventEmitter;
EventEmitter.prototype._events = undefined;
EventEmitter.prototype._eventsCount = 0;
EventEmitter.prototype._maxListeners = undefined;
// By default EventEmitters will print a warning if more than 10 listeners are
// added to it. This is a useful default which helps finding memory leaks.
var defaultMaxListeners = 10;
function checkListener(listener) {
if (typeof listener !== 'function') {
throw new TypeError('The "listener" argument must be of type Function. Received type ' + typeof listener);
}
}
Object.defineProperty(EventEmitter, 'defaultMaxListeners', {
enumerable: true,
get: function() {
return defaultMaxListeners;
},
set: function(arg) {
if (typeof arg !== 'number' || arg < 0 || NumberIsNaN(arg)) {
throw new RangeError('The value of "defaultMaxListeners" is out of range. It must be a non-negative number. Received ' + arg + '.');
}
defaultMaxListeners = arg;
}
});
EventEmitter.init = function() {
if (this._events === undefined ||
this._events === Object.getPrototypeOf(this)._events) {
this._events = Object.create(null);
this._eventsCount = 0;
}
this._maxListeners = this._maxListeners || undefined;
};
// Obviously not all Emitters should be limited to 10. This function allows
// that to be increased. Set to zero for unlimited.
EventEmitter.prototype.setMaxListeners = function setMaxListeners(n) {
if (typeof n !== 'number' || n < 0 || NumberIsNaN(n)) {
throw new RangeError('The value of "n" is out of range. It must be a non-negative number. Received ' + n + '.');
}
this._maxListeners = n;
return this;
};
function _getMaxListeners(that) {
if (that._maxListeners === undefined)
return EventEmitter.defaultMaxListeners;
return that._maxListeners;
}
EventEmitter.prototype.getMaxListeners = function getMaxListeners() {
return _getMaxListeners(this);
};
EventEmitter.prototype.emit = function emit(type) {
var args = [];
for (var i = 1; i < arguments.length; i++) args.push(arguments[i]);
var doError = (type === 'error');
var events = this._events;
if (events !== undefined)
doError = (doError && events.error === undefined);
else if (!doError)
return false;
// If there is no 'error' event listener then throw.
if (doError) {
var er;
if (args.length > 0)
er = args[0];
if (er instanceof Error) {
// Note: The comments on the `throw` lines are intentional, they show
// up in Node's output if this results in an unhandled exception.
throw er; // Unhandled 'error' event
}
// At least give some kind of context to the user
var err = new Error('Unhandled error.' + (er ? ' (' + er.message + ')' : ''));
err.context = er;
throw err; // Unhandled 'error' event
}
var handler = events[type];
if (handler === undefined)
return false;
if (typeof handler === 'function') {
ReflectApply(handler, this, args);
} else {
var len = handler.length;
var listeners = arrayClone(handler, len);
for (var i = 0; i < len; ++i)
ReflectApply(listeners[i], this, args);
}
return true;
};
function _addListener(target, type, listener, prepend) {
var m;
var events;
var existing;
checkListener(listener);
events = target._events;
if (events === undefined) {
events = target._events = Object.create(null);
target._eventsCount = 0;
} else {
// To avoid recursion in the case that type === "newListener"! Before
// adding it to the listeners, first emit "newListener".
if (events.newListener !== undefined) {
target.emit('newListener', type,
listener.listener ? listener.listener : listener);
// Re-assign `events` because a newListener handler could have caused the
// this._events to be assigned to a new object
events = target._events;
}
existing = events[type];
}
if (existing === undefined) {
// Optimize the case of one listener. Don't need the extra array object.
existing = events[type] = listener;
++target._eventsCount;
} else {
if (typeof existing === 'function') {
// Adding the second element, need to change to array.
existing = events[type] =
prepend ? [listener, existing] : [existing, listener];
// If we've already got an array, just append.
} else if (prepend) {
existing.unshift(listener);
} else {
existing.push(listener);
}
// Check for listener leak
m = _getMaxListeners(target);
if (m > 0 && existing.length > m && !existing.warned) {
existing.warned = true;
// No error code for this since it is a Warning
// eslint-disable-next-line no-restricted-syntax
var w = new Error('Possible EventEmitter memory leak detected. ' +
existing.length + ' ' + String(type) + ' listeners ' +
'added. Use emitter.setMaxListeners() to ' +
'increase limit');
w.name = 'MaxListenersExceededWarning';
w.emitter = target;
w.type = type;
w.count = existing.length;
ProcessEmitWarning(w);
}
}
return target;
}
EventEmitter.prototype.addListener = function addListener(type, listener) {
return _addListener(this, type, listener, false);
};
EventEmitter.prototype.on = EventEmitter.prototype.addListener;
EventEmitter.prototype.prependListener =
function prependListener(type, listener) {
return _addListener(this, type, listener, true);
};
function onceWrapper() {
if (!this.fired) {
this.target.removeListener(this.type, this.wrapFn);
this.fired = true;
if (arguments.length === 0)
return this.listener.call(this.target);
return this.listener.apply(this.target, arguments);
}
}
function _onceWrap(target, type, listener) {
var state = { fired: false, wrapFn: undefined, target: target, type: type, listener: listener };
var wrapped = onceWrapper.bind(state);
wrapped.listener = listener;
state.wrapFn = wrapped;
return wrapped;
}
EventEmitter.prototype.once = function once(type, listener) {
checkListener(listener);
this.on(type, _onceWrap(this, type, listener));
return this;
};
EventEmitter.prototype.prependOnceListener =
function prependOnceListener(type, listener) {
checkListener(listener);
this.prependListener(type, _onceWrap(this, type, listener));
return this;
};
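// Illustrative usage (a sketch; names are hypothetical):
//   var ee = new EventEmitter()
//   ee.once('ping', function (n) { console.log('got', n) })
//   ee.emit('ping', 1)  // logs "got 1", then the wrapper removes itself
//   ee.emit('ping', 2)  // returns false: no listeners remain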
// Emits a 'removeListener' event if and only if the listener was removed.
EventEmitter.prototype.removeListener =
function removeListener(type, listener) {
var list, events, position, i, originalListener;
checkListener(listener);
events = this._events;
if (events === undefined)
return this;
list = events[type];
if (list === undefined)
return this;
if (list === listener || list.listener === listener) {
if (--this._eventsCount === 0)
this._events = Object.create(null);
else {
delete events[type];
if (events.removeListener)
this.emit('removeListener', type, list.listener || listener);
}
} else if (typeof list !== 'function') {
position = -1;
for (i = list.length - 1; i >= 0; i--) {
if (list[i] === listener || list[i].listener === listener) {
originalListener = list[i].listener;
position = i;
break;
}
}
if (position < 0)
return this;
if (position === 0)
list.shift();
else {
spliceOne(list, position);
}
if (list.length === 1)
events[type] = list[0];
if (events.removeListener !== undefined)
this.emit('removeListener', type, originalListener || listener);
}
return this;
};
EventEmitter.prototype.off = EventEmitter.prototype.removeListener;
EventEmitter.prototype.removeAllListeners =
function removeAllListeners(type) {
var listeners, events, i;
events = this._events;
if (events === undefined)
return this;
// not listening for removeListener, no need to emit
if (events.removeListener === undefined) {
if (arguments.length === 0) {
this._events = Object.create(null);
this._eventsCount = 0;
} else if (events[type] !== undefined) {
if (--this._eventsCount === 0)
this._events = Object.create(null);
else
delete events[type];
}
return this;
}
// emit removeListener for all listeners on all events
if (arguments.length === 0) {
var keys = Object.keys(events);
var key;
for (i = 0; i < keys.length; ++i) {
key = keys[i];
if (key === 'removeListener') continue;
this.removeAllListeners(key);
}
this.removeAllListeners('removeListener');
this._events = Object.create(null);
this._eventsCount = 0;
return this;
}
listeners = events[type];
if (typeof listeners === 'function') {
this.removeListener(type, listeners);
} else if (listeners !== undefined) {
// LIFO order
for (i = listeners.length - 1; i >= 0; i--) {
this.removeListener(type, listeners[i]);
}
}
return this;
};
function _listeners(target, type, unwrap) {
var events = target._events;
if (events === undefined)
return [];
var evlistener = events[type];
if (evlistener === undefined)
return [];
if (typeof evlistener === 'function')
return unwrap ? [evlistener.listener || evlistener] : [evlistener];
return unwrap ?
unwrapListeners(evlistener) : arrayClone(evlistener, evlistener.length);
}
EventEmitter.prototype.listeners = function listeners(type) {
return _listeners(this, type, true);
};
EventEmitter.prototype.rawListeners = function rawListeners(type) {
return _listeners(this, type, false);
};
EventEmitter.listenerCount = function(emitter, type) {
if (typeof emitter.listenerCount === 'function') {
return emitter.listenerCount(type);
} else {
return listenerCount.call(emitter, type);
}
};
EventEmitter.prototype.listenerCount = listenerCount;
function listenerCount(type) {
var events = this._events;
if (events !== undefined) {
var evlistener = events[type];
if (typeof evlistener === 'function') {
return 1;
} else if (evlistener !== undefined) {
return evlistener.length;
}
}
return 0;
}
EventEmitter.prototype.eventNames = function eventNames() {
return this._eventsCount > 0 ? ReflectOwnKeys(this._events) : [];
};
function arrayClone(arr, n) {
var copy = new Array(n);
for (var i = 0; i < n; ++i)
copy[i] = arr[i];
return copy;
}
function spliceOne(list, index) {
for (; index + 1 < list.length; index++)
list[index] = list[index + 1];
list.pop();
}
function unwrapListeners(arr) {
var ret = new Array(arr.length);
for (var i = 0; i < ret.length; ++i) {
ret[i] = arr[i].listener || arr[i];
}
return ret;
}
function once(emitter, name) {
return new Promise(function (resolve, reject) {
function eventListener() {
if (errorListener !== undefined) {
emitter.removeListener('error', errorListener);
}
resolve([].slice.call(arguments));
};
var errorListener;
// Adding an error listener is not optional because
// if an error is thrown on an event emitter we cannot
// guarantee that the actual event we are waiting for will
// be fired. The result could be a silent way to create
// memory or file descriptor leaks, which is something
// we should avoid.
if (name !== 'error') {
errorListener = function errorListener(err) {
emitter.removeListener(name, eventListener);
reject(err);
};
emitter.once('error', errorListener);
}
emitter.once(name, eventListener);
});
}
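// Illustrative usage (a sketch): the promise resolves with the listener arguments as an
// array, and rejects if 'error' fires first (unless 'error' itself is the awaited event):
//   once(ee, 'data').then(function (args) { console.log(args[0]) })
//   ee.emit('data', 42)  // logs 42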
},{}],10:[function(require,module,exports){
(function (Buffer,process){(function (){
'use strict'
var path = require('path')
var ndarray = require('ndarray')
var GifReader = require('omggif').GifReader
var pack = require('ndarray-pack')
var through = require('through')
var parseDataURI = require('data-uri-to-buffer')
function defaultImage(url, cb) {
var img = new Image()
img.crossOrigin = "Anonymous"
img.onload = function() {
var canvas = document.createElement('canvas')
canvas.width = img.width
canvas.height = img.height
var context = canvas.getContext('2d')
context.drawImage(img, 0, 0)
var pixels = context.getImageData(0, 0, img.width, img.height)
cb(null, ndarray(new Uint8Array(pixels.data), [img.width, img.height, 4], [4, 4*img.width, 1], 0))
}
img.onerror = function(err) {
cb(err)
}
img.src = url
}
//Animated gif loading
function handleGif(data, cb) {
var reader
try {
reader = new GifReader(data)
} catch(err) {
cb(err)
return
}
if(reader.numFrames() > 0) {
var nshape = [reader.numFrames(), reader.height, reader.width, 4]
var ndata = new Uint8Array(nshape[0] * nshape[1] * nshape[2] * nshape[3])
var result = ndarray(ndata, nshape)
try {
for(var i=0; i<reader.numFrames(); ++i) {
reader.decodeAndBlitFrameRGBA(i, ndata.subarray(
result.index(i, 0, 0, 0),
result.index(i+1, 0, 0, 0)))
}
} catch(err) {
cb(err)
return
}
cb(null, result.transpose(0,2,1))
} else {
var nshape = [reader.height, reader.width, 4]
var ndata = new Uint8Array(nshape[0] * nshape[1] * nshape[2])
var result = ndarray(ndata, nshape)
try {
reader.decodeAndBlitFrameRGBA(0, ndata)
} catch(err) {
cb(err)
return
}
cb(null, result.transpose(1,0))
}
}
function httpGif(url, cb) {
var xhr = new XMLHttpRequest()
xhr.open('GET', url, true)
xhr.responseType = 'arraybuffer'
if(xhr.overrideMimeType){
xhr.overrideMimeType('application/binary')
}
xhr.onerror = function(err) {
cb(err)
}
xhr.onload = function() {
if(xhr.readyState !== 4) {
return
}
var data = new Uint8Array(xhr.response)
handleGif(data, cb)
return
}
xhr.send()
}
function copyBuffer(buffer) {
if(buffer[0] === undefined) {
var n = buffer.length
var result = new Uint8Array(n)
for(var i=0; i<n; ++i) {
result[i] = buffer.get(i)
}
return result
} else {
return new Uint8Array(buffer)
}
}
function dataGif(url, cb) {
process.nextTick(function() {
try {
var buffer = parseDataURI(url)
if(buffer) {
handleGif(copyBuffer(buffer), cb)
} else {
cb(new Error('Error parsing data URI'))
}
} catch(err) {
cb(err)
}
})
}
module.exports = function getPixels(url, type, cb) {
if(!cb) {
cb = type
type = ''
}
var ext = path.extname(url)
switch(type || ext.toUpperCase()) {
case '.GIF':
httpGif(url, cb)
break
default:
if(Buffer.isBuffer(url)) {
url = 'data:' + type + ';base64,' + url.toString('base64')
}
if(url.indexOf('data:image/gif;') === 0) {
dataGif(url, cb)
} else {
defaultImage(url, cb)
}
}
}
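// Illustrative usage (a sketch; the URL is hypothetical). For non-GIF images the callback
// receives an ndarray shaped [width, height, 4] (RGBA), as produced by defaultImage() above:
//   getPixels('https://example.com/photo.png', function (err, pixels) {
//   if (err) { console.error(err); return }
//   var r = pixels.get(0, 0, 0)  // red channel of the top-left pixel
//   })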
}).call(this)}).call(this,{"isBuffer":require("../is-buffer/index.js")},require('_process'))
},{"../is-buffer/index.js":14,"_process":20,"data-uri-to-buffer":8,"ndarray":17,"ndarray-pack":15,"omggif":18,"path":19,"through":38}],11:[function(require,module,exports){
/*! ieee754. BSD-3-Clause License. Feross Aboukhadijeh <https://feross.org/opensource> */
exports.read = function (buffer, offset, isLE, mLen, nBytes) {
var e, m
var eLen = (nBytes * 8) - mLen - 1
var eMax = (1 << eLen) - 1
var eBias = eMax >> 1
var nBits = -7
var i = isLE ? (nBytes - 1) : 0
var d = isLE ? -1 : 1
var s = buffer[offset + i]
i += d
e = s & ((1 << (-nBits)) - 1)
s >>= (-nBits)
nBits += eLen
for (; nBits > 0; e = (e * 256) + buffer[offset + i], i += d, nBits -= 8) {}
m = e & ((1 << (-nBits)) - 1)
e >>= (-nBits)
nBits += mLen
for (; nBits > 0; m = (m * 256) + buffer[offset + i], i += d, nBits -= 8) {}
if (e === 0) {
e = 1 - eBias
} else if (e === eMax) {
return m ? NaN : ((s ? -1 : 1) * Infinity)
} else {
m = m + Math.pow(2, mLen)
e = e - eBias
}
return (s ? -1 : 1) * m * Math.pow(2, e - mLen)
}
exports.write = function (buffer, value, offset, isLE, mLen, nBytes) {
var e, m, c
var eLen = (nBytes * 8) - mLen - 1
var eMax = (1 << eLen) - 1
var eBias = eMax >> 1
var rt = (mLen === 23 ? Math.pow(2, -24) - Math.pow(2, -77) : 0)
var i = isLE ? 0 : (nBytes - 1)
var d = isLE ? 1 : -1
var s = value < 0 || (value === 0 && 1 / value < 0) ? 1 : 0
value = Math.abs(value)
if (isNaN(value) || value === Infinity) {
m = isNaN(value) ? 1 : 0
e = eMax
} else {
e = Math.floor(Math.log(value) / Math.LN2)
if (value * (c = Math.pow(2, -e)) < 1) {
e--
c *= 2
}
if (e + eBias >= 1) {
value += rt / c
} else {
value += rt * Math.pow(2, 1 - eBias)
}
if (value * c >= 2) {
e++
c /= 2
}
if (e + eBias >= eMax) {
m = 0
e = eMax
} else if (e + eBias >= 1) {
m = ((value * c) - 1) * Math.pow(2, mLen)
e = e + eBias
} else {
m = value * Math.pow(2, eBias - 1) * Math.pow(2, mLen)
e = 0
}
}
for (; mLen >= 8; buffer[offset + i] = m & 0xff, i += d, m /= 256, mLen -= 8) {}
e = (e << mLen) | m
eLen += mLen
for (; eLen > 0; buffer[offset + i] = e & 0xff, i += d, e /= 256, eLen -= 8) {}
buffer[offset + i - d] |= s * 128
}
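// Illustrative round trip (a sketch): encode a single-precision float (mLen 23, 4 bytes,
// little-endian) and read it back:
//   var b = new Uint8Array(4)
//   exports.write(b, 1.5, 0, true, 23, 4)
//   exports.read(b, 0, true, 23, 4)  // 1.5 (exactly representable in float32)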
},{}],12:[function(require,module,exports){
if (typeof Object.create === 'function') {
// implementation from standard node.js 'util' module
module.exports = function inherits(ctor, superCtor) {
if (superCtor) {
ctor.super_ = superCtor
ctor.prototype = Object.create(superCtor.prototype, {
constructor: {
value: ctor,
enumerable: false,
writable: true,
configurable: true
}
})
}
};
} else {
// old school shim for old browsers
module.exports = function inherits(ctor, superCtor) {
if (superCtor) {
ctor.super_ = superCtor
var TempCtor = function () {}
TempCtor.prototype = superCtor.prototype
ctor.prototype = new TempCtor()
ctor.prototype.constructor = ctor
}
}
}
},{}],13:[function(require,module,exports){
"use strict"
function iota(n) {
var result = new Array(n)
for(var i=0; i<n; ++i) {
result[i] = i
}
return result
}
module.exports = iota
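// For example, iota(3) returns [0, 1, 2].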
},{}],14:[function(require,module,exports){
/*!
* Determine if an object is a Buffer
*
* @author Feross Aboukhadijeh <https://feross.org>
* @license MIT
*/
// The _isBuffer check is for Safari 5-7 support, because it's missing
// Object.prototype.constructor. Remove this eventually
module.exports = function (obj) {
return obj != null && (isBuffer(obj) || isSlowBuffer(obj) || !!obj._isBuffer)
}
function isBuffer (obj) {
return !!obj.constructor && typeof obj.constructor.isBuffer === 'function' && obj.constructor.isBuffer(obj)
}
// For Node v0.10 support. Remove this eventually.
function isSlowBuffer (obj) {
return typeof obj.readFloatLE === 'function' && typeof obj.slice === 'function' && isBuffer(obj.slice(0, 0))
}
},{}],15:[function(require,module,exports){
"use strict"
var ndarray = require("ndarray")
var do_convert = require("./doConvert.js")
module.exports = function convert(arr, result) {
var shape = [], c = arr, sz = 1
while(Array.isArray(c)) {
shape.push(c.length)
sz *= c.length
c = c[0]
}
if(shape.length === 0) {
return ndarray()
}
if(!result) {
result = ndarray(new Float64Array(sz), shape)
}
do_convert(result, arr)
return result
}
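// Illustrative usage (a sketch): packing a nested JS array into a new float64 ndarray:
//   var m = convert([[1, 2], [3, 4]])
//   m.shape      // [2, 2]
//   m.get(1, 0)  // 3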
},{"./doConvert.js":16,"ndarray":17}],16:[function(require,module,exports){
module.exports=require('cwise-compiler')({"args":["array","scalar","index"],"pre":{"body":"{}","args":[],"thisVars":[],"localVars":[]},"body":{"body":"{\nvar _inline_1_v=_inline_1_arg1_,_inline_1_i\nfor(_inline_1_i=0;_inline_1_i<_inline_1_arg2_.length-1;++_inline_1_i) {\n_inline_1_v=_inline_1_v[_inline_1_arg2_[_inline_1_i]]\n}\n_inline_1_arg0_=_inline_1_v[_inline_1_arg2_[_inline_1_arg2_.length-1]]\n}","args":[{"name":"_inline_1_arg0_","lvalue":true,"rvalue":false,"count":1},{"name":"_inline_1_arg1_","lvalue":false,"rvalue":true,"count":1},{"name":"_inline_1_arg2_","lvalue":false,"rvalue":true,"count":4}],"thisVars":[],"localVars":["_inline_1_i","_inline_1_v"]},"post":{"body":"{}","args":[],"thisVars":[],"localVars":[]},"funcName":"convert","blockSize":64})
},{"cwise-compiler":5}],17:[function(require,module,exports){
var iota = require("iota-array")
var isBuffer = require("is-buffer")
var hasTypedArrays = ((typeof Float64Array) !== "undefined")
function compare1st(a, b) {
return a[0] - b[0]
}
function order() {
var stride = this.stride
var terms = new Array(stride.length)
var i
for(i=0; i<terms.length; ++i) {
terms[i] = [Math.abs(stride[i]), i]
}
terms.sort(compare1st)
var result = new Array(terms.length)
for(i=0; i<result.length; ++i) {
result[i] = terms[i][1]
}
return result
}
function compileConstructor(dtype, dimension) {
var className = ["View", dimension, "d", dtype].join("")
if(dimension < 0) {
className = "View_Nil" + dtype
}
var useGetters = (dtype === "generic")
if(dimension === -1) {
//Special case for trivial arrays
var code =
"function "+className+"(a){this.data=a;};\
var proto="+className+".prototype;\
proto.dtype='"+dtype+"';\
proto.index=function(){return -1};\
proto.size=0;\
proto.dimension=-1;\
proto.shape=proto.stride=proto.order=[];\
proto.lo=proto.hi=proto.transpose=proto.step=\
function(){return new "+className+"(this.data);};\
proto.get=proto.set=function(){};\
proto.pick=function(){return null};\
return function construct_"+className+"(a){return new "+className+"(a);}"
var procedure = new Function(code)
return procedure()
} else if(dimension === 0) {
//Special case for 0d arrays
var code =
"function "+className+"(a,d) {\
this.data = a;\
this.offset = d\
};\
var proto="+className+".prototype;\
proto.dtype='"+dtype+"';\
proto.index=function(){return this.offset};\
proto.dimension=0;\
proto.size=1;\
proto.shape=\
proto.stride=\
proto.order=[];\
proto.lo=\
proto.hi=\
proto.transpose=\
proto.step=function "+className+"_copy() {\
return new "+className+"(this.data,this.offset)\
};\
proto.pick=function "+className+"_pick(){\
return TrivialArray(this.data);\
};\
proto.valueOf=proto.get=function "+className+"_get(){\
return "+(useGetters ? "this.data.get(this.offset)" : "this.data[this.offset]")+
"};\
proto.set=function "+className+"_set(v){\
return "+(useGetters ? "this.data.set(this.offset,v)" : "this.data[this.offset]=v")+"\
};\
return function construct_"+className+"(a,b,c,d){return new "+className+"(a,d)}"
var procedure = new Function("TrivialArray", code)
return procedure(CACHED_CONSTRUCTORS[dtype][0])
}
var code = ["'use strict'"]
//Create constructor for view
var indices = iota(dimension)
var args = indices.map(function(i) { return "i"+i })
var index_str = "this.offset+" + indices.map(function(i) {
return "this.stride[" + i + "]*i" + i
}).join("+")
var shapeArg = indices.map(function(i) {
return "b"+i
}).join(",")
var strideArg = indices.map(function(i) {
return "c"+i
}).join(",")
code.push(
"function "+className+"(a," + shapeArg + "," + strideArg + ",d){this.data=a",
"this.shape=[" + shapeArg + "]",
"this.stride=[" + strideArg + "]",
"this.offset=d|0}",
"var proto="+className+".prototype",
"proto.dtype='"+dtype+"'",
"proto.dimension="+dimension)
//view.size:
code.push("Object.defineProperty(proto,'size',{get:function "+className+"_size(){\
return "+indices.map(function(i) { return "this.shape["+i+"]" }).join("*"),
"}})")
//view.order:
if(dimension === 1) {
code.push("proto.order=[0]")
} else {
code.push("Object.defineProperty(proto,'order',{get:")
if(dimension < 4) {
code.push("function "+className+"_order(){")
if(dimension === 2) {
code.push("return (Math.abs(this.stride[0])>Math.abs(this.stride[1]))?[1,0]:[0,1]}})")
} else if(dimension === 3) {
code.push(
"var s0=Math.abs(this.stride[0]),s1=Math.abs(this.stride[1]),s2=Math.abs(this.stride[2]);\
if(s0>s1){\
if(s1>s2){\
return [2,1,0];\
}else if(s0>s2){\
return [1,2,0];\
}else{\
return [1,0,2];\
}\
}else if(s0>s2){\
return [2,0,1];\
}else if(s2>s1){\
return [0,1,2];\
}else{\
return [0,2,1];\
}}})")
}
} else {
code.push("ORDER})")
}
}
//view.set(i0, ..., v):
code.push(
"proto.set=function "+className+"_set("+args.join(",")+",v){")
if(useGetters) {
code.push("return this.data.set("+index_str+",v)}")
} else {
code.push("return this.data["+index_str+"]=v}")
}
//view.get(i0, ...):
code.push("proto.get=function "+className+"_get("+args.join(",")+"){")
if(useGetters) {
code.push("return this.data.get("+index_str+")}")
} else {
code.push("return this.data["+index_str+"]}")
}
//view.index:
code.push(
"proto.index=function "+className+"_index(", args.join(), "){return "+index_str+"}")
//view.hi():
code.push("proto.hi=function "+className+"_hi("+args.join(",")+"){return new "+className+"(this.data,"+
indices.map(function(i) {
return ["(typeof i",i,"!=='number'||i",i,"<0)?this.shape[", i, "]:i", i,"|0"].join("")
}).join(",")+","+
indices.map(function(i) {
return "this.stride["+i + "]"
}).join(",")+",this.offset)}")
//view.lo():
var a_vars = indices.map(function(i) { return "a"+i+"=this.shape["+i+"]" })
var c_vars = indices.map(function(i) { return "c"+i+"=this.stride["+i+"]" })
code.push("proto.lo=function "+className+"_lo("+args.join(",")+"){var b=this.offset,d=0,"+a_vars.join(",")+","+c_vars.join(","))
for(var i=0; i<dimension; ++i) {
code.push(
"if(typeof i"+i+"==='number'&&i"+i+">=0){\
d=i"+i+"|0;\
b+=c"+i+"*d;\
a"+i+"-=d}")
}
code.push("return new "+className+"(this.data,"+
indices.map(function(i) {
return "a"+i
}).join(",")+","+
indices.map(function(i) {
return "c"+i
}).join(",")+",b)}")
//view.step():
code.push("proto.step=function "+className+"_step("+args.join(",")+"){var "+
indices.map(function(i) {
return "a"+i+"=this.shape["+i+"]"
}).join(",")+","+
indices.map(function(i) {
return "b"+i+"=this.stride["+i+"]"
}).join(",")+",c=this.offset,d=0,ceil=Math.ceil")
for(var i=0; i<dimension; ++i) {
code.push(
"if(typeof i"+i+"==='number'){\
d=i"+i+"|0;\
if(d<0){\
c+=b"+i+"*(a"+i+"-1);\
a"+i+"=ceil(-a"+i+"/d)\
}else{\
a"+i+"=ceil(a"+i+"/d)\
}\
b"+i+"*=d\
}")
}
code.push("return new "+className+"(this.data,"+
indices.map(function(i) {
return "a" + i
}).join(",")+","+
indices.map(function(i) {
return "b" + i
}).join(",")+",c)}")
//view.transpose():
var tShape = new Array(dimension)
var tStride = new Array(dimension)
for(var i=0; i<dimension; ++i) {
tShape[i] = "a[i"+i+"]"
tStride[i] = "b[i"+i+"]"
}
code.push("proto.transpose=function "+className+"_transpose("+args+"){"+
args.map(function(n,idx) { return n + "=(" + n + "===undefined?" + idx + ":" + n + "|0)"}).join(";"),
"var a=this.shape,b=this.stride;return new "+className+"(this.data,"+tShape.join(",")+","+tStride.join(",")+",this.offset)}")
//view.pick():
code.push("proto.pick=function "+className+"_pick("+args+"){var a=[],b=[],c=this.offset")
for(var i=0; i<dimension; ++i) {
code.push("if(typeof i"+i+"==='number'&&i"+i+">=0){c=(c+this.stride["+i+"]*i"+i+")|0}else{a.push(this.shape["+i+"]);b.push(this.stride["+i+"])}")
}
code.push("var ctor=CTOR_LIST[a.length+1];return ctor(this.data,a,b,c)}")
//Add return statement
code.push("return function construct_"+className+"(data,shape,stride,offset){return new "+className+"(data,"+
indices.map(function(i) {
return "shape["+i+"]"
}).join(",")+","+
indices.map(function(i) {
return "stride["+i+"]"
}).join(",")+",offset)}")
//Compile procedure
var procedure = new Function("CTOR_LIST", "ORDER", code.join("\n"))
return procedure(CACHED_CONSTRUCTORS[dtype], order)
}
function arrayDType(data) {
if(isBuffer(data)) {
return "buffer"
}
if(hasTypedArrays) {
switch(Object.prototype.toString.call(data)) {
case "[object Float64Array]":
return "float64"
case "[object Float32Array]":
return "float32"
case "[object Int8Array]":
return "int8"
case "[object Int16Array]":
return "int16"
case "[object Int32Array]":
return "int32"
case "[object Uint8Array]":
return "uint8"
case "[object Uint16Array]":
return "uint16"
case "[object Uint32Array]":
return "uint32"
case "[object Uint8ClampedArray]":
return "uint8_clamped"
case "[object BigInt64Array]":
return "bigint64"
case "[object BigUint64Array]":
return "biguint64"
}
}
if(Array.isArray(data)) {
return "array"
}
return "generic"
}
var CACHED_CONSTRUCTORS = {
"float32":[],
"float64":[],
"int8":[],
"int16":[],
"int32":[],
"uint8":[],
"uint16":[],
"uint32":[],
"array":[],
"uint8_clamped":[],
"bigint64": [],
"biguint64": [],
"buffer":[],
"generic":[]
}
;(function() {
for(var id in CACHED_CONSTRUCTORS) {
CACHED_CONSTRUCTORS[id].push(compileConstructor(id, -1))
}
})() // Invoke so the trivial (dimension -1) constructors are pre-cached; without the call, a zero-argument ndarray() would throw.
function wrappedNDArrayCtor(data, shape, stride, offset) {
if(data === undefined) {
var ctor = CACHED_CONSTRUCTORS.array[0]
return ctor([])
} else if(typeof data === "number") {
data = [data]
}
if(shape === undefined) {
shape = [ data.length ]
}
var d = shape.length
if(stride === undefined) {
stride = new Array(d)
for(var i=d-1, sz=1; i>=0; --i) {
stride[i] = sz
sz *= shape[i]
}
}
if(offset === undefined) {
offset = 0
for(var i=0; i<d; ++i) {
if(stride[i] < 0) {
offset -= (shape[i]-1)*stride[i]
}
}
}
var dtype = arrayDType(data)
var ctor_list = CACHED_CONSTRUCTORS[dtype]
while(ctor_list.length <= d+1) {
ctor_list.push(compileConstructor(dtype, ctor_list.length-1))
}
var ctor = ctor_list[d+1]
return ctor(data, shape, stride, offset)
}
module.exports = wrappedNDArrayCtor
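// Illustrative usage (a sketch): wrapping a typed array as a 2x3 view; row-major strides
// are generated by default when none are given:
//   var a = ndarray(new Float64Array(6), [2, 3])
//   a.stride      // [3, 1]
//   a.set(1, 2, 42)
//   a.get(1, 2)   // 42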
},{"iota-array":13,"is-buffer":14}],18:[function(require,module,exports){
// (c) Dean McNamee <[email protected]>, 2013.
//
// https://github.com/deanm/omggif
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// omggif is a JavaScript implementation of a GIF 89a encoder and decoder,
// including animation and compression. It does not rely on any specific
// underlying system, so should run in the browser, Node, or Plask.
"use strict";
function GifWriter(buf, width, height, gopts) {
var p = 0;
var gopts = gopts === undefined ? { } : gopts;
var loop_count = gopts.loop === undefined ? null : gopts.loop;
var global_palette = gopts.palette === undefined ? null : gopts.palette;
if (width <= 0 || height <= 0 || width > 65535 || height > 65535)
throw new Error("Width/Height invalid.");
function check_palette_and_num_colors(palette) {
var num_colors = palette.length;
if (num_colors < 2 || num_colors > 256 || num_colors & (num_colors-1)) {
throw new Error(
"Invalid code/color length, must be power of 2 and 2 .. 256.");
}
return num_colors;
}
// - Header.
buf[p++] = 0x47; buf[p++] = 0x49; buf[p++] = 0x46; // GIF
buf[p++] = 0x38; buf[p++] = 0x39; buf[p++] = 0x61; // 89a
// Handling of Global Color Table (palette) and background index.
var gp_num_colors_pow2 = 0;
var background = 0;
if (global_palette !== null) {
var gp_num_colors = check_palette_and_num_colors(global_palette);
while (gp_num_colors >>= 1) ++gp_num_colors_pow2;
gp_num_colors = 1 << gp_num_colors_pow2;
--gp_num_colors_pow2;
if (gopts.background !== undefined) {
background = gopts.background;
if (background >= gp_num_colors)
throw new Error("Background index out of range.");
// The GIF spec states that a background index of 0 should be ignored, so
// this is probably a mistake and you really want to set it to another
// slot in the palette. But actually in the end most browsers, etc end
// up ignoring this almost completely (including for dispose background).
if (background === 0)
throw new Error("Background index explicitly passed as 0.");
}
}
// - Logical Screen Descriptor.
// NOTE(deanm): w/h apparently ignored by implementations, but set anyway.
buf[p++] = width & 0xff; buf[p++] = width >> 8 & 0xff;
buf[p++] = height & 0xff; buf[p++] = height >> 8 & 0xff;
// NOTE: Indicates 0-bpp original color resolution (unused?).
buf[p++] = (global_palette !== null ? 0x80 : 0) | // Global Color Table Flag.
gp_num_colors_pow2; // NOTE: No sort flag (unused?).
buf[p++] = background; // Background Color Index.
buf[p++] = 0; // Pixel aspect ratio (unused?).
// - Global Color Table
if (global_palette !== null) {
for (var i = 0, il = global_palette.length; i < il; ++i) {
var rgb = global_palette[i];
buf[p++] = rgb >> 16 & 0xff;
buf[p++] = rgb >> 8 & 0xff;
buf[p++] = rgb & 0xff;
}
}
if (loop_count !== null) { // Netscape block for looping.
if (loop_count < 0 || loop_count > 65535)
throw new Error("Loop count invalid.")
// Extension code, label, and length.
buf[p++] = 0x21; buf[p++] = 0xff; buf[p++] = 0x0b;
// NETSCAPE2.0
buf[p++] = 0x4e; buf[p++] = 0x45; buf[p++] = 0x54; buf[p++] = 0x53;
buf[p++] = 0x43; buf[p++] = 0x41; buf[p++] = 0x50; buf[p++] = 0x45;
buf[p++] = 0x32; buf[p++] = 0x2e; buf[p++] = 0x30;
// Sub-block
buf[p++] = 0x03; buf[p++] = 0x01;
buf[p++] = loop_count & 0xff; buf[p++] = loop_count >> 8 & 0xff;
buf[p++] = 0x00; // Terminator.
}
var ended = false;
this.addFrame = function(x, y, w, h, indexed_pixels, opts) {
if (ended === true) { --p; ended = false; } // Un-end.
opts = opts === undefined ? { } : opts;
// TODO(deanm): Bounds check x, y. Do they need to be within the virtual
// canvas width/height, I imagine?
if (x < 0 || y < 0 || x > 65535 || y > 65535)
throw new Error("x/y invalid.")
if (w <= 0 || h <= 0 || w > 65535 || h > 65535)
throw new Error("Width/Height invalid.")
if (indexed_pixels.length < w * h)
throw new Error("Not enough pixels for the frame size.");
var using_local_palette = true;
var palette = opts.palette;
if (palette === undefined || palette === null) {
using_local_palette = false;
palette = global_palette;
}
if (palette === undefined || palette === null)
throw new Error("Must supply either a local or global palette.");
var num_colors = check_palette_and_num_colors(palette);
// Compute the min_code_size (power of 2), destroying num_colors.
var min_code_size = 0;
while (num_colors >>= 1) ++min_code_size;
num_colors = 1 << min_code_size; // Now we can easily get it back.
var delay = opts.delay === undefined ? 0 : opts.delay;
// From the spec:
// 0 - No disposal specified. The decoder is
// not required to take any action.
// 1 - Do not dispose. The graphic is to be left
// in place.
// 2 - Restore to background color. The area used by the
// graphic must be restored to the background color.
// 3 - Restore to previous. The decoder is required to
// restore the area overwritten by the graphic with
// what was there prior to rendering the graphic.
// 4-7 - To be defined.
// NOTE(deanm): Dispose background doesn't really work, apparently most
// browsers ignore the background palette index and clear to transparency.
var disposal = opts.disposal === undefined ? 0 : opts.disposal;
if (disposal < 0 || disposal > 3) // 4-7 is reserved.
throw new Error("Disposal out of range.");
var use_transparency = false;
var transparent_index = 0;
if (opts.transparent !== undefined && opts.transparent !== null) {
use_transparency = true;
transparent_index = opts.transparent;
if (transparent_index < 0 || transparent_index >= num_colors)
throw new Error("Transparent color index.");
}
if (disposal !== 0 || use_transparency || delay !== 0) {
// - Graphics Control Extension
buf[p++] = 0x21; buf[p++] = 0xf9; // Extension / Label.
buf[p++] = 4; // Byte size.
buf[p++] = disposal << 2 | (use_transparency === true ? 1 : 0);
buf[p++] = delay & 0xff; buf[p++] = delay >> 8 & 0xff;
buf[p++] = transparent_index; // Transparent color index.
buf[p++] = 0; // Block Terminator.
}
// - Image Descriptor
buf[p++] = 0x2c; // Image Separator.
buf[p++] = x & 0xff; buf[p++] = x >> 8 & 0xff; // Left.
buf[p++] = y & 0xff; buf[p++] = y >> 8 & 0xff; // Top.
buf[p++] = w & 0xff; buf[p++] = w >> 8 & 0xff;
buf[p++] = h & 0xff; buf[p++] = h >> 8 & 0xff;
// NOTE: No sort flag (unused?).
// TODO(deanm): Support interlace.
buf[p++] = using_local_palette === true ? (0x80 | (min_code_size-1)) : 0;
// - Local Color Table
if (using_local_palette === true) {
for (var i = 0, il = palette.length; i < il; ++i) {
var rgb = palette[i];
buf[p++] = rgb >> 16 & 0xff;
buf[p++] = rgb >> 8 & 0xff;
buf[p++] = rgb & 0xff;
}
}
p = GifWriterOutputLZWCodeStream(
buf, p, min_code_size < 2 ? 2 : min_code_size, indexed_pixels);
return p;
};
this.end = function() {
if (ended === false) {
buf[p++] = 0x3b; // Trailer.
ended = true;
}
return p;
};
this.getOutputBuffer = function() { return buf; };
this.setOutputBuffer = function(v) { buf = v; };
this.getOutputBufferPosition = function() { return p; };
this.setOutputBufferPosition = function(v) { p = v; };
}
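// Illustrative usage (a sketch; the buffer size is an arbitrary assumption): writing a 2x2
// two-colour GIF into a pre-allocated byte buffer:
//   var buf = new Uint8Array(1024)
//   var gw = new GifWriter(buf, 2, 2, { palette: [0x000000, 0xffffff] })
//   gw.addFrame(0, 0, 2, 2, [0, 1, 1, 0])
//   var len = gw.end()  // buf.subarray(0, len) now holds the complete GIF89a stream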
// Main compression routine, palette indexes -> LZW code stream.
// |index_stream| must have at least one entry.
function GifWriterOutputLZWCodeStream(buf, p, min_code_size, index_stream) {
buf[p++] = min_code_size;
var cur_subblock = p++; // Pointing at the length field.
var clear_code = 1 << min_code_size;
var code_mask = clear_code - 1;
var eoi_code = clear_code + 1;
var next_code = eoi_code + 1;
var cur_code_size = min_code_size + 1; // Number of bits per code.
var cur_shift = 0;
// We have at most 12-bit codes, so we should have to hold a max of 19
// bits here (and then we would write out).
var cur = 0;
function emit_bytes_to_buffer(bit_block_size) {
while (cur_shift >= bit_block_size) {
buf[p++] = cur & 0xff;
cur >>= 8; cur_shift -= 8;
if (p === cur_subblock + 256) { // Finished a subblock.
buf[cur_subblock] = 255;
cur_subblock = p++;
}
}
}
function emit_code(c) {
cur |= c << cur_shift;
cur_shift += cur_code_size;
emit_bytes_to_buffer(8);
}
// I am not an expert on the topic, and I don't want to write a thesis.
// However, it is good to outline the basic algorithm and the few data
// structures and optimizations that make this implementation fast.
// The basic idea behind LZW is to build a table of previously seen runs
// addressed by a short id (herein called output code). All data is
// referenced by a code, which represents one or more values from the
// original input stream. All input bytes can be referenced as the same
// value as an output code. So if you didn't want any compression, you
// could more or less just output the original bytes as codes (there are
// some details to this, but it is the idea). In order to achieve
// compression, values greater than the input range (codes can be up to
// 12-bit while input only 8-bit) represent a sequence of previously seen
// inputs. The decompressor is able to build the same mapping while
// decoding, so there is always a shared common knowledge between the
// encoder and decoder, which is also important for "timing" aspects like
// how to handle variable bit width code encoding.
//
// One obvious but very important consequence of the table system is there
// is always a unique id (at most 12-bits) to map the runs. 'A' might be
// 4, then 'AA' might be 10, 'AAA' 11, 'AAAA' 12, etc. This relationship
// can be used for an efficient lookup strategy for the code mapping. We
// need to know if a run has been seen before, and be able to map that run
// to the output code. Since we start with known unique ids (input bytes),
// and then from those build more unique ids (table entries), we can
// continue this chain (almost like a linked list) to always have small
// integer values that represent the current byte chains in the encoder.
// This means instead of tracking the input bytes (AAAABCD) to know our
// current state, we can track the table entry for AAAABC (it is guaranteed
// to exist by the nature of the algorithm) and the next character D.
// Therefore the tuple of (table_entry, byte) is guaranteed to also be
// unique. This allows us to create a simple lookup key for mapping input
// sequences to codes (table indices) without having to store or search
// any of the code sequences. So if 'AAAA' has a table entry of 12, the
// tuple of ('AAAA', K) for any input byte K will be unique, and can be our
// key. This leads to an integer value of at most 20 bits, which can always
// fit in an SMI value and be used as a fast sparse array / object key.
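// For example, with an 8-bit palette clear_code is 256, eoi_code is 257 and
// the first table entry gets code 258. If the run 'AA' was assigned 258 and
// the next pixel index is 65, the lookup key is (258 << 8) | 65 = 66113,
// comfortably below 2^20.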
// Output code for the current contents of the index buffer.
var ib_code = index_stream[0] & code_mask; // Load first input index.
var code_table = { }; // Key'd on our 20-bit "tuple".
emit_code(clear_code); // Spec says first code should be a clear code.
// First index already loaded, process the rest of the stream.
for (var i = 1, il = index_stream.length; i < il; ++i) {
var k = index_stream[i] & code_mask;
var cur_key = ib_code << 8 | k; // (prev, k) unique tuple.
var cur_code = code_table[cur_key]; // buffer + k.
// Check if we have to create a new code table entry.
if (cur_code === undefined) { // We don't have buffer + k.
// Emit index buffer (without k).
// This is an inline version of emit_code, because this is the core
// writing routine of the compressor (and V8 cannot inline emit_code
// because it is a closure here in a different context). Additionally
// we can call emit_bytes_to_buffer less often, because we can have
// 30-bits (from our 31-bit signed SMI), and we know our codes will only
// be 12-bits, so can safely have 18-bits there without overflow.
// emit_code(ib_code);
cur |= ib_code << cur_shift;
cur_shift += cur_code_size;
while (cur_shift >= 8) {
buf[p++] = cur & 0xff;
cur >>= 8; cur_shift -= 8;
if (p === cur_subblock + 256) { // Finished a subblock.
buf[cur_subblock] = 255;
cur_subblock = p++;
}
}
if (next_code === 4096) { // Table full, need a clear.
emit_code(clear_code);
next_code = eoi_code + 1;
cur_code_size = min_code_size + 1;
code_table = { };
} else { // Table not full, insert a new entry.
// Increase our variable bit code sizes if necessary. This is a bit
// tricky as it is based on "timing" between the encoding and
// decoder. From the encoder's perspective this should happen after
// we've already emitted the index buffer and are about to create the
// first table entry that would overflow our current code bit size.
if (next_code >= (1 << cur_code_size)) ++cur_code_size;
code_table[cur_key] = next_code++; // Insert into code table.
}
ib_code = k; // Index buffer to single input k.
} else {
ib_code = cur_code; // Index buffer to sequence in code table.
}
}
emit_code(ib_code); // There will still be something in the index buffer.
emit_code(eoi_code); // End Of Information.
// Flush / finalize the sub-blocks stream to the buffer.
emit_bytes_to_buffer(1);
// Finish the sub-blocks, writing out any unfinished lengths and
// terminating with a sub-block of length 0. If we have already started
// but not yet used a sub-block it can just become the terminator.
if (cur_subblock + 1 === p) { // Started but unused.
buf[cur_subblock] = 0;
} else { // Started and used, write length and additional terminator block.
buf[cur_subblock] = p - cur_subblock - 1;
buf[p++] = 0;
}
return p;
}
function GifReader(buf) {
var p = 0;
// - Header (GIF87a or GIF89a).
if (buf[p++] !== 0x47 || buf[p++] !== 0x49 || buf[p++] !== 0x46 ||
buf[p++] !== 0x38 || (buf[p++]+1 & 0xfd) !== 0x38 || buf[p++] !== 0x61) {
throw new Error("Invalid GIF 87a/89a header.");
}
// - Logical Screen Descriptor.
var width = buf[p++] | buf[p++] << 8;
var height = buf[p++] | buf[p++] << 8;
var pf0 = buf[p++]; // <Packed Fields>.
var global_palette_flag = pf0 >> 7;
var num_global_colors_pow2 = pf0 & 0x7;
var num_global_colors = 1 << (num_global_colors_pow2 + 1);
var background = buf[p++];
buf[p++]; // Pixel aspect ratio (unused?).
var global_palette_offset = null;
var global_palette_size = null;
if (global_palette_flag) {
global_palette_offset = p;
global_palette_size = num_global_colors;
p += num_global_colors * 3; // Seek past palette.
}
var no_eof = true;
var frames = [ ];
var delay = 0;
var transparent_index = null;
var disposal = 0; // 0 - No disposal specified.
var loop_count = null;
this.width = width;
this.height = height;
while (no_eof && p < buf.length) {
switch (buf[p++]) {
case 0x21: // Graphics Control Extension Block
switch (buf[p++]) {
case 0xff: // Application specific block
// Try if it's a Netscape block (with animation loop counter).
if (buf[p ] !== 0x0b || // 21 FF already read, check block size.
// NETSCAPE2.0
buf[p+1 ] == 0x4e && buf[p+2 ] == 0x45 && buf[p+3 ] == 0x54 &&
buf[p+4 ] == 0x53 && buf[p+5 ] == 0x43 && buf[p+6 ] == 0x41 &&
buf[p+7 ] == 0x50 && buf[p+8 ] == 0x45 && buf[p+9 ] == 0x32 &&
buf[p+10] == 0x2e && buf[p+11] == 0x30 &&
// Sub-block
buf[p+12] == 0x03 && buf[p+13] == 0x01 && buf[p+16] == 0) {
p += 14;
loop_count = buf[p++] | buf[p++] << 8;
p++; // Skip terminator.
} else { // We don't know what it is, just try to get past it.
p += 12;
while (true) { // Seek through subblocks.
var block_size = buf[p++];
// Bad block size (ex: undefined from an out of bounds read).
if (!(block_size >= 0)) throw Error("Invalid block size");
if (block_size === 0) break; // 0 size is terminator
p += block_size;
}
}
break;
case 0xf9: // Graphics Control Extension
if (buf[p++] !== 0x4 || buf[p+4] !== 0)
throw new Error("Invalid graphics extension block.");
var pf1 = buf[p++];
delay = buf[p++] | buf[p++] << 8;
transparent_index = buf[p++];
if ((pf1 & 1) === 0) transparent_index = null;
disposal = pf1 >> 2 & 0x7;
p++; // Skip terminator.
break;
case 0xfe: // Comment Extension.
while (true) { // Seek through subblocks.
var block_size = buf[p++];
// Bad block size (ex: undefined from an out of bounds read).
if (!(block_size >= 0)) throw Error("Invalid block size");
if (block_size === 0) break; // 0 size is terminator
// console.log(buf.slice(p, p+block_size).toString('ascii'));
p += block_size;
}
break;
default:
throw new Error(
"Unknown graphic control label: 0x" + buf[p-1].toString(16));
}
break;
case 0x2c: // Image Descriptor.
var x = buf[p++] | buf[p++] << 8;
var y = buf[p++] | buf[p++] << 8;
var w = buf[p++] | buf[p++] << 8;
var h = buf[p++] | buf[p++] << 8;
var pf2 = buf[p++];
var local_palette_flag = pf2 >> 7;
var interlace_flag = pf2 >> 6 & 1;
var num_local_colors_pow2 = pf2 & 0x7;
var num_local_colors = 1 << (num_local_colors_pow2 + 1);
var palette_offset = global_palette_offset;
var palette_size = global_palette_size;
var has_local_palette = false;
if (local_palette_flag) {
has_local_palette = true;
palette_offset = p; // Override with local palette.
palette_size = num_local_colors;
p += num_local_colors * 3; // Seek past palette.
}
var data_offset = p;
p++; // codesize
while (true) {
var block_size = buf[p++];
// Bad block size (ex: undefined from an out of bounds read).
if (!(block_size >= 0)) throw Error("Invalid block size");
if (block_size === 0) break; // 0 size is terminator
p += block_size;
}
frames.push({x: x, y: y, width: w, height: h,
has_local_palette: has_local_palette,
palette_offset: palette_offset,
palette_size: palette_size,
data_offset: data_offset,
data_length: p - data_offset,
transparent_index: transparent_index,
interlaced: !!interlace_flag,
delay: delay,
disposal: disposal});
break;
case 0x3b: // Trailer Marker (end of file).
no_eof = false;
break;
default:
throw new Error("Unknown gif block: 0x" + buf[p-1].toString(16));
break;
}
}
this.numFrames = function() {
return frames.length;
};
this.loopCount = function() {
return loop_count;
};
this.frameInfo = function(frame_num) {
if (frame_num < 0 || frame_num >= frames.length)
throw new Error("Frame index out of range.");
return frames[frame_num];
}
this.decodeAndBlitFrameBGRA = function(frame_num, pixels) {
var frame = this.frameInfo(frame_num);
var num_pixels = frame.width * frame.height;
var index_stream = new Uint8Array(num_pixels); // At most 8-bit indices.
GifReaderLZWOutputIndexStream(
buf, frame.data_offset, index_stream, num_pixels);
var palette_offset = frame.palette_offset;
// NOTE(deanm): It seems to be much faster to compare index to 256 than
// to === null. Not sure why, but CompareStub_EQ_STRICT shows up high in
// the profile, not sure if it's related to using a Uint8Array.
var trans = frame.transparent_index;
if (trans === null) trans = 256;
// We are possibly just blitting to a portion of the entire frame.
// That is a subrect within the framerect, so the additional pixels
// must be skipped over after we finish a scanline.
var framewidth = frame.width;
var framestride = width - framewidth;
var xleft = framewidth; // Number of subrect pixels left in scanline.
// Output indices of the top left and bottom right corners of the subrect.
var opbeg = ((frame.y * width) + frame.x) * 4;
var opend = ((frame.y + frame.height) * width + frame.x) * 4;
var op = opbeg;
var scanstride = framestride * 4;
// Use scanstride to skip past the rows when interlacing. This is skipping
// 7 rows for the first two passes, then 3 then 1.
if (frame.interlaced === true) {
scanstride += width * 4 * 7; // Pass 1.
}
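// For example, an 8-row interlaced frame stores its scanlines so that they
// land on output rows 0, 4, 2, 6, 1, 3, 5, 7; interlaceskip below halves
// 8 -> 4 -> 2 -> 1 each time a pass wraps past opend.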
var interlaceskip = 8; // Tracking the row interval in the current pass.
for (var i = 0, il = index_stream.length; i < il; ++i) {
var index = index_stream[i];
if (xleft === 0) { // Beginning of new scan line
op += scanstride;
xleft = framewidth;
if (op >= opend) { // Catch the wrap to switch passes when interlacing.
scanstride = framestride * 4 + width * 4 * (interlaceskip-1);
// interlaceskip / 2 * 4 is interlaceskip << 1.
op = opbeg + (framewidth + framestride) * (interlaceskip << 1);
interlaceskip >>= 1;
}
}
if (index === trans) {
op += 4;
} else {
var r = buf[palette_offset + index * 3];
var g = buf[palette_offset + index * 3 + 1];
var b = buf[palette_offset + index * 3 + 2];
pixels[op++] = b;
pixels[op++] = g;
pixels[op++] = r;
pixels[op++] = 255;
}
--xleft;
}
};
// I will go to copy and paste hell one day...
this.decodeAndBlitFrameRGBA = function(frame_num, pixels) {
var frame = this.frameInfo(frame_num);
var num_pixels = frame.width * frame.height;
var index_stream = new Uint8Array(num_pixels); // At most 8-bit indices.
GifReaderLZWOutputIndexStream(
buf, frame.data_offset, index_stream, num_pixels);
var palette_offset = frame.palette_offset;
// NOTE(deanm): It seems to be much faster to compare index to 256 than
// to === null. Not sure why, but CompareStub_EQ_STRICT shows up high in
// the profile, not sure if it's related to using a Uint8Array.
var trans = frame.transparent_index;
if (trans === null) trans = 256;
// We are possibly just blitting to a portion of the entire frame.
// That is a subrect within the framerect, so the additional pixels
// must be skipped over after we finish a scanline.
var framewidth = frame.width;
var framestride = width - framewidth;
var xleft = framewidth; // Number of subrect pixels left in scanline.
// Output indices of the top left and bottom right corners of the subrect.
var opbeg = ((frame.y * width) + frame.x) * 4;
var opend = ((frame.y + frame.height) * width + frame.x) * 4;
var op = opbeg;
var scanstride = framestride * 4;
// Use scanstride to skip past the rows when interlacing. This is skipping
// 7 rows for the first two passes, then 3 then 1.
if (frame.interlaced === true) {
scanstride += width * 4 * 7; // Pass 1.
}
var interlaceskip = 8; // Tracking the row interval in the current pass.
for (var i = 0, il = index_stream.length; i < il; ++i) {
var index = index_stream[i];
if (xleft === 0) { // Beginning of new scan line
op += scanstride;
xleft = framewidth;
if (op >= opend) { // Catch the wrap to switch passes when interlacing.
scanstride = framestride * 4 + width * 4 * (interlaceskip-1);
// interlaceskip / 2 * 4 is interlaceskip << 1.
op = opbeg + (framewidth + framestride) * (interlaceskip << 1);
interlaceskip >>= 1;
}
}
if (index === trans) {
op += 4;
} else {
var r = buf[palette_offset + index * 3];
var g = buf[palette_offset + index * 3 + 1];
var b = buf[palette_offset + index * 3 + 2];
pixels[op++] = r;
pixels[op++] = g;
pixels[op++] = b;
pixels[op++] = 255;
}
--xleft;
}
};
}
function GifReaderLZWOutputIndexStream(code_stream, p, output, output_length) {
var min_code_size = code_stream[p++];
var clear_code = 1 << min_code_size;
var eoi_code = clear_code + 1;
var next_code = eoi_code + 1;
var cur_code_size = min_code_size + 1; // Number of bits per code.
// NOTE: This shares the same name as the encoder, but has a different
// meaning here. Here this masks each code coming from the code stream.
var code_mask = (1 << cur_code_size) - 1;
var cur_shift = 0;
var cur = 0;
var op = 0; // Output pointer.
var subblock_size = code_stream[p++];
// TODO(deanm): Would using a TypedArray be any faster? At least it would
// solve the fast mode / backing store uncertainty.
// var code_table = Array(4096);
var code_table = new Int32Array(4096); // Can be signed, we only use 20 bits.
var prev_code = null; // Track code-1.
while (true) {
// Read up to two bytes, making sure we always have 12 bits for a max-sized code.
while (cur_shift < 16) {
if (subblock_size === 0) break; // No more data to be read.
cur |= code_stream[p++] << cur_shift;
cur_shift += 8;
if (subblock_size === 1) { // Never let it get to 0 to hold logic above.
subblock_size = code_stream[p++]; // Next subblock.
} else {
--subblock_size;
}
}
// TODO(deanm): We should never really get here, we should have received
// an EOI.
if (cur_shift < cur_code_size)
break;
var code = cur & code_mask;
cur >>= cur_code_size;
cur_shift -= cur_code_size;
// TODO(deanm): Maybe should check that the first code was a clear code,
// at least this is what you're supposed to do (and the encoder above does
// emit one first).
if (code === clear_code) {
// We don't actually have to clear the table. This could be a good idea
// for greater error checking, but we don't really do any anyway. We
// will just track it with next_code and overwrite old entries.
next_code = eoi_code + 1;
cur_code_size = min_code_size + 1;
code_mask = (1 << cur_code_size) - 1;
// Don't update prev_code ?
prev_code = null;
continue;
} else if (code === eoi_code) {
break;
}
// We have a similar situation as in the encoder, where we want to store
// variable length entries (code table entries), but we want to do it in a
// faster manner than an array of arrays. The code below stores sort of a
// linked list within the code table, and then "chases" through it to
// construct the dictionary entries. When a new entry is created, just the
// last byte is stored, and the rest (prefix) of the entry is only
// referenced by its table entry. Then the code chases through the
// prefixes until it reaches a single byte code. We have to chase twice,
// first to compute the length, and then to actually copy the data to the
// output (backwards, since we know the length). The alternative would be
// storing something in an intermediate stack, but that doesn't make any
// more sense. I implemented an approach where it also stored the length
// in the code table, although it's a bit tricky because you run out of
// bits (12 + 12 + 8), but I didn't measure much improvement (the table
// entries are generally not that long). Even when I created benchmarks for
// very long table entries the complexity did not seem worth it.
// The code table stores the prefix entry in 12 bits and then the suffix
// byte in 8 bits, so each entry is 20 bits.
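// For example, if code 258 = "AB" is stored as (65 << 8) | 66 and code
// 259 = "ABC" as (258 << 8) | 67, emitting 259 chases 259 -> 258 -> 65:
// chase_length is 2, k is 65 ('A'), and 'C' then 'B' are written backwards
// behind it.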
var chase_code = code < next_code ? code : prev_code;
// Chase what we will output, either {CODE} or {CODE-1}.
var chase_length = 0;
var chase = chase_code;
while (chase > clear_code) {
chase = code_table[chase] >> 8;
++chase_length;
}
var k = chase;
var op_end = op + chase_length + (chase_code !== code ? 1 : 0);
if (op_end > output_length) {
console.log("Warning, gif stream longer than expected.");
return;
}
// Already have the first byte from the chase, might as well write it fast.
output[op++] = k;
op += chase_length;
var b = op; // Track pointer, writing backwards.
if (chase_code !== code) // The case of emitting {CODE-1} + k.
output[op++] = k;
chase = chase_code;
while (chase_length--) {
chase = code_table[chase];
output[--b] = chase & 0xff; // Write backwards.
chase >>= 8; // Pull down to the prefix code.
}
if (prev_code !== null && next_code < 4096) {
code_table[next_code++] = prev_code << 8 | k;
// TODO(deanm): Figure out this clearing vs code growth logic better. I
// have a feeling that it should just happen somewhere else, for now it
// is awkward between when we grow past the max and then hit a clear code.
// For now just check if we hit the max 12-bits (then a clear code should
// follow, also of course encoded in 12-bits).
if (next_code >= code_mask+1 && cur_code_size < 12) {
++cur_code_size;
code_mask = code_mask << 1 | 1;
}
}
prev_code = code;
}
if (op !== output_length) {
console.log("Warning, gif stream shorter than expected.");
}
return output;
}
// CommonJS.
try { exports.GifWriter = GifWriter; exports.GifReader = GifReader } catch(e) {}
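// Typical reader usage (sketch; gifBytes is a Uint8Array of the raw file):
//   var gr = new GifReader(gifBytes);
//   var rgba = new Uint8Array(gr.width * gr.height * 4);
//   gr.decodeAndBlitFrameRGBA(0, rgba);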
},{}],19:[function(require,module,exports){
(function (process){(function (){
// 'path' module extracted from Node.js v8.11.1 (only the posix part)
// transpiled with Babel
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
'use strict';
function assertPath(path) {
if (typeof path !== 'string') {
throw new TypeError('Path must be a string. Received ' + JSON.stringify(path));
}
}
// Resolves . and .. elements in a path with directory names
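// For example, normalizeStringPosix('a/b/../c/./d', false) === 'a/c/d' and
// normalizeStringPosix('../a', true) === '../a'.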
function normalizeStringPosix(path, allowAboveRoot) {
var res = '';
var lastSegmentLength = 0;
var lastSlash = -1;
var dots = 0;
var code;
for (var i = 0; i <= path.length; ++i) {
if (i < path.length)
code = path.charCodeAt(i);
else if (code === 47 /*/*/)
break;
else
code = 47 /*/*/;
if (code === 47 /*/*/) {
if (lastSlash === i - 1 || dots === 1) {
// NOOP
} else if (lastSlash !== i - 1 && dots === 2) {
if (res.length < 2 || lastSegmentLength !== 2 || res.charCodeAt(res.length - 1) !== 46 /*.*/ || res.charCodeAt(res.length - 2) !== 46 /*.*/) {
if (res.length > 2) {
var lastSlashIndex = res.lastIndexOf('/');
if (lastSlashIndex !== res.length - 1) {
if (lastSlashIndex === -1) {
res = '';
lastSegmentLength = 0;
} else {
res = res.slice(0, lastSlashIndex);
lastSegmentLength = res.length - 1 - res.lastIndexOf('/');
}
lastSlash = i;
dots = 0;
continue;
}
} else if (res.length === 2 || res.length === 1) {
res = '';
lastSegmentLength = 0;
lastSlash = i;
dots = 0;
continue;
}
}
if (allowAboveRoot) {
if (res.length > 0)
res += '/..';
else
res = '..';
lastSegmentLength = 2;
}
} else {
if (res.length > 0)
res += '/' + path.slice(lastSlash + 1, i);
else
res = path.slice(lastSlash + 1, i);
lastSegmentLength = i - lastSlash - 1;
}
lastSlash = i;
dots = 0;
} else if (code === 46 /*.*/ && dots !== -1) {
++dots;
} else {
dots = -1;
}
}
return res;
}
function _format(sep, pathObject) {
var dir = pathObject.dir || pathObject.root;
var base = pathObject.base || (pathObject.name || '') + (pathObject.ext || '');
if (!dir) {
return base;
}
if (dir === pathObject.root) {
return dir + base;
}
return dir + sep + base;
}
var posix = {
// path.resolve([from ...], to)
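// For example, posix.resolve('/foo/bar', './baz') === '/foo/bar/baz' and
// posix.resolve('/foo', '/tmp', 'file') === '/tmp/file'.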
resolve: function resolve() {
var resolvedPath = '';
var resolvedAbsolute = false;
var cwd;
for (var i = arguments.length - 1; i >= -1 && !resolvedAbsolute; i--) {
var path;
if (i >= 0)
path = arguments[i];
else {
if (cwd === undefined)
cwd = process.cwd();
path = cwd;
}
assertPath(path);
// Skip empty entries
if (path.length === 0) {
continue;
}
resolvedPath = path + '/' + resolvedPath;
resolvedAbsolute = path.charCodeAt(0) === 47 /*/*/;
}
// At this point the path should be resolved to a full absolute path, but
// handle relative paths to be safe (might happen when process.cwd() fails)
// Normalize the path
resolvedPath = normalizeStringPosix(resolvedPath, !resolvedAbsolute);
if (resolvedAbsolute) {
if (resolvedPath.length > 0)
return '/' + resolvedPath;
else
return '/';
} else if (resolvedPath.length > 0) {
return resolvedPath;
} else {
return '.';
}
},
normalize: function normalize(path) {
assertPath(path);
if (path.length === 0) return '.';
var isAbsolute = path.charCodeAt(0) === 47 /*/*/;
var trailingSeparator = path.charCodeAt(path.length - 1) === 47 /*/*/;
// Normalize the path
path = normalizeStringPosix(path, !isAbsolute);
if (path.length === 0 && !isAbsolute) path = '.';
if (path.length > 0 && trailingSeparator) path += '/';
if (isAbsolute) return '/' + path;
return path;
},
isAbsolute: function isAbsolute(path) {
assertPath(path);
return path.length > 0 && path.charCodeAt(0) === 47 /*/*/;
},
join: function join() {
if (arguments.length === 0)
return '.';
var joined;
for (var i = 0; i < arguments.length; ++i) {
var arg = arguments[i];
assertPath(arg);
if (arg.length > 0) {
if (joined === undefined)
joined = arg;
else
joined += '/' + arg;
}
}
if (joined === undefined)
return '.';
return posix.normalize(joined);
},
relative: function relative(from, to) {
assertPath(from);
assertPath(to);
if (from === to) return '';
from = posix.resolve(from);
to = posix.resolve(to);
if (from === to) return '';
// Trim any leading slashes
var fromStart = 1;
for (; fromStart < from.length; ++fromStart) {
if (from.charCodeAt(fromStart) !== 47 /*/*/)
break;
}
var fromEnd = from.length;
var fromLen = fromEnd - fromStart;
// Trim any leading slashes
var toStart = 1;
for (; toStart < to.length; ++toStart) {
if (to.charCodeAt(toStart) !== 47 /*/*/)
break;
}
var toEnd = to.length;
var toLen = toEnd - toStart;
// Compare paths to find the longest common path from root
var length = fromLen < toLen ? fromLen : toLen;
var lastCommonSep = -1;
var i = 0;
for (; i <= length; ++i) {
if (i === length) {
if (toLen > length) {
if (to.charCodeAt(toStart + i) === 47 /*/*/) {
// We get here if `from` is the exact base path for `to`.
// For example: from='/foo/bar'; to='/foo/bar/baz'
return to.slice(toStart + i + 1);
} else if (i === 0) {
// We get here if `from` is the root
// For example: from='/'; to='/foo'
return to.slice(toStart + i);
}
} else if (fromLen > length) {
if (from.charCodeAt(fromStart + i) === 47 /*/*/) {
// We get here if `to` is the exact base path for `from`.
// For example: from='/foo/bar/baz'; to='/foo/bar'
lastCommonSep = i;
} else if (i === 0) {
// We get here if `to` is the root.
// For example: from='/foo'; to='/'
lastCommonSep = 0;
}
}
break;
}
var fromCode = from.charCodeAt(fromStart + i);
var toCode = to.charCodeAt(toStart + i);
if (fromCode !== toCode)
break;
else if (fromCode === 47 /*/*/)
lastCommonSep = i;
}
var out = '';
// Generate the relative path based on the path difference between `to`
// and `from`
for (i = fromStart + lastCommonSep + 1; i <= fromEnd; ++i) {
if (i === fromEnd || from.charCodeAt(i) === 47 /*/*/) {
if (out.length === 0)
out += '..';
else
out += '/..';
}
}
// Lastly, append the rest of the destination (`to`) path that comes after
// the common path parts
if (out.length > 0)
return out + to.slice(toStart + lastCommonSep);
else {
toStart += lastCommonSep;
if (to.charCodeAt(toStart) === 47 /*/*/)
++toStart;
return to.slice(toStart);
}
},
_makeLong: function _makeLong(path) {
return path;
},
dirname: function dirname(path) {
assertPath(path);
if (path.length === 0) return '.';
var code = path.charCodeAt(0);
var hasRoot = code === 47 /*/*/;
var end = -1;
var matchedSlash = true;
for (var i = path.length - 1; i >= 1; --i) {
code = path.charCodeAt(i);
if (code === 47 /*/*/) {
if (!matchedSlash) {
end = i;
break;
}
} else {
// We saw the first non-path separator
matchedSlash = false;
}
}
if (end === -1) return hasRoot ? '/' : '.';
if (hasRoot && end === 1) return '//';
return path.slice(0, end);
},
basename: function basename(path, ext) {
if (ext !== undefined && typeof ext !== 'string') throw new TypeError('"ext" argument must be a string');
assertPath(path);
var start = 0;
var end = -1;
var matchedSlash = true;
var i;
if (ext !== undefined && ext.length > 0 && ext.length <= path.length) {
if (ext.length === path.length && ext === path) return '';
var extIdx = ext.length - 1;
var firstNonSlashEnd = -1;
for (i = path.length - 1; i >= 0; --i) {
var code = path.charCodeAt(i);
if (code === 47 /*/*/) {
// If we reached a path separator that was not part of a set of path
// separators at the end of the string, stop now
if (!matchedSlash) {
start = i + 1;
break;
}
} else {
if (firstNonSlashEnd === -1) {
// We saw the first non-path separator, remember this index in case
// we need it if the extension ends up not matching
matchedSlash = false;
firstNonSlashEnd = i + 1;
}
if (extIdx >= 0) {
// Try to match the explicit extension
if (code === ext.charCodeAt(extIdx)) {
if (--extIdx === -1) {
// We matched the extension, so mark this as the end of our path
// component
end = i;
}
} else {
// Extension does not match, so our result is the entire path
// component
extIdx = -1;
end = firstNonSlashEnd;
}
}
}
}
if (start === end) end = firstNonSlashEnd;else if (end === -1) end = path.length;
return path.slice(start, end);
} else {
for (i = path.length - 1; i >= 0; --i) {
if (path.charCodeAt(i) === 47 /*/*/) {
// If we reached a path separator that was not part of a set of path
// separators at the end of the string, stop now
if (!matchedSlash) {
start = i + 1;
break;
}
} else if (end === -1) {
// We saw the first non-path separator, mark this as the end of our
// path component
matchedSlash = false;
end = i + 1;
}
}
if (end === -1) return '';
return path.slice(start, end);
}
},
extname: function extname(path) {
assertPath(path);
var startDot = -1;
var startPart = 0;
var end = -1;
var matchedSlash = true;
// Track the state of characters (if any) we see before our first dot and
// after any path separator we find
var preDotState = 0;
for (var i = path.length - 1; i >= 0; --i) {
var code = path.charCodeAt(i);
if (code === 47 /*/*/) {
// If we reached a path separator that was not part of a set of path
// separators at the end of the string, stop now
if (!matchedSlash) {
startPart = i + 1;
break;
}
continue;
}
if (end === -1) {
// We saw the first non-path separator, mark this as the end of our
// extension
matchedSlash = false;
end = i + 1;
}
if (code === 46 /*.*/) {
// If this is our first dot, mark it as the start of our extension
if (startDot === -1)
startDot = i;
else if (preDotState !== 1)
preDotState = 1;
} else if (startDot !== -1) {
// We saw a non-dot and non-path separator before our dot, so we should
// have a good chance at having a non-empty extension
preDotState = -1;
}
}
if (startDot === -1 || end === -1 ||
// We saw a non-dot character immediately before the dot
preDotState === 0 ||
// The (right-most) trimmed path component is exactly '..'
preDotState === 1 && startDot === end - 1 && startDot === startPart + 1) {
return '';
}
return path.slice(startDot, end);
},
format: function format(pathObject) {
if (pathObject === null || typeof pathObject !== 'object') {
throw new TypeError('The "pathObject" argument must be of type Object. Received type ' + typeof pathObject);
}
return _format('/', pathObject);
},
parse: function parse(path) {
assertPath(path);
var ret = { root: '', dir: '', base: '', ext: '', name: '' };
if (path.length === 0) return ret;
var code = path.charCodeAt(0);
var isAbsolute = code === 47 /*/*/;
var start;
if (isAbsolute) {
ret.root = '/';
start = 1;
} else {
start = 0;
}
var startDot = -1;
var startPart = 0;
var end = -1;
var matchedSlash = true;
var i = path.length - 1;
// Track the state of characters (if any) we see before our first dot and
// after any path separator we find
var preDotState = 0;
// Get non-dir info
for (; i >= start; --i) {
code = path.charCodeAt(i);
if (code === 47 /*/*/) {
// If we reached a path separator that was not part of a set of path
// separators at the end of the string, stop now
if (!matchedSlash) {
startPart = i + 1;
break;
}
continue;
}
if (end === -1) {
// We saw the first non-path separator, mark this as the end of our
// extension
matchedSlash = false;
end = i + 1;
}
if (code === 46 /*.*/) {
// If this is our first dot, mark it as the start of our extension
if (startDot === -1) startDot = i;else if (preDotState !== 1) preDotState = 1;
} else if (startDot !== -1) {
// We saw a non-dot and non-path separator before our dot, so we should
// have a good chance at having a non-empty extension
preDotState = -1;
}
}
if (startDot === -1 || end === -1 ||
// We saw a non-dot character immediately before the dot
preDotState === 0 ||
// The (right-most) trimmed path component is exactly '..'
preDotState === 1 && startDot === end - 1 && startDot === startPart + 1) {
if (end !== -1) {
if (startPart === 0 && isAbsolute) ret.base = ret.name = path.slice(1, end);else ret.base = ret.name = path.slice(startPart, end);
}
} else {
if (startPart === 0 && isAbsolute) {
ret.name = path.slice(1, startDot);
ret.base = path.slice(1, end);
} else {
ret.name = path.slice(startPart, startDot);
ret.base = path.slice(startPart, end);
}
ret.ext = path.slice(startDot, end);
}
if (startPart > 0) ret.dir = path.slice(0, startPart - 1);else if (isAbsolute) ret.dir = '/';
return ret;
},
sep: '/',
delimiter: ':',
win32: null,
posix: null
};
posix.posix = posix;
module.exports = posix;
}).call(this)}).call(this,require('_process'))
},{"_process":20}],20:[function(require,module,exports){
// shim for using process in browser
var process = module.exports = {};
// cached from whatever global is present so that test runners that stub it
// don't break things. But we need to wrap it in a try catch in case it is
// wrapped in strict mode code which doesn't define any globals. It's inside a
// function because try/catches deoptimize in certain engines.
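// For example, in a sandboxed context where setTimeout is missing at load
// time but injected later, runTimeout() below falls through to the late
// global instead of throwing.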
var cachedSetTimeout;
var cachedClearTimeout;
function defaultSetTimout() {
throw new Error('setTimeout has not been defined');
}
function defaultClearTimeout () {
throw new Error('clearTimeout has not been defined');
}
(function () {
try {
if (typeof setTimeout === 'function') {
cachedSetTimeout = setTimeout;
} else {
cachedSetTimeout = defaultSetTimout;
}
} catch (e) {
cachedSetTimeout = defaultSetTimout;
}
try {
if (typeof clearTimeout === 'function') {
cachedClearTimeout = clearTimeout;
} else {
cachedClearTimeout = defaultClearTimeout;
}
} catch (e) {
cachedClearTimeout = defaultClearTimeout;
}
} ())
function runTimeout(fun) {
if (cachedSetTimeout === setTimeout) {
// normal environments in sane situations
return setTimeout(fun, 0);
}
// if setTimeout wasn't available but was later defined
if ((cachedSetTimeout === defaultSetTimout || !cachedSetTimeout) && setTimeout) {
cachedSetTimeout = setTimeout;
return setTimeout(fun, 0);
}
try {
// when somebody has screwed with setTimeout but no I.E. madness
return cachedSetTimeout(fun, 0);
} catch(e){
try {
// When we are in I.E. but the script has been evaled so I.E. doesn't trust the global object when called normally
return cachedSetTimeout.call(null, fun, 0);
} catch(e){
// same as above, but for a version of I.E. that requires the global object for 'this'; hopefully our context is correct, otherwise it will throw a global error
return cachedSetTimeout.call(this, fun, 0);
}
}
}
function runClearTimeout(marker) {
if (cachedClearTimeout === clearTimeout) {
// normal environments in sane situations
return clearTimeout(marker);
}
// if clearTimeout wasn't available but was later defined
if ((cachedClearTimeout === defaultClearTimeout || !cachedClearTimeout) && clearTimeout) {
cachedClearTimeout = clearTimeout;
return clearTimeout(marker);
}
try {
// when somebody has screwed with setTimeout but no I.E. madness
return cachedClearTimeout(marker);
} catch (e){
try {
// When we are in I.E. but the script has been evaled so I.E. doesn't trust the global object when called normally
return cachedClearTimeout.call(null, marker);
} catch (e){
// same as above, but for a version of I.E. that requires the global object for 'this'; hopefully our context is correct, otherwise it will throw a global error.
// Some versions of I.E. have different rules for clearTimeout vs setTimeout
return cachedClearTimeout.call(this, marker);
}
}
}
var queue = [];
var draining = false;
var currentQueue;
var queueIndex = -1;
function cleanUpNextTick() {
if (!draining || !currentQueue) {
return;
}
draining = false;
if (currentQueue.length) {
queue = currentQueue.concat(queue);
} else {
queueIndex = -1;
}
if (queue.length) {
drainQueue();
}
}
function drainQueue() {
if (draining) {
return;
}
var timeout = runTimeout(cleanUpNextTick);
draining = true;
var len = queue.length;
while(len) {
currentQueue = queue;
queue = [];
while (++queueIndex < len) {
if (currentQueue) {
currentQueue[queueIndex].run();
}
}
queueIndex = -1;
len = queue.length;
}
currentQueue = null;
draining = false;
runClearTimeout(timeout);
}
process.nextTick = function (fun) {
var args = new Array(arguments.length - 1);
if (arguments.length > 1) {
for (var i = 1; i < arguments.length; i++) {
args[i - 1] = arguments[i];
}
}
queue.push(new Item(fun, args));
if (queue.length === 1 && !draining) {
runTimeout(drainQueue);
}
};
// v8 likes predictable objects
function Item(fun, array) {
this.fun = fun;
this.array = array;
}
Item.prototype.run = function () {
this.fun.apply(null, this.array);
};
process.title = 'browser';
process.browser = true;
process.env = {};
process.argv = [];
process.version = ''; // empty string to avoid regexp issues
process.versions = {};
function noop() {}
process.on = noop;
process.addListener = noop;
process.once = noop;
process.off = noop;
process.removeListener = noop;
process.removeAllListeners = noop;
process.emit = noop;
process.prependListener = noop;
process.prependOnceListener = noop;
process.listeners = function (name) { return [] }
process.binding = function (name) {
throw new Error('process.binding is not supported');
};
process.cwd = function () { return '/' };
process.chdir = function (dir) {
throw new Error('process.chdir is not supported');
};
process.umask = function() { return 0; };
},{}],21:[function(require,module,exports){
/*! safe-buffer. MIT License. Feross Aboukhadijeh <https://feross.org/opensource> */
/* eslint-disable node/no-deprecated-api */
var buffer = require('buffer')
var Buffer = buffer.Buffer
// alternative to using Object.keys for old browsers
function copyProps (src, dst) {
for (var key in src) {
dst[key] = src[key]
}
}
if (Buffer.from && Buffer.alloc && Buffer.allocUnsafe && Buffer.allocUnsafeSlow) {
module.exports = buffer
} else {
// Copy properties from require('buffer')
copyProps(buffer, exports)
exports.Buffer = SafeBuffer
}
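// For example, SafeBuffer.from('abc') and SafeBuffer.alloc(4, 0) below give
// the modern Buffer API on engines that only ship the deprecated Buffer(arg)
// constructor.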
function SafeBuffer (arg, encodingOrOffset, length) {
return Buffer(arg, encodingOrOffset, length)
}
SafeBuffer.prototype = Object.create(Buffer.prototype)
// Copy static methods from Buffer
copyProps(Buffer, SafeBuffer)
SafeBuffer.from = function (arg, encodingOrOffset, length) {
if (typeof arg === 'number') {
throw new TypeError('Argument must not be a number')
}
return Buffer(arg, encodingOrOffset, length)
}
SafeBuffer.alloc = function (size, fill, encoding) {
if (typeof size !== 'number') {
throw new TypeError('Argument must be a number')
}
var buf = Buffer(size)
if (fill !== undefined) {
if (typeof encoding === 'string') {
buf.fill(fill, encoding)
} else {
buf.fill(fill)
}
} else {
buf.fill(0)
}
return buf
}
SafeBuffer.allocUnsafe = function (size) {
if (typeof size !== 'number') {
throw new TypeError('Argument must be a number')
}
return Buffer(size)
}
SafeBuffer.allocUnsafeSlow = function (size) {
if (typeof size !== 'number') {
throw new TypeError('Argument must be a number')
}
return buffer.SlowBuffer(size)
}
},{"buffer":4}],22:[function(require,module,exports){
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
module.exports = Stream;
var EE = require('events').EventEmitter;
var inherits = require('inherits');
inherits(Stream, EE);
Stream.Readable = require('readable-stream/lib/_stream_readable.js');
Stream.Writable = require('readable-stream/lib/_stream_writable.js');
Stream.Duplex = require('readable-stream/lib/_stream_duplex.js');
Stream.Transform = require('readable-stream/lib/_stream_transform.js');
Stream.PassThrough = require('readable-stream/lib/_stream_passthrough.js');
Stream.finished = require('readable-stream/lib/internal/streams/end-of-stream.js')
Stream.pipeline = require('readable-stream/lib/internal/streams/pipeline.js')
// Backwards-compat with node 0.4.x
Stream.Stream = Stream;
// old-style streams. Note that the pipe method (the only relevant
// part of this class) is overridden in the Readable class.
function Stream() {
EE.call(this);
}
Stream.prototype.pipe = function(dest, options) {
var source = this;
function ondata(chunk) {
if (dest.writable) {
if (false === dest.write(chunk) && source.pause) {
source.pause();
}
}
}
source.on('data', ondata);
function ondrain() {
if (source.readable && source.resume) {
source.resume();
}
}
dest.on('drain', ondrain);
// If the 'end' option is not supplied, dest.end() will be called when
// source gets the 'end' or 'close' events. Only dest.end() once.
if (!dest._isStdio && (!options || options.end !== false)) {
source.on('end', onend);
source.on('close', onclose);
}
var didOnEnd = false;
function onend() {
if (didOnEnd) return;
didOnEnd = true;
dest.end();
}
function onclose() {
if (didOnEnd) return;
didOnEnd = true;
if (typeof dest.destroy === 'function') dest.destroy();
}
// don't leave dangling pipes when there are errors.
function onerror(er) {
cleanup();
if (EE.listenerCount(this, 'error') === 0) {
throw er; // Unhandled stream error in pipe.
}
}
source.on('error', onerror);
dest.on('error', onerror);
// remove all the event listeners that were added.
function cleanup() {
source.removeListener('data', ondata);
dest.removeListener('drain', ondrain);
source.removeListener('end', onend);
source.removeListener('close', onclose);
source.removeListener('error', onerror);
dest.removeListener('error', onerror);
source.removeListener('end', cleanup);
source.removeListener('close', cleanup);
dest.removeListener('close', cleanup);
}
source.on('end', cleanup);
source.on('close', cleanup);
dest.on('close', cleanup);
dest.emit('pipe', source);
// Allow for unix-like usage: A.pipe(B).pipe(C)
return dest;
};
},{"events":9,"inherits":12,"readable-stream/lib/_stream_duplex.js":24,"readable-stream/lib/_stream_passthrough.js":25,"readable-stream/lib/_stream_readable.js":26,"readable-stream/lib/_stream_transform.js":27,"readable-stream/lib/_stream_writable.js":28,"readable-stream/lib/internal/streams/end-of-stream.js":32,"readable-stream/lib/internal/streams/pipeline.js":34}],23:[function(require,module,exports){
'use strict';
function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; subClass.__proto__ = superClass; }
var codes = {};
function createErrorType(code, message, Base) {
if (!Base) {
Base = Error;
}
function getMessage(arg1, arg2, arg3) {
if (typeof message === 'string') {
return message;
} else {
return message(arg1, arg2, arg3);
}
}
var NodeError =
/*#__PURE__*/
function (_Base) {
_inheritsLoose(NodeError, _Base);
function NodeError(arg1, arg2, arg3) {
return _Base.call(this, getMessage(arg1, arg2, arg3)) || this;
}
return NodeError;
}(Base);
NodeError.prototype.name = Base.name;
NodeError.prototype.code = code;
codes[code] = NodeError;
} // https://github.com/nodejs/node/blob/v10.8.0/lib/internal/errors.js
function oneOf(expected, thing) {
if (Array.isArray(expected)) {
var len = expected.length;
expected = expected.map(function (i) {
return String(i);
});
if (len > 2) {
return "one of ".concat(thing, " ").concat(expected.slice(0, len - 1).join(', '), ", or ") + expected[len - 1];
} else if (len === 2) {
return "one of ".concat(thing, " ").concat(expected[0], " or ").concat(expected[1]);
} else {
return "of ".concat(thing, " ").concat(expected[0]);
}
} else {
return "of ".concat(thing, " ").concat(String(expected));
}
} // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/startsWith
function startsWith(str, search, pos) {
return str.substr(!pos || pos < 0 ? 0 : +pos, search.length) === search;
} // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/endsWith
function endsWith(str, search, this_len) {
if (this_len === undefined || this_len > str.length) {
this_len = str.length;
}
return str.substring(this_len - search.length, this_len) === search;
} // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/includes
function includes(str, search, start) {
if (typeof start !== 'number') {
start = 0;
}
if (start + search.length > str.length) {
return false;
} else {
return str.indexOf(search, start) !== -1;
}
}
createErrorType('ERR_INVALID_OPT_VALUE', function (name, value) {
return 'The value "' + value + '" is invalid for option "' + name + '"';
}, TypeError);
createErrorType('ERR_INVALID_ARG_TYPE', function (name, expected, actual) {
// determiner: 'must be' or 'must not be'
var determiner;
if (typeof expected === 'string' && startsWith(expected, 'not ')) {
determiner = 'must not be';
expected = expected.replace(/^not /, '');
} else {
determiner = 'must be';
}
var msg;
if (endsWith(name, ' argument')) {
// For cases like 'first argument'
msg = "The ".concat(name, " ").concat(determiner, " ").concat(oneOf(expected, 'type'));
} else {
var type = includes(name, '.') ? 'property' : 'argument';
msg = "The \"".concat(name, "\" ").concat(type, " ").concat(determiner, " ").concat(oneOf(expected, 'type'));
}
msg += ". Received type ".concat(typeof actual);
return msg;
}, TypeError);
createErrorType('ERR_STREAM_PUSH_AFTER_EOF', 'stream.push() after EOF');
createErrorType('ERR_METHOD_NOT_IMPLEMENTED', function (name) {
return 'The ' + name + ' method is not implemented';
});
createErrorType('ERR_STREAM_PREMATURE_CLOSE', 'Premature close');
createErrorType('ERR_STREAM_DESTROYED', function (name) {
return 'Cannot call ' + name + ' after a stream was destroyed';
});
createErrorType('ERR_MULTIPLE_CALLBACK', 'Callback called multiple times');
createErrorType('ERR_STREAM_CANNOT_PIPE', 'Cannot pipe, not readable');
createErrorType('ERR_STREAM_WRITE_AFTER_END', 'write after end');
createErrorType('ERR_STREAM_NULL_VALUES', 'May not write null values to stream', TypeError);
createErrorType('ERR_UNKNOWN_ENCODING', function (arg) {
return 'Unknown encoding: ' + arg;
}, TypeError);
createErrorType('ERR_STREAM_UNSHIFT_AFTER_END_EVENT', 'stream.unshift() after end event');
module.exports.codes = codes;
},{}],24:[function(require,module,exports){
(function (process){(function (){
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
// a duplex stream is just a stream that is both readable and writable.
// Since JS doesn't have multiple prototypal inheritance, this class
// prototypally inherits from Readable, and then parasitically from
// Writable.
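// For example, new Duplex({ readable: false }) yields a write-only stream,
// and { allowHalfOpen: false } ends the writable side as soon as the
// readable side emits 'end'.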
'use strict';
/*<replacement>*/
var objectKeys = Object.keys || function (obj) {
var keys = [];
for (var key in obj) {
keys.push(key);
}
return keys;
};
/*</replacement>*/
module.exports = Duplex;
var Readable = require('./_stream_readable');
var Writable = require('./_stream_writable');
require('inherits')(Duplex, Readable);
{
// Allow the keys array to be GC'ed.
var keys = objectKeys(Writable.prototype);
for (var v = 0; v < keys.length; v++) {
var method = keys[v];
if (!Duplex.prototype[method]) Duplex.prototype[method] = Writable.prototype[method];
}
}
function Duplex(options) {
if (!(this instanceof Duplex)) return new Duplex(options);
Readable.call(this, options);
Writable.call(this, options);
this.allowHalfOpen = true;
if (options) {
if (options.readable === false) this.readable = false;
if (options.writable === false) this.writable = false;
if (options.allowHalfOpen === false) {
this.allowHalfOpen = false;
this.once('end', onend);
}
}
}
Object.defineProperty(Duplex.prototype, 'writableHighWaterMark', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._writableState.highWaterMark;
}
});
Object.defineProperty(Duplex.prototype, 'writableBuffer', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._writableState && this._writableState.getBuffer();
}
});
Object.defineProperty(Duplex.prototype, 'writableLength', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._writableState.length;
}
}); // the no-half-open enforcer
function onend() {
// If the writable side ended, then we're ok.
if (this._writableState.ended) return; // no more data can be written.
// But allow more writes to happen in this tick.
process.nextTick(onEndNT, this);
}
function onEndNT(self) {
self.end();
}
Object.defineProperty(Duplex.prototype, 'destroyed', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
if (this._readableState === undefined || this._writableState === undefined) {
return false;
}
return this._readableState.destroyed && this._writableState.destroyed;
},
set: function set(value) {
// we ignore the value if the stream
// has not been initialized yet
if (this._readableState === undefined || this._writableState === undefined) {
return;
} // backward compatibility, the user is explicitly
// managing destroyed
this._readableState.destroyed = value;
this._writableState.destroyed = value;
}
});
}).call(this)}).call(this,require('_process'))
},{"./_stream_readable":26,"./_stream_writable":28,"_process":20,"inherits":12}],25:[function(require,module,exports){
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
// a passthrough stream.
// basically just the most minimal sort of Transform stream.
// Every written chunk gets output as-is.
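// For example, readable.pipe(new PassThrough()).pipe(writable) forwards every
// chunk unchanged; _transform below simply calls cb(null, chunk).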
'use strict';
module.exports = PassThrough;
var Transform = require('./_stream_transform');
require('inherits')(PassThrough, Transform);
function PassThrough(options) {
if (!(this instanceof PassThrough)) return new PassThrough(options);
Transform.call(this, options);
}
PassThrough.prototype._transform = function (chunk, encoding, cb) {
cb(null, chunk);
};
},{"./_stream_transform":27,"inherits":12}],26:[function(require,module,exports){
(function (process,global){(function (){
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
'use strict';
module.exports = Readable;
/*<replacement>*/
var Duplex;
/*</replacement>*/
Readable.ReadableState = ReadableState;
/*<replacement>*/
var EE = require('events').EventEmitter;
var EElistenerCount = function EElistenerCount(emitter, type) {
return emitter.listeners(type).length;
};
/*</replacement>*/
/*<replacement>*/
var Stream = require('./internal/streams/stream');
/*</replacement>*/
var Buffer = require('buffer').Buffer;
var OurUint8Array = global.Uint8Array || function () {};
function _uint8ArrayToBuffer(chunk) {
return Buffer.from(chunk);
}
function _isUint8Array(obj) {
return Buffer.isBuffer(obj) || obj instanceof OurUint8Array;
}
/*<replacement>*/
var debugUtil = require('util');
var debug;
if (debugUtil && debugUtil.debuglog) {
debug = debugUtil.debuglog('stream');
} else {
debug = function debug() {};
}
/*</replacement>*/
var BufferList = require('./internal/streams/buffer_list');
var destroyImpl = require('./internal/streams/destroy');
var _require = require('./internal/streams/state'),
getHighWaterMark = _require.getHighWaterMark;
var _require$codes = require('../errors').codes,
ERR_INVALID_ARG_TYPE = _require$codes.ERR_INVALID_ARG_TYPE,
ERR_STREAM_PUSH_AFTER_EOF = _require$codes.ERR_STREAM_PUSH_AFTER_EOF,
ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED,
ERR_STREAM_UNSHIFT_AFTER_END_EVENT = _require$codes.ERR_STREAM_UNSHIFT_AFTER_END_EVENT; // Lazy loaded to improve the startup performance.
var StringDecoder;
var createReadableStreamAsyncIterator;
var from;
require('inherits')(Readable, Stream);
var errorOrDestroy = destroyImpl.errorOrDestroy;
var kProxyEvents = ['error', 'close', 'destroy', 'pause', 'resume'];
function prependListener(emitter, event, fn) {
// Sadly this is not cacheable as some libraries bundle their own
// event emitter implementation with them.
if (typeof emitter.prependListener === 'function') return emitter.prependListener(event, fn); // This is a hack to make sure that our error handler is attached before any
// userland ones. NEVER DO THIS. This is here only because this code needs
// to continue to work with older versions of Node.js that do not include
// the prependListener() method. The goal is to eventually remove this hack.
if (!emitter._events || !emitter._events[event]) emitter.on(event, fn);else if (Array.isArray(emitter._events[event])) emitter._events[event].unshift(fn);else emitter._events[event] = [fn, emitter._events[event]];
}
function ReadableState(options, stream, isDuplex) {
Duplex = Duplex || require('./_stream_duplex');
options = options || {}; // Duplex streams are both readable and writable, but share
// the same options object.
// However, some cases require setting options to different
// values for the readable and the writable sides of the duplex stream.
// These options can be provided separately as readableXXX and writableXXX.
if (typeof isDuplex !== 'boolean') isDuplex = stream instanceof Duplex; // object stream flag. Used to make read(n) ignore n and to
// make all the buffer merging and length checks go away
this.objectMode = !!options.objectMode;
if (isDuplex) this.objectMode = this.objectMode || !!options.readableObjectMode; // the point at which it stops calling _read() to fill the buffer
// Note: 0 is a valid value, means "don't call _read preemptively ever"
this.highWaterMark = getHighWaterMark(this, options, 'readableHighWaterMark', isDuplex); // A linked list is used to store data chunks instead of an array because the
// linked list can remove elements from the beginning faster than
// array.shift()
this.buffer = new BufferList();
this.length = 0;
this.pipes = null;
this.pipesCount = 0;
this.flowing = null;
this.ended = false;
this.endEmitted = false;
this.reading = false; // a flag to be able to tell if the event 'readable'/'data' is emitted
// immediately, or on a later tick. We set this to true at first, because
// any actions that shouldn't happen until "later" should generally also
// not happen before the first read call.
this.sync = true; // whenever we return null, then we set a flag to say
// that we're awaiting a 'readable' event emission.
this.needReadable = false;
this.emittedReadable = false;
this.readableListening = false;
this.resumeScheduled = false;
this.paused = true; // Should close be emitted on destroy. Defaults to true.
this.emitClose = options.emitClose !== false; // Should .destroy() be called after 'end' (and potentially 'finish')
this.autoDestroy = !!options.autoDestroy; // has it been destroyed
this.destroyed = false; // Crypto is kind of old and crusty. Historically, its default string
// encoding is 'binary' so we have to make this configurable.
// Everything else in the universe uses 'utf8', though.
this.defaultEncoding = options.defaultEncoding || 'utf8'; // the number of writers that are awaiting a drain event in .pipe()s
this.awaitDrain = 0; // if true, a maybeReadMore has been scheduled
this.readingMore = false;
this.decoder = null;
this.encoding = null;
if (options.encoding) {
if (!StringDecoder) StringDecoder = require('string_decoder/').StringDecoder;
this.decoder = new StringDecoder(options.encoding);
this.encoding = options.encoding;
}
}
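// Illustrative sketch: how the options handled by ReadableState above are
// typically supplied. This helper is not part of the readable-stream source
// and is never invoked by this bundle; the function name is arbitrary.
function exampleReadableOptions() {
  return new Readable({
    highWaterMark: 16, // stop calling _read() once ~16 bytes are buffered
    encoding: 'utf8', // install a StringDecoder so read() returns strings
    read: function () {
      this.push('hello');
      this.push(null); // end of stream
    }
  });
}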
function Readable(options) {
Duplex = Duplex || require('./_stream_duplex');
if (!(this instanceof Readable)) return new Readable(options); // Checking for a Stream.Duplex instance is faster here instead of inside
// the ReadableState constructor, at least with V8 6.5
var isDuplex = this instanceof Duplex;
this._readableState = new ReadableState(options, this, isDuplex); // legacy
this.readable = true;
if (options) {
if (typeof options.read === 'function') this._read = options.read;
if (typeof options.destroy === 'function') this._destroy = options.destroy;
}
Stream.call(this);
}
Object.defineProperty(Readable.prototype, 'destroyed', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
if (this._readableState === undefined) {
return false;
}
return this._readableState.destroyed;
},
set: function set(value) {
// we ignore the value if the stream
// has not been initialized yet
if (!this._readableState) {
return;
} // backward compatibility, the user is explicitly
// managing destroyed
this._readableState.destroyed = value;
}
});
Readable.prototype.destroy = destroyImpl.destroy;
Readable.prototype._undestroy = destroyImpl.undestroy;
Readable.prototype._destroy = function (err, cb) {
cb(err);
}; // Manually shove something into the read() buffer.
// This returns true if the highWaterMark has not been hit yet,
// similar to how Writable.write() returns true if you should
// write() some more.
Readable.prototype.push = function (chunk, encoding) {
var state = this._readableState;
var skipChunkCheck;
if (!state.objectMode) {
if (typeof chunk === 'string') {
encoding = encoding || state.defaultEncoding;
if (encoding !== state.encoding) {
chunk = Buffer.from(chunk, encoding);
encoding = '';
}
skipChunkCheck = true;
}
} else {
skipChunkCheck = true;
}
return readableAddChunk(this, chunk, encoding, false, skipChunkCheck);
}; // Unshift should *always* be something directly out of read()
Readable.prototype.unshift = function (chunk) {
return readableAddChunk(this, chunk, null, true, false);
};
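// Illustrative sketch of unshift(): read data, consume up to a delimiter and
// put the remainder back at the front of the read buffer, as the comment
// above prescribes ("something directly out of read()"). Not part of the
// readable-stream source and never invoked by this bundle; exampleParseHeader
// and its callback signature are assumptions. push()'s return value
// (backpressure) is exercised in the _read() sketch further below.
function exampleParseHeader(stream, callback) {
  var SD = require('string_decoder/').StringDecoder;
  var decoder = new SD('utf8');
  var header = '';
  stream.on('readable', function onReadable() {
    var chunk;
    while ((chunk = stream.read()) !== null) {
      var str = decoder.write(chunk);
      var boundary = str.indexOf('\n\n');
      if (boundary === -1) {
        header += str; // still inside the header, keep accumulating
        continue;
      }
      header += str.slice(0, boundary);
      stream.removeListener('readable', onReadable);
      // put the unconsumed body bytes back for whoever reads next
      var rest = Buffer.from(str.slice(boundary + 2), 'utf8');
      if (rest.length) stream.unshift(rest);
      callback(null, header, stream);
      return;
    }
  });
}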
function readableAddChunk(stream, chunk, encoding, addToFront, skipChunkCheck) {
debug('readableAddChunk', chunk);
var state = stream._readableState;
if (chunk === null) {
state.reading = false;
onEofChunk(stream, state);
} else {
var er;
if (!skipChunkCheck) er = chunkInvalid(state, chunk);
if (er) {
errorOrDestroy(stream, er);
} else if (state.objectMode || chunk && chunk.length > 0) {
if (typeof chunk !== 'string' && !state.objectMode && Object.getPrototypeOf(chunk) !== Buffer.prototype) {
chunk = _uint8ArrayToBuffer(chunk);
}
if (addToFront) {
if (state.endEmitted) errorOrDestroy(stream, new ERR_STREAM_UNSHIFT_AFTER_END_EVENT());else addChunk(stream, state, chunk, true);
} else if (state.ended) {
errorOrDestroy(stream, new ERR_STREAM_PUSH_AFTER_EOF());
} else if (state.destroyed) {
return false;
} else {
state.reading = false;
if (state.decoder && !encoding) {
chunk = state.decoder.write(chunk);
if (state.objectMode || chunk.length !== 0) addChunk(stream, state, chunk, false);else maybeReadMore(stream, state);
} else {
addChunk(stream, state, chunk, false);
}
}
} else if (!addToFront) {
state.reading = false;
maybeReadMore(stream, state);
}
} // We can push more data if we are below the highWaterMark.
// Also, if we have no data yet, we can stand some more bytes.
// This is to work around cases where hwm=0, such as the repl.
return !state.ended && (state.length < state.highWaterMark || state.length === 0);
}
function addChunk(stream, state, chunk, addToFront) {
if (state.flowing && state.length === 0 && !state.sync) {
state.awaitDrain = 0;
stream.emit('data', chunk);
} else {
// update the buffer info.
state.length += state.objectMode ? 1 : chunk.length;
if (addToFront) state.buffer.unshift(chunk);else state.buffer.push(chunk);
if (state.needReadable) emitReadable(stream);
}
maybeReadMore(stream, state);
}
function chunkInvalid(state, chunk) {
var er;
if (!_isUint8Array(chunk) && typeof chunk !== 'string' && chunk !== undefined && !state.objectMode) {
er = new ERR_INVALID_ARG_TYPE('chunk', ['string', 'Buffer', 'Uint8Array'], chunk);
}
return er;
}
Readable.prototype.isPaused = function () {
return this._readableState.flowing === false;
}; // backwards compatibility.
Readable.prototype.setEncoding = function (enc) {
if (!StringDecoder) StringDecoder = require('string_decoder/').StringDecoder;
var decoder = new StringDecoder(enc);
this._readableState.decoder = decoder; // If setEncoding(null), decoder.encoding equals utf8
this._readableState.encoding = this._readableState.decoder.encoding; // Iterate over current buffer to convert already stored Buffers:
var p = this._readableState.buffer.head;
var content = '';
while (p !== null) {
content += decoder.write(p.data);
p = p.next;
}
this._readableState.buffer.clear();
if (content !== '') this._readableState.buffer.push(content);
this._readableState.length = content.length;
return this;
}; // Don't raise the hwm > 1GB
var MAX_HWM = 0x40000000;
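// For example, computeNewHighWaterMark(100) below returns 128 (the next power
// of two), while any request of MAX_HWM (1 GiB) or more is clamped to MAX_HWM.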
function computeNewHighWaterMark(n) {
if (n >= MAX_HWM) {
// TODO(ronag): Throw ERR_VALUE_OUT_OF_RANGE.
n = MAX_HWM;
} else {
// Get the next highest power of 2 to prevent increasing hwm excessively in
// tiny amounts
n--;
n |= n >>> 1;
n |= n >>> 2;
n |= n >>> 4;
n |= n >>> 8;
n |= n >>> 16;
n++;
}
return n;
} // This function is designed to be inlinable, so please take care when making
// changes to the function body.
function howMuchToRead(n, state) {
if (n <= 0 || state.length === 0 && state.ended) return 0;
if (state.objectMode) return 1;
if (n !== n) {
// Only flow one buffer at a time
if (state.flowing && state.length) return state.buffer.head.data.length;else return state.length;
} // If we're asking for more than the current hwm, then raise the hwm.
if (n > state.highWaterMark) state.highWaterMark = computeNewHighWaterMark(n);
if (n <= state.length) return n; // Don't have enough
if (!state.ended) {
state.needReadable = true;
return 0;
}
return state.length;
} // you can override either this method, or the async _read(n) below.
Readable.prototype.read = function (n) {
debug('read', n);
n = parseInt(n, 10);
var state = this._readableState;
var nOrig = n;
if (n !== 0) state.emittedReadable = false; // if we're doing read(0) to trigger a readable event, but we
// already have a bunch of data in the buffer, then just trigger
// the 'readable' event and move on.
if (n === 0 && state.needReadable && ((state.highWaterMark !== 0 ? state.length >= state.highWaterMark : state.length > 0) || state.ended)) {
debug('read: emitReadable', state.length, state.ended);
if (state.length === 0 && state.ended) endReadable(this);else emitReadable(this);
return null;
}
n = howMuchToRead(n, state); // if we've ended, and we're now clear, then finish it up.
if (n === 0 && state.ended) {
if (state.length === 0) endReadable(this);
return null;
} // All the actual chunk generation logic needs to be
// *below* the call to _read. The reason is that in certain
// synthetic stream cases, such as passthrough streams, _read
// may be a completely synchronous operation which may change
// the state of the read buffer, providing enough data when
// before there was *not* enough.
//
// So, the steps are:
// 1. Figure out what the state of things will be after we do
// a read from the buffer.
//
// 2. If that resulting state will trigger a _read, then call _read.
// Note that this may be asynchronous, or synchronous. Yes, it is
// deeply ugly to write APIs this way, but that still doesn't mean
// that the Readable class should behave improperly, as streams are
// designed to be sync/async agnostic.
// Take note if the _read call is sync or async (ie, if the read call
// has returned yet), so that we know whether or not it's safe to emit
// 'readable' etc.
//
// 3. Actually pull the requested chunks out of the buffer and return.
// if we need a readable event, then we need to do some reading.
var doRead = state.needReadable;
debug('need readable', doRead); // if we currently have less than the highWaterMark, then also read some
if (state.length === 0 || state.length - n < state.highWaterMark) {
doRead = true;
debug('length less than watermark', doRead);
} // however, if we've ended, then there's no point, and if we're already
// reading, then it's unnecessary.
if (state.ended || state.reading) {
doRead = false;
debug('reading or ended', doRead);
} else if (doRead) {
debug('do read');
state.reading = true;
state.sync = true; // if the length is currently zero, then we *need* a readable event.
if (state.length === 0) state.needReadable = true; // call internal read method
this._read(state.highWaterMark);
state.sync = false; // If _read pushed data synchronously, then `reading` will be false,
// and we need to re-evaluate how much data we can return to the user.
if (!state.reading) n = howMuchToRead(nOrig, state);
}
var ret;
if (n > 0) ret = fromList(n, state);else ret = null;
if (ret === null) {
state.needReadable = state.length <= state.highWaterMark;
n = 0;
} else {
state.length -= n;
state.awaitDrain = 0;
}
if (state.length === 0) {
// If we have nothing in the buffer, then we want to know
// as soon as we *do* get something into the buffer.
if (!state.ended) state.needReadable = true; // If we tried to read() past the EOF, then emit end on the next tick.
if (nOrig !== n && state.ended) endReadable(this);
}
if (ret !== null) this.emit('data', ret);
return ret;
};
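// Illustrative sketch of paused-mode consumption matching the read() algorithm
// above: wait for 'readable', then drain the buffer with read() until it
// returns null; 'readable' fires again when more data (or EOF) arrives. Not
// part of the readable-stream source and never invoked by this bundle.
function exampleReadLoop(readable) {
  readable.on('readable', function () {
    var chunk;
    // read() with no argument returns whatever is buffered, or null
    while ((chunk = readable.read()) !== null) {
      console.log('consumed %d bytes', chunk.length);
    }
  });
  readable.on('end', function () {
    console.log('no more data');
  });
}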
function onEofChunk(stream, state) {
debug('onEofChunk');
if (state.ended) return;
if (state.decoder) {
var chunk = state.decoder.end();
if (chunk && chunk.length) {
state.buffer.push(chunk);
state.length += state.objectMode ? 1 : chunk.length;
}
}
state.ended = true;
if (state.sync) {
// if we are sync, wait until next tick to emit the data.
// Otherwise we risk emitting data in the flow()
// the readable code triggers during a read() call
emitReadable(stream);
} else {
// emit 'readable' now to make sure it gets picked up.
state.needReadable = false;
if (!state.emittedReadable) {
state.emittedReadable = true;
emitReadable_(stream);
}
}
} // Don't emit readable right away in sync mode, because this can trigger
// another read() call => stack overflow. This way, it might trigger
// a nextTick recursion warning, but that's not so bad.
function emitReadable(stream) {
var state = stream._readableState;
debug('emitReadable', state.needReadable, state.emittedReadable);
state.needReadable = false;
if (!state.emittedReadable) {
debug('emitReadable', state.flowing);
state.emittedReadable = true;
process.nextTick(emitReadable_, stream);
}
}
function emitReadable_(stream) {
var state = stream._readableState;
debug('emitReadable_', state.destroyed, state.length, state.ended);
if (!state.destroyed && (state.length || state.ended)) {
stream.emit('readable');
state.emittedReadable = false;
} // The stream needs another readable event if
// 1. It is not flowing, as the flow mechanism will take
// care of it.
// 2. It is not ended.
// 3. It is below the highWaterMark, so we can schedule
// another readable later.
state.needReadable = !state.flowing && !state.ended && state.length <= state.highWaterMark;
flow(stream);
} // at this point, the user has presumably seen the 'readable' event,
// and called read() to consume some data. that may have triggered
// in turn another _read(n) call, in which case reading = true if
// it's in progress.
// However, if we're not ended, or reading, and the length < hwm,
// then go ahead and try to read some more preemptively.
function maybeReadMore(stream, state) {
if (!state.readingMore) {
state.readingMore = true;
process.nextTick(maybeReadMore_, stream, state);
}
}
function maybeReadMore_(stream, state) {
// Attempt to read more data if we should.
//
// The conditions for reading more data are (one of):
// - Not enough data buffered (state.length < state.highWaterMark). The loop
// is responsible for filling the buffer with enough data if such data
// is available. If highWaterMark is 0 and we are not in the flowing mode
// we should _not_ attempt to buffer any extra data. We'll get more data
// when the stream consumer calls read() instead.
// - No data in the buffer, and the stream is in flowing mode. In this mode
// the loop below is responsible for ensuring read() is called. Failing to
// call read here would abort the flow and there's no other mechanism for
// continuing the flow if the stream consumer has just subscribed to the
// 'data' event.
//
// In addition to the above conditions to keep reading data, the following
// conditions prevent the data from being read:
// - The stream has ended (state.ended).
// - There is already a pending 'read' operation (state.reading). This is a
// case where the stream has called the implementation-defined _read()
// method, but they are processing the call asynchronously and have _not_
// called push() with new data. In this case we skip performing more
// read()s. The execution ends in this method again after the _read() ends
// up calling push() with more data.
while (!state.reading && !state.ended && (state.length < state.highWaterMark || state.flowing && state.length === 0)) {
var len = state.length;
debug('maybeReadMore read 0');
stream.read(0);
if (len === state.length) // didn't get any data, stop spinning.
break;
}
state.readingMore = false;
} // abstract method. to be overridden in specific implementation classes.
// call cb(er, data) where data is <= n in length.
// for virtual (non-string, non-buffer) streams, "length" is somewhat
// arbitrary, and perhaps not very meaningful.
Readable.prototype._read = function (n) {
errorOrDestroy(this, new ERR_METHOD_NOT_IMPLEMENTED('_read()'));
};
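// Illustrative sketch of overriding the abstract _read() described above
// (here via the constructor's `read` option): keep pushing until push()
// returns false, then wait to be called again. Not part of the readable-stream
// source and never invoked by this bundle; the counter data is arbitrary.
function exampleCounterStream(limit) {
  var i = 0;
  return new Readable({
    read: function () {
      while (i < limit) {
        // push() returns false once the buffer reaches the highWaterMark
        if (!this.push(String(i++) + '\n')) return;
      }
      this.push(null); // no more data
    }
  });
}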
Readable.prototype.pipe = function (dest, pipeOpts) {
var src = this;
var state = this._readableState;
switch (state.pipesCount) {
case 0:
state.pipes = dest;
break;
case 1:
state.pipes = [state.pipes, dest];
break;
default:
state.pipes.push(dest);
break;
}
state.pipesCount += 1;
debug('pipe count=%d opts=%j', state.pipesCount, pipeOpts);
var doEnd = (!pipeOpts || pipeOpts.end !== false) && dest !== process.stdout && dest !== process.stderr;
var endFn = doEnd ? onend : unpipe;
if (state.endEmitted) process.nextTick(endFn);else src.once('end', endFn);
dest.on('unpipe', onunpipe);
function onunpipe(readable, unpipeInfo) {
debug('onunpipe');
if (readable === src) {
if (unpipeInfo && unpipeInfo.hasUnpiped === false) {
unpipeInfo.hasUnpiped = true;
cleanup();
}
}
}
function onend() {
debug('onend');
dest.end();
} // when the dest drains, it reduces the awaitDrain counter
// on the source. This would be more elegant with a .once()
// handler in flow(), but adding and removing repeatedly is
// too slow.
var ondrain = pipeOnDrain(src);
dest.on('drain', ondrain);
var cleanedUp = false;
function cleanup() {
debug('cleanup'); // cleanup event handlers once the pipe is broken
dest.removeListener('close', onclose);
dest.removeListener('finish', onfinish);
dest.removeListener('drain', ondrain);
dest.removeListener('error', onerror);
dest.removeListener('unpipe', onunpipe);
src.removeListener('end', onend);
src.removeListener('end', unpipe);
src.removeListener('data', ondata);
cleanedUp = true; // if the reader is waiting for a drain event from this
// specific writer, then it would cause it to never start
// flowing again.
// So, if this is awaiting a drain, then we just call it now.
// If we don't know, then assume that we are waiting for one.
if (state.awaitDrain && (!dest._writableState || dest._writableState.needDrain)) ondrain();
}
src.on('data', ondata);
function ondata(chunk) {
debug('ondata');
var ret = dest.write(chunk);
debug('dest.write', ret);
if (ret === false) {
// If the user unpiped during `dest.write()`, it is possible
// to get stuck in a permanently paused state if that write
// also returned false.
// => Check whether `dest` is still a piping destination.
if ((state.pipesCount === 1 && state.pipes === dest || state.pipesCount > 1 && indexOf(state.pipes, dest) !== -1) && !cleanedUp) {
debug('false write response, pause', state.awaitDrain);
state.awaitDrain++;
}
src.pause();
}
} // if the dest has an error, then stop piping into it.
// however, don't suppress the throwing behavior for this.
function onerror(er) {
debug('onerror', er);
unpipe();
dest.removeListener('error', onerror);
if (EElistenerCount(dest, 'error') === 0) errorOrDestroy(dest, er);
} // Make sure our error handler is attached before userland ones.
prependListener(dest, 'error', onerror); // Both close and finish should trigger unpipe, but only once.
function onclose() {
dest.removeListener('finish', onfinish);
unpipe();
}
dest.once('close', onclose);
function onfinish() {
debug('onfinish');
dest.removeListener('close', onclose);
unpipe();
}
dest.once('finish', onfinish);
function unpipe() {
debug('unpipe');
src.unpipe(dest);
} // tell the dest that it's being piped to
dest.emit('pipe', src); // start the flow if it hasn't been started already.
if (!state.flowing) {
debug('pipe resume');
src.resume();
}
return dest;
};
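// Illustrative sketch of pipe(): the source is switched into flowing mode,
// paused whenever dest.write() returns false (until 'drain'), and by default
// dest.end() is called when the source ends; { end: false } keeps the
// destination open, e.g. to pipe a second source into it. Errors are not
// forwarded between the two streams, so both need handlers. Not part of the
// readable-stream source and never invoked by this bundle.
function examplePipe(first, second, dest) {
  first.pipe(dest, { end: false });
  first.once('end', function () {
    second.pipe(dest); // default behaviour ends dest when `second` ends
  });
  first.on('error', function (err) { console.error('source error', err); });
  dest.on('error', function (err) { console.error('dest error', err); });
}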
function pipeOnDrain(src) {
return function pipeOnDrainFunctionResult() {
var state = src._readableState;
debug('pipeOnDrain', state.awaitDrain);
if (state.awaitDrain) state.awaitDrain--;
if (state.awaitDrain === 0 && EElistenerCount(src, 'data')) {
state.flowing = true;
flow(src);
}
};
}
Readable.prototype.unpipe = function (dest) {
var state = this._readableState;
var unpipeInfo = {
hasUnpiped: false
}; // if we're not piping anywhere, then do nothing.
if (state.pipesCount === 0) return this; // just one destination. most common case.
if (state.pipesCount === 1) {
// passed in one, but it's not the right one.
if (dest && dest !== state.pipes) return this;
if (!dest) dest = state.pipes; // got a match.
state.pipes = null;
state.pipesCount = 0;
state.flowing = false;
if (dest) dest.emit('unpipe', this, unpipeInfo);
return this;
} // slow case. multiple pipe destinations.
if (!dest) {
// remove all.
var dests = state.pipes;
var len = state.pipesCount;
state.pipes = null;
state.pipesCount = 0;
state.flowing = false;
for (var i = 0; i < len; i++) {
dests[i].emit('unpipe', this, {
hasUnpiped: false
});
}
return this;
} // try to find the right one.
var index = indexOf(state.pipes, dest);
if (index === -1) return this;
state.pipes.splice(index, 1);
state.pipesCount -= 1;
if (state.pipesCount === 1) state.pipes = state.pipes[0];
dest.emit('unpipe', this, unpipeInfo);
return this;
}; // set up data events if they are asked for
// Ensure readable listeners eventually get something
Readable.prototype.on = function (ev, fn) {
var res = Stream.prototype.on.call(this, ev, fn);
var state = this._readableState;
if (ev === 'data') {
// update readableListening so that resume() may be a no-op
// a few lines down. This is needed to support once('readable').
state.readableListening = this.listenerCount('readable') > 0; // Try start flowing on next tick if stream isn't explicitly paused
if (state.flowing !== false) this.resume();
} else if (ev === 'readable') {
if (!state.endEmitted && !state.readableListening) {
state.readableListening = state.needReadable = true;
state.flowing = false;
state.emittedReadable = false;
debug('on readable', state.length, state.reading);
if (state.length) {
emitReadable(this);
} else if (!state.reading) {
process.nextTick(nReadingNextTick, this);
}
}
}
return res;
};
Readable.prototype.addListener = Readable.prototype.on;
Readable.prototype.removeListener = function (ev, fn) {
var res = Stream.prototype.removeListener.call(this, ev, fn);
if (ev === 'readable') {
// We need to check if there is someone still listening to
// readable and reset the state. However this needs to happen
// after readable has been emitted but before I/O (nextTick) to
// support once('readable', fn) cycles. This means that calling
// resume within the same tick will have no
// effect.
process.nextTick(updateReadableListening, this);
}
return res;
};
Readable.prototype.removeAllListeners = function (ev) {
var res = Stream.prototype.removeAllListeners.apply(this, arguments);
if (ev === 'readable' || ev === undefined) {
// We need to check if there is someone still listening to
// readable and reset the state. However this needs to happen
// after readable has been emitted but before I/O (nextTick) to
// support once('readable', fn) cycles. This means that calling
// resume within the same tick will have no
// effect.
process.nextTick(updateReadableListening, this);
}
return res;
};
function updateReadableListening(self) {
var state = self._readableState;
state.readableListening = self.listenerCount('readable') > 0;
if (state.resumeScheduled && !state.paused) {
// flowing needs to be set to true now, otherwise
// the upcoming resume will not flow.
state.flowing = true; // crude way to check if we should resume
} else if (self.listenerCount('data') > 0) {
self.resume();
}
}
function nReadingNextTick(self) {
debug('readable nexttick read 0');
self.read(0);
} // pause() and resume() are remnants of the legacy readable stream API
// If the user uses them, then switch into old mode.
Readable.prototype.resume = function () {
var state = this._readableState;
if (!state.flowing) {
debug('resume'); // we flow only if there is no one listening
// for readable, but we still have to call
// resume()
state.flowing = !state.readableListening;
resume(this, state);
}
state.paused = false;
return this;
};
function resume(stream, state) {
if (!state.resumeScheduled) {
state.resumeScheduled = true;
process.nextTick(resume_, stream, state);
}
}
function resume_(stream, state) {
debug('resume', state.reading);
if (!state.reading) {
stream.read(0);
}
state.resumeScheduled = false;
stream.emit('resume');
flow(stream);
if (state.flowing && !state.reading) stream.read(0);
}
Readable.prototype.pause = function () {
debug('call pause flowing=%j', this._readableState.flowing);
if (this._readableState.flowing !== false) {
debug('pause');
this._readableState.flowing = false;
this.emit('pause');
}
this._readableState.paused = true;
return this;
};
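// Illustrative sketch of the legacy flowing-mode API mentioned above: a 'data'
// listener starts the flow, pause() suspends it and resume() restarts it,
// which lets a consumer throttle a fast source around asynchronous work. Not
// part of the readable-stream source and never invoked by this bundle;
// slowConsumer is a hypothetical async worker.
function exampleThrottle(readable, slowConsumer) {
  readable.on('data', function (chunk) {
    readable.pause(); // no more 'data' events until resume()
    slowConsumer(chunk, function () {
      readable.resume();
    });
  });
}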
function flow(stream) {
var state = stream._readableState;
debug('flow', state.flowing);
while (state.flowing && stream.read() !== null) {
;
}
} // wrap an old-style stream as the async data source.
// This is *not* part of the readable stream interface.
// It is an ugly unfortunate mess of history.
Readable.prototype.wrap = function (stream) {
var _this = this;
var state = this._readableState;
var paused = false;
stream.on('end', function () {
debug('wrapped end');
if (state.decoder && !state.ended) {
var chunk = state.decoder.end();
if (chunk && chunk.length) _this.push(chunk);
}
_this.push(null);
});
stream.on('data', function (chunk) {
debug('wrapped data');
if (state.decoder) chunk = state.decoder.write(chunk); // don't skip over falsy values in objectMode
if (state.objectMode && (chunk === null || chunk === undefined)) return;else if (!state.objectMode && (!chunk || !chunk.length)) return;
var ret = _this.push(chunk);
if (!ret) {
paused = true;
stream.pause();
}
}); // proxy all the other methods.
// important when wrapping filters and duplexes.
for (var i in stream) {
if (this[i] === undefined && typeof stream[i] === 'function') {
this[i] = function methodWrap(method) {
return function methodWrapReturnFunction() {
return stream[method].apply(stream, arguments);
};
}(i);
}
} // proxy certain important events.
for (var n = 0; n < kProxyEvents.length; n++) {
stream.on(kProxyEvents[n], this.emit.bind(this, kProxyEvents[n]));
} // when we try to consume some more bytes, simply unpause the
// underlying stream.
this._read = function (n) {
debug('wrapped _read', n);
if (paused) {
paused = false;
stream.resume();
}
};
return this;
};
if (typeof Symbol === 'function') {
Readable.prototype[Symbol.asyncIterator] = function () {
if (createReadableStreamAsyncIterator === undefined) {
createReadableStreamAsyncIterator = require('./internal/streams/async_iterator');
}
return createReadableStreamAsyncIterator(this);
};
}
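// Illustrative sketch of the async iterator wired up above: a Readable can be
// consumed with for await...of, which (roughly) pulls one chunk per loop
// iteration and so applies backpressure naturally. Not part of the
// readable-stream source and never invoked by this bundle.
async function exampleAsyncIterate(readable) {
  var total = 0;
  for await (var chunk of readable) {
    total += chunk.length;
  }
  return total;
}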
Object.defineProperty(Readable.prototype, 'readableHighWaterMark', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._readableState.highWaterMark;
}
});
Object.defineProperty(Readable.prototype, 'readableBuffer', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._readableState && this._readableState.buffer;
}
});
Object.defineProperty(Readable.prototype, 'readableFlowing', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._readableState.flowing;
},
set: function set(state) {
if (this._readableState) {
this._readableState.flowing = state;
}
}
}); // exposed for testing purposes only.
Readable._fromList = fromList;
Object.defineProperty(Readable.prototype, 'readableLength', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._readableState.length;
}
}); // Pluck off n bytes from an array of buffers.
// Length is the combined lengths of all the buffers in the list.
// This function is designed to be inlinable, so please take care when making
// changes to the function body.
function fromList(n, state) {
// nothing buffered
if (state.length === 0) return null;
var ret;
if (state.objectMode) ret = state.buffer.shift();else if (!n || n >= state.length) {
// read it all, truncate the list
if (state.decoder) ret = state.buffer.join('');else if (state.buffer.length === 1) ret = state.buffer.first();else ret = state.buffer.concat(state.length);
state.buffer.clear();
} else {
// read part of list
ret = state.buffer.consume(n, state.decoder);
}
return ret;
}
function endReadable(stream) {
var state = stream._readableState;
debug('endReadable', state.endEmitted);
if (!state.endEmitted) {
state.ended = true;
process.nextTick(endReadableNT, state, stream);
}
}
function endReadableNT(state, stream) {
debug('endReadableNT', state.endEmitted, state.length); // Check that we didn't get one last unshift.
if (!state.endEmitted && state.length === 0) {
state.endEmitted = true;
stream.readable = false;
stream.emit('end');
if (state.autoDestroy) {
// In case of duplex streams we need a way to detect
// if the writable side is ready for autoDestroy as well
var wState = stream._writableState;
if (!wState || wState.autoDestroy && wState.finished) {
stream.destroy();
}
}
}
}
if (typeof Symbol === 'function') {
Readable.from = function (iterable, opts) {
if (from === undefined) {
from = require('./internal/streams/from');
}
return from(Readable, iterable, opts);
};
}
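// Illustrative sketch of Readable.from() wired up above: it wraps an (async)
// iterable so that each yielded value arrives as a chunk. Not part of the
// readable-stream source and never invoked by this bundle.
function exampleFromIterable() {
  return Readable.from(['one', 'two', 'three']);
}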
function indexOf(xs, x) {
for (var i = 0, l = xs.length; i < l; i++) {
if (xs[i] === x) return i;
}
return -1;
}
}).call(this)}).call(this,require('_process'),typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
},{"../errors":23,"./_stream_duplex":24,"./internal/streams/async_iterator":29,"./internal/streams/buffer_list":30,"./internal/streams/destroy":31,"./internal/streams/from":33,"./internal/streams/state":35,"./internal/streams/stream":36,"_process":20,"buffer":4,"events":9,"inherits":12,"string_decoder/":37,"util":3}],27:[function(require,module,exports){
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
// a transform stream is a readable/writable stream where you do
// something with the data. Sometimes it's called a "filter",
// but that's not a great name for it, since that implies a thing where
// some bits pass through, and others are simply ignored. (That would
// be a valid example of a transform, of course.)
//
// While the output is causally related to the input, it's not a
// necessarily symmetric or synchronous transformation. For example,
// a zlib stream might take multiple plain-text writes(), and then
// emit a single compressed chunk some time in the future.
//
// Here's how this works:
//
// The Transform stream has all the aspects of the readable and writable
// stream classes. When you write(chunk), that calls _write(chunk,cb)
// internally, and returns false if there's a lot of pending writes
// buffered up. When you call read(), that calls _read(n) until
// there's enough pending readable data buffered up.
//
// In a transform stream, the written data is placed in a buffer. When
// _read(n) is called, it transforms the queued up data, calling the
// buffered _write cb's as it consumes chunks. If consuming a single
// written chunk would result in multiple output chunks, then the first
// outputted bit calls the readcb, and subsequent chunks just go into
// the read buffer, and will cause it to emit 'readable' if necessary.
//
// This way, back-pressure is actually determined by the reading side,
// since _read has to be called to start processing a new chunk. However,
// a pathological inflate type of transform can cause excessive buffering
// here. For example, imagine a stream where every byte of input is
// interpreted as an integer from 0-255, and then results in that many
// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in
// 1kb of data being output. In this case, you could write a very small
// amount of input, and end up with a very large amount of output. In
// such a pathological inflating mechanism, there'd be no way to tell
// the system to stop doing the transform. A single 4MB write could
// cause the system to run out of memory.
//
// However, even in such a pathological case, only a single written chunk
// would be consumed, and then the rest would wait (un-transformed) until
// the results of the previous transformed chunk were consumed.
'use strict';
module.exports = Transform;
var _require$codes = require('../errors').codes,
ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED,
ERR_MULTIPLE_CALLBACK = _require$codes.ERR_MULTIPLE_CALLBACK,
ERR_TRANSFORM_ALREADY_TRANSFORMING = _require$codes.ERR_TRANSFORM_ALREADY_TRANSFORMING,
ERR_TRANSFORM_WITH_LENGTH_0 = _require$codes.ERR_TRANSFORM_WITH_LENGTH_0;
var Duplex = require('./_stream_duplex');
require('inherits')(Transform, Duplex);
function afterTransform(er, data) {
var ts = this._transformState;
ts.transforming = false;
var cb = ts.writecb;
if (cb === null) {
return this.emit('error', new ERR_MULTIPLE_CALLBACK());
}
ts.writechunk = null;
ts.writecb = null;
if (data != null) // single equals check for both `null` and `undefined`
this.push(data);
cb(er);
var rs = this._readableState;
rs.reading = false;
if (rs.needReadable || rs.length < rs.highWaterMark) {
this._read(rs.highWaterMark);
}
}
function Transform(options) {
if (!(this instanceof Transform)) return new Transform(options);
Duplex.call(this, options);
this._transformState = {
afterTransform: afterTransform.bind(this),
needTransform: false,
transforming: false,
writecb: null,
writechunk: null,
writeencoding: null
}; // start out asking for a readable event once data is transformed.
this._readableState.needReadable = true; // we have implemented the _read method, and done the other things
// that Readable wants before the first _read call, so unset the
// sync guard flag.
this._readableState.sync = false;
if (options) {
if (typeof options.transform === 'function') this._transform = options.transform;
if (typeof options.flush === 'function') this._flush = options.flush;
} // When the writable side finishes, then flush out anything remaining.
this.on('prefinish', prefinish);
}
function prefinish() {
var _this = this;
if (typeof this._flush === 'function' && !this._readableState.destroyed) {
this._flush(function (er, data) {
done(_this, er, data);
});
} else {
done(this, null, null);
}
}
Transform.prototype.push = function (chunk, encoding) {
this._transformState.needTransform = false;
return Duplex.prototype.push.call(this, chunk, encoding);
}; // This is the part where you do stuff!
// override this function in implementation classes.
// 'chunk' is an input chunk.
//
// Call `push(newChunk)` to pass along transformed output
// to the readable side. You may call 'push' zero or more times.
//
// Call `cb(err)` when you are done with this chunk. If you pass
// an error, then that'll put the hurt on the whole operation. If you
// never call cb(), then you'll never get another chunk.
Transform.prototype._transform = function (chunk, encoding, cb) {
cb(new ERR_METHOD_NOT_IMPLEMENTED('_transform()'));
};
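// Illustrative sketch of overriding _transform() as described above (here via
// the constructor's `transform` option): push zero or more output chunks for
// each written chunk, then call cb() exactly once. Not part of the
// readable-stream source and never invoked by this bundle.
function exampleUpperCaseTransform() {
  return new Transform({
    transform: function (chunk, encoding, cb) {
      this.push(chunk.toString().toUpperCase());
      cb(); // pass an Error here instead to fail the stream
    }
  });
}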
Transform.prototype._write = function (chunk, encoding, cb) {
var ts = this._transformState;
ts.writecb = cb;
ts.writechunk = chunk;
ts.writeencoding = encoding;
if (!ts.transforming) {
var rs = this._readableState;
if (ts.needTransform || rs.needReadable || rs.length < rs.highWaterMark) this._read(rs.highWaterMark);
}
}; // Doesn't matter what the args are here.
// _transform does all the work.
// That we got here means that the readable side wants more data.
Transform.prototype._read = function (n) {
var ts = this._transformState;
if (ts.writechunk !== null && !ts.transforming) {
ts.transforming = true;
this._transform(ts.writechunk, ts.writeencoding, ts.afterTransform);
} else {
// mark that we need a transform, so that any data that comes in
// will get processed, now that we've asked for it.
ts.needTransform = true;
}
};
Transform.prototype._destroy = function (err, cb) {
Duplex.prototype._destroy.call(this, err, function (err2) {
cb(err2);
});
};
function done(stream, er, data) {
if (er) return stream.emit('error', er);
if (data != null) // single equals check for both `null` and `undefined`
stream.push(data); // TODO(BridgeAR): Write a test for these two error cases
// if there's nothing in the write buffer, then that means
// that nothing more will ever be provided
if (stream._writableState.length) throw new ERR_TRANSFORM_WITH_LENGTH_0();
if (stream._transformState.transforming) throw new ERR_TRANSFORM_ALREADY_TRANSFORMING();
return stream.push(null);
}
},{"../errors":23,"./_stream_duplex":24,"inherits":12}],28:[function(require,module,exports){
(function (process,global){(function (){
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
// A bit simpler than readable streams.
// Implement an async ._write(chunk, encoding, cb), and it'll handle all
// the drain event emission and buffering.
'use strict';
module.exports = Writable;
/* <replacement> */
function WriteReq(chunk, encoding, cb) {
this.chunk = chunk;
this.encoding = encoding;
this.callback = cb;
this.next = null;
} // It looks like a linked list, but it is not:
// there will only ever be two of these per stream
function CorkedRequest(state) {
var _this = this;
this.next = null;
this.entry = null;
this.finish = function () {
onCorkedFinish(_this, state);
};
}
/* </replacement> */
/*<replacement>*/
var Duplex;
/*</replacement>*/
Writable.WritableState = WritableState;
/*<replacement>*/
var internalUtil = {
deprecate: require('util-deprecate')
};
/*</replacement>*/
/*<replacement>*/
var Stream = require('./internal/streams/stream');
/*</replacement>*/
var Buffer = require('buffer').Buffer;
var OurUint8Array = global.Uint8Array || function () {};
function _uint8ArrayToBuffer(chunk) {
return Buffer.from(chunk);
}
function _isUint8Array(obj) {
return Buffer.isBuffer(obj) || obj instanceof OurUint8Array;
}
var destroyImpl = require('./internal/streams/destroy');
var _require = require('./internal/streams/state'),
getHighWaterMark = _require.getHighWaterMark;
var _require$codes = require('../errors').codes,
ERR_INVALID_ARG_TYPE = _require$codes.ERR_INVALID_ARG_TYPE,
ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED,
ERR_MULTIPLE_CALLBACK = _require$codes.ERR_MULTIPLE_CALLBACK,
ERR_STREAM_CANNOT_PIPE = _require$codes.ERR_STREAM_CANNOT_PIPE,
ERR_STREAM_DESTROYED = _require$codes.ERR_STREAM_DESTROYED,
ERR_STREAM_NULL_VALUES = _require$codes.ERR_STREAM_NULL_VALUES,
ERR_STREAM_WRITE_AFTER_END = _require$codes.ERR_STREAM_WRITE_AFTER_END,
ERR_UNKNOWN_ENCODING = _require$codes.ERR_UNKNOWN_ENCODING;
var errorOrDestroy = destroyImpl.errorOrDestroy;
require('inherits')(Writable, Stream);
function nop() {}
function WritableState(options, stream, isDuplex) {
Duplex = Duplex || require('./_stream_duplex');
options = options || {}; // Duplex streams are both readable and writable, but share
// the same options object.
// However, some cases require setting options to different
// values for the readable and the writable sides of the duplex stream,
// e.g. options.readableObjectMode vs. options.writableObjectMode, etc.
if (typeof isDuplex !== 'boolean') isDuplex = stream instanceof Duplex; // object stream flag to indicate whether or not this stream
// contains buffers or objects.
this.objectMode = !!options.objectMode;
if (isDuplex) this.objectMode = this.objectMode || !!options.writableObjectMode; // the point at which write() starts returning false
// Note: 0 is a valid value, means that we always return false if
// the entire buffer is not flushed immediately on write()
this.highWaterMark = getHighWaterMark(this, options, 'writableHighWaterMark', isDuplex); // if _final has been called
this.finalCalled = false; // drain event flag.
this.needDrain = false; // at the start of calling end()
this.ending = false; // when end() has been called, and returned
this.ended = false; // when 'finish' is emitted
this.finished = false; // has it been destroyed
this.destroyed = false; // should we decode strings into buffers before passing to _write?
// this is here so that some node-core streams can optimize string
// handling at a lower level.
var noDecode = options.decodeStrings === false;
this.decodeStrings = !noDecode; // Crypto is kind of old and crusty. Historically, its default string
// encoding is 'binary' so we have to make this configurable.
// Everything else in the universe uses 'utf8', though.
this.defaultEncoding = options.defaultEncoding || 'utf8'; // not an actual buffer we keep track of, but a measurement
// of how much we're waiting to get pushed to some underlying
// socket or file.
this.length = 0; // a flag to see when we're in the middle of a write.
this.writing = false; // when true all writes will be buffered until .uncork() call
this.corked = 0; // a flag to be able to tell if the onwrite cb is called immediately,
// or on a later tick. We set this to true at first, because any
// actions that shouldn't happen until "later" should generally also
// not happen before the first write call.
this.sync = true; // a flag to know if we're processing previously buffered items, which
// may call the _write() callback in the same tick, so that we don't
// end up in an overlapped onwrite situation.
this.bufferProcessing = false; // the callback that's passed to _write(chunk,cb)
this.onwrite = function (er) {
onwrite(stream, er);
}; // the callback that the user supplies to write(chunk,encoding,cb)
this.writecb = null; // the amount that is being written when _write is called.
this.writelen = 0;
this.bufferedRequest = null;
this.lastBufferedRequest = null; // number of pending user-supplied write callbacks
// this must be 0 before 'finish' can be emitted
this.pendingcb = 0; // emit prefinish if the only thing we're waiting for is _write cbs
// This is relevant for synchronous Transform streams
this.prefinished = false; // True if the error was already emitted and should not be thrown again
this.errorEmitted = false; // Should close be emitted on destroy. Defaults to true.
this.emitClose = options.emitClose !== false; // Should .destroy() be called after 'finish' (and potentially 'end')
this.autoDestroy = !!options.autoDestroy; // count buffered requests
this.bufferedRequestCount = 0; // allocate the first CorkedRequest, there is always
// one allocated and free to use, and we maintain at most two
this.corkedRequestsFree = new CorkedRequest(this);
}
WritableState.prototype.getBuffer = function getBuffer() {
var current = this.bufferedRequest;
var out = [];
while (current) {
out.push(current);
current = current.next;
}
return out;
};
(function () {
try {
Object.defineProperty(WritableState.prototype, 'buffer', {
get: internalUtil.deprecate(function writableStateBufferGetter() {
return this.getBuffer();
}, '_writableState.buffer is deprecated. Use _writableState.getBuffer ' + 'instead.', 'DEP0003')
});
} catch (_) {}
})(); // Test _writableState for inheritance to account for Duplex streams,
// whose prototype chain only points to Readable.
var realHasInstance;
if (typeof Symbol === 'function' && Symbol.hasInstance && typeof Function.prototype[Symbol.hasInstance] === 'function') {
realHasInstance = Function.prototype[Symbol.hasInstance];
Object.defineProperty(Writable, Symbol.hasInstance, {
value: function value(object) {
if (realHasInstance.call(this, object)) return true;
if (this !== Writable) return false;
return object && object._writableState instanceof WritableState;
}
});
} else {
realHasInstance = function realHasInstance(object) {
return object instanceof this;
};
}
function Writable(options) {
Duplex = Duplex || require('./_stream_duplex'); // Writable ctor is applied to Duplexes, too.
// `realHasInstance` is necessary because using plain `instanceof`
// would return false, as no `_writableState` property is attached.
// Trying to use the custom `instanceof` for Writable here will also break the
// Node.js LazyTransform implementation, which has a non-trivial getter for
// `_writableState` that would lead to infinite recursion.
// Checking for a Stream.Duplex instance is faster here instead of inside
// the WritableState constructor, at least with V8 6.5
var isDuplex = this instanceof Duplex;
if (!isDuplex && !realHasInstance.call(Writable, this)) return new Writable(options);
this._writableState = new WritableState(options, this, isDuplex); // legacy.
this.writable = true;
if (options) {
if (typeof options.write === 'function') this._write = options.write;
if (typeof options.writev === 'function') this._writev = options.writev;
if (typeof options.destroy === 'function') this._destroy = options.destroy;
if (typeof options.final === 'function') this._final = options.final;
}
Stream.call(this);
} // Otherwise people can pipe Writable streams, which is just wrong.
Writable.prototype.pipe = function () {
errorOrDestroy(this, new ERR_STREAM_CANNOT_PIPE());
};
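// Illustrative sketch of implementing _write() as the header comment above
// describes (here via the constructor's `write`/`final` options): handle one
// chunk, then call cb; drain/backpressure bookkeeping is done by the code
// below. Not part of the readable-stream source and never invoked by this
// bundle; onDone is a hypothetical callback.
function exampleCollectingWritable(onDone) {
  var chunks = [];
  return new Writable({
    write: function (chunk, encoding, cb) {
      chunks.push(chunk); // chunk is a Buffer unless decodeStrings is false
      cb(); // this chunk has been consumed
    },
    final: function (cb) {
      onDone(Buffer.concat(chunks));
      cb();
    }
  });
}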
function writeAfterEnd(stream, cb) {
var er = new ERR_STREAM_WRITE_AFTER_END(); // TODO: defer error events consistently everywhere, not just the cb
errorOrDestroy(stream, er);
process.nextTick(cb, er);
} // Checks that a user-supplied chunk is valid, especially for the particular
// mode the stream is in. Currently this means that `null` is never accepted
// and undefined/non-string values are only allowed in object mode.
function validChunk(stream, state, chunk, cb) {
var er;
if (chunk === null) {
er = new ERR_STREAM_NULL_VALUES();
} else if (typeof chunk !== 'string' && !state.objectMode) {
er = new ERR_INVALID_ARG_TYPE('chunk', ['string', 'Buffer'], chunk);
}
if (er) {
errorOrDestroy(stream, er);
process.nextTick(cb, er);
return false;
}
return true;
}
Writable.prototype.write = function (chunk, encoding, cb) {
var state = this._writableState;
var ret = false;
var isBuf = !state.objectMode && _isUint8Array(chunk);
if (isBuf && !Buffer.isBuffer(chunk)) {
chunk = _uint8ArrayToBuffer(chunk);
}
if (typeof encoding === 'function') {
cb = encoding;
encoding = null;
}
if (isBuf) encoding = 'buffer';else if (!encoding) encoding = state.defaultEncoding;
if (typeof cb !== 'function') cb = nop;
if (state.ending) writeAfterEnd(this, cb);else if (isBuf || validChunk(this, state, chunk, cb)) {
state.pendingcb++;
ret = writeOrBuffer(this, state, isBuf, chunk, encoding, cb);
}
return ret;
};
Writable.prototype.cork = function () {
this._writableState.corked++;
};
Writable.prototype.uncork = function () {
var state = this._writableState;
if (state.corked) {
state.corked--;
if (!state.writing && !state.corked && !state.bufferProcessing && state.bufferedRequest) clearBuffer(this, state);
}
};
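// Illustrative sketch of cork()/uncork(): cork() buffers writes, the matching
// uncork() (or end()) flushes them, and if the stream implements _writev the
// buffered chunks are handed over in one batch (see clearBuffer below).
// Uncorking on the next tick keeps the writes batched together. Not part of
// the readable-stream source and never invoked by this bundle.
function exampleCorkedWrites(writable) {
  writable.cork();
  writable.write('header\n');
  writable.write('body\n');
  process.nextTick(function () {
    writable.uncork();
  });
}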
Writable.prototype.setDefaultEncoding = function setDefaultEncoding(encoding) {
// node::ParseEncoding() requires lower case.
if (typeof encoding === 'string') encoding = encoding.toLowerCase();
if (!(['hex', 'utf8', 'utf-8', 'ascii', 'binary', 'base64', 'ucs2', 'ucs-2', 'utf16le', 'utf-16le', 'raw'].indexOf((encoding + '').toLowerCase()) > -1)) throw new ERR_UNKNOWN_ENCODING(encoding);
this._writableState.defaultEncoding = encoding;
return this;
};
Object.defineProperty(Writable.prototype, 'writableBuffer', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._writableState && this._writableState.getBuffer();
}
});
function decodeChunk(state, chunk, encoding) {
if (!state.objectMode && state.decodeStrings !== false && typeof chunk === 'string') {
chunk = Buffer.from(chunk, encoding);
}
return chunk;
}
Object.defineProperty(Writable.prototype, 'writableHighWaterMark', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._writableState.highWaterMark;
}
}); // if we're already writing something, then just put this
// in the queue, and wait our turn. Otherwise, call _write
// If we return false, then we need a drain event, so set that flag.
function writeOrBuffer(stream, state, isBuf, chunk, encoding, cb) {
if (!isBuf) {
var newChunk = decodeChunk(state, chunk, encoding);
if (chunk !== newChunk) {
isBuf = true;
encoding = 'buffer';
chunk = newChunk;
}
}
var len = state.objectMode ? 1 : chunk.length;
state.length += len;
var ret = state.length < state.highWaterMark; // we must ensure that previous needDrain will not be reset to false.
if (!ret) state.needDrain = true;
if (state.writing || state.corked) {
var last = state.lastBufferedRequest;
state.lastBufferedRequest = {
chunk: chunk,
encoding: encoding,
isBuf: isBuf,
callback: cb,
next: null
};
if (last) {
last.next = state.lastBufferedRequest;
} else {
state.bufferedRequest = state.lastBufferedRequest;
}
state.bufferedRequestCount += 1;
} else {
doWrite(stream, state, false, len, chunk, encoding, cb);
}
return ret;
}
function doWrite(stream, state, writev, len, chunk, encoding, cb) {
state.writelen = len;
state.writecb = cb;
state.writing = true;
state.sync = true;
if (state.destroyed) state.onwrite(new ERR_STREAM_DESTROYED('write'));else if (writev) stream._writev(chunk, state.onwrite);else stream._write(chunk, encoding, state.onwrite);
state.sync = false;
}
function onwriteError(stream, state, sync, er, cb) {
--state.pendingcb;
if (sync) {
// defer the callback if we are being called synchronously
// to avoid piling up things on the stack
process.nextTick(cb, er); // this can emit finish, and it will always happen
// after error
process.nextTick(finishMaybe, stream, state);
stream._writableState.errorEmitted = true;
errorOrDestroy(stream, er);
} else {
// the caller expect this to happen before if
// it is async
cb(er);
stream._writableState.errorEmitted = true;
errorOrDestroy(stream, er); // this can emit finish, but finish must
// always follow error
finishMaybe(stream, state);
}
}
function onwriteStateUpdate(state) {
state.writing = false;
state.writecb = null;
state.length -= state.writelen;
state.writelen = 0;
}
function onwrite(stream, er) {
var state = stream._writableState;
var sync = state.sync;
var cb = state.writecb;
if (typeof cb !== 'function') throw new ERR_MULTIPLE_CALLBACK();
onwriteStateUpdate(state);
if (er) onwriteError(stream, state, sync, er, cb);else {
// Check if we're actually ready to finish, but don't emit yet
var finished = needFinish(state) || stream.destroyed;
if (!finished && !state.corked && !state.bufferProcessing && state.bufferedRequest) {
clearBuffer(stream, state);
}
if (sync) {
process.nextTick(afterWrite, stream, state, finished, cb);
} else {
afterWrite(stream, state, finished, cb);
}
}
}
function afterWrite(stream, state, finished, cb) {
if (!finished) onwriteDrain(stream, state);
state.pendingcb--;
cb();
finishMaybe(stream, state);
} // Must force callback to be called on nextTick, so that we don't
// emit 'drain' before the write() consumer gets the 'false' return
// value, and has a chance to attach a 'drain' listener.
function onwriteDrain(stream, state) {
if (state.length === 0 && state.needDrain) {
state.needDrain = false;
stream.emit('drain');
}
} // if there's something in the buffer waiting, then process it
function clearBuffer(stream, state) {
state.bufferProcessing = true;
var entry = state.bufferedRequest;
if (stream._writev && entry && entry.next) {
// Fast case, write everything using _writev()
var l = state.bufferedRequestCount;
var buffer = new Array(l);
var holder = state.corkedRequestsFree;
holder.entry = entry;
var count = 0;
var allBuffers = true;
while (entry) {
buffer[count] = entry;
if (!entry.isBuf) allBuffers = false;
entry = entry.next;
count += 1;
}
buffer.allBuffers = allBuffers;
doWrite(stream, state, true, state.length, buffer, '', holder.finish); // doWrite is almost always async, defer these to save a bit of time
// as the hot path ends with doWrite
state.pendingcb++;
state.lastBufferedRequest = null;
if (holder.next) {
state.corkedRequestsFree = holder.next;
holder.next = null;
} else {
state.corkedRequestsFree = new CorkedRequest(state);
}
state.bufferedRequestCount = 0;
} else {
// Slow case, write chunks one-by-one
while (entry) {
var chunk = entry.chunk;
var encoding = entry.encoding;
var cb = entry.callback;
var len = state.objectMode ? 1 : chunk.length;
doWrite(stream, state, false, len, chunk, encoding, cb);
entry = entry.next;
state.bufferedRequestCount--; // if we didn't call the onwrite immediately, then
// it means that we need to wait until it does.
// also, that means that the chunk and cb are currently
// being processed, so move the buffer counter past them.
if (state.writing) {
break;
}
}
if (entry === null) state.lastBufferedRequest = null;
}
state.bufferedRequest = entry;
state.bufferProcessing = false;
}
Writable.prototype._write = function (chunk, encoding, cb) {
cb(new ERR_METHOD_NOT_IMPLEMENTED('_write()'));
};
Writable.prototype._writev = null;
Writable.prototype.end = function (chunk, encoding, cb) {
var state = this._writableState;
if (typeof chunk === 'function') {
cb = chunk;
chunk = null;
encoding = null;
} else if (typeof encoding === 'function') {
cb = encoding;
encoding = null;
}
if (chunk !== null && chunk !== undefined) this.write(chunk, encoding); // .end() fully uncorks
if (state.corked) {
state.corked = 1;
this.uncork();
} // ignore unnecessary end() calls.
if (!state.ending) endWritable(this, state, cb);
return this;
};
Object.defineProperty(Writable.prototype, 'writableLength', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._writableState.length;
}
});
function needFinish(state) {
return state.ending && state.length === 0 && state.bufferedRequest === null && !state.finished && !state.writing;
}
function callFinal(stream, state) {
stream._final(function (err) {
state.pendingcb--;
if (err) {
errorOrDestroy(stream, err);
}
state.prefinished = true;
stream.emit('prefinish');
finishMaybe(stream, state);
});
}
function prefinish(stream, state) {
if (!state.prefinished && !state.finalCalled) {
if (typeof stream._final === 'function' && !state.destroyed) {
state.pendingcb++;
state.finalCalled = true;
process.nextTick(callFinal, stream, state);
} else {
state.prefinished = true;
stream.emit('prefinish');
}
}
}
function finishMaybe(stream, state) {
var need = needFinish(state);
if (need) {
prefinish(stream, state);
if (state.pendingcb === 0) {
state.finished = true;
stream.emit('finish');
if (state.autoDestroy) {
// In case of duplex streams we need a way to detect
// if the readable side is ready for autoDestroy as well
var rState = stream._readableState;
if (!rState || rState.autoDestroy && rState.endEmitted) {
stream.destroy();
}
}
}
}
return need;
}
function endWritable(stream, state, cb) {
state.ending = true;
finishMaybe(stream, state);
if (cb) {
if (state.finished) process.nextTick(cb);else stream.once('finish', cb);
}
state.ended = true;
stream.writable = false;
}
function onCorkedFinish(corkReq, state, err) {
var entry = corkReq.entry;
corkReq.entry = null;
while (entry) {
var cb = entry.callback;
state.pendingcb--;
cb(err);
entry = entry.next;
} // reuse the free corkReq.
state.corkedRequestsFree.next = corkReq;
}
Object.defineProperty(Writable.prototype, 'destroyed', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
if (this._writableState === undefined) {
return false;
}
return this._writableState.destroyed;
},
set: function set(value) {
// we ignore the value if the stream
// has not been initialized yet
if (!this._writableState) {
return;
} // backward compatibility, the user is explicitly
// managing destroyed
this._writableState.destroyed = value;
}
});
Writable.prototype.destroy = destroyImpl.destroy;
Writable.prototype._undestroy = destroyImpl.undestroy;
Writable.prototype._destroy = function (err, cb) {
cb(err);
};
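// Illustrative sketch (never invoked by this script): the backpressure contract the
// Writable code above implements — write() returns false once the buffered length passes
// the high-water mark, and onwriteDrain() emits 'drain' when it is safe to write again.
// `ws` and `nextChunk` are hypothetical.
function _writableBackpressureSketch(ws, nextChunk) {
  if (ws.write('first chunk')) {
    ws.write(nextChunk); // buffer still below the high-water mark
  } else {
    ws.once('drain', function () {
      ws.write(nextChunk); // resume only after 'drain' fires
    });
  }
}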
}).call(this)}).call(this,require('_process'),typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
},{"../errors":23,"./_stream_duplex":24,"./internal/streams/destroy":31,"./internal/streams/state":35,"./internal/streams/stream":36,"_process":20,"buffer":4,"inherits":12,"util-deprecate":40}],29:[function(require,module,exports){
(function (process){(function (){
'use strict';
var _Object$setPrototypeO;
function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }
var finished = require('./end-of-stream');
var kLastResolve = Symbol('lastResolve');
var kLastReject = Symbol('lastReject');
var kError = Symbol('error');
var kEnded = Symbol('ended');
var kLastPromise = Symbol('lastPromise');
var kHandlePromise = Symbol('handlePromise');
var kStream = Symbol('stream');
function createIterResult(value, done) {
return {
value: value,
done: done
};
}
function readAndResolve(iter) {
var resolve = iter[kLastResolve];
if (resolve !== null) {
var data = iter[kStream].read(); // we defer if data is null
// we can be expecting either 'end' or
// 'error'
if (data !== null) {
iter[kLastPromise] = null;
iter[kLastResolve] = null;
iter[kLastReject] = null;
resolve(createIterResult(data, false));
}
}
}
function onReadable(iter) {
// we wait for the next tick, because it might
// emit an error with process.nextTick
process.nextTick(readAndResolve, iter);
}
function wrapForNext(lastPromise, iter) {
return function (resolve, reject) {
lastPromise.then(function () {
if (iter[kEnded]) {
resolve(createIterResult(undefined, true));
return;
}
iter[kHandlePromise](resolve, reject);
}, reject);
};
}
var AsyncIteratorPrototype = Object.getPrototypeOf(function () {});
var ReadableStreamAsyncIteratorPrototype = Object.setPrototypeOf((_Object$setPrototypeO = {
get stream() {
return this[kStream];
},
next: function next() {
var _this = this;
// if we have detected an error in the meanwhile
// reject straight away
var error = this[kError];
if (error !== null) {
return Promise.reject(error);
}
if (this[kEnded]) {
return Promise.resolve(createIterResult(undefined, true));
}
if (this[kStream].destroyed) {
// We need to defer via nextTick because if .destroy(err) is
// called, the error will be emitted via nextTick, and
// we cannot guarantee that there is no error lingering around
// waiting to be emitted.
return new Promise(function (resolve, reject) {
process.nextTick(function () {
if (_this[kError]) {
reject(_this[kError]);
} else {
resolve(createIterResult(undefined, true));
}
});
});
} // if we have multiple next() calls
// we will wait for the previous Promise to finish
// this logic is optimized to support for await loops,
// where next() is only called once at a time
var lastPromise = this[kLastPromise];
var promise;
if (lastPromise) {
promise = new Promise(wrapForNext(lastPromise, this));
} else {
// fast path needed to support multiple this.push()
// without triggering the next() queue
var data = this[kStream].read();
if (data !== null) {
return Promise.resolve(createIterResult(data, false));
}
promise = new Promise(this[kHandlePromise]);
}
this[kLastPromise] = promise;
return promise;
}
}, _defineProperty(_Object$setPrototypeO, Symbol.asyncIterator, function () {
return this;
}), _defineProperty(_Object$setPrototypeO, "return", function _return() {
var _this2 = this;
// destroy(err, cb) is a private API
// we can guarantee we have that here, because we control the
// Readable class this is attached to
return new Promise(function (resolve, reject) {
_this2[kStream].destroy(null, function (err) {
if (err) {
reject(err);
return;
}
resolve(createIterResult(undefined, true));
});
});
}), _Object$setPrototypeO), AsyncIteratorPrototype);
var createReadableStreamAsyncIterator = function createReadableStreamAsyncIterator(stream) {
var _Object$create;
var iterator = Object.create(ReadableStreamAsyncIteratorPrototype, (_Object$create = {}, _defineProperty(_Object$create, kStream, {
value: stream,
writable: true
}), _defineProperty(_Object$create, kLastResolve, {
value: null,
writable: true
}), _defineProperty(_Object$create, kLastReject, {
value: null,
writable: true
}), _defineProperty(_Object$create, kError, {
value: null,
writable: true
}), _defineProperty(_Object$create, kEnded, {
value: stream._readableState.endEmitted,
writable: true
}), _defineProperty(_Object$create, kHandlePromise, {
value: function value(resolve, reject) {
var data = iterator[kStream].read();
if (data) {
iterator[kLastPromise] = null;
iterator[kLastResolve] = null;
iterator[kLastReject] = null;
resolve(createIterResult(data, false));
} else {
iterator[kLastResolve] = resolve;
iterator[kLastReject] = reject;
}
},
writable: true
}), _Object$create));
iterator[kLastPromise] = null;
finished(stream, function (err) {
if (err && err.code !== 'ERR_STREAM_PREMATURE_CLOSE') {
var reject = iterator[kLastReject]; // reject if we are waiting for data in the Promise
// returned by next() and store the error
if (reject !== null) {
iterator[kLastPromise] = null;
iterator[kLastResolve] = null;
iterator[kLastReject] = null;
reject(err);
}
iterator[kError] = err;
return;
}
var resolve = iterator[kLastResolve];
if (resolve !== null) {
iterator[kLastPromise] = null;
iterator[kLastResolve] = null;
iterator[kLastReject] = null;
resolve(createIterResult(undefined, true));
}
iterator[kEnded] = true;
});
stream.on('readable', onReadable.bind(null, iterator));
return iterator;
};
module.exports = createReadableStreamAsyncIterator;
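// Illustrative sketch (never invoked by this script): consuming a readable stream with
// for-await via the async iterator built above; `readable` is a hypothetical stream
// instance and the environment is assumed to support async functions.
async function _asyncIteratorSketch(readable) {
  var chunks = [];
  for await (var chunk of createReadableStreamAsyncIterator(readable)) {
    chunks.push(chunk);
  }
  return chunks; // resolves once the stream ends, rejects if it errors
}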
}).call(this)}).call(this,require('_process'))
},{"./end-of-stream":32,"_process":20}],30:[function(require,module,exports){
'use strict';
function ownKeys(object, enumerableOnly) { var keys = Object.keys(object); if (Object.getOwnPropertySymbols) { var symbols = Object.getOwnPropertySymbols(object); if (enumerableOnly) symbols = symbols.filter(function (sym) { return Object.getOwnPropertyDescriptor(object, sym).enumerable; }); keys.push.apply(keys, symbols); } return keys; }
function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i] != null ? arguments[i] : {}; if (i % 2) { ownKeys(Object(source), true).forEach(function (key) { _defineProperty(target, key, source[key]); }); } else if (Object.getOwnPropertyDescriptors) { Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)); } else { ownKeys(Object(source)).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } } return target; }
function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }
function _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }
var _require = require('buffer'),
Buffer = _require.Buffer;
var _require2 = require('util'),
inspect = _require2.inspect;
var custom = inspect && inspect.custom || 'inspect';
function copyBuffer(src, target, offset) {
Buffer.prototype.copy.call(src, target, offset);
}
module.exports =
/*#__PURE__*/
function () {
function BufferList() {
_classCallCheck(this, BufferList);
this.head = null;
this.tail = null;
this.length = 0;
}
_createClass(BufferList, [{
key: "push",
value: function push(v) {
var entry = {
data: v,
next: null
};
if (this.length > 0) this.tail.next = entry;else this.head = entry;
this.tail = entry;
++this.length;
}
}, {
key: "unshift",
value: function unshift(v) {
var entry = {
data: v,
next: this.head
};
if (this.length === 0) this.tail = entry;
this.head = entry;
++this.length;
}
}, {
key: "shift",
value: function shift() {
if (this.length === 0) return;
var ret = this.head.data;
if (this.length === 1) this.head = this.tail = null;else this.head = this.head.next;
--this.length;
return ret;
}
}, {
key: "clear",
value: function clear() {
this.head = this.tail = null;
this.length = 0;
}
}, {
key: "join",
value: function join(s) {
if (this.length === 0) return '';
var p = this.head;
var ret = '' + p.data;
while (p = p.next) {
ret += s + p.data;
}
return ret;
}
}, {
key: "concat",
value: function concat(n) {
if (this.length === 0) return Buffer.alloc(0);
var ret = Buffer.allocUnsafe(n >>> 0);
var p = this.head;
var i = 0;
while (p) {
copyBuffer(p.data, ret, i);
i += p.data.length;
p = p.next;
}
return ret;
} // Consumes a specified number of bytes or characters from the buffered data.
}, {
key: "consume",
value: function consume(n, hasStrings) {
var ret;
if (n < this.head.data.length) {
// `slice` is the same for buffers and strings.
ret = this.head.data.slice(0, n);
this.head.data = this.head.data.slice(n);
} else if (n === this.head.data.length) {
// First chunk is a perfect match.
ret = this.shift();
} else {
// Result spans more than one buffer.
ret = hasStrings ? this._getString(n) : this._getBuffer(n);
}
return ret;
}
}, {
key: "first",
value: function first() {
return this.head.data;
} // Consumes a specified number of characters from the buffered data.
}, {
key: "_getString",
value: function _getString(n) {
var p = this.head;
var c = 1;
var ret = p.data;
n -= ret.length;
while (p = p.next) {
var str = p.data;
var nb = n > str.length ? str.length : n;
if (nb === str.length) ret += str;else ret += str.slice(0, n);
n -= nb;
if (n === 0) {
if (nb === str.length) {
++c;
if (p.next) this.head = p.next;else this.head = this.tail = null;
} else {
this.head = p;
p.data = str.slice(nb);
}
break;
}
++c;
}
this.length -= c;
return ret;
} // Consumes a specified number of bytes from the buffered data.
}, {
key: "_getBuffer",
value: function _getBuffer(n) {
var ret = Buffer.allocUnsafe(n);
var p = this.head;
var c = 1;
p.data.copy(ret);
n -= p.data.length;
while (p = p.next) {
var buf = p.data;
var nb = n > buf.length ? buf.length : n;
buf.copy(ret, ret.length - n, 0, nb);
n -= nb;
if (n === 0) {
if (nb === buf.length) {
++c;
if (p.next) this.head = p.next;else this.head = this.tail = null;
} else {
this.head = p;
p.data = buf.slice(nb);
}
break;
}
++c;
}
this.length -= c;
return ret;
} // Make sure the linked list only shows the minimal necessary information.
}, {
key: custom,
value: function value(_, options) {
return inspect(this, _objectSpread({}, options, {
// Only inspect one level.
depth: 0,
// It should not recurse.
customInspect: false
}));
}
}]);
return BufferList;
}();
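// Illustrative sketch (never invoked by this script): how readable-stream uses this
// BufferList — push chunks, then concat(totalLength) for one contiguous Buffer, or
// consume(n) to slice n bytes off the front.
function _bufferListSketch() {
  var BufferList = module.exports;
  var list = new BufferList();
  list.push(Buffer.from('he'));
  list.push(Buffer.from('llo'));
  return list.concat(5).toString(); // 'hello'
}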
},{"buffer":4,"util":3}],31:[function(require,module,exports){
(function (process){(function (){
'use strict'; // undocumented cb() API, needed for core, not for public API
function destroy(err, cb) {
var _this = this;
var readableDestroyed = this._readableState && this._readableState.destroyed;
var writableDestroyed = this._writableState && this._writableState.destroyed;
if (readableDestroyed || writableDestroyed) {
if (cb) {
cb(err);
} else if (err) {
if (!this._writableState) {
process.nextTick(emitErrorNT, this, err);
} else if (!this._writableState.errorEmitted) {
this._writableState.errorEmitted = true;
process.nextTick(emitErrorNT, this, err);
}
}
return this;
} // we set destroyed to true before firing error callbacks in order
// to make it re-entrance safe in case destroy() is called within callbacks
if (this._readableState) {
this._readableState.destroyed = true;
} // if this is a duplex stream mark the writable part as destroyed as well
if (this._writableState) {
this._writableState.destroyed = true;
}
this._destroy(err || null, function (err) {
if (!cb && err) {
if (!_this._writableState) {
process.nextTick(emitErrorAndCloseNT, _this, err);
} else if (!_this._writableState.errorEmitted) {
_this._writableState.errorEmitted = true;
process.nextTick(emitErrorAndCloseNT, _this, err);
} else {
process.nextTick(emitCloseNT, _this);
}
} else if (cb) {
process.nextTick(emitCloseNT, _this);
cb(err);
} else {
process.nextTick(emitCloseNT, _this);
}
});
return this;
}
function emitErrorAndCloseNT(self, err) {
emitErrorNT(self, err);
emitCloseNT(self);
}
function emitCloseNT(self) {
if (self._writableState && !self._writableState.emitClose) return;
if (self._readableState && !self._readableState.emitClose) return;
self.emit('close');
}
function undestroy() {
if (this._readableState) {
this._readableState.destroyed = false;
this._readableState.reading = false;
this._readableState.ended = false;
this._readableState.endEmitted = false;
}
if (this._writableState) {
this._writableState.destroyed = false;
this._writableState.ended = false;
this._writableState.ending = false;
this._writableState.finalCalled = false;
this._writableState.prefinished = false;
this._writableState.finished = false;
this._writableState.errorEmitted = false;
}
}
function emitErrorNT(self, err) {
self.emit('error', err);
}
function errorOrDestroy(stream, err) {
// We have tests that rely on errors being emitted
// in the same tick, so changing this is semver major.
// For now when you opt-in to autoDestroy we allow
// the error to be emitted nextTick. In a future
// semver major update we should change the default to this.
var rState = stream._readableState;
var wState = stream._writableState;
if (rState && rState.autoDestroy || wState && wState.autoDestroy) stream.destroy(err);else stream.emit('error', err);
}
module.exports = {
destroy: destroy,
undestroy: undestroy,
errorOrDestroy: errorOrDestroy
};
}).call(this)}).call(this,require('_process'))
},{"_process":20}],32:[function(require,module,exports){
// Ported from https://github.com/mafintosh/end-of-stream with
// permission from the author, Mathias Buus (@mafintosh).
'use strict';
var ERR_STREAM_PREMATURE_CLOSE = require('../../../errors').codes.ERR_STREAM_PREMATURE_CLOSE;
function once(callback) {
var called = false;
return function () {
if (called) return;
called = true;
for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {
args[_key] = arguments[_key];
}
callback.apply(this, args);
};
}
function noop() {}
function isRequest(stream) {
return stream.setHeader && typeof stream.abort === 'function';
}
function eos(stream, opts, callback) {
if (typeof opts === 'function') return eos(stream, null, opts);
if (!opts) opts = {};
callback = once(callback || noop);
var readable = opts.readable || opts.readable !== false && stream.readable;
var writable = opts.writable || opts.writable !== false && stream.writable;
var onlegacyfinish = function onlegacyfinish() {
if (!stream.writable) onfinish();
};
var writableEnded = stream._writableState && stream._writableState.finished;
var onfinish = function onfinish() {
writable = false;
writableEnded = true;
if (!readable) callback.call(stream);
};
var readableEnded = stream._readableState && stream._readableState.endEmitted;
var onend = function onend() {
readable = false;
readableEnded = true;
if (!writable) callback.call(stream);
};
var onerror = function onerror(err) {
callback.call(stream, err);
};
var onclose = function onclose() {
var err;
if (readable && !readableEnded) {
if (!stream._readableState || !stream._readableState.ended) err = new ERR_STREAM_PREMATURE_CLOSE();
return callback.call(stream, err);
}
if (writable && !writableEnded) {
if (!stream._writableState || !stream._writableState.ended) err = new ERR_STREAM_PREMATURE_CLOSE();
return callback.call(stream, err);
}
};
var onrequest = function onrequest() {
stream.req.on('finish', onfinish);
};
if (isRequest(stream)) {
stream.on('complete', onfinish);
stream.on('abort', onclose);
if (stream.req) onrequest();else stream.on('request', onrequest);
} else if (writable && !stream._writableState) {
// legacy streams
stream.on('end', onlegacyfinish);
stream.on('close', onlegacyfinish);
}
stream.on('end', onend);
stream.on('finish', onfinish);
if (opts.error !== false) stream.on('error', onerror);
stream.on('close', onclose);
return function () {
stream.removeListener('complete', onfinish);
stream.removeListener('abort', onclose);
stream.removeListener('request', onrequest);
if (stream.req) stream.req.removeListener('finish', onfinish);
stream.removeListener('end', onlegacyfinish);
stream.removeListener('close', onlegacyfinish);
stream.removeListener('finish', onfinish);
stream.removeListener('end', onend);
stream.removeListener('error', onerror);
stream.removeListener('close', onclose);
};
}
module.exports = eos;
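// Illustrative sketch (never invoked by this script): eos() calls back exactly once when
// a stream has ended, finished, or failed; the returned function detaches the listeners.
// `someStream` is hypothetical.
function _eosSketch(someStream) {
  var detach = eos(someStream, { readable: true, writable: false }, function (err) {
    if (err) console.error('stream closed prematurely or errored', err);
  });
  return detach; // call later to remove all the listeners eos() attached
}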
},{"../../../errors":23}],33:[function(require,module,exports){
module.exports = function () {
throw new Error('Readable.from is not available in the browser')
};
},{}],34:[function(require,module,exports){
// Ported from https://github.com/mafintosh/pump with
// permission from the author, Mathias Buus (@mafintosh).
'use strict';
var eos;
function once(callback) {
var called = false;
return function () {
if (called) return;
called = true;
callback.apply(void 0, arguments);
};
}
var _require$codes = require('../../../errors').codes,
ERR_MISSING_ARGS = _require$codes.ERR_MISSING_ARGS,
ERR_STREAM_DESTROYED = _require$codes.ERR_STREAM_DESTROYED;
function noop(err) {
// Rethrow the error if it exists to avoid swallowing it
if (err) throw err;
}
function isRequest(stream) {
return stream.setHeader && typeof stream.abort === 'function';
}
function destroyer(stream, reading, writing, callback) {
callback = once(callback);
var closed = false;
stream.on('close', function () {
closed = true;
});
if (eos === undefined) eos = require('./end-of-stream');
eos(stream, {
readable: reading,
writable: writing
}, function (err) {
if (err) return callback(err);
closed = true;
callback();
});
var destroyed = false;
return function (err) {
if (closed) return;
if (destroyed) return;
destroyed = true; // request.destroy just does .end() - .abort() is what we want
if (isRequest(stream)) return stream.abort();
if (typeof stream.destroy === 'function') return stream.destroy();
callback(err || new ERR_STREAM_DESTROYED('pipe'));
};
}
function call(fn) {
fn();
}
function pipe(from, to) {
return from.pipe(to);
}
function popCallback(streams) {
if (!streams.length) return noop;
if (typeof streams[streams.length - 1] !== 'function') return noop;
return streams.pop();
}
function pipeline() {
for (var _len = arguments.length, streams = new Array(_len), _key = 0; _key < _len; _key++) {
streams[_key] = arguments[_key];
}
var callback = popCallback(streams);
if (Array.isArray(streams[0])) streams = streams[0];
if (streams.length < 2) {
throw new ERR_MISSING_ARGS('streams');
}
var error;
var destroys = streams.map(function (stream, i) {
var reading = i < streams.length - 1;
var writing = i > 0;
return destroyer(stream, reading, writing, function (err) {
if (!error) error = err;
if (err) destroys.forEach(call);
if (reading) return;
destroys.forEach(call);
callback(error);
});
});
return streams.reduce(pipe);
}
module.exports = pipeline;
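// Illustrative sketch (never invoked by this script): pipeline() pipes each stream into
// the next and destroys the whole chain if any of them errors, reporting through a single
// callback. The three stream arguments are hypothetical.
function _pipelineSketch(source, transform, sink) {
  return pipeline(source, transform, sink, function (err) {
    if (err) console.error('pipeline failed', err);
  });
}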
},{"../../../errors":23,"./end-of-stream":32}],35:[function(require,module,exports){
'use strict';
var ERR_INVALID_OPT_VALUE = require('../../../errors').codes.ERR_INVALID_OPT_VALUE;
function highWaterMarkFrom(options, isDuplex, duplexKey) {
return options.highWaterMark != null ? options.highWaterMark : isDuplex ? options[duplexKey] : null;
}
function getHighWaterMark(state, options, duplexKey, isDuplex) {
var hwm = highWaterMarkFrom(options, isDuplex, duplexKey);
if (hwm != null) {
if (!(isFinite(hwm) && Math.floor(hwm) === hwm) || hwm < 0) {
var name = isDuplex ? duplexKey : 'highWaterMark';
throw new ERR_INVALID_OPT_VALUE(name, hwm);
}
return Math.floor(hwm);
} // Default value
return state.objectMode ? 16 : 16 * 1024;
}
module.exports = {
getHighWaterMark: getHighWaterMark
};
},{"../../../errors":23}],36:[function(require,module,exports){
module.exports = require('events').EventEmitter;
},{"events":9}],37:[function(require,module,exports){
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
'use strict';
/*<replacement>*/
var Buffer = require('safe-buffer').Buffer;
/*</replacement>*/
var isEncoding = Buffer.isEncoding || function (encoding) {
encoding = '' + encoding;
switch (encoding && encoding.toLowerCase()) {
case 'hex':case 'utf8':case 'utf-8':case 'ascii':case 'binary':case 'base64':case 'ucs2':case 'ucs-2':case 'utf16le':case 'utf-16le':case 'raw':
return true;
default:
return false;
}
};
function _normalizeEncoding(enc) {
if (!enc) return 'utf8';
var retried;
while (true) {
switch (enc) {
case 'utf8':
case 'utf-8':
return 'utf8';
case 'ucs2':
case 'ucs-2':
case 'utf16le':
case 'utf-16le':
return 'utf16le';
case 'latin1':
case 'binary':
return 'latin1';
case 'base64':
case 'ascii':
case 'hex':
return enc;
default:
if (retried) return; // undefined
enc = ('' + enc).toLowerCase();
retried = true;
}
}
}
// Do not cache `Buffer.isEncoding` when checking encoding names as some
// modules monkey-patch it to support additional encodings
function normalizeEncoding(enc) {
var nenc = _normalizeEncoding(enc);
if (typeof nenc !== 'string' && (Buffer.isEncoding === isEncoding || !isEncoding(enc))) throw new Error('Unknown encoding: ' + enc);
return nenc || enc;
}
// StringDecoder provides an interface for efficiently splitting a series of
// buffers into a series of JS strings without breaking apart multi-byte
// characters.
exports.StringDecoder = StringDecoder;
function StringDecoder(encoding) {
this.encoding = normalizeEncoding(encoding);
var nb;
switch (this.encoding) {
case 'utf16le':
this.text = utf16Text;
this.end = utf16End;
nb = 4;
break;
case 'utf8':
this.fillLast = utf8FillLast;
nb = 4;
break;
case 'base64':
this.text = base64Text;
this.end = base64End;
nb = 3;
break;
default:
this.write = simpleWrite;
this.end = simpleEnd;
return;
}
this.lastNeed = 0;
this.lastTotal = 0;
this.lastChar = Buffer.allocUnsafe(nb);
}
StringDecoder.prototype.write = function (buf) {
if (buf.length === 0) return '';
var r;
var i;
if (this.lastNeed) {
r = this.fillLast(buf);
if (r === undefined) return '';
i = this.lastNeed;
this.lastNeed = 0;
} else {
i = 0;
}
if (i < buf.length) return r ? r + this.text(buf, i) : this.text(buf, i);
return r || '';
};
StringDecoder.prototype.end = utf8End;
// Returns only complete characters in a Buffer
StringDecoder.prototype.text = utf8Text;
// Attempts to complete a partial non-UTF-8 character using bytes from a Buffer
StringDecoder.prototype.fillLast = function (buf) {
if (this.lastNeed <= buf.length) {
buf.copy(this.lastChar, this.lastTotal - this.lastNeed, 0, this.lastNeed);
return this.lastChar.toString(this.encoding, 0, this.lastTotal);
}
buf.copy(this.lastChar, this.lastTotal - this.lastNeed, 0, buf.length);
this.lastNeed -= buf.length;
};
// Checks the type of a UTF-8 byte, whether it's ASCII, a leading byte, or a
// continuation byte. If an invalid byte is detected, -2 is returned.
function utf8CheckByte(byte) {
if (byte <= 0x7F) return 0;else if (byte >> 5 === 0x06) return 2;else if (byte >> 4 === 0x0E) return 3;else if (byte >> 3 === 0x1E) return 4;
return byte >> 6 === 0x02 ? -1 : -2;
}
// Checks at most 3 bytes at the end of a Buffer in order to detect an
// incomplete multi-byte UTF-8 character. The total number of bytes (2, 3, or 4)
// needed to complete the UTF-8 character (if applicable) is returned.
function utf8CheckIncomplete(self, buf, i) {
var j = buf.length - 1;
if (j < i) return 0;
var nb = utf8CheckByte(buf[j]);
if (nb >= 0) {
if (nb > 0) self.lastNeed = nb - 1;
return nb;
}
if (--j < i || nb === -2) return 0;
nb = utf8CheckByte(buf[j]);
if (nb >= 0) {
if (nb > 0) self.lastNeed = nb - 2;
return nb;
}
if (--j < i || nb === -2) return 0;
nb = utf8CheckByte(buf[j]);
if (nb >= 0) {
if (nb > 0) {
if (nb === 2) nb = 0;else self.lastNeed = nb - 3;
}
return nb;
}
return 0;
}
// Validates as many continuation bytes for a multi-byte UTF-8 character as
// needed or are available. If we see a non-continuation byte where we expect
// one, we "replace" the validated continuation bytes we've seen so far with
// a single UTF-8 replacement character ('\ufffd'), to match v8's UTF-8 decoding
// behavior. The continuation byte check is included three times in the case
// where all of the continuation bytes for a character exist in the same buffer.
// It is also done this way as a slight performance increase instead of using a
// loop.
function utf8CheckExtraBytes(self, buf, p) {
if ((buf[0] & 0xC0) !== 0x80) {
self.lastNeed = 0;
return '\ufffd';
}
if (self.lastNeed > 1 && buf.length > 1) {
if ((buf[1] & 0xC0) !== 0x80) {
self.lastNeed = 1;
return '\ufffd';
}
if (self.lastNeed > 2 && buf.length > 2) {
if ((buf[2] & 0xC0) !== 0x80) {
self.lastNeed = 2;
return '\ufffd';
}
}
}
}
// Attempts to complete a multi-byte UTF-8 character using bytes from a Buffer.
function utf8FillLast(buf) {
var p = this.lastTotal - this.lastNeed;
var r = utf8CheckExtraBytes(this, buf, p);
if (r !== undefined) return r;
if (this.lastNeed <= buf.length) {
buf.copy(this.lastChar, p, 0, this.lastNeed);
return this.lastChar.toString(this.encoding, 0, this.lastTotal);
}
buf.copy(this.lastChar, p, 0, buf.length);
this.lastNeed -= buf.length;
}
// Returns all complete UTF-8 characters in a Buffer. If the Buffer ended on a
// partial character, the character's bytes are buffered until the required
// number of bytes are available.
function utf8Text(buf, i) {
var total = utf8CheckIncomplete(this, buf, i);
if (!this.lastNeed) return buf.toString('utf8', i);
this.lastTotal = total;
var end = buf.length - (total - this.lastNeed);
buf.copy(this.lastChar, 0, end);
return buf.toString('utf8', i, end);
}
// For UTF-8, a replacement character is added when ending on a partial
// character.
function utf8End(buf) {
var r = buf && buf.length ? this.write(buf) : '';
if (this.lastNeed) return r + '\ufffd';
return r;
}
// UTF-16LE typically needs two bytes per character, but even if we have an even
// number of bytes available, we need to check if we end on a leading/high
// surrogate. In that case, we need to wait for the next two bytes in order to
// decode the last character properly.
function utf16Text(buf, i) {
if ((buf.length - i) % 2 === 0) {
var r = buf.toString('utf16le', i);
if (r) {
var c = r.charCodeAt(r.length - 1);
if (c >= 0xD800 && c <= 0xDBFF) {
this.lastNeed = 2;
this.lastTotal = 4;
this.lastChar[0] = buf[buf.length - 2];
this.lastChar[1] = buf[buf.length - 1];
return r.slice(0, -1);
}
}
return r;
}
this.lastNeed = 1;
this.lastTotal = 2;
this.lastChar[0] = buf[buf.length - 1];
return buf.toString('utf16le', i, buf.length - 1);
}
// For UTF-16LE we do not explicitly append special replacement characters if we
// end on a partial character, we simply let v8 handle that.
function utf16End(buf) {
var r = buf && buf.length ? this.write(buf) : '';
if (this.lastNeed) {
var end = this.lastTotal - this.lastNeed;
return r + this.lastChar.toString('utf16le', 0, end);
}
return r;
}
function base64Text(buf, i) {
var n = (buf.length - i) % 3;
if (n === 0) return buf.toString('base64', i);
this.lastNeed = 3 - n;
this.lastTotal = 3;
if (n === 1) {
this.lastChar[0] = buf[buf.length - 1];
} else {
this.lastChar[0] = buf[buf.length - 2];
this.lastChar[1] = buf[buf.length - 1];
}
return buf.toString('base64', i, buf.length - n);
}
function base64End(buf) {
var r = buf && buf.length ? this.write(buf) : '';
if (this.lastNeed) return r + this.lastChar.toString('base64', 0, 3 - this.lastNeed);
return r;
}
// Pass bytes on through for single-byte encodings (e.g. ascii, latin1, hex)
function simpleWrite(buf) {
return buf.toString(this.encoding);
}
function simpleEnd(buf) {
return buf && buf.length ? this.write(buf) : '';
}
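// Illustrative sketch (never invoked by this script): StringDecoder keeps a multi-byte
// UTF-8 character intact when it is split across Buffers (0xE2 0x82 0xAC is '€').
function _stringDecoderSketch() {
  var decoder = new StringDecoder('utf8');
  var first = decoder.write(Buffer.from([0xE2, 0x82])); // '' — incomplete, bytes buffered
  var rest = decoder.write(Buffer.from([0xAC]));        // '€' — completed here
  return first + rest + decoder.end();
}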
},{"safe-buffer":21}],38:[function(require,module,exports){
(function (process){(function (){
var Stream = require('stream')
// through
//
// a stream that does nothing but re-emit the input.
// useful for aggregating a series of changing (but not ending) streams into one stream.
exports = module.exports = through
through.through = through
// create a readable/writable stream.
function through (write, end, opts) {
write = write || function (data) { this.queue(data) }
end = end || function () { this.queue(null) }
var ended = false, destroyed = false, buffer = [], _ended = false
var stream = new Stream()
stream.readable = stream.writable = true
stream.paused = false
// stream.autoPause = !(opts && opts.autoPause === false)
stream.autoDestroy = !(opts && opts.autoDestroy === false)
stream.write = function (data) {
write.call(this, data)
return !stream.paused
}
function drain() {
while(buffer.length && !stream.paused) {
var data = buffer.shift()
if(null === data)
return stream.emit('end')
else
stream.emit('data', data)
}
}
stream.queue = stream.push = function (data) {
// console.error(ended)
if(_ended) return stream
if(data === null) _ended = true
buffer.push(data)
drain()
return stream
}
//this will be registered as the first 'end' listener
//must call destroy next tick, to make sure we're after any
//stream piped from here.
//this is only a problem if end is not emitted synchronously.
//a nicer way to do this is to make sure this is the last listener for 'end'
stream.on('end', function () {
stream.readable = false
if(!stream.writable && stream.autoDestroy)
process.nextTick(function () {
stream.destroy()
})
})
function _end () {
stream.writable = false
end.call(stream)
if(!stream.readable && stream.autoDestroy)
stream.destroy()
}
stream.end = function (data) {
if(ended) return
ended = true
if(arguments.length) stream.write(data)
_end() // will emit or queue
return stream
}
stream.destroy = function () {
if(destroyed) return
destroyed = true
ended = true
buffer.length = 0
stream.writable = stream.readable = false
stream.emit('close')
return stream
}
stream.pause = function () {
if(stream.paused) return
stream.paused = true
return stream
}
stream.resume = function () {
if(stream.paused) {
stream.paused = false
stream.emit('resume')
}
drain()
//may have become paused again,
//as drain emits 'data'.
if(!stream.paused)
stream.emit('drain')
return stream
}
return stream
}
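// Illustrative sketch (never invoked by this script): through() as a simple pass-through
// transform — the write handler queues upper-cased data, the end handler queues null to
// signal end-of-stream.
function _throughSketch() {
  return through(function (data) {
    this.queue(String(data).toUpperCase());
  }, function () {
    this.queue(null);
  });
}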
}).call(this)}).call(this,require('_process'))
},{"_process":20,"stream":22}],39:[function(require,module,exports){
"use strict"
function unique_pred(list, compare) {
var ptr = 1
, len = list.length
, a=list[0], b=list[0]
for(var i=1; i<len; ++i) {
b = a
a = list[i]
if(compare(a, b)) {
if(i === ptr) {
ptr++
continue
}
list[ptr++] = a
}
}
list.length = ptr
return list
}
function unique_eq(list) {
var ptr = 1
, len = list.length
, a=list[0], b = list[0]
for(var i=1; i<len; ++i) {
b = a
a = list[i]
if(a !== b) {
if(i === ptr) {
ptr++
continue
}
list[ptr++] = a
}
}
list.length = ptr
return list
}
function unique(list, compare, sorted) {
if(list.length === 0) {
return list
}
if(compare) {
if(!sorted) {
list.sort(compare)
}
return unique_pred(list, compare)
}
if(!sorted) {
list.sort()
}
return unique_eq(list)
}
module.exports = unique
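// Illustrative sketch (never invoked by this script): unique() deduplicates a list in
// place; with a sort-style comparator and sorted=true the internal sort is skipped.
function _uniqueSketch() {
  var list = [1, 1, 2, 3, 3, 3];
  return unique(list, function (a, b) { return a - b; }, true); // [1, 2, 3]
}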
},{}],40:[function(require,module,exports){
(function (global){(function (){
/**
* Module exports.
*/
module.exports = deprecate;
/**
* Mark that a method should not be used.
* Returns a modified function which warns once by default.
*
* If `localStorage.noDeprecation = true` is set, then it is a no-op.
*
* If `localStorage.throwDeprecation = true` is set, then deprecated functions
* will throw an Error when invoked.
*
* If `localStorage.traceDeprecation = true` is set, then deprecated functions
* will invoke `console.trace()` instead of `console.error()`.
*
* @param {Function} fn - the function to deprecate
* @param {String} msg - the string to print to the console when `fn` is invoked
* @returns {Function} a new "deprecated" version of `fn`
* @api public
*/
function deprecate (fn, msg) {
if (config('noDeprecation')) {
return fn;
}
var warned = false;
function deprecated() {
if (!warned) {
if (config('throwDeprecation')) {
throw new Error(msg);
} else if (config('traceDeprecation')) {
console.trace(msg);
} else {
console.warn(msg);
}
warned = true;
}
return fn.apply(this, arguments);
}
return deprecated;
}
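// Illustrative sketch (never invoked by this script): wrapping a function with deprecate()
// so its first call logs a warning; setting localStorage.noDeprecation = 'true' silences
// the warning in this browser build. `oldApi` is hypothetical.
function _deprecateSketch(oldApi) {
  return deprecate(oldApi, 'oldApi() is deprecated, use newApi() instead');
}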
/**
* Checks `localStorage` for boolean values for the given `name`.
*
* @param {String} name
* @returns {Boolean}
* @api private
*/
function config (name) {
// accessing global.localStorage can trigger a DOMException in sandboxed iframes
try {
if (!global.localStorage) return false;
} catch (_) {
return false;
}
var val = global.localStorage[name];
if (null == val) return false;
return String(val).toLowerCase() === 'true';
}
}).call(this)}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
},{}]},{},[1]);