Dev (#12)
* health check
* Update Dockerfile
* simplifying the deployment
* Update Bot.js: makes the find team command public
* test (#9)
* Dev (#7)
* health check
* Update Dockerfile
* simplifying the deployment
* Dev (#8)
* health check
* Update Dockerfile
* simplifying the deployment
* Update Bot.js: makes the find team command public
* Update PlayerService.js
* massive update???? could break stuff
* Update Bot.js: update
node_modules/undici/lib/dispatcher/client-h1.js (generated, vendored): 117 changed lines
@@ -85,35 +85,35 @@ async function lazyllhttp () {
       return 0
     },
     wasm_on_status: (p, at, len) => {
-      assert.strictEqual(currentParser.ptr, p)
+      assert(currentParser.ptr === p)
       const start = at - currentBufferPtr + currentBufferRef.byteOffset
       return currentParser.onStatus(new FastBuffer(currentBufferRef.buffer, start, len)) || 0
     },
     wasm_on_message_begin: (p) => {
-      assert.strictEqual(currentParser.ptr, p)
+      assert(currentParser.ptr === p)
       return currentParser.onMessageBegin() || 0
     },
     wasm_on_header_field: (p, at, len) => {
-      assert.strictEqual(currentParser.ptr, p)
+      assert(currentParser.ptr === p)
       const start = at - currentBufferPtr + currentBufferRef.byteOffset
       return currentParser.onHeaderField(new FastBuffer(currentBufferRef.buffer, start, len)) || 0
     },
     wasm_on_header_value: (p, at, len) => {
-      assert.strictEqual(currentParser.ptr, p)
+      assert(currentParser.ptr === p)
       const start = at - currentBufferPtr + currentBufferRef.byteOffset
       return currentParser.onHeaderValue(new FastBuffer(currentBufferRef.buffer, start, len)) || 0
     },
     wasm_on_headers_complete: (p, statusCode, upgrade, shouldKeepAlive) => {
-      assert.strictEqual(currentParser.ptr, p)
+      assert(currentParser.ptr === p)
       return currentParser.onHeadersComplete(statusCode, Boolean(upgrade), Boolean(shouldKeepAlive)) || 0
     },
     wasm_on_body: (p, at, len) => {
-      assert.strictEqual(currentParser.ptr, p)
+      assert(currentParser.ptr === p)
       const start = at - currentBufferPtr + currentBufferRef.byteOffset
       return currentParser.onBody(new FastBuffer(currentBufferRef.buffer, start, len)) || 0
     },
     wasm_on_message_complete: (p) => {
-      assert.strictEqual(currentParser.ptr, p)
+      assert(currentParser.ptr === p)
       return currentParser.onMessageComplete() || 0
     }
@@ -131,9 +131,17 @@ let currentBufferRef = null
 let currentBufferSize = 0
 let currentBufferPtr = null

-const TIMEOUT_HEADERS = 1
-const TIMEOUT_BODY = 2
-const TIMEOUT_IDLE = 3
+const USE_NATIVE_TIMER = 0
+const USE_FAST_TIMER = 1
+
+// Use fast timers for headers and body to take eventual event loop
+// latency into account.
+const TIMEOUT_HEADERS = 2 | USE_FAST_TIMER
+const TIMEOUT_BODY = 4 | USE_FAST_TIMER
+
+// Use native timers to ignore event loop latency for keep-alive
+// handling.
+const TIMEOUT_KEEP_ALIVE = 8 | USE_NATIVE_TIMER

 class Parser {
   constructor (client, socket, { exports }) {
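The rewritten constants pack the timer backend into the low bit of the timeout type: `TIMEOUT_HEADERS` and `TIMEOUT_BODY` carry `USE_FAST_TIMER`, while `TIMEOUT_KEEP_ALIVE` keeps the native backend. A minimal sketch of how such a flag can be tested (the `usesFastTimer` helper is illustrative, not part of undici):

    const USE_NATIVE_TIMER = 0
    const USE_FAST_TIMER = 1

    // The high bits identify the timeout kind; the low bit selects the backend.
    const TIMEOUT_HEADERS = 2 | USE_FAST_TIMER
    const TIMEOUT_BODY = 4 | USE_FAST_TIMER
    const TIMEOUT_KEEP_ALIVE = 8 | USE_NATIVE_TIMER

    // Illustrative helper: a single bitwise test picks the implementation.
    function usesFastTimer (type) {
      return (type & USE_FAST_TIMER) === USE_FAST_TIMER
    }

    console.log(usesFastTimer(TIMEOUT_HEADERS)) // true
    console.log(usesFastTimer(TIMEOUT_KEEP_ALIVE)) // false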
@@ -164,26 +172,39 @@ class Parser {
     this.maxResponseSize = client[kMaxResponseSize]
   }

-  setTimeout (value, type) {
-    this.timeoutType = type
-    if (value !== this.timeoutValue) {
-      timers.clearTimeout(this.timeout)
-      if (value) {
-        this.timeout = timers.setTimeout(onParserTimeout, value, this)
-        // istanbul ignore else: only for jest
-        if (this.timeout.unref) {
-          this.timeout.unref()
-        }
-      } else {
+  setTimeout (delay, type) {
+    // If the existing timer and the new timer are of different timer type
+    // (fast or native) or have different delay, we need to clear the existing
+    // timer and set a new one.
+    if (
+      delay !== this.timeoutValue ||
+      (type & USE_FAST_TIMER) ^ (this.timeoutType & USE_FAST_TIMER)
+    ) {
+      // If a timeout is already set, clear it with clearTimeout of the fast
+      // timer implementation, as it can clear fast and native timers.
+      if (this.timeout) {
+        timers.clearTimeout(this.timeout)
         this.timeout = null
       }
-      this.timeoutValue = value
+
+      if (delay) {
+        if (type & USE_FAST_TIMER) {
+          this.timeout = timers.setFastTimeout(onParserTimeout, delay, new WeakRef(this))
+        } else {
+          this.timeout = setTimeout(onParserTimeout, delay, new WeakRef(this))
+          this.timeout.unref()
+        }
+      }
+
+      this.timeoutValue = delay
     } else if (this.timeout) {
       // istanbul ignore else: only for jest
       if (this.timeout.refresh) {
        this.timeout.refresh()
       }
     }
+
+    this.timeoutType = type
   }

   resume () {
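setTimeout now hands the timer a `WeakRef` to the parser instead of the parser itself, so a pending timeout cannot keep a dropped parser alive; the callback has to `deref()` before use. The pattern in isolation (a sketch under that assumption; `DemoParser` is made up):

    // Sketch: the timer holds only a weak reference to its target.
    function onTimeout (weakRef) {
      const target = weakRef.deref()
      if (target === undefined) {
        return // target was garbage collected; nothing left to time out
      }
      target.onTimeout()
    }

    class DemoParser {
      onTimeout () { console.log('parser timed out') }
    }

    const parser = new DemoParser()
    const timer = setTimeout(onTimeout, 1000, new WeakRef(parser))
    timer.unref() // don't let the demo timer hold the process open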
@@ -288,7 +309,7 @@ class Parser {
     this.llhttp.llhttp_free(this.ptr)
     this.ptr = null

-    timers.clearTimeout(this.timeout)
+    this.timeout && timers.clearTimeout(this.timeout)
     this.timeout = null
     this.timeoutValue = null
     this.timeoutType = null
@@ -363,20 +384,19 @@ class Parser {
     const { upgrade, client, socket, headers, statusCode } = this

     assert(upgrade)
+    assert(client[kSocket] === socket)
+    assert(!socket.destroyed)
+    assert(!this.paused)
+    assert((headers.length & 1) === 0)

     const request = client[kQueue][client[kRunningIdx]]
     assert(request)
-
-    assert(!socket.destroyed)
-    assert(socket === client[kSocket])
-    assert(!this.paused)
     assert(request.upgrade || request.method === 'CONNECT')

     this.statusCode = null
     this.statusText = ''
     this.shouldKeepAlive = null

-    assert(this.headers.length % 2 === 0)
     this.headers = []
     this.headersSize = 0
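Several asserts in this file switch from `headers.length % 2 === 0` to `(headers.length & 1) === 0`. Both check that the flat key/value array has even length; for non-negative integers the two tests always agree, the bitwise form just skips the division:

    // Modulo test vs. low-bit test: identical for non-negative integers.
    for (const n of [0, 1, 2, 7, 1024]) {
      console.assert((n % 2 === 0) === ((n & 1) === 0), `mismatch at ${n}`)
    }
    console.log('parity checks agree')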
@@ -433,7 +453,7 @@ class Parser {
       return -1
     }

-    assert.strictEqual(this.timeoutType, TIMEOUT_HEADERS)
+    assert(this.timeoutType === TIMEOUT_HEADERS)

     this.statusCode = statusCode
     this.shouldKeepAlive = (
@@ -466,7 +486,7 @@ class Parser {
       return 2
     }

-    assert(this.headers.length % 2 === 0)
+    assert((this.headers.length & 1) === 0)
     this.headers = []
     this.headersSize = 0
@@ -523,7 +543,7 @@ class Parser {
     const request = client[kQueue][client[kRunningIdx]]
     assert(request)

-    assert.strictEqual(this.timeoutType, TIMEOUT_BODY)
+    assert(this.timeoutType === TIMEOUT_BODY)
     if (this.timeout) {
       // istanbul ignore else: only for jest
       if (this.timeout.refresh) {
@@ -556,11 +576,12 @@ class Parser {
       return
     }

+    assert(statusCode >= 100)
+    assert((this.headers.length & 1) === 0)
+
     const request = client[kQueue][client[kRunningIdx]]
     assert(request)

-    assert(statusCode >= 100)
-
     this.statusCode = null
     this.statusText = ''
     this.bytesRead = 0
@@ -568,7 +589,6 @@ class Parser {
     this.keepAlive = ''
     this.connection = ''

-    assert(this.headers.length % 2 === 0)
     this.headers = []
     this.headersSize = 0
@@ -587,7 +607,7 @@ class Parser {
     client[kQueue][client[kRunningIdx]++] = null

     if (socket[kWriting]) {
-      assert.strictEqual(client[kRunning], 0)
+      assert(client[kRunning] === 0)
       // Response completed before request.
       util.destroy(socket, new InformationalError('reset'))
       return constants.ERROR.PAUSED
@@ -613,19 +633,19 @@
 }

 function onParserTimeout (parser) {
-  const { socket, timeoutType, client } = parser
+  const { socket, timeoutType, client, paused } = parser.deref()

   /* istanbul ignore else */
   if (timeoutType === TIMEOUT_HEADERS) {
     if (!socket[kWriting] || socket.writableNeedDrain || client[kRunning] > 1) {
-      assert(!parser.paused, 'cannot be paused while waiting for headers')
+      assert(!paused, 'cannot be paused while waiting for headers')
       util.destroy(socket, new HeadersTimeoutError())
     }
   } else if (timeoutType === TIMEOUT_BODY) {
-    if (!parser.paused) {
+    if (!paused) {
       util.destroy(socket, new BodyTimeoutError())
     }
-  } else if (timeoutType === TIMEOUT_IDLE) {
+  } else if (timeoutType === TIMEOUT_KEEP_ALIVE) {
     assert(client[kRunning] === 0 && client[kKeepAliveTimeoutValue])
     util.destroy(socket, new InformationalError('socket idle timeout'))
   }
@@ -646,10 +666,10 @@ async function connectH1 (client, socket) {
   socket[kParser] = new Parser(client, socket, llhttpInstance)

   addListener(socket, 'error', function (err) {
-    const parser = this[kParser]
-
     assert(err.code !== 'ERR_TLS_CERT_ALTNAME_INVALID')

+    const parser = this[kParser]
+
     // On Mac OS, we get an ECONNRESET even if there is a full body to be forwarded
     // to the user.
     if (err.code === 'ECONNRESET' && parser.statusCode && !parser.shouldKeepAlive) {
@@ -803,8 +823,8 @@ function resumeH1 (client) {
   }

   if (client[kSize] === 0) {
-    if (socket[kParser].timeoutType !== TIMEOUT_IDLE) {
-      socket[kParser].setTimeout(client[kKeepAliveTimeoutValue], TIMEOUT_IDLE)
+    if (socket[kParser].timeoutType !== TIMEOUT_KEEP_ALIVE) {
+      socket[kParser].setTimeout(client[kKeepAliveTimeoutValue], TIMEOUT_KEEP_ALIVE)
     }
   } else if (client[kRunning] > 0 && socket[kParser].statusCode < 200) {
     if (socket[kParser].timeoutType !== TIMEOUT_HEADERS) {
@@ -840,7 +860,10 @@ function writeH1 (client, request) {
   const expectsPayload = (
     method === 'PUT' ||
     method === 'POST' ||
-    method === 'PATCH'
+    method === 'PATCH' ||
+    method === 'QUERY' ||
+    method === 'PROPFIND' ||
+    method === 'PROPPATCH'
   )

   if (util.isFormDataLike(body)) {
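The payload heuristic now also treats QUERY, PROPFIND, and PROPPATCH as body-bearing methods. An equivalent formulation of the check (a sketch, not undici's code):

    // Hypothetical Set-based version of the expectsPayload check above.
    const METHODS_WITH_PAYLOAD = new Set(['PUT', 'POST', 'PATCH', 'QUERY', 'PROPFIND', 'PROPPATCH'])

    function expectsPayload (method) {
      return METHODS_WITH_PAYLOAD.has(method)
    }

    console.log(expectsPayload('PROPFIND')) // true
    console.log(expectsPayload('GET')) // false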
@@ -1119,7 +1142,7 @@ function writeBuffer (abort, body, client, request, socket, contentLength, heade
     socket.uncork()
     request.onBodySent(body)

-    if (!expectsPayload) {
+    if (!expectsPayload && request.reset !== false) {
       socket[kReset] = true
     }
   }
@@ -1149,7 +1172,7 @@ async function writeBlob (abort, body, client, request, socket, contentLength, h
   request.onBodySent(buffer)
   request.onRequestSent()

-  if (!expectsPayload) {
+  if (!expectsPayload && request.reset !== false) {
     socket[kReset] = true
   }
@@ -1250,7 +1273,7 @@ class AsyncWriter {
     socket.cork()

     if (bytesWritten === 0) {
-      if (!expectsPayload) {
+      if (!expectsPayload && request.reset !== false) {
         socket[kReset] = true
       }
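All three write paths in client-h1.js now gate the socket reset on `request.reset !== false`, i.e. resetting stays the default and callers must opt out explicitly. The semantics, sketched:

    // `reset !== false` treats an unset flag as true (default-on behaviour).
    function shouldReset (expectsPayload, reset) {
      return !expectsPayload && reset !== false
    }

    console.log(shouldReset(false, undefined)) // true (default behaviour)
    console.log(shouldReset(false, false))     // false (caller opted out)
    console.log(shouldReset(true, undefined))  // false (payload expected anyway)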
node_modules/undici/lib/dispatcher/client-h2.js (generated, vendored): 91 changed lines
@@ -24,11 +24,15 @@ const {
   kOnError,
   kMaxConcurrentStreams,
   kHTTP2Session,
-  kResume
+  kResume,
+  kSize,
+  kHTTPContext
 } = require('../core/symbols.js')

 const kOpenStreams = Symbol('open streams')

+let extractBody
+
 // Experimental
 let h2ExperimentalWarned = false
@@ -160,11 +164,10 @@ async function connectH2 (client, socket) {
     version: 'h2',
     defaultPipelining: Infinity,
     write (...args) {
-      // TODO (fix): return
-      writeH2(client, ...args)
+      return writeH2(client, ...args)
     },
     resume () {
-
+      resumeH2(client)
     },
     destroy (err, callback) {
       if (closed) {
@@ -183,6 +186,20 @@ async function connectH2 (client, socket) {
     }
   }

+function resumeH2 (client) {
+  const socket = client[kSocket]
+
+  if (socket?.destroyed === false) {
+    if (client[kSize] === 0 && client[kMaxConcurrentStreams] === 0) {
+      socket.unref()
+      client[kHTTP2Session].unref()
+    } else {
+      socket.ref()
+      client[kHTTP2Session].ref()
+    }
+  }
+}
+
 function onHttp2SessionError (err) {
   assert(err.code !== 'ERR_TLS_CERT_ALTNAME_INVALID')
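The new resumeH2 unrefs the socket and HTTP/2 session when the client has no queued work, so an idle connection no longer holds the event loop open, and refs them again once streams are pending. The same idea on a bare socket (a sketch):

    const net = require('node:net')

    // Sketch: an unref'ed socket no longer keeps the process alive;
    // ref it again while work is in flight.
    function markIdle (socket, idle) {
      if (idle) {
        socket.unref()
      } else {
        socket.ref()
      }
    }

    const socket = net.connect(80, 'example.com', () => markIdle(socket, true))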
@@ -210,17 +227,33 @@ function onHttp2SessionEnd () {
  * along with the socket right away
  */
 function onHTTP2GoAway (code) {
-  const err = new RequestAbortedError(`HTTP/2: "GOAWAY" frame received with code ${code}`)
+  // We cannot recover, so best to close the session and the socket
+  const err = this[kError] || new SocketError(`HTTP/2: "GOAWAY" frame received with code ${code}`, util.getSocketInfo(this))
+  const client = this[kClient]

-  // We need to trigger the close cycle right away
   // We need to destroy the session and the socket
   // Requests should be failed with the error after the current one is handled
-  this[kSocket][kError] = err
-  this[kClient][kOnError](err)
+  client[kSocket] = null
+  client[kHTTPContext] = null

-  this.unref()
+  if (this[kHTTP2Session] != null) {
+    this[kHTTP2Session].destroy(err)
+    this[kHTTP2Session] = null
+  }

   util.destroy(this[kSocket], err)
+
+  // Fail head of pipeline.
+  if (client[kRunningIdx] < client[kQueue].length) {
+    const request = client[kQueue][client[kRunningIdx]]
+    client[kQueue][client[kRunningIdx]++] = null
+    util.errorRequest(client, request, err)
+    client[kPendingIdx] = client[kRunningIdx]
+  }
+
+  assert(client[kRunning] === 0)
+
+  client.emit('disconnect', client[kUrl], [client], err)
+
+  client[kResume]()
 }

 // https://www.rfc-editor.org/rfc/rfc7230#section-3.3.2
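A GOAWAY frame is the peer's signal that no new streams will be accepted on the connection, which is why the rewritten handler tears the session down, fails the request at the head of the pipeline, and emits 'disconnect'. In plain Node.js http2 terms the hook looks like this (a sketch, independent of undici's internal symbols):

    const http2 = require('node:http2')

    const session = http2.connect('https://example.com')

    // 'goaway' fires when the peer sends a GOAWAY frame; after that the
    // session cannot open new streams, so destroy it and reconnect elsewhere.
    session.on('goaway', (errorCode, lastStreamID) => {
      console.error(`GOAWAY received (code ${errorCode}, last stream ${lastStreamID})`)
      session.destroy()
    })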
@@ -230,17 +263,14 @@ function shouldSendContentLength (method) {

 function writeH2 (client, request) {
   const session = client[kHTTP2Session]
-  const { body, method, path, host, upgrade, expectContinue, signal, headers: reqHeaders } = request
+  const { method, path, host, upgrade, expectContinue, signal, headers: reqHeaders } = request
+  let { body } = request

   if (upgrade) {
     util.errorRequest(client, request, new Error('Upgrade not supported for H2'))
     return false
   }

-  if (request.aborted) {
-    return false
-  }
-
   const headers = {}
   for (let n = 0; n < reqHeaders.length; n += 2) {
     const key = reqHeaders[n + 0]
@@ -283,6 +313,8 @@ function writeH2 (client, request) {
     // We do not destroy the socket as we can continue using the session
     // the stream get's destroyed and the session remains to create new streams
     util.destroy(body, err)
+    client[kQueue][client[kRunningIdx]++] = null
+    client[kResume]()
   }

   try {
@@ -293,6 +325,10 @@ function writeH2 (client, request) {
     util.errorRequest(client, request, err)
   }

+  if (request.aborted) {
+    return false
+  }
+
   if (method === 'CONNECT') {
     session.ref()
     // We are already connected, streams are pending, first request
@@ -304,10 +340,12 @@ function writeH2 (client, request) {
     if (stream.id && !stream.pending) {
       request.onUpgrade(null, null, stream)
       ++session[kOpenStreams]
+      client[kQueue][client[kRunningIdx]++] = null
     } else {
       stream.once('ready', () => {
         request.onUpgrade(null, null, stream)
         ++session[kOpenStreams]
+        client[kQueue][client[kRunningIdx]++] = null
       })
     }
@@ -347,6 +385,16 @@ function writeH2 (client, request) {

   let contentLength = util.bodyLength(body)

+  if (util.isFormDataLike(body)) {
+    extractBody ??= require('../web/fetch/body.js').extractBody
+
+    const [bodyStream, contentType] = extractBody(body)
+    headers['content-type'] = contentType
+
+    body = bodyStream.stream
+    contentLength = bodyStream.length
+  }
+
   if (contentLength == null) {
     contentLength = request.contentLength
   }
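`extractBody` is loaded lazily with `??=`, so the fetch body machinery is only required the first time a FormData body shows up. The pattern in isolation (zlib stands in for the expensive module):

    let heavyDep // stays undefined until first use, then cached

    function compress (input) {
      // ??= evaluates its right-hand side only while heavyDep is nullish,
      // so the require() runs at most once.
      heavyDep ??= require('node:zlib')
      return heavyDep.gzipSync(input)
    }

    console.log(compress('hello').length)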
@@ -428,17 +476,20 @@ function writeH2 (client, request) {
       // Present specially when using pipeline or stream
       if (stream.state?.state == null || stream.state.state < 6) {
         request.onComplete([])
         return
       }

-      // Stream is closed or half-closed-remote (6), decrement counter and cleanup
-      // It does not have sense to continue working with the stream as we do not
-      // have yet RST_STREAM support on client-side
       if (session[kOpenStreams] === 0) {
+        // Stream is closed or half-closed-remote (6), decrement counter and cleanup
+        // It does not have sense to continue working with the stream as we do not
+        // have yet RST_STREAM support on client-side
+
         session.unref()
       }

       abort(new InformationalError('HTTP/2: stream half-closed (remote)'))
+      client[kQueue][client[kRunningIdx]++] = null
+      client[kPendingIdx] = client[kRunningIdx]
+      client[kResume]()
     })

     stream.once('close', () => {
node_modules/undici/lib/dispatcher/client.js (generated, vendored): 10 changed lines
@@ -63,6 +63,8 @@ let deprecatedInterceptorWarned = false

 const kClosedResolve = Symbol('kClosedResolve')

+const noop = () => {}
+
 function getPipelining (client) {
   return client[kPipelining] ?? client[kHTTPContext]?.defaultPipelining ?? 1
 }
@@ -385,6 +387,10 @@ function onError (client, err) {
   }
 }

+/**
+ * @param {Client} client
+ * @returns
+ */
 async function connect (client) {
   assert(!client[kConnecting])
   assert(!client[kHTTPContext])
@@ -438,7 +444,7 @@ async function connect (client) {
   })

   if (client.destroyed) {
-    util.destroy(socket.on('error', () => {}), new ClientDestroyedError())
+    util.destroy(socket.on('error', noop), new ClientDestroyedError())
     return
   }
@@ -449,7 +455,7 @@ async function connect (client) {
       ? await connectH2(client, socket)
       : await connectH1(client, socket)
   } catch (err) {
-    socket.destroy().on('error', () => {})
+    socket.destroy().on('error', noop)
     throw err
   }
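client.js replaces the inline `() => {}` error handlers with one shared noop, avoiding a fresh closure on every error path. A sketch of the call-site pattern (the `discard` helper is illustrative):

    const net = require('node:net')

    // One shared no-op handler instead of a new closure per call site.
    const noop = () => {}

    // Attach a no-op 'error' listener before destroying a socket we no longer
    // care about: an unhandled 'error' event would crash the process.
    function discard (socket) {
      socket.on('error', noop)
      socket.destroy()
    }

    discard(net.connect(9, 'localhost')) // hypothetical throwaway connection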
node_modules/undici/lib/dispatcher/pool-base.js (generated, vendored): 6 changed lines
@@ -113,9 +113,9 @@ class PoolBase extends DispatcherBase {

   async [kClose] () {
     if (this[kQueue].isEmpty()) {
-      return Promise.all(this[kClients].map(c => c.close()))
+      await Promise.all(this[kClients].map(c => c.close()))
     } else {
-      return new Promise((resolve) => {
+      await new Promise((resolve) => {
         this[kClosedResolve] = resolve
       })
     }
@@ -130,7 +130,7 @@ class PoolBase extends DispatcherBase {
       item.handler.onError(err)
     }

-    return Promise.all(this[kClients].map(c => c.destroy(err)))
+    await Promise.all(this[kClients].map(c => c.destroy(err)))
   }

   [kDispatch] (opts, handler) {
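pool-base.js swaps `return Promise.all(...)` for `await Promise.all(...)` inside its async methods. The resolved value is the same either way, but awaiting keeps a rejection inside the method's own try/catch scope and async stack trace. A small illustration (assumed, simplified):

    async function closeAll (clients) {
      try {
        // Awaiting here means a rejection is handled by this try/catch;
        // `return Promise.all(...)` would bypass it.
        await Promise.all(clients.map(c => c.close()))
      } catch (err) {
        console.error('close failed:', err.message)
      }
    }

    closeAll([
      { close: async () => {} },
      { close: async () => { throw new Error('boom') } }
    ])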
node_modules/undici/lib/dispatcher/pool.js (generated, vendored): 14 changed lines
@@ -73,6 +73,20 @@ class Pool extends PoolBase {
       ? { ...options.interceptors }
       : undefined
     this[kFactory] = factory
+
+    this.on('connectionError', (origin, targets, error) => {
+      // If a connection error occurs, we remove the client from the pool,
+      // and emit a connectionError event. They will not be re-used.
+      // Fixes https://github.com/nodejs/undici/issues/3895
+      for (const target of targets) {
+        // Do not use kRemoveClient here, as it will close the client,
+        // but the client cannot be closed in this state.
+        const idx = this[kClients].indexOf(target)
+        if (idx !== -1) {
+          this[kClients].splice(idx, 1)
+        }
+      }
+    })
   }

   [kGetDispatcher] () {
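With this handler in place, a client that hits a connection error is dropped from the pool instead of being re-used (see nodejs/undici#3895). From the outside, the event can still be observed as usual (a usage sketch; host and port are made up):

    const { Pool } = require('undici')

    const pool = new Pool('http://localhost:3000')

    // After the change above, the failed client has already been removed
    // from the pool by the time a user-level listener like this runs.
    pool.on('connectionError', (origin, targets, err) => {
      console.error(`connection to ${origin} failed: ${err.code}`)
    })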
node_modules/undici/lib/dispatcher/proxy-agent.js (generated, vendored): 4 changed lines
@@ -23,6 +23,8 @@ function defaultFactory (origin, opts) {
   return new Pool(origin, opts)
 }

+const noop = () => {}
+
 class ProxyAgent extends DispatcherBase {
   constructor (opts) {
     super()
@@ -81,7 +83,7 @@ class ProxyAgent extends DispatcherBase {
         servername: this[kProxyTls]?.servername || proxyHostname
       })
       if (statusCode !== 200) {
-        socket.on('error', () => {}).destroy()
+        socket.on('error', noop).destroy()
         callback(new RequestAbortedError(`Proxy response (${statusCode}) !== 200 when HTTP Tunneling`))
       }
       if (opts.protocol !== 'https:') {