mirror of
https://github.com/imezx/Warp.git
synced 2025-04-24 23:20:02 +00:00
460 lines
18 KiB
JavaScript
460 lines
18 KiB
JavaScript
import { MethodEnum } from '@algolia/requester-common';
|
|
|
|
/**
 * Normalizes user-provided request options into the shape the transporter
 * consumes. Any top-level key that is not `timeout`, `headers`,
 * `queryParameters`, `data` or `cacheable` is folded into the outgoing
 * `data` payload.
 *
 * @param requestOptions - Raw options from the caller; may be undefined.
 * @param timeout - Fallback timeout (the read or write default) used when
 *                  the caller did not provide one.
 * @returns Mapped options; `data` is undefined when there is nothing to send.
 */
function createMappedRequestOptions(requestOptions, timeout) {
    const options = requestOptions || {};
    // Copy the caller's `data` before folding extra keys into it, so we never
    // mutate a caller-owned object (the previous implementation wrote into it).
    const data = Array.isArray(options.data) ? [...options.data] : { ...options.data };
    Object.keys(options).forEach(key => {
        if (['timeout', 'headers', 'queryParameters', 'data', 'cacheable'].indexOf(key) === -1) {
            data[key] = options[key]; // eslint-disable-line functional/immutable-data
        }
    });
    return {
        data: Object.entries(data).length > 0 ? data : undefined,
        timeout: options.timeout || timeout,
        headers: options.headers || {},
        queryParameters: options.queryParameters || {},
        cacheable: options.cacheable,
    };
}
|
|
|
|
/**
 * Bit flags describing which kinds of calls a host accepts.
 * `Any` is `Read | Write`, so bitwise checks such as
 * `(host.accept & CallEnum.Read) !== 0` match hosts that accept both.
 */
const CallEnum = {
    // The host only serves read calls.
    Read: 1,
    // The host only serves write calls.
    Write: 2,
    // The host serves both read and write calls.
    Any: 3,
};
|
|
|
|
/**
 * Possible states of a stateful host, as recorded in the hosts cache.
 */
const HostStatusEnum = {
    // The host answered normally.
    Up: 1,
    // The host returned a non-retryable failure.
    Down: 2,
    // The host timed out on the last attempt.
    Timeouted: 3,
};
|
|
|
|
// Hosts marked Down/Timeouted become candidates again once their recorded
// state is older than this delay. Algolia API clients default to 5 minutes;
// the JavaScript client uses 2 minutes.
const EXPIRATION_DELAY = 2 * 60 * 1000;
|
|
/**
 * Snapshots a host together with its status and the time the status was
 * recorded, so staleness can later be computed against EXPIRATION_DELAY.
 *
 * @param host - The stateless host to annotate.
 * @param status - One of HostStatusEnum; defaults to Up.
 * @returns A new object; the input host is not mutated.
 */
function createStatefulHost(host, status = HostStatusEnum.Up) {
    const lastUpdate = Date.now();
    return { ...host, status, lastUpdate };
}
|
|
/**
 * A host is considered usable when it is flagged Up, or when its recorded
 * state is older than EXPIRATION_DELAY (stale Down/Timeouted states expire).
 */
function isStatefulHostUp(host) {
    if (host.status === HostStatusEnum.Up) {
        return true;
    }
    return Date.now() - host.lastUpdate > EXPIRATION_DELAY;
}
|
|
/**
 * True when the host timed out recently enough (within EXPIRATION_DELAY)
 * that it should be deprioritized rather than treated as fully up.
 */
function isStatefulHostTimeouted(host) {
    const isFresh = Date.now() - host.lastUpdate <= EXPIRATION_DELAY;
    return isFresh && host.status === HostStatusEnum.Timeouted;
}
|
|
|
|
/**
 * Normalizes a host specification into a stateless host object.
 * A plain string is shorthand for an https host that accepts all call types.
 *
 * @param options - Either a hostname string or a partial host object.
 * @returns An object with `protocol`, `url` and `accept`.
 */
function createStatelessHost(options) {
    if (typeof options === 'string') {
        return {
            protocol: 'https',
            url: options,
            accept: CallEnum.Any,
        };
    }
    const protocol = options.protocol || 'https';
    const accept = options.accept || CallEnum.Any;
    return { protocol, url: options.url, accept };
}
|
|
|
|
/**
 * Resolves the cached stateful view of every host and derives the retryable
 * options for the next request: the ordered list of hosts to try and a
 * context-aware timeout calculator.
 *
 * @param hostsCache - Cache mapping stateless hosts to their stateful state.
 * @param statelessHosts - The full set of candidate hosts.
 * @returns A promise of `{ statelessHosts, getTimeout }`.
 */
function createRetryableOptions(hostsCache, statelessHosts) {
    const lookups = statelessHosts.map(statelessHost => {
        // Unknown hosts default to a fresh "Up" state.
        return hostsCache.get(statelessHost, () => Promise.resolve(createStatefulHost(statelessHost)));
    });
    return Promise.all(lookups).then(statefulHosts => {
        const up = statefulHosts.filter(host => isStatefulHostUp(host));
        const timeouted = statefulHosts.filter(host => isStatefulHostTimeouted(host));
        /**
         * Hosts that timed out recently are placed at the end of the list,
         * so healthy hosts are tried first.
         */
        const available = [...up, ...timeouted];
        // When every host is down, fall back to the full original list.
        const statelessHostsAvailable = available.length === 0
            ? statelessHosts
            : available.map(host => createStatelessHost(host));
        return {
            getTimeout(timeoutsCount, baseTimeout) {
                /**
                 * Imagine that you have 4 hosts; timeouts then increase
                 * like: 1 (timeouted) > 4 (timeouted) > 5 (200).
                 *
                 * Note that on the very next request we start from the
                 * previous timeout: 5 (timeouted) > 6 (timeouted) > 7 ...
                 *
                 * This strategy may need to be reviewed, but it is the
                 * strategy of the current v3 version.
                 */
                const noTimeoutsSoFar = timeouted.length === 0 && timeoutsCount === 0;
                const multiplier = noTimeoutsSoFar ? 1 : timeouted.length + 3 + timeoutsCount;
                return multiplier * baseTimeout;
            },
            statelessHosts: statelessHostsAvailable,
        };
    });
}
|
|
|
|
/**
 * Treats a zero status without the timeout flag as a network-level failure
 * (the request produced no HTTP status at all).
 */
const isNetworkError = ({ isTimedOut, status }) => {
    const statusCode = ~~status;
    return statusCode === 0 && !isTimedOut;
};
|
|
/**
 * A response is retryable when it timed out, hit a network error
 * (status 0 without a timeout), or its status is outside the 2xx and 4xx
 * families (e.g. 5xx server errors).
 */
const isRetryable = (response) => {
    const { status, isTimedOut } = response;
    const family = ~~(status / 100);
    const isNetworkIssue = !isTimedOut && ~~status === 0;
    return isTimedOut || isNetworkIssue || (family !== 2 && family !== 4);
};
|
|
// Any 2xx status code counts as a successful response.
const isSuccess = (response) => ~~(response.status / 100) === 2;
|
|
/**
 * Dispatches a response to exactly one of the given outcome handlers,
 * in priority order: retryable first, then success, then failure.
 *
 * @param response - The raw requester response.
 * @param outcomes - Object with `onRetry`, `onSuccess` and `onFail` handlers.
 * @returns Whatever the selected handler returns.
 */
const retryDecision = (response, outcomes) => {
    if (isRetryable(response)) {
        return outcomes.onRetry(response);
    }
    return isSuccess(response) ? outcomes.onSuccess(response) : outcomes.onFail(response);
};
|
|
|
|
/**
 * Performs `request` against the given hosts, moving on to the next host
 * whenever the response is retryable (timeout, network error, or a status
 * outside the 2xx/4xx families).
 *
 * @param transporter - Holds the requester, caches, logger, timeouts,
 *                      default headers and query parameters.
 * @param statelessHosts - Hosts eligible for this request (already filtered
 *                         by read/write acceptance).
 * @param request - The request definition (method, path, data, cacheable).
 * @param requestOptions - Mapped per-request options (timeout, headers, ...).
 * @returns A promise of the parsed JSON body on success; rejects with an
 *          ApiError on a non-retryable failure, or throws a RetryError once
 *          every host has been exhausted.
 */
function retryableRequest(transporter, statelessHosts, request, requestOptions) {
    // Every attempt (request, response, host, tries left) is recorded here
    // for logging and for attaching to thrown errors.
    const stackTrace = []; // eslint-disable-line functional/prefer-readonly-type
    /**
     * First we prepare the parts of the payload that do not depend on hosts.
     */
    const data = serializeData(request, requestOptions);
    const headers = serializeHeaders(transporter, requestOptions);
    const method = request.method;
    // On `GET`, the data is proxied to query parameters.
    const dataQueryParameters = request.method !== MethodEnum.Get
        ? {}
        : {
            ...request.data,
            ...requestOptions.data,
        };
    // Merge order means: per-request options win over data-as-query-params,
    // which win over transporter defaults.
    const queryParameters = {
        'x-algolia-agent': transporter.userAgent.value,
        ...transporter.queryParameters,
        ...dataQueryParameters,
        ...requestOptions.queryParameters,
    };
    // Number of timed-out attempts so far; feeds getTimeout so later
    // attempts get progressively longer timeouts.
    let timeoutsCount = 0; // eslint-disable-line functional/no-let
    const retry = (hosts, // eslint-disable-line functional/prefer-readonly-type
    getTimeout) => {
        /**
         * We iterate on each host, until there is no host left.
         */
        const host = hosts.pop(); // eslint-disable-line functional/immutable-data
        if (host === undefined) {
            throw createRetryError(stackTraceWithoutCredentials(stackTrace));
        }
        const payload = {
            data,
            headers,
            method,
            url: serializeUrl(host, request.path, queryParameters),
            connectTimeout: getTimeout(timeoutsCount, transporter.timeouts.connect),
            responseTimeout: getTimeout(timeoutsCount, requestOptions.timeout),
        };
        /**
         * The stackFrame is pushed to the stackTrace so we
         * can have information about onRetry and onFailure
         * decisions.
         */
        const pushToStackTrace = (response) => {
            const stackFrame = {
                request: payload,
                response,
                host,
                triesLeft: hosts.length,
            };
            // eslint-disable-next-line functional/immutable-data
            stackTrace.push(stackFrame);
            return stackFrame;
        };
        const decisions = {
            onSuccess: response => deserializeSuccess(response),
            onRetry(response) {
                const stackFrame = pushToStackTrace(response);
                /**
                 * If the response is a timeout, we increase the number of
                 * timeouts so we can lengthen the timeout later.
                 */
                if (response.isTimedOut) {
                    timeoutsCount++;
                }
                return Promise.all([
                    /**
                     * Failures are individually sent to the logger, allowing
                     * the end user to debug / store stack frames even
                     * when a retry error does not happen.
                     */
                    transporter.logger.info('Retryable failure', stackFrameWithoutCredentials(stackFrame)),
                    /**
                     * We also store the state of the host in failure cases. If the host is
                     * down, it will remain down for the next 2 minutes. In a timeout situation,
                     * this host will be added to the end of the list of hosts on the next request.
                     */
                    transporter.hostsCache.set(host, createStatefulHost(host, response.isTimedOut ? HostStatusEnum.Timeouted : HostStatusEnum.Down)),
                ]).then(() => retry(hosts, getTimeout));
            },
            onFail(response) {
                pushToStackTrace(response);
                throw deserializeFailure(response, stackTraceWithoutCredentials(stackTrace));
            },
        };
        return transporter.requester.send(payload).then(response => {
            return retryDecision(response, decisions);
        });
    };
    /**
     * Finally, for each retryable host perform the request until we get a
     * non-retryable response. Some notes here:
     *
     * 1. The reverse here is applied so we can apply a `pop` later on => more performant.
     * 2. We also get from the retryable options a timeout multiplier that is tailored
     * for the current context.
     */
    return createRetryableOptions(transporter.hostsCache, statelessHosts).then(options => {
        return retry([...options.statelessHosts].reverse(), options.getTimeout);
    });
}
|
|
|
|
/**
 * Creates the transporter: the layer between the API client and the
 * `requester`. It selects read/write hosts, delegates retries to
 * `retryableRequest`, and caches read responses when requested.
 *
 * @param options - Bag containing hostsCache, logger, requester,
 *                  requestsCache, responsesCache, timeouts, userAgent,
 *                  hosts, queryParameters and headers.
 * @returns The transporter object, exposing `read` and `write`.
 */
function createTransporter(options) {
    const { hostsCache, logger, requester, requestsCache, responsesCache, timeouts, userAgent, hosts, queryParameters, headers, } = options;
    const transporter = {
        hostsCache,
        logger,
        requester,
        requestsCache,
        responsesCache,
        timeouts,
        userAgent,
        headers,
        queryParameters,
        // Normalize user-supplied hosts (strings or objects) to stateless hosts.
        hosts: hosts.map(host => createStatelessHost(host)),
        // Performs a read request, optionally served from / stored in caches.
        read(request, requestOptions) {
            /**
             * First, we compute the user request options. Keep in mind that,
             * using request options, the user is able to modify the entire
             * payload of the request, such as headers and query parameters.
             */
            const mappedRequestOptions = createMappedRequestOptions(requestOptions, transporter.timeouts.read);
            const createRetryableRequest = () => {
                /**
                 * Then, we prepare a function factory that contains the construction of
                 * the retryable request. At this point, we may *not* perform the actual
                 * request. But we want to have the function factory ready.
                 */
                return retryableRequest(transporter, transporter.hosts.filter(host => (host.accept & CallEnum.Read) !== 0), request, mappedRequestOptions);
            };
            /**
             * Once we have the function factory ready, we need to determine if the
             * request is "cacheable" - should be cached. Note that, once again,
             * the user can force this option.
             */
            const cacheable = mappedRequestOptions.cacheable !== undefined
                ? mappedRequestOptions.cacheable
                : request.cacheable;
            /**
             * If it is not "cacheable", we immediately trigger the retryable request;
             * no need to check cache implementations.
             */
            if (cacheable !== true) {
                return createRetryableRequest();
            }
            /**
             * If the request is "cacheable", we need to first compute the key to ask
             * the cache implementations if this request is in progress or if the
             * response already exists in the cache.
             */
            const key = {
                request,
                mappedRequestOptions,
                transporter: {
                    queryParameters: transporter.queryParameters,
                    headers: transporter.headers,
                },
            };
            /**
             * With the computed key, we first ask the responses cache
             * implementation if this request has been resolved before.
             */
            return transporter.responsesCache.get(key, () => {
                /**
                 * If the request has never resolved before, we ask if there
                 * is a current request with the same key in progress.
                 */
                return transporter.requestsCache.get(key, () => {
                    return (transporter.requestsCache
                        /**
                         * Finally, if there is no request in progress with the same key,
                         * this `createRetryableRequest()` will actually trigger the
                         * retryable request.
                         */
                        .set(key, createRetryableRequest())
                        // Whatever the outcome, drop the in-flight entry first, then
                        // settle with the original response (or re-reject the error).
                        .then(response => Promise.all([transporter.requestsCache.delete(key), response]), err => Promise.all([transporter.requestsCache.delete(key), Promise.reject(err)]))
                        .then(([_, response]) => response));
                });
            }, {
                /**
                 * Of course, once we get this response back from the server, we
                 * tell the response cache to actually store the received response
                 * to be used later.
                 */
                miss: response => transporter.responsesCache.set(key, response),
            });
        },
        // Performs a write request (indexing); never cached.
        write(request, requestOptions) {
            /**
             * On write requests, no cache mechanisms are applied, and we
             * proxy the request immediately to the requester.
             */
            return retryableRequest(transporter, transporter.hosts.filter(host => (host.accept & CallEnum.Write) !== 0), request, createMappedRequestOptions(requestOptions, transporter.timeouts.write));
        },
    };
    return transporter;
}
|
|
|
|
/**
 * Builds the mutable user-agent descriptor sent with every request.
 * `add` appends a `; segment (version)` suffix once per unique segment,
 * and returns the descriptor so calls can be chained.
 *
 * @param version - The client library version embedded in the base value.
 */
function createUserAgent(version) {
    const userAgent = {
        value: `Algolia for JavaScript (${version})`,
        add(options) {
            const suffix = options.version === undefined
                ? `; ${options.segment}`
                : `; ${options.segment} (${options.version})`;
            // Skip duplicates so repeated `add` calls stay idempotent.
            if (!userAgent.value.includes(suffix)) {
                // eslint-disable-next-line functional/immutable-data
                userAgent.value += suffix;
            }
            return userAgent;
        },
    };
    return userAgent;
}
|
|
|
|
/**
 * Parses a successful response body as JSON.
 *
 * @param response - The raw requester response; `content` is the body text.
 * @returns The parsed JSON value.
 * @throws A DeserializationError when the body is not valid JSON.
 */
function deserializeSuccess(response) {
    // eslint-disable-next-line functional/no-try-statement
    try {
        return JSON.parse(response.content);
    }
    catch (err) {
        throw createDeserializationError(err.message, response);
    }
}
|
|
/**
 * Builds an ApiError from a failed response. When the body is JSON with a
 * `message` field, that field is used; otherwise the raw body text is kept.
 *
 * @param response - Destructured `content` (body) and `status` (HTTP code).
 * @param stackFrame - Sanitized transporter stack trace to attach.
 */
function deserializeFailure({ content, status }, stackFrame) {
    // eslint-disable-next-line functional/no-let
    let message = content;
    // eslint-disable-next-line functional/no-try-statement
    try {
        const parsed = JSON.parse(content);
        message = parsed.message;
    }
    catch (err) {
        // Body was not JSON; keep the raw content as the message.
    }
    return createApiError(message, status, stackFrame);
}
|
|
|
|
/**
 * Mini sprintf: replaces each `%s` in `format` with the next argument,
 * URI-component-encoded.
 *
 * @param format - Template containing `%s` placeholders.
 * @param args - Values substituted in order.
 */
// eslint-disable-next-line functional/prefer-readonly-type
function encode(format, ...args) {
    // `args` is a local copy, so consuming it with shift() is safe.
    return format.replace(/%s/g, () => encodeURIComponent(args.shift()));
}
|
|
|
|
/**
 * Builds the full request URL from a host, a path and query parameters.
 *
 * @param host - Stateless host providing `protocol` and `url`.
 * @param path - Request path; a leading slash is stripped to avoid `//`.
 * @param queryParameters - Map serialized into the query string.
 * @returns The absolute URL, with `?query` appended only when non-empty.
 */
function serializeUrl(host, path, queryParameters) {
    const queryParametersAsString = serializeQueryParameters(queryParameters);
    // eslint-disable-next-line functional/no-let
    // `slice` replaces the deprecated `substr`; for a start-only argument
    // they are equivalent.
    let url = `${host.protocol}://${host.url}/${path.charAt(0) === '/' ? path.slice(1) : path}`;
    if (queryParametersAsString.length) {
        url += `?${queryParametersAsString}`;
    }
    return url;
}
|
|
/**
 * Serializes a parameters map into a `k=v&k2=v2` query string.
 * Plain objects and arrays are JSON-encoded first; every key and value is
 * then URI-component-encoded.
 *
 * @param parameters - Map of query parameter names to values.
 */
function serializeQueryParameters(parameters) {
    const needsJson = (value) => {
        const tag = Object.prototype.toString.call(value);
        return tag === '[object Object]' || tag === '[object Array]';
    };
    return Object.keys(parameters)
        .map(key => {
            const raw = parameters[key];
            const value = needsJson(raw) ? JSON.stringify(raw) : raw;
            return `${encodeURIComponent(key)}=${encodeURIComponent(value)}`;
        })
        .join('&');
}
|
|
/**
 * Serializes the request body. GET requests never carry a body (their data
 * is proxied to query parameters by the caller), and an absent body stays
 * undefined rather than becoming `"{}"`.
 *
 * @param request - The request definition (method, data).
 * @param requestOptions - Mapped options whose `data` is merged in.
 * @returns A JSON string, or undefined when there is nothing to send.
 */
function serializeData(request, requestOptions) {
    const isGet = request.method === MethodEnum.Get;
    const hasNoData = request.data === undefined && requestOptions.data === undefined;
    if (isGet || hasNoData) {
        return undefined;
    }
    // Array payloads (e.g. batch operations) are sent verbatim; object
    // payloads are merged with extra data from the request options.
    const payload = Array.isArray(request.data)
        ? request.data
        : { ...request.data, ...requestOptions.data };
    return JSON.stringify(payload);
}
|
|
/**
 * Merges transporter-level and request-level headers (request-level wins)
 * and lower-cases every header name so lookups are case-insensitive.
 *
 * @param transporter - Provides the default `headers`.
 * @param requestOptions - Provides per-request `headers` overrides.
 * @returns A new map of lower-cased header names to values.
 */
function serializeHeaders(transporter, requestOptions) {
    const merged = {
        ...transporter.headers,
        ...requestOptions.headers,
    };
    const serializedHeaders = {};
    for (const [name, value] of Object.entries(merged)) {
        // eslint-disable-next-line functional/immutable-data
        serializedHeaders[name.toLowerCase()] = value;
    }
    return serializedHeaders;
}
|
|
|
|
/**
 * Sanitizes every frame of a transporter stack trace before it is logged
 * or attached to an error.
 */
function stackTraceWithoutCredentials(stackTrace) {
    return stackTrace.map(frame => stackFrameWithoutCredentials(frame));
}
|
|
/**
 * Returns a copy of a stack frame with the `x-algolia-api-key` request
 * header masked, so credentials never leak into logs or error reports.
 * The input frame is not mutated.
 */
function stackFrameWithoutCredentials(stackFrame) {
    const headers = { ...stackFrame.request.headers };
    if (headers['x-algolia-api-key']) {
        // eslint-disable-next-line functional/immutable-data
        headers['x-algolia-api-key'] = '*****';
    }
    return {
        ...stackFrame,
        request: {
            ...stackFrame.request,
            headers,
        },
    };
}
|
|
|
|
/**
 * Builds the error thrown for a non-retryable API failure.
 * Note: a plain object (not an Error subclass) to preserve the client's
 * historical, serializable error shape.
 */
function createApiError(message, status, transporterStackTrace) {
    return {
        name: 'ApiError',
        message,
        status,
        transporterStackTrace,
    };
}
|
|
|
|
/**
 * Builds the error thrown when a successful response body cannot be parsed
 * as JSON; carries the raw response for debugging.
 */
function createDeserializationError(message, response) {
    return {
        name: 'DeserializationError',
        message,
        response,
    };
}
|
|
|
|
/**
 * Builds the error thrown once every host has been tried without success;
 * the attached stack trace details each failed attempt.
 */
function createRetryError(transporterStackTrace) {
    return {
        name: 'RetryError',
        message: 'Unreachable hosts - your application id may be incorrect. If the error persists, contact support@algolia.com.',
        transporterStackTrace,
    };
}
|
|
|
|
export { CallEnum, HostStatusEnum, createApiError, createDeserializationError, createMappedRequestOptions, createRetryError, createStatefulHost, createStatelessHost, createTransporter, createUserAgent, deserializeFailure, deserializeSuccess, isStatefulHostTimeouted, isStatefulHostUp, serializeData, serializeHeaders, serializeQueryParameters, serializeUrl, stackFrameWithoutCredentials, stackTraceWithoutCredentials };
|