| column | type | range / distinct values |
|---|---|---|
| function_name | string | length 1–57 |
| function_code | string | length 20–4.99k |
| documentation | string | length 50–2k |
| language | string | 5 values |
| file_path | string | length 8–166 |
| line_number | int32 | 4–16.7k |
| parameters | list | length 0–20 |
| return_type | string | length 0–131 |
| has_type_hints | bool | 2 values |
| complexity | int32 | 1–51 |
| quality_score | float32 | 6–9.68 |
| repo_name | string | 34 values |
| repo_stars | int32 | 2.9k–242k |
| docstring_style | string | 7 values |
| is_async | bool | 2 values |

Each record below follows the layout: function_name | function_code | documentation | language · file_path (line) | params | returns | type hints | complexity | quality | repo (stars) | docstring style | async.
ewm
|
def ewm(
self,
com: float | None = None,
span: float | None = None,
halflife: float | str | Timedelta | None = None,
alpha: float | None = None,
min_periods: int | None = 0,
adjust: bool = True,
ignore_na: bool = False,
times: np.ndarray | Series | None = None,
method: str = "single",
) -> ExponentialMovingWindowGroupby:
"""
Return an ewm grouper, providing ewm functionality per group.
Parameters
----------
com : float, optional
Specify decay in terms of center of mass.
Alternative to ``span``, ``halflife``, and ``alpha``.
span : float, optional
Specify decay in terms of span.
halflife : float, str, or Timedelta, optional
Specify decay in terms of half-life.
alpha : float, optional
Specify smoothing factor directly.
min_periods : int, default 0
Minimum number of observations in the window required to have a value;
otherwise, result is ``np.nan``.
adjust : bool, default True
Divide by decaying adjustment factor to account for imbalance in
relative weights.
ignore_na : bool, default False
Ignore missing values when calculating weights.
times : str or array-like of datetime64, optional
Times corresponding to the observations.
method : {'single', 'table'}, default 'single'
Execute the operation per group independently (``'single'``) or over the
entire object before regrouping (``'table'``). Only applicable to
``mean()``, and only when using ``engine='numba'``.
Returns
-------
pandas.api.typing.ExponentialMovingWindowGroupby
An object that supports exponentially weighted moving transformations over
each group.
See Also
--------
Series.ewm : EWM transformations for Series.
DataFrame.ewm : EWM transformations for DataFrames.
Series.groupby : Apply a function groupby to a Series.
DataFrame.groupby : Apply a function groupby.
Examples
--------
>>> df = pd.DataFrame(
... {
... "Class": ["A", "A", "A", "B", "B", "B"],
... "Value": [10, 20, 30, 40, 50, 60],
... }
... )
>>> df
Class Value
0 A 10
1 A 20
2 A 30
3 B 40
4 B 50
5 B 60
>>> df.groupby("Class").ewm(com=0.5).mean()
Value
Class
A 0 10.000000
1 17.500000
2 26.153846
B 3 40.000000
4 47.500000
5 56.153846
"""
from pandas.core.window import ExponentialMovingWindowGroupby
return ExponentialMovingWindowGroupby(
self._selected_obj,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na,
times=times,
method=method,
_grouper=self._grouper,
)
|
Return an ewm grouper, providing ewm functionality per group.
Parameters
----------
com : float, optional
Specify decay in terms of center of mass.
Alternative to ``span``, ``halflife``, and ``alpha``.
span : float, optional
Specify decay in terms of span.
halflife : float, str, or Timedelta, optional
Specify decay in terms of half-life.
alpha : float, optional
Specify smoothing factor directly.
min_periods : int, default 0
Minimum number of observations in the window required to have a value;
otherwise, result is ``np.nan``.
adjust : bool, default True
Divide by decaying adjustment factor to account for imbalance in
relative weights.
ignore_na : bool, default False
Ignore missing values when calculating weights.
times : str or array-like of datetime64, optional
Times corresponding to the observations.
method : {'single', 'table'}, default 'single'
Execute the operation per group independently (``'single'``) or over the
entire object before regrouping (``'table'``). Only applicable to
``mean()``, and only when using ``engine='numba'``.
Returns
-------
pandas.api.typing.ExponentialMovingWindowGroupby
An object that supports exponentially weighted moving transformations over
each group.
See Also
--------
Series.ewm : EWM transformations for Series.
DataFrame.ewm : EWM transformations for DataFrames.
Series.groupby : Apply a function groupby to a Series.
DataFrame.groupby : Apply a function groupby.
Examples
--------
>>> df = pd.DataFrame(
... {
... "Class": ["A", "A", "A", "B", "B", "B"],
... "Value": [10, 20, 30, 40, 50, 60],
... }
... )
>>> df
Class Value
0 A 10
1 A 20
2 A 30
3 B 40
4 B 50
5 B 60
>>> df.groupby("Class").ewm(com=0.5).mean()
Value
Class
A 0 10.000000
1 17.500000
2 26.153846
B 3 40.000000
4 47.500000
5 56.153846
| python · pandas/core/groupby/groupby.py (line 3,907) | params: [self, com, span, halflife, alpha, min_periods, adjust, ignore_na, times, method] | returns: ExponentialMovingWindowGroupby | type hints: true | complexity: 1 | quality: 7.04 | pandas-dev/pandas (47,362 stars) | numpy docstring | async: false |
isSameInstant
|
public static boolean isSameInstant(final Calendar cal1, final Calendar cal2) {
Objects.requireNonNull(cal1, "cal1");
Objects.requireNonNull(cal2, "cal2");
return cal1.getTime().getTime() == cal2.getTime().getTime();
}
|
Tests whether two calendar objects represent the same instant in time.
<p>This method compares the long millisecond time of the two objects.</p>
@param cal1 the first calendar, not altered, not null.
@param cal2 the second calendar, not altered, not null.
@return true if they represent the same millisecond instant.
@throws NullPointerException if either date is {@code null}.
@since 2.1
| java · src/main/java/org/apache/commons/lang3/time/DateUtils.java (line 899) | params: [cal1, cal2] | type hints: true | complexity: 1 | quality: 6.88 | apache/commons-lang (2,896 stars) | javadoc docstring | async: false |
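A minimal usage sketch for the method above (assuming commons-lang 3.x on the classpath; values are illustrative): two calendars in different time zones are the same instant exactly when their millisecond times match.

import java.util.Calendar;
import java.util.TimeZone;
import org.apache.commons.lang3.time.DateUtils;

long now = System.currentTimeMillis();
Calendar utc = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
Calendar tokyo = Calendar.getInstance(TimeZone.getTimeZone("Asia/Tokyo"));
utc.setTimeInMillis(now);
tokyo.setTimeInMillis(now);
DateUtils.isSameInstant(utc, tokyo); // true: same millisecond, zones differ
tokyo.add(Calendar.MILLISECOND, 1);
DateUtils.isSameInstant(utc, tokyo); // false: instants now differ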
getCipherFromParameters
|
private static Cipher getCipherFromParameters(String dekHeaderValue, char[] password) throws GeneralSecurityException, IOException {
final String padding = "PKCS5Padding";
final SecretKey encryptionKey;
final String[] valueTokens = dekHeaderValue.split(",");
if (valueTokens.length != 2) {
throw new IOException("Malformed PEM file, DEK-Info PEM header is invalid");
}
final String algorithm = valueTokens[0];
final String ivString = valueTokens[1];
final byte[] iv;
try {
iv = hexStringToByteArray(ivString);
} catch (IllegalArgumentException e) {
throw new IOException("Malformed PEM file, DEK-Info IV is invalid", e);
}
if ("DES-CBC".equals(algorithm)) {
byte[] key = generateOpenSslKey(password, iv, 8);
encryptionKey = new SecretKeySpec(key, DEPRECATED_DES_ALGORITHM);
} else if ("DES-EDE3-CBC".equals(algorithm)) {
byte[] key = generateOpenSslKey(password, iv, 24);
encryptionKey = new SecretKeySpec(key, DEPRECATED_DES_EDE_ALGORITHM);
} else if ("AES-128-CBC".equals(algorithm)) {
byte[] key = generateOpenSslKey(password, iv, 16);
encryptionKey = new SecretKeySpec(key, "AES");
} else if ("AES-192-CBC".equals(algorithm)) {
byte[] key = generateOpenSslKey(password, iv, 24);
encryptionKey = new SecretKeySpec(key, "AES");
} else if ("AES-256-CBC".equals(algorithm)) {
byte[] key = generateOpenSslKey(password, iv, 32);
encryptionKey = new SecretKeySpec(key, "AES");
} else {
throw new GeneralSecurityException("Private Key encrypted with unsupported algorithm [" + algorithm + "]");
}
String transformation = encryptionKey.getAlgorithm() + "/" + "CBC" + "/" + padding;
Cipher cipher = Cipher.getInstance(transformation);
cipher.init(Cipher.DECRYPT_MODE, encryptionKey, new IvParameterSpec(iv));
return cipher;
}
|
Creates a {@link Cipher} from the contents of the DEK-Info header of a PEM file. RFC 1421 indicates that supported algorithms are
defined in RFC 1423. RFC 1423 only defines DES-CBC and triple DES (EDE) in CBC mode. AES in CBC mode is also widely used, though,
in three variants (128, 192, and 256 bit keys).
@param dekHeaderValue The value of the DEK-Info PEM header
@param password The password with which the key is encrypted
@return a cipher of the appropriate algorithm and parameters to be used for decryption
@throws GeneralSecurityException if the algorithm is not available in the used security provider, or if the key is inappropriate
for the cipher
@throws IOException if the DEK-Info PEM header is invalid
| java · libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemUtils.java (line 501) | params: [dekHeaderValue, password] | returns: Cipher | type hints: true | complexity: 8 | quality: 7.44 | elastic/elasticsearch (75,680 stars) | javadoc docstring | async: false |
skipParameterStart
|
function skipParameterStart(): boolean {
if (isModifierKind(token())) {
// Skip modifiers
parseModifiers(/*allowDecorators*/ false);
}
if (isIdentifier() || token() === SyntaxKind.ThisKeyword) {
nextToken();
return true;
}
if (token() === SyntaxKind.OpenBracketToken || token() === SyntaxKind.OpenBraceToken) {
// Return true if we can parse an array or object binding pattern with no errors
const previousErrorCount = parseDiagnostics.length;
parseIdentifierOrPattern();
return previousErrorCount === parseDiagnostics.length;
}
return false;
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
| typescript · src/compiler/parser.ts (line 4,863) | params: [] | type hints: true | complexity: 6 | quality: 6.88 | microsoft/TypeScript (107,154 stars) | jsdoc docstring | async: false |
completeValidation
|
private void completeValidation() {
if (hasPosition()) {
transitionState(FetchStates.FETCHING, () -> this.nextRetryTimeMs = null);
}
}
|
Clear the awaiting validation state and enter fetching.
| java · clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java (line 1,141) | params: [] | returns: void | type hints: true | complexity: 2 | quality: 6.56 | apache/kafka (31,560 stars) | javadoc docstring | async: false |
tensorsolve
|
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=x.ndim)``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorinv, numpy.einsum
Examples
--------
>>> import numpy as np
>>> a = np.eye(2*3*4).reshape((2*3, 4, 2, 3, 4))
>>> rng = np.random.default_rng()
>>> b = rng.normal(size=(2*3, 4))
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an - b.ndim):]
prod = 1
for k in oldshape:
prod *= k
if a.size != prod ** 2:
raise LinAlgError(
"Input arrays must satisfy the requirement \
prod(a.shape[b.ndim:]) == prod(a.shape[:b.ndim])"
)
a = a.reshape(prod, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
|
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=x.ndim)``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorinv, numpy.einsum
Examples
--------
>>> import numpy as np
>>> a = np.eye(2*3*4).reshape((2*3, 4, 2, 3, 4))
>>> rng = np.random.default_rng()
>>> b = rng.normal(size=(2*3, 4))
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
| python · numpy/linalg/_linalg.py (line 282) | params: [a, b, axes] | type hints: false | complexity: 5 | quality: 7.6 | numpy/numpy (31,054 stars) | numpy docstring | async: false |
compareTo
|
@Override
public int compareTo(final Triple<L, M, R> other) {
return new CompareToBuilder().append(getLeft(), other.getLeft())
.append(getMiddle(), other.getMiddle())
.append(getRight(), other.getRight()).toComparison();
}
|
Compares the triple based on the left element, followed by the middle element,
finally the right element.
The types must be {@link Comparable}.
@param other the other triple, not null.
@return negative if this is less, zero if equal, positive if greater.
| java · src/main/java/org/apache/commons/lang3/tuple/Triple.java (line 115) | params: [other] | type hints: true | complexity: 1 | quality: 7.04 | apache/commons-lang (2,896 stars) | javadoc docstring | async: false |
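A usage sketch for the comparison contract above (assuming commons-lang 3.x; Triple.of is the standard factory): ordering is decided by the left element first, falling through to middle and right only on ties.

import org.apache.commons.lang3.tuple.Triple;

Triple<String, Integer, Integer> a = Triple.of("apple", 1, 9);
Triple<String, Integer, Integer> b = Triple.of("apple", 2, 0);
a.compareTo(b); // negative: lefts tie, so 1 < 2 decides before the rights are consulted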
getStringRep
|
public String getStringRep() {
if (duration < 0 && TimeUnit.MILLISECONDS == timeUnit) {
return Long.toString(duration);
}
return switch (timeUnit) {
case NANOSECONDS -> duration + "nanos";
case MICROSECONDS -> duration + "micros";
case MILLISECONDS -> duration + "ms";
case SECONDS -> duration + "s";
case MINUTES -> duration + "m";
case HOURS -> duration + "h";
case DAYS -> duration + "d";
};
}
|
Returns a {@link String} representation of the current {@link TimeValue}.
Note that this method might produce fractional time values (e.g. 1.6m) which cannot be
parsed by methods like {@link TimeValue#parse(String, String, String, String)}. The number of
fractional decimals (up to 10 maximum) are truncated to the number of fraction pieces
specified.
Also note that the maximum string value that will be generated is
{@code 106751.9d} due to the way that values are internally converted
to nanoseconds (106751.9 days is Long.MAX_VALUE nanoseconds)
@param fractionPieces the number of decimal places to include
| java · libs/core/src/main/java/org/elasticsearch/core/TimeValue.java (line 342) | params: [] | returns: String | type hints: true | complexity: 3 | quality: 6.72 | elastic/elasticsearch (75,680 stars) | javadoc docstring | async: false |
fit
|
def fit(self, X, y=None, sample_weight=None):
"""Compute bisecting k-means clustering.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster.
.. note:: The data will be converted to C ordering,
which will cause a memory copy
if the given data is not C-contiguous.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable.
Returns
-------
self
Fitted estimator.
"""
X = validate_data(
self,
X,
accept_sparse="csr",
dtype=[np.float64, np.float32],
order="C",
copy=self.copy_x,
accept_large_sparse=False,
)
self._check_params_vs_input(X)
self._random_state = check_random_state(self.random_state)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self._n_threads = _openmp_effective_n_threads()
if self.algorithm == "lloyd" or self.n_clusters == 1:
self._kmeans_single = _kmeans_single_lloyd
self._check_mkl_vcomp(X, X.shape[0])
else:
self._kmeans_single = _kmeans_single_elkan
# Subtract of mean of X for more accurate distance computations
if not sp.issparse(X):
self._X_mean = X.mean(axis=0)
X -= self._X_mean
# Initialize the hierarchical clusters tree
self._bisecting_tree = _BisectingTree(
indices=np.arange(X.shape[0]),
center=X.mean(axis=0),
score=0,
)
x_squared_norms = row_norms(X, squared=True)
for _ in range(self.n_clusters - 1):
# Chose cluster to bisect
cluster_to_bisect = self._bisecting_tree.get_cluster_to_bisect()
# Split this cluster into 2 subclusters
self._bisect(X, x_squared_norms, sample_weight, cluster_to_bisect)
# Aggregate final labels and centers from the bisecting tree
self.labels_ = np.full(X.shape[0], -1, dtype=np.int32)
self.cluster_centers_ = np.empty((self.n_clusters, X.shape[1]), dtype=X.dtype)
for i, cluster_node in enumerate(self._bisecting_tree.iter_leaves()):
self.labels_[cluster_node.indices] = i
self.cluster_centers_[i] = cluster_node.center
cluster_node.label = i # label final clusters for future prediction
cluster_node.indices = None # release memory
# Restore original data
if not sp.issparse(X):
X += self._X_mean
self.cluster_centers_ += self._X_mean
_inertia = _inertia_sparse if sp.issparse(X) else _inertia_dense
self.inertia_ = _inertia(
X, sample_weight, self.cluster_centers_, self.labels_, self._n_threads
)
self._n_features_out = self.cluster_centers_.shape[0]
return self
|
Compute bisecting k-means clustering.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster.
.. note:: The data will be converted to C ordering,
which will cause a memory copy
if the given data is not C-contiguous.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable.
Returns
-------
self
Fitted estimator.
| python · sklearn/cluster/_bisect_k_means.py (line 363) | params: [self, X, y, sample_weight] | type hints: false | complexity: 9 | quality: 6 | scikit-learn/scikit-learn (64,340 stars) | numpy docstring | async: false |
split
|
public Deque<ProducerBatch> split(int splitBatchSize) {
RecordBatch recordBatch = validateAndGetRecordBatch();
Deque<ProducerBatch> batches = splitRecordsIntoBatches(recordBatch, splitBatchSize);
finalizeSplitBatches(batches);
return batches;
}
|
Finalize the state of a batch. Final state, once set, is immutable. This function may be called
once or twice on a batch. It may be called twice if
1. An inflight batch expires before a response from the broker is received. The batch's final
state is set to FAILED. But it could succeed on the broker and second time around batch.done() may
try to set SUCCEEDED final state.
2. If a transaction abortion happens or if the producer is closed forcefully, the final state is
ABORTED but again it could succeed if broker responds with a success.
Attempted transitions from [FAILED | ABORTED] --> SUCCEEDED are logged.
Attempted transitions from one failure state to the same or a different failed state are ignored.
Attempted transitions from SUCCEEDED to the same or a failed state throw an exception.
@param baseOffset The base offset of the messages assigned by the server
@param logAppendTime The log append time or -1 if CreateTime is being used
@param topLevelException The exception that occurred (or null if the request was successful)
@param recordExceptions Record exception function mapping batchIndex to the respective record exception
@return true if the batch was completed successfully and false if the batch was previously aborted
| java · clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerBatch.java (line 323) | params: [splitBatchSize] | type hints: true | complexity: 1 | quality: 6.88 | apache/kafka (31,560 stars) | javadoc docstring | async: false |
createInternal
|
static KafkaAdminClient createInternal(
AdminClientConfig config,
TimeoutProcessorFactory timeoutProcessorFactory,
HostResolver hostResolver
) {
Metrics metrics = null;
NetworkClient networkClient = null;
Time time = Time.SYSTEM;
String clientId = generateClientId(config);
ApiVersions apiVersions = new ApiVersions();
LogContext logContext = createLogContext(clientId);
Optional<ClientTelemetryReporter> clientTelemetryReporter;
try {
// Since we only request node information, it's safe to pass true for allowAutoTopicCreation (and it
// simplifies communication with older brokers)
AdminBootstrapAddresses adminAddresses = AdminBootstrapAddresses.fromConfig(config);
AdminMetadataManager metadataManager = new AdminMetadataManager(logContext,
config.getLong(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG),
config.getLong(AdminClientConfig.METADATA_MAX_AGE_CONFIG),
adminAddresses.usingBootstrapControllers());
metadataManager.update(Cluster.bootstrap(adminAddresses.addresses()), time.milliseconds());
List<MetricsReporter> reporters = CommonClientConfigs.metricsReporters(clientId, config);
clientTelemetryReporter = CommonClientConfigs.telemetryReporter(clientId, config);
clientTelemetryReporter.ifPresent(reporters::add);
Map<String, String> metricTags = Collections.singletonMap("client-id", clientId);
MetricConfig metricConfig = new MetricConfig().samples(config.getInt(AdminClientConfig.METRICS_NUM_SAMPLES_CONFIG))
.timeWindow(config.getLong(AdminClientConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS)
.recordLevel(Sensor.RecordingLevel.forName(config.getString(AdminClientConfig.METRICS_RECORDING_LEVEL_CONFIG)))
.tags(metricTags);
MetricsContext metricsContext = new KafkaMetricsContext(JMX_PREFIX,
config.originalsWithPrefix(CommonClientConfigs.METRICS_CONTEXT_PREFIX));
metrics = new Metrics(metricConfig, reporters, time, metricsContext);
networkClient = ClientUtils.createNetworkClient(config,
clientId,
metrics,
"admin-client",
logContext,
apiVersions,
time,
1,
(int) TimeUnit.HOURS.toMillis(1),
null,
metadataManager.updater(),
(hostResolver == null) ? new DefaultHostResolver() : hostResolver,
null,
clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null));
return new KafkaAdminClient(config, clientId, time, metadataManager, metrics, networkClient,
timeoutProcessorFactory, logContext, clientTelemetryReporter);
} catch (Throwable exc) {
closeQuietly(metrics, "Metrics");
closeQuietly(networkClient, "NetworkClient");
throw new KafkaException("Failed to create new KafkaAdminClient", exc);
}
}
|
Pretty-print an exception.
@param throwable The exception.
@return A compact human-readable string.
| java · clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java (line 519) | params: [config, timeoutProcessorFactory, hostResolver] | returns: KafkaAdminClient | type hints: true | complexity: 3 | quality: 8 | apache/kafka (31,560 stars) | javadoc docstring | async: false |
sensor
|
public Sensor sensor(String name, Sensor.RecordingLevel recordingLevel) {
return sensor(name, null, recordingLevel, (Sensor[]) null);
}
|
Get or create a sensor with the given unique name and no parent sensors and with a given
recording level.
@param name The sensor name.
@param recordingLevel The recording level.
@return The sensor
| java · clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java (line 336) | params: [name, recordingLevel] | returns: Sensor | type hints: true | complexity: 1 | quality: 6.8 | apache/kafka (31,560 stars) | javadoc docstring | async: false |
_reference_dates
|
def _reference_dates(
self, start_date: Timestamp, end_date: Timestamp
) -> DatetimeIndex:
"""
Get reference dates for the holiday.
Return reference dates for the holiday also returning the year
prior to the start_date and year following the end_date. This ensures
that any offsets to be applied will yield the holidays within
the passed in dates.
"""
if self.start_date is not None:
start_date = self.start_date.tz_localize(start_date.tz)
if self.end_date is not None:
end_date = self.end_date.tz_localize(start_date.tz)
year_offset = DateOffset(years=1)
reference_start_date = Timestamp(
datetime(start_date.year - 1, self.month, self.day)
)
reference_end_date = Timestamp(
datetime(end_date.year + 1, self.month, self.day)
)
# Don't process unnecessary holidays
dates = date_range(
start=reference_start_date,
end=reference_end_date,
freq=year_offset,
tz=start_date.tz,
)
return dates
|
Get reference dates for the holiday.
Return reference dates for the holiday also returning the year
prior to the start_date and year following the end_date. This ensures
that any offsets to be applied will yield the holidays within
the passed in dates.
| python · pandas/tseries/holiday.py (line 361) | params: [self, start_date, end_date] | returns: DatetimeIndex | type hints: true | complexity: 3 | quality: 6 | pandas-dev/pandas (47,362 stars) | unknown docstring | async: false |
andCondition
|
public Builder andCondition(Class<? extends Annotation> condition, Object... details) {
Assert.notNull(condition, "'condition' must not be null");
return andCondition("@" + ClassUtils.getShortName(condition), details);
}
|
Return a new builder to construct a new {@link ConditionMessage} based on the
instance and a new condition outcome.
@param condition the condition
@param details details of the condition
@return a {@link Builder} builder
@see #andCondition(String, Object...)
@see #forCondition(Class, Object...)
| java · core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/ConditionMessage.java (line 112) | params: [condition] | returns: Builder | type hints: true | complexity: 1 | quality: 6.32 | spring-projects/spring-boot (79,428 stars) | javadoc docstring | async: false |
getIteratee
|
function getIteratee() {
var result = lodash.iteratee || iteratee;
result = result === iteratee ? baseIteratee : result;
return arguments.length ? result(arguments[0], arguments[1]) : result;
}
|
Gets the appropriate "iteratee" function. If `_.iteratee` is customized,
this function returns the custom method, otherwise it returns `baseIteratee`.
If arguments are provided, the chosen function is invoked with them and
its result is returned.
@private
@param {*} [value] The value to convert to an iteratee.
@param {number} [arity] The arity of the created iteratee.
@returns {Function} Returns the chosen function or its result.
| javascript · lodash.js (line 6,026) | params: [] | type hints: false | complexity: 4 | quality: 6.96 | lodash/lodash (61,490 stars) | jsdoc docstring | async: false |
getResourceLocation
|
private String getResourceLocation(ConfigDataLocationResolverContext context,
ConfigDataLocation configDataLocation) {
String resourceLocation = configDataLocation.getNonPrefixedValue(PREFIX);
boolean isFixedPath = resourceLocation.startsWith("/") || URL_PREFIX.matcher(resourceLocation).matches();
if (isFixedPath) {
return resourceLocation;
}
ConfigDataResource parent = context.getParent();
if (parent instanceof StandardConfigDataResource resource) {
String parentResourceLocation = resource.getReference().getResourceLocation();
String parentDirectory = parentResourceLocation.substring(0, parentResourceLocation.lastIndexOf("/") + 1);
return parentDirectory + resourceLocation;
}
return resourceLocation;
}
|
Create a new {@link StandardConfigDataLocationResolver} instance.
@param logFactory the factory for loggers to use
@param binder a binder backed by the initial {@link Environment}
@param resourceLoader a {@link ResourceLoader} used to load resources
| java · core/spring-boot/src/main/java/org/springframework/boot/context/config/StandardConfigDataLocationResolver.java (line 165) | params: [context, configDataLocation] | returns: String | type hints: true | complexity: 4 | quality: 6.08 | spring-projects/spring-boot (79,428 stars) | javadoc docstring | async: false |
fillHoleAt
|
int fillHoleAt(int index) {
int minGrandchildIndex;
while ((minGrandchildIndex = findMinGrandChild(index)) > 0) {
queue[index] = elementData(minGrandchildIndex);
index = minGrandchildIndex;
}
return index;
}
|
Fills the hole at {@code index} by moving in the least of its grandchildren to this position,
then recursively filling the new hole created.
@return the position of the new hole (where the lowest grandchild moved from, that had no
grandchild to replace it)
| java · android/guava/src/com/google/common/collect/MinMaxPriorityQueue.java (line 720) | params: [index] | type hints: true | complexity: 2 | quality: 6.72 | google/guava (51,352 stars) | javadoc docstring | async: false |
afterPropertiesSet
|
@Override
public void afterPropertiesSet() {
if (getCacheManager() == null) {
setCacheManager(Caching.getCachingProvider().getCacheManager());
}
super.afterPropertiesSet();
}
|
Return whether this cache manager accepts and converts {@code null} values
for all of its caches.
| java · spring-context-support/src/main/java/org/springframework/cache/jcache/JCacheCacheManager.java (line 98) | params: [] | returns: void | type hints: true | complexity: 2 | quality: 7.04 | spring-projects/spring-framework (59,386 stars) | javadoc docstring | async: false |
resolveEntity
|
@Override
public @Nullable InputSource resolveEntity(@Nullable String publicId, @Nullable String systemId) throws IOException {
if (logger.isTraceEnabled()) {
logger.trace("Trying to resolve XML entity with public id [" + publicId +
"] and system id [" + systemId + "]");
}
if (systemId != null) {
String resourceLocation = getSchemaMappings().get(systemId);
if (resourceLocation == null && systemId.startsWith("https:")) {
// Retrieve canonical http schema mapping even for https declaration
resourceLocation = getSchemaMappings().get("http:" + systemId.substring(6));
}
if (resourceLocation != null) {
Resource resource = new ClassPathResource(resourceLocation, this.classLoader);
try {
InputSource source = new InputSource(resource.getInputStream());
source.setPublicId(publicId);
source.setSystemId(systemId);
if (logger.isTraceEnabled()) {
logger.trace("Found XML schema [" + systemId + "] in classpath: " + resourceLocation);
}
return source;
}
catch (FileNotFoundException ex) {
if (logger.isDebugEnabled()) {
logger.debug("Could not find XML schema [" + systemId + "]: " + resource, ex);
}
}
}
}
// Fall back to the parser's default behavior.
return null;
}
|
Loads the schema URL → schema file location mappings using the given
mapping file pattern.
@param classLoader the ClassLoader to use for loading
(can be {@code null} to use the default ClassLoader)
@param schemaMappingsLocation the location of the file that defines schema mappings
(must not be empty)
@see PropertiesLoaderUtils#loadAllProperties(String, ClassLoader)
| java · spring-beans/src/main/java/org/springframework/beans/factory/xml/PluggableSchemaResolver.java (line 105) | params: [publicId, systemId] | returns: InputSource | type hints: true | complexity: 9 | quality: 6.4 | spring-projects/spring-framework (59,386 stars) | javadoc docstring | async: false |
createBeanFactory
|
protected DefaultListableBeanFactory createBeanFactory() {
return new DefaultListableBeanFactory(getInternalParentBeanFactory());
}
|
Create an internal bean factory for this context.
Called for each {@link #refresh()} attempt.
<p>The default implementation creates a
{@link org.springframework.beans.factory.support.DefaultListableBeanFactory}
with the {@linkplain #getInternalParentBeanFactory() internal bean factory} of this
context's parent as parent bean factory. Can be overridden in subclasses,
for example to customize DefaultListableBeanFactory's settings.
@return the bean factory for this context
@see org.springframework.beans.factory.support.DefaultListableBeanFactory#setAllowBeanDefinitionOverriding
@see org.springframework.beans.factory.support.DefaultListableBeanFactory#setAllowEagerClassLoading
@see org.springframework.beans.factory.support.DefaultListableBeanFactory#setAllowCircularReferences
@see org.springframework.beans.factory.support.DefaultListableBeanFactory#setAllowRawInjectionDespiteWrapping
| java · spring-context/src/main/java/org/springframework/context/support/AbstractRefreshableApplicationContext.java (line 195) | params: [] | returns: DefaultListableBeanFactory | type hints: true | complexity: 1 | quality: 6 | spring-projects/spring-framework (59,386 stars) | javadoc docstring | async: false |
collapseFrom
|
public String collapseFrom(CharSequence sequence, char replacement) {
// This implementation avoids unnecessary allocation.
int len = sequence.length();
for (int i = 0; i < len; i++) {
char c = sequence.charAt(i);
if (matches(c)) {
if (c == replacement && (i == len - 1 || !matches(sequence.charAt(i + 1)))) {
// a no-op replacement
i++;
} else {
StringBuilder builder = new StringBuilder(len).append(sequence, 0, i).append(replacement);
return finishCollapseFrom(sequence, i + 1, len, replacement, builder, true);
}
}
}
// no replacement needed
return sequence.toString();
}
|
Returns a string copy of the input character sequence, with each group of consecutive matching
BMP characters replaced by a single replacement character. For example:
{@snippet :
CharMatcher.anyOf("eko").collapseFrom("bookkeeper", '-')
}
... returns {@code "b-p-r"}.
<p>The default implementation uses {@link #indexIn(CharSequence)} to find the first matching
character, then iterates the remainder of the sequence calling {@link #matches(char)} for each
character.
@param sequence the character sequence to replace matching groups of characters in
@param replacement the character to append to the result string in place of each group of
matching characters in {@code sequence}
@return the new string
| java · android/guava/src/com/google/common/base/CharMatcher.java (line 840) | params: [sequence, replacement] | returns: String | type hints: true | complexity: 6 | quality: 7.6 | google/guava (51,352 stars) | javadoc docstring | async: false |
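A short sketch of the collapsing behavior described above (assuming Guava on the classpath; the first call mirrors the javadoc's own example):

import com.google.common.base.CharMatcher;

CharMatcher.anyOf("eko").collapseFrom("bookkeeper", '-'); // "b-p-r": each run of e/k/o chars becomes one '-'
CharMatcher.whitespace().collapseFrom("a  b\t\tc", ' ');  // "a b c": whitespace runs collapse to single spaces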
timeToNextUpdate
|
public synchronized long timeToNextUpdate(long nowMs) {
long timeToExpire = updateRequested() ? 0 : Math.max(this.lastSuccessfulRefreshMs + this.metadataExpireMs - nowMs, 0);
return Math.max(timeToExpire, timeToAllowUpdate(nowMs));
}
|
The next time to update the cluster info is the maximum of the time the current info will expire and the time the
current info can be updated (i.e. backoff time has elapsed). If an update has been requested, the metadata
expiry time is now.
@param nowMs current time in ms
@return remaining time in ms till updating the cluster info
| java · clients/src/main/java/org/apache/kafka/clients/Metadata.java (line 178) | params: [nowMs] | type hints: true | complexity: 2 | quality: 8 | apache/kafka (31,560 stars) | javadoc docstring | async: false |
namingPattern
|
public Builder namingPattern(final String namingPattern) {
this.namingPattern = Objects.requireNonNull(namingPattern, "pattern");
return this;
}
|
Sets the naming pattern to be used by the new {@code
BasicThreadFactory}.
@param namingPattern the naming pattern (must not be <strong>null</strong>)
@return a reference to this {@link Builder}
@throws NullPointerException if the naming pattern is <strong>null</strong>
| java · src/main/java/org/apache/commons/lang3/concurrent/BasicThreadFactory.java (line 175) | params: [namingPattern] | returns: Builder | type hints: true | complexity: 1 | quality: 6.16 | apache/commons-lang (2,896 stars) | javadoc docstring | async: false |
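A builder sketch for the naming pattern above (assuming commons-lang 3.x; the pool size is illustrative). The pattern is a format template, so %d receives the factory's thread counter:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;

BasicThreadFactory factory = new BasicThreadFactory.Builder()
        .namingPattern("worker-%d") // threads are named worker-1, worker-2, ...
        .daemon(true)
        .build();
ExecutorService pool = Executors.newFixedThreadPool(2, factory);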
getAndAdd
|
public float getAndAdd(final float operand) {
final float last = value;
this.value += operand;
return last;
}
|
Increments this instance's value by {@code operand}; this method returns the value associated with the instance
immediately prior to the addition operation. This method is not thread safe.
@param operand the quantity to add, not null.
@return the value associated with this instance immediately before the operand was added.
@since 3.5
| java · src/main/java/org/apache/commons/lang3/mutable/MutableFloat.java (line 227) | params: [operand] | type hints: true | complexity: 1 | quality: 6.88 | apache/commons-lang (2,896 stars) | javadoc docstring | async: false |
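A sketch of the get-then-add semantics above (assuming commons-lang 3.5+, where getAndAdd was introduced):

import org.apache.commons.lang3.mutable.MutableFloat;

MutableFloat f = new MutableFloat(1.0f);
float before = f.getAndAdd(0.5f); // 1.0f: the value prior to the addition
float after = f.floatValue();     // 1.5f: the operand has now been applied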
transformAndEmitVariableDeclarationList
|
function transformAndEmitVariableDeclarationList(node: VariableDeclarationList): VariableDeclarationList | undefined {
for (const variable of node.declarations) {
const name = factory.cloneNode(variable.name as Identifier);
setCommentRange(name, variable.name);
hoistVariableDeclaration(name);
}
const variables = getInitializedVariables(node);
const numVariables = variables.length;
let variablesWritten = 0;
let pendingExpressions: Expression[] = [];
while (variablesWritten < numVariables) {
for (let i = variablesWritten; i < numVariables; i++) {
const variable = variables[i];
if (containsYield(variable.initializer) && pendingExpressions.length > 0) {
break;
}
pendingExpressions.push(transformInitializedVariable(variable));
}
if (pendingExpressions.length) {
emitStatement(factory.createExpressionStatement(factory.inlineExpressions(pendingExpressions)));
variablesWritten += pendingExpressions.length;
pendingExpressions = [];
}
}
return undefined;
}
|
Visits an ElementAccessExpression that contains a YieldExpression.
@param node The node to visit.
| typescript · src/compiler/transformers/generators.ts (line 1,356) | params: [node] | type hints: true | complexity: 6 | quality: 6.08 | microsoft/TypeScript (107,154 stars) | jsdoc docstring | async: false |
consumingIterable
|
public static <T extends @Nullable Object> Iterable<T> consumingIterable(Iterable<T> iterable) {
checkNotNull(iterable);
return new FluentIterable<T>() {
@Override
public Iterator<T> iterator() {
return (iterable instanceof Queue)
? new ConsumingQueueIterator<>((Queue<T>) iterable)
: Iterators.consumingIterator(iterable.iterator());
}
@Override
public String toString() {
return "Iterables.consumingIterable(...)";
}
};
}
|
Returns a view of the supplied iterable that wraps each generated {@link Iterator} through
{@link Iterators#consumingIterator(Iterator)}.
<p>Note: If {@code iterable} is a {@link Queue}, the returned iterable will instead use {@link
Queue#isEmpty} and {@link Queue#remove()}, since {@link Queue}'s iteration order is undefined.
Calling {@link Iterator#hasNext()} on a generated iterator from the returned iterable may cause
an item to be immediately dequeued for return on a subsequent call to {@link Iterator#next()}.
<p>Whether the input {@code iterable} is a {@link Queue} or not, the returned {@code Iterable}
is not thread-safe.
@param iterable the iterable to wrap
@return a view of the supplied iterable that wraps each generated iterator through {@link
Iterators#consumingIterator(Iterator)}; for queues, an iterable that generates iterators
that return and consume the queue's elements in queue order
@see Iterators#consumingIterator(Iterator)
@since 2.0
| java · android/guava/src/com/google/common/collect/Iterables.java (line 1,019) | params: [iterable] | type hints: true | complexity: 2 | quality: 7.44 | google/guava (51,352 stars) | javadoc docstring | async: false |
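A sketch of the consume-on-iterate behavior above (assuming Guava on the classpath; the queue contents are illustrative):

import java.util.ArrayDeque;
import java.util.List;
import java.util.Queue;
import com.google.common.collect.Iterables;

Queue<String> queue = new ArrayDeque<>(List.of("a", "b", "c"));
for (String s : Iterables.consumingIterable(queue)) {
    // each step removes the head of the queue via Queue.remove()
}
queue.isEmpty(); // true: iteration drained the queue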
generateBeanClassCode
|
private CodeBlock generateBeanClassCode(String targetPackage, Class<?> beanClass) {
if (Modifier.isPublic(beanClass.getModifiers()) || targetPackage.equals(beanClass.getPackageName())) {
return CodeBlock.of("$T.class", beanClass);
}
else {
return CodeBlock.of("$S", beanClass.getName());
}
}
|
Extract the target class of a public {@link FactoryBean} based on its
constructor. If the implementation does not resolve the target class
because it itself uses a generic, attempt to extract it from the bean type.
@param factoryBeanType the factory bean type
@param beanType the bean type
@return the target class to use
| java · spring-beans/src/main/java/org/springframework/beans/factory/aot/DefaultBeanRegistrationCodeFragments.java (line 139) | params: [targetPackage, beanClass] | returns: CodeBlock | type hints: true | complexity: 3 | quality: 7.76 | spring-projects/spring-framework (59,386 stars) | javadoc docstring | async: false |
markRenderEnd
|
function markRenderEnd() {
if (state.render === EventPhase.InProgress) {
performance.mark('render/end');
state.render = EventPhase.Finished;
}
}
|
Mark the end of the animation frame performing the rendering.
| typescript · src/vs/base/browser/performance.ts (line 120) | params: [] | type hints: false | complexity: 2 | quality: 6.08 | microsoft/vscode (179,840 stars) | jsdoc docstring | async: false |
_version_header
|
def _version_header(cls) -> bytes:
"""Generate the version header bytes.
Returns:
A byte string representing the current cache version header.
"""
return sha256(str(cls._version).encode()).digest()[: cls._version_header_length]
|
Generate the version header bytes.
Returns:
A byte string representing the current cache version header.
| python · torch/_inductor/runtime/caching/implementations.py (line 223) | params: [cls] | returns: bytes | type hints: true | complexity: 1 | quality: 6.24 | pytorch/pytorch (96,034 stars) | unknown docstring | async: false |
size
|
@Override
public int size() {
Map<K, V> delegate = delegateOrNull();
return (delegate != null) ? delegate.size() : size;
}
|
Updates the index an iterator is pointing to after a call to remove: returns the index of the
entry that should be looked at after a removal on indexRemoved, with indexBeforeRemove as the
index that *was* the next entry that would be looked at.
| java · android/guava/src/com/google/common/collect/CompactHashMap.java (line 878) | params: [] | type hints: true | complexity: 2 | quality: 6.4 | google/guava (51,352 stars) | javadoc docstring | async: false |
bucketCount
|
default int bucketCount() {
int count = 0;
BucketIterator it = iterator();
while (it.hasNext()) {
count++;
it.advance();
}
return count;
}
|
Returns the number of buckets. Note that this operation might require iterating over all buckets, and therefore is not cheap.
@return the number of buckets
| java · libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogram.java (line 151) | params: [] | type hints: true | complexity: 2 | quality: 8.08 | elastic/elasticsearch (75,680 stars) | javadoc docstring | async: false |
instantiateEnvironmentPostProcessor
|
private EnvironmentPostProcessor instantiateEnvironmentPostProcessor(String postProcessorClassName,
@Nullable ClassLoader classLoader) {
try {
Class<?> initializerClass = ClassUtils.resolveClassName(postProcessorClassName, classLoader);
Assert.isAssignable(EnvironmentPostProcessor.class, initializerClass);
return (EnvironmentPostProcessor) BeanUtils.instantiateClass(initializerClass);
}
catch (BeanInstantiationException ex) {
throw new IllegalArgumentException(
"Failed to instantiate EnvironmentPostProcessor: " + postProcessorClassName, ex);
}
}
|
Factory method that creates an {@link EnvironmentPostProcessorApplicationListener}
with a specific {@link EnvironmentPostProcessorsFactory}.
@param postProcessorsFactory the environment post processor factory
@return an {@link EnvironmentPostProcessorApplicationListener} instance
| java · core/spring-boot/src/main/java/org/springframework/boot/support/EnvironmentPostProcessorApplicationListener.java (line 174) | params: [postProcessorClassName, classLoader] | returns: EnvironmentPostProcessor | type hints: true | complexity: 2 | quality: 7.12 | spring-projects/spring-boot (79,428 stars) | javadoc docstring | async: false |
splitByWholeSeparator
|
public static String[] splitByWholeSeparator(final String str, final String separator) {
return splitByWholeSeparatorWorker(str, separator, -1, false);
}
|
Splits the provided text into an array, separator string specified.
<p>
The separator(s) will not be included in the returned String array. Adjacent separators are treated as one separator.
</p>
<p>
A {@code null} input String returns {@code null}. A {@code null} separator splits on whitespace.
</p>
<pre>
StringUtils.splitByWholeSeparator(null, *) = null
StringUtils.splitByWholeSeparator("", *) = []
StringUtils.splitByWholeSeparator("ab de fg", null) = ["ab", "de", "fg"]
StringUtils.splitByWholeSeparator("ab de fg", null) = ["ab", "de", "fg"]
StringUtils.splitByWholeSeparator("ab:cd:ef", ":") = ["ab", "cd", "ef"]
StringUtils.splitByWholeSeparator("ab-!-cd-!-ef", "-!-") = ["ab", "cd", "ef"]
</pre>
@param str the String to parse, may be null.
@param separator String containing the String to be used as a delimiter, {@code null} splits on whitespace.
@return an array of parsed Strings, {@code null} if null String was input.
| java · src/main/java/org/apache/commons/lang3/StringUtils.java (line 7,247) | params: [str, separator] | type hints: true | complexity: 1 | quality: 6.32 | apache/commons-lang (2,896 stars) | javadoc docstring | async: false |
formatTimeStamp
|
private static WritableJson formatTimeStamp(Instant timeStamp) {
return (out) -> out.append(new BigDecimal(timeStamp.getEpochMillisecond()).movePointLeft(3).toPlainString());
}
|
GELF requires "seconds since UNIX epoch with optional <b>decimal places for
milliseconds</b>". To comply with this requirement, we format a POSIX timestamp
with millisecond precision as e.g. "1725459730385" -> "1725459730.385"
@param timeStamp the timestamp of the log message.
@return the timestamp formatted as string with millisecond precision
| java · core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/GraylogExtendedLogFormatStructuredLogFormatter.java (line 121) | params: [timeStamp] | returns: WritableJson | type hints: true | complexity: 1 | quality: 6.32 | spring-projects/spring-boot (79,428 stars) | javadoc docstring | async: false |
mean
|
def mean(self, *args, update=None, update_times=None, **kwargs):
"""
Calculate an online exponentially weighted mean.
Parameters
----------
update: DataFrame or Series, default None
New values to continue calculating the
exponentially weighted mean from the last values and weights.
Values should be float64 dtype.
``update`` needs to be ``None`` the first time the
exponentially weighted mean is calculated.
update_times: Series or 1-D np.ndarray, default None
New times to continue calculating the
exponentially weighted mean from the last values and weights.
If ``None``, values are assumed to be evenly spaced
in time.
This feature is currently unsupported.
Returns
-------
DataFrame or Series
Examples
--------
>>> df = pd.DataFrame({"a": range(5), "b": range(5, 10)})
>>> online_ewm = df.head(2).ewm(0.5).online()
>>> online_ewm.mean()
a b
0 0.00 5.00
1 0.75 5.75
>>> online_ewm.mean(update=df.tail(3))
a b
2 1.615385 6.615385
3 2.550000 7.550000
4 3.520661 8.520661
>>> online_ewm.reset()
>>> online_ewm.mean()
a b
0 0.00 5.00
1 0.75 5.75
"""
result_kwargs = {}
is_frame = self._selected_obj.ndim == 2
if update_times is not None:
raise NotImplementedError("update_times is not implemented.")
update_deltas = np.ones(
max(self._selected_obj.shape[-1] - 1, 0), dtype=np.float64
)
if update is not None:
if self._mean.last_ewm is None:
raise ValueError(
"Must call mean with update=None first before passing update"
)
result_from = 1
result_kwargs["index"] = update.index
if is_frame:
last_value = self._mean.last_ewm[np.newaxis, :]
result_kwargs["columns"] = update.columns
else:
last_value = self._mean.last_ewm
result_kwargs["name"] = update.name
np_array = np.concatenate((last_value, update.to_numpy()))
else:
result_from = 0
result_kwargs["index"] = self._selected_obj.index
if is_frame:
result_kwargs["columns"] = self._selected_obj.columns
else:
result_kwargs["name"] = self._selected_obj.name
np_array = self._selected_obj.astype(np.float64).to_numpy()
ewma_func = generate_online_numba_ewma_func(
**get_jit_arguments(self.engine_kwargs)
)
result = self._mean.run_ewm(
np_array if is_frame else np_array[:, np.newaxis],
update_deltas,
self.min_periods,
ewma_func,
)
if not is_frame:
result = result.squeeze()
result = result[result_from:]
result = self._selected_obj._constructor(result, **result_kwargs)
return result
|
Calculate an online exponentially weighted mean.
Parameters
----------
update: DataFrame or Series, default None
New values to continue calculating the
exponentially weighted mean from the last values and weights.
Values should be float64 dtype.
``update`` needs to be ``None`` the first time the
exponentially weighted mean is calculated.
update_times: Series or 1-D np.ndarray, default None
New times to continue calculating the
exponentially weighted mean from the last values and weights.
If ``None``, values are assumed to be evenly spaced
in time.
This feature is currently unsupported.
Returns
-------
DataFrame or Series
Examples
--------
>>> df = pd.DataFrame({"a": range(5), "b": range(5, 10)})
>>> online_ewm = df.head(2).ewm(0.5).online()
>>> online_ewm.mean()
a b
0 0.00 5.00
1 0.75 5.75
>>> online_ewm.mean(update=df.tail(3))
a b
2 1.615385 6.615385
3 2.550000 7.550000
4 3.520661 8.520661
>>> online_ewm.reset()
>>> online_ewm.mean()
a b
0 0.00 5.00
1 0.75 5.75
| python · pandas/core/window/ewm.py (line 1,016) | params: [self, update, update_times] | type hints: false | complexity: 11 | quality: 7.6 | pandas-dev/pandas (47,362 stars) | numpy docstring | async: false |
withLogger
|
public SELF withLogger(Class<?> loggerSource) {
return withLogger(LogFactory.getLog(loggerSource));
}
|
Use the specified logger source to report any lambda failures.
@param loggerSource the logger source to use
@return this instance
| java · core/spring-boot/src/main/java/org/springframework/boot/util/LambdaSafe.java (line 129) | params: [loggerSource] | returns: SELF | type hints: true | complexity: 1 | quality: 6.64 | spring-projects/spring-boot (79,428 stars) | javadoc docstring | async: false |
isAopProxy
|
@Contract("null -> false")
public static boolean isAopProxy(@Nullable Object object) {
return (object instanceof SpringProxy && (Proxy.isProxyClass(object.getClass()) ||
object.getClass().getName().contains(ClassUtils.CGLIB_CLASS_SEPARATOR)));
}
|
Check whether the given object is a JDK dynamic proxy or a CGLIB proxy.
<p>This method additionally checks if the given object is an instance
of {@link SpringProxy}.
@param object the object to check
@see #isJdkDynamicProxy
@see #isCglibProxy
| java · spring-aop/src/main/java/org/springframework/aop/support/AopUtils.java (line 81) | params: [object] | type hints: true | complexity: 3 | quality: 6.56 | spring-projects/spring-framework (59,386 stars) | javadoc docstring | async: false |
stripToNull
|
public static String stripToNull(String str) {
if (str == null) {
return null;
}
str = strip(str, null);
return str.isEmpty() ? null : str; // NOSONARLINT str cannot be null here
}
|
Strips whitespace from the start and end of a String returning {@code null} if the String is empty ("") after the strip.
<p>
This is similar to {@link #trimToNull(String)} but removes whitespace. Whitespace is defined by {@link Character#isWhitespace(char)}.
</p>
<pre>
StringUtils.stripToNull(null) = null
StringUtils.stripToNull("") = null
StringUtils.stripToNull(" ") = null
StringUtils.stripToNull("abc") = "abc"
StringUtils.stripToNull(" abc") = "abc"
StringUtils.stripToNull("abc ") = "abc"
StringUtils.stripToNull(" abc ") = "abc"
StringUtils.stripToNull(" ab c ") = "ab c"
</pre>
@param str the String to be stripped, may be null.
@return the stripped String, {@code null} if whitespace, empty or null String input.
@since 2.0
| java · src/main/java/org/apache/commons/lang3/StringUtils.java (line 8,066) | params: [str] | returns: String | type hints: true | complexity: 3 | quality: 7.6 | apache/commons-lang (2,896 stars) | javadoc docstring | async: false |
size
|
@Override
public long size() {
if (mergingDigest != null) {
return mergingDigest.size();
}
return sortingDigest.size();
}
|
Similar to the constructor above. The limit for switching from a {@link SortingDigest} to a {@link MergingDigest} implementation
is calculated based on the passed compression factor.
@param compression The compression factor for the MergingDigest
| java · libs/tdigest/src/main/java/org/elasticsearch/tdigest/HybridDigest.java (line 150) | params: [] | type hints: true | complexity: 2 | quality: 6.24 | elastic/elasticsearch (75,680 stars) | javadoc docstring | async: false |
isSignedJarFile
|
public static boolean isSignedJarFile(@Nullable File file) throws IOException {
if (file == null) {
return false;
}
try (JarFile jarFile = new JarFile(file)) {
if (hasDigestEntry(jarFile.getManifest())) {
return true;
}
}
return false;
}
|
Returns {@code true} if the given jar file has been signed.
@param file the file to check
@return if the file has been signed
@throws IOException on IO error
| java · loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/FileUtils.java (line 70) | params: [file] | type hints: true | complexity: 3 | quality: 8.08 | spring-projects/spring-boot (79,428 stars) | javadoc docstring | async: false |
createConstEqualsRequireDeclaration
|
function createConstEqualsRequireDeclaration(name: string | ObjectBindingPattern, quotedModuleSpecifier: StringLiteral): RequireVariableStatement {
return factory.createVariableStatement(
/*modifiers*/ undefined,
factory.createVariableDeclarationList([
factory.createVariableDeclaration(
typeof name === "string" ? factory.createIdentifier(name) : name,
/*exclamationToken*/ undefined,
/*type*/ undefined,
factory.createCallExpression(factory.createIdentifier("require"), /*typeArguments*/ undefined, [quotedModuleSpecifier]),
),
], NodeFlags.Const),
) as RequireVariableStatement;
}
|
@param forceImportKeyword Indicates that the user has already typed `import`, so the result must start with `import`.
(In other words, do not allow `const x = require("...")` for JS files.)
@internal
| typescript · src/services/codefixes/importFixes.ts (line 2,116) | params: [name, quotedModuleSpecifier] | type hints: true | complexity: 2 | quality: 6.72 | microsoft/TypeScript (107,154 stars) | jsdoc docstring | async: false |
createTopics
|
CreateTopicsResult createTopics(Collection<NewTopic> newTopics, CreateTopicsOptions options);
|
Create a batch of new topics.
<p>
This operation is not transactional, so it may succeed for some topics while failing for others.
<p>
It may take several seconds after {@link CreateTopicsResult} returns
success for all the brokers to become aware that the topics have been created.
During this time, {@link #listTopics()} and {@link #describeTopics(Collection)}
may not return information about the new topics.
<p>
This operation is supported by brokers with version 0.10.1.0 or higher. The validateOnly option is supported
from version 0.10.2.0.
@param newTopics The new topics to create.
@param options The options to use when creating the new topics.
@return The CreateTopicsResult.
| java · clients/src/main/java/org/apache/kafka/clients/admin/Admin.java (line 201) | params: [newTopics, options] | returns: CreateTopicsResult | type hints: true | complexity: 1 | quality: 6.32 | apache/kafka (31,560 stars) | javadoc docstring | async: false |
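A call sketch for the batch-create contract above (assuming a `props` configured with bootstrap.servers; the topic name, partition count, and replication factor are illustrative). The validateOnly option mentioned in the javadoc is set through CreateTopicsOptions:

import java.util.List;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.CreateTopicsOptions;
import org.apache.kafka.clients.admin.NewTopic;

try (Admin admin = Admin.create(props)) {
    NewTopic topic = new NewTopic("events", 3, (short) 1); // name, partitions, replication factor
    admin.createTopics(List.of(topic), new CreateTopicsOptions().validateOnly(true))
         .all()
         .get(); // completes only if every topic in the batch passed validation
}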
median
|
@SafeVarargs
public static <T> T median(final Comparator<T> comparator, final T... items) {
Validate.notEmpty(items, "null/empty items");
Validate.noNullElements(items);
Objects.requireNonNull(comparator, "comparator");
final TreeSet<T> treeSet = new TreeSet<>(comparator);
Collections.addAll(treeSet, items);
return (T) treeSet.toArray()[(treeSet.size() - 1) / 2];
}
|
Finds the "best guess" middle value among comparables. If there is an even
number of total values, the lower of the two middle values will be returned.
@param <T> type of values processed by this method.
@param comparator to use for comparisons.
@param items to compare.
@return T at middle position.
@throws NullPointerException if items or comparator is {@code null}.
@throws IllegalArgumentException if items is empty or contains {@code null} values.
@since 3.0.1
| java · src/main/java/org/apache/commons/lang3/ObjectUtils.java (line 1,056) | params: [comparator] | returns: T | type hints: true | complexity: 1 | quality: 6.72 | apache/commons-lang (2,896 stars) | javadoc docstring | async: false |
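A sketch of the tie-breaking rule above (assuming commons-lang 3.x): with an even number of values, the lower of the two middle values wins.

import java.util.Comparator;
import org.apache.commons.lang3.ObjectUtils;

ObjectUtils.median(Comparator.<Integer>naturalOrder(), 5, 1, 3);    // 3: the middle of {1, 3, 5}
ObjectUtils.median(Comparator.<Integer>naturalOrder(), 4, 1, 3, 2); // 2: the lower of the two middles {2, 3}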
bound
|
private static double bound(double v) {
if (v <= 0) {
return 0;
} else if (v >= 1) {
return 1;
} else {
return v;
}
}
|
Approximates asin to within about 1e-6. This approximation works by breaking the range from 0 to 1 into 5 regions
for all but the region nearest 1, rational polynomial models get us a very good approximation of asin and by
interpolating as we move from region to region, we can guarantee continuity and we happen to get monotonicity as
well. for the values near 1, we just use Math.asin as our region "approximation".
@param x sin(theta)
@return theta
| java · libs/tdigest/src/main/java/org/elasticsearch/tdigest/ScaleFunction.java (line 664) | params: [v] | type hints: true | complexity: 3 | quality: 8.24 | elastic/elasticsearch (75,680 stars) | javadoc docstring | async: false |
toStringBase
|
@Override
public String toStringBase() {
return super.toStringBase() +
", timestampsToSearch=" + timestampsToSearch +
", requireTimestamps=" + requireTimestamps;
}
|
Build result representing that no offsets were found as part of the current event.
@return Map containing all the partitions the event was trying to get offsets for, and
null {@link OffsetAndTimestamp} as value
| java · clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ListOffsetsEvent.java (line 72) | params: [] | returns: String | type hints: true | complexity: 1 | quality: 6.56 | apache/kafka (31,560 stars) | javadoc docstring | async: false |
compare
|
@Override
public int compare(Advisor o1, Advisor o2) {
int advisorPrecedence = this.advisorComparator.compare(o1, o2);
if (advisorPrecedence == SAME_PRECEDENCE && declaredInSameAspect(o1, o2)) {
advisorPrecedence = comparePrecedenceWithinAspect(o1, o2);
}
return advisorPrecedence;
}
|
Create an {@code AspectJPrecedenceComparator}, using the given {@link Comparator}
for comparing {@link org.springframework.aop.Advisor} instances.
@param advisorComparator the {@code Comparator} to use for advisors
| java · spring-aop/src/main/java/org/springframework/aop/aspectj/autoproxy/AspectJPrecedenceComparator.java (line 81) | params: [o1, o2] | type hints: true | complexity: 3 | quality: 6 | spring-projects/spring-framework (59,386 stars) | javadoc docstring | async: false |
releaseWaiters
|
final void releaseWaiters() {
Waiter head = gasWaiters(Waiter.TOMBSTONE);
for (Waiter currentWaiter = head; currentWaiter != null; currentWaiter = currentWaiter.next) {
currentWaiter.unpark();
}
}
|
Releases all threads in the {@link #waitersField} list, and clears the list.
| java · android/guava/src/com/google/common/util/concurrent/AbstractFutureState.java (line 86) | params: [] | returns: void | type hints: true | complexity: 2 | quality: 6.56 | google/guava (51,352 stars) | javadoc docstring | async: false |
convertProperty
|
protected String convertProperty(String propertyName, String propertyValue) {
return convertPropertyValue(propertyValue);
}
|
Convert the given property from the properties source to the value
which should be applied.
<p>The default implementation calls {@link #convertPropertyValue(String)}.
@param propertyName the name of the property that the value is defined for
@param propertyValue the original value from the properties source
@return the converted value, to be used for processing
@see #convertPropertyValue(String)
| java · spring-beans/src/main/java/org/springframework/beans/factory/config/PropertyResourceConfigurer.java (line 122) | params: [propertyName, propertyValue] | returns: String | type hints: true | complexity: 1 | quality: 6 | spring-projects/spring-framework (59,386 stars) | javadoc docstring | async: false |
nullToEmpty
|
public static float[] nullToEmpty(final float[] array) {
return isEmpty(array) ? EMPTY_FLOAT_ARRAY : array;
}
|
Defensive programming technique to change a {@code null}
reference to an empty one.
<p>
This method returns an empty array for a {@code null} input array.
</p>
<p>
As a memory optimizing technique an empty array passed in will be overridden with
the empty {@code public static} references in this class.
</p>
@param array the array to check for {@code null} or empty.
@return the same array, {@code public static} empty array if {@code null} or empty input.
@since 2.5
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 4,451
|
[
"array"
] | true
| 2
| 8.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
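A quick usage sketch of the documented behavior (the demo class name is ours; the ArrayUtils call is the real commons-lang API):

import org.apache.commons.lang3.ArrayUtils;

public class NullToEmptyDemo {
    public static void main(String[] args) {
        float[] missing = null;
        float[] safe = ArrayUtils.nullToEmpty(missing);
        System.out.println(safe.length); // 0 - downstream code never sees null
    }
}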
copy
|
@J2ktIncompatible
@CanIgnoreReturnValue
public static long copy(ReadableByteChannel from, WritableByteChannel to) throws IOException {
checkNotNull(from);
checkNotNull(to);
if (from instanceof FileChannel) {
FileChannel sourceChannel = (FileChannel) from;
long oldPosition = sourceChannel.position();
long position = oldPosition;
long copied;
do {
copied = sourceChannel.transferTo(position, ZERO_COPY_CHUNK_SIZE, to);
position += copied;
sourceChannel.position(position);
} while (copied > 0 || position < sourceChannel.size());
return position - oldPosition;
}
ByteBuffer buf = ByteBuffer.wrap(createBuffer());
long total = 0;
while (from.read(buf) != -1) {
Java8Compatibility.flip(buf);
while (buf.hasRemaining()) {
total += to.write(buf);
}
Java8Compatibility.clear(buf);
}
return total;
}
|
Copies all bytes from the readable channel to the writable channel. Does not close or flush
either channel.
@param from the readable channel to read from
@param to the writable channel to write to
@return the number of bytes copied
@throws IOException if an I/O error occurs
|
java
|
android/guava/src/com/google/common/io/ByteStreams.java
| 134
|
[
"from",
"to"
] | true
| 5
| 8.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
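A hedged usage sketch for the channel-to-channel copy (the file paths are placeholders; note the documented contract that copy() neither closes nor flushes either channel, so try-with-resources does the closing):

import com.google.common.io.ByteStreams;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class ChannelCopyDemo {
    public static void main(String[] args) throws IOException {
        try (FileChannel in = FileChannel.open(Paths.get("in.bin"), StandardOpenOption.READ);
             FileChannel out = FileChannel.open(Paths.get("out.bin"),
                     StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
            long copied = ByteStreams.copy(in, out); // FileChannel source takes the transferTo fast path
            System.out.println("Copied " + copied + " bytes");
        }
    }
}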
isEmpty
|
@Override
public boolean isEmpty() {
return upperBoundWindow.equals(Range.all())
? rangesByLowerBound.isEmpty()
: !entryIterator().hasNext();
}
|
upperBoundWindow represents the headMap/subMap/tailMap view of the entire "ranges by upper
bound" map; it's a constraint on the *keys*, and does not affect the values.
|
java
|
android/guava/src/com/google/common/collect/TreeRangeSet.java
| 442
|
[] | true
| 2
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
equals
|
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (obj instanceof TextResourceOrigin other) {
boolean result = true;
result = result && ObjectUtils.nullSafeEquals(this.resource, other.resource);
result = result && ObjectUtils.nullSafeEquals(this.location, other.location);
return result;
}
return super.equals(obj);
}
|
Return the location of the property within the source (if known).
@return the location or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/origin/TextResourceOrigin.java
| 70
|
[
"obj"
] | true
| 6
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
cellSet
|
@Override
public Set<Cell<R, C, @Nullable V>> cellSet() {
return super.cellSet();
}
|
Returns an unmodifiable set of all row key / column key / value triplets. Changes to the table
will update the returned set.
<p>The returned set's iterator traverses the mappings with the first row key, the mappings with
the second row key, and so on.
<p>The value in the returned cells may change if the table subsequently changes.
@return set of table cells consisting of row key / column key / value triplets
|
java
|
android/guava/src/com/google/common/collect/ArrayTable.java
| 540
|
[] | true
| 1
| 6.96
|
google/guava
| 51,352
|
javadoc
| false
|
|
describeProducers
|
default DescribeProducersResult describeProducers(Collection<TopicPartition> partitions) {
return describeProducers(partitions, new DescribeProducersOptions());
}
|
Describe producer state on a set of topic partitions. See
{@link #describeProducers(Collection, DescribeProducersOptions)} for more details.
@param partitions The set of partitions to query
@return The result
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 1,675
|
[
"partitions"
] |
DescribeProducersResult
| true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
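A sketch of calling this default method through the Admin client (the broker address and topic name are assumptions for the example):

import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DescribeProducersResult;
import org.apache.kafka.common.TopicPartition;

public class DescribeProducersDemo {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker
        try (Admin admin = Admin.create(props)) {
            DescribeProducersResult result =
                    admin.describeProducers(List.of(new TopicPartition("orders", 0))); // assumed topic
            result.all().get().forEach((tp, state) ->
                    System.out.println(tp + " -> " + state.activeProducers()));
        }
    }
}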
createDirectory
|
private Path createDirectory(Path path) {
try {
if (!Files.exists(path)) {
Files.createDirectory(path, getFileAttributes(path.getFileSystem(), DIRECTORY_PERMISSIONS));
}
return path;
}
catch (IOException ex) {
throw new IllegalStateException("Unable to create application temp directory " + path, ex);
}
}
|
Return a subdirectory of the application temp.
@param subDir the subdirectory name
@return a subdirectory
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/system/ApplicationTemp.java
| 116
|
[
"path"
] |
Path
| true
| 3
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
fetchCheckInterval
|
protected abstract long fetchCheckInterval(EventCountCircuitBreaker breaker);
|
Obtains the check interval to be applied for the represented state from the given
{@link CircuitBreaker}.
@param breaker the {@link CircuitBreaker}
@return the check interval to be applied
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/EventCountCircuitBreaker.java
| 153
|
[
"breaker"
] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
builder
|
public static Builder builder() {
return new Builder();
}
|
Creates a new builder.
@return a new builder.
@since 3.18.0
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/BasicThreadFactory.java
| 243
|
[] |
Builder
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
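Given the @since 3.18.0 tag, a usage sketch of the static factory (the naming pattern and daemon flag are illustrative; older releases spell this new BasicThreadFactory.Builder()):

import org.apache.commons.lang3.concurrent.BasicThreadFactory;

public class ThreadFactoryDemo {
    public static void main(String[] args) throws InterruptedException {
        BasicThreadFactory factory = BasicThreadFactory.builder()
                .namingPattern("worker-%d")
                .daemon(true)
                .build();
        Thread t = factory.newThread(() -> System.out.println(Thread.currentThread().getName()));
        t.start();
        t.join(); // wait so the daemon thread gets to print, e.g. "worker-1"
    }
}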
watchPresentFileSystemEntry
|
function watchPresentFileSystemEntry(): FileWatcher {
if (hitSystemWatcherLimit) {
sysLog(`sysLog:: ${fileOrDirectory}:: Defaulting to watchFile`);
return watchPresentFileSystemEntryWithFsWatchFile();
}
try {
const presentWatcher = (entryKind === FileSystemEntryKind.Directory || !fsWatchWithTimestamp ? fsWatchWorker : fsWatchWorkerHandlingTimestamp)(
fileOrDirectory,
recursive,
inodeWatching ?
callbackChangingToMissingFileSystemEntry :
callback,
);
// Watch the missing file or directory or error
presentWatcher.on("error", () => {
callback("rename", "");
updateWatcher(watchMissingFileSystemEntry);
});
return presentWatcher;
}
catch (e) {
// Catch the exception and use polling instead
        // E.g. on Linux the number of watches is limited and one can easily exhaust it, in which case ENOSPC is thrown when creating the watcher,
        // so instead of throwing an error, use fs.watchFile
hitSystemWatcherLimit ||= e.code === "ENOSPC";
sysLog(`sysLog:: ${fileOrDirectory}:: Changing to watchFile`);
return watchPresentFileSystemEntryWithFsWatchFile();
}
}
|
Watch the file or directory that is currently present
and when the watched file or directory is deleted, switch to missing file system entry watcher
|
typescript
|
src/compiler/sys.ts
| 1,256
|
[] | true
| 6
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
getAspectJImports
|
private String[] getAspectJImports() {
List<String> result = new ArrayList<>(2);
result.add(CACHE_ASPECT_CONFIGURATION_CLASS_NAME);
if (JSR_107_PRESENT && JCACHE_IMPL_PRESENT) {
result.add(JCACHE_ASPECT_CONFIGURATION_CLASS_NAME);
}
return StringUtils.toStringArray(result);
}
|
Return the imports to use if the {@link AdviceMode} is set to {@link AdviceMode#ASPECTJ}.
<p>Take care of adding the necessary JSR-107 import if it is available.
|
java
|
spring-context/src/main/java/org/springframework/cache/annotation/CachingConfigurationSelector.java
| 95
|
[] | true
| 3
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
transitionToFatal
|
public void transitionToFatal() {
MemberState previousState = state;
transitionTo(MemberState.FATAL);
log.error("Member {} with epoch {} transitioned to fatal state", memberId, memberEpoch);
notifyEpochChange(Optional.empty());
if (previousState == MemberState.UNSUBSCRIBED && maybeCompleteLeaveInProgress()) {
log.debug("Member {} with epoch {} got fatal error from the broker but it already " +
"left the group, so onPartitionsLost callback won't be triggered.", memberId, memberEpoch);
return;
}
if (previousState == MemberState.LEAVING || previousState == MemberState.PREPARE_LEAVING) {
log.info("Member {} with epoch {} was leaving the group with state {} when it got a " +
"fatal error from the broker. It will discard the ongoing leave and remain in " +
"fatal state.", memberId, memberEpoch, previousState);
maybeCompleteLeaveInProgress();
return;
}
// Release assignment
CompletableFuture<Void> callbackResult = signalPartitionsLost(subscriptions.assignedPartitions());
callbackResult.whenComplete((result, error) -> {
if (error != null) {
log.error("onPartitionsLost callback invocation failed while releasing assignment" +
"after member failed with fatal error.", error);
}
clearAssignment();
});
}
|
Transition the member to the FATAL state and update the member info as required. This is
invoked when un-recoverable errors occur (ex. when the heartbeat returns a non-retriable
error)
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
| 442
|
[] |
void
| true
| 6
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
getVersionString
|
function getVersionString(packageVersion = null) {
if (packageVersion == null) {
packageVersion = JSON.parse(
readFileSync(
resolve(__dirname, '..', 'react-devtools-core', './package.json'),
),
).version;
}
const commit = getGitCommit();
return `${packageVersion}-${commit}`;
}
|
Copyright (c) Meta Platforms, Inc. and affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
|
javascript
|
packages/react-devtools-extensions/utils.js
| 44
|
[] | false
| 2
| 6.24
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
run
|
@Override
public void run() {
log.debug("Thread starting");
try {
processRequests();
} finally {
closing = true;
AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId, metrics);
int numTimedOut = 0;
TimeoutProcessor timeoutProcessor = new TimeoutProcessor(Long.MAX_VALUE);
synchronized (this) {
numTimedOut += timeoutProcessor.handleTimeouts(newCalls, "The AdminClient thread has exited.");
}
numTimedOut += timeoutProcessor.handleTimeouts(pendingCalls, "The AdminClient thread has exited.");
numTimedOut += timeoutCallsToSend(timeoutProcessor);
numTimedOut += timeoutProcessor.handleTimeouts(correlationIdToCalls.values(),
"The AdminClient thread has exited.");
if (numTimedOut > 0) {
log.info("Timed out {} remaining operation(s) during close.", numTimedOut);
}
closeQuietly(client, "KafkaClient");
closeQuietly(metrics, "Metrics");
log.debug("Exiting AdminClientRunnable thread.");
}
}
|
Return true if there are currently active external calls.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
| 1,455
|
[] |
void
| true
| 2
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
getInnerRegisteredBean
|
private @Nullable RegisteredBean getInnerRegisteredBean(Object value) {
if (value instanceof BeanDefinitionHolder beanDefinitionHolder) {
return RegisteredBean.ofInnerBean(this.registeredBean, beanDefinitionHolder);
}
if (value instanceof BeanDefinition beanDefinition) {
return RegisteredBean.ofInnerBean(this.registeredBean, beanDefinition);
}
return null;
}
|
Extract the target class of a public {@link FactoryBean} based on its
constructor. If the implementation does not resolve the target class
because it itself uses a generic, attempt to extract it from the bean type.
@param factoryBeanType the factory bean type
@param beanType the bean type
@return the target class to use
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/DefaultBeanRegistrationCodeFragments.java
| 193
|
[
"value"
] |
RegisteredBean
| true
| 3
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
close
|
@Override
public void close() {
if (initialized.get()) {
nextScheduledTask.skip();
}
this.scheduler.shutdown();
}
|
Schedule sniffing to run as soon as possible if it isn't already running. Once such sniffing round runs
it will also schedule a new round after sniffAfterFailureDelay ms.
|
java
|
client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java
| 220
|
[] |
void
| true
| 2
| 6.56
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
setupNavigator
|
function setupNavigator() {
if (getEmbedderOptions().noBrowserGlobals ||
getOptionValue('--no-experimental-global-navigator')) {
return;
}
// https://html.spec.whatwg.org/multipage/system-state.html#the-navigator-object
exposeLazyInterfaces(globalThis, 'internal/navigator', ['Navigator']);
defineReplaceableLazyAttribute(globalThis, 'internal/navigator', ['navigator'], false);
}
|
Patch the process object with legacy properties and normalizations.
Replace `process.argv[0]` with `process.execPath`, preserving the original `argv[0]` value as `process.argv0`.
Replace `process.argv[1]` with the resolved absolute file path of the entry point, if found.
@param {boolean} expandArgv1 - Whether to replace `process.argv[1]` with the resolved absolute file path of
the main entry point.
@returns {string}
|
javascript
|
lib/internal/process/pre_execution.js
| 363
|
[] | false
| 3
| 6.96
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
left
|
def left(self) -> Index:
"""
Return the left endpoints of each Interval in the IntervalArray as an Index.
This property provides access to the left endpoints of the intervals
contained within the IntervalArray. This can be useful for analyses where
the starting point of each interval is of interest, such as in histogram
creation, data aggregation, or any scenario requiring the identification
of the beginning of defined ranges. This property returns a ``pandas.Index``
        object containing the left endpoint for each interval.
See Also
--------
arrays.IntervalArray.right : Return the right endpoints of each Interval in
the IntervalArray as an Index.
arrays.IntervalArray.mid : Return the midpoint of each Interval in the
IntervalArray as an Index.
arrays.IntervalArray.contains : Check elementwise if the Intervals contain
the value.
Examples
--------
>>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(2, 5)])
>>> interv_arr
<IntervalArray>
[(0, 1], (2, 5]]
Length: 2, dtype: interval[int64, right]
>>> interv_arr.left
Index([0, 2], dtype='int64')
"""
from pandas import Index
return Index(self._left, copy=False)
|
Return the left endpoints of each Interval in the IntervalArray as an Index.
This property provides access to the left endpoints of the intervals
contained within the IntervalArray. This can be useful for analyses where
the starting point of each interval is of interest, such as in histogram
creation, data aggregation, or any scenario requiring the identification
of the beginning of defined ranges. This property returns a ``pandas.Index``
object containing the left endpoint for each interval.
See Also
--------
arrays.IntervalArray.right : Return the right endpoints of each Interval in
the IntervalArray as an Index.
arrays.IntervalArray.mid : Return the midpoint of each Interval in the
IntervalArray as an Index.
arrays.IntervalArray.contains : Check elementwise if the Intervals contain
the value.
Examples
--------
>>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(2, 5)])
>>> interv_arr
<IntervalArray>
[(0, 1], (2, 5]]
Length: 2, dtype: interval[int64, right]
>>> interv_arr.left
Index([0, 2], dtype='int64')
|
python
|
pandas/core/arrays/interval.py
| 1,337
|
[
"self"
] |
Index
| true
| 1
| 6.64
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
notifyClose
|
public void notifyClose() {
if (log.isDebugEnabled()) {
log.debug("Set the metadata for next fetch request to close the existing session ID={}", nextMetadata.sessionId());
}
nextMetadata = nextMetadata.nextCloseExisting();
}
|
The client will initiate the session close on next fetch request.
|
java
|
clients/src/main/java/org/apache/kafka/clients/FetchSessionHandler.java
| 602
|
[] |
void
| true
| 2
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
__init__
|
def __init__(
self, error_type: UserErrorType, msg: str, case_name: Optional[str] = None
) -> None:
"""
Type of errors that would be valid in Eager, but not supported in TorchDynamo.
The error message should tell user about next actions.
error_type: Type of user error
msg: Actionable error message
case_name: (Optional) Unique name (snake case) for the usage example in exportdb.
"""
if case_name is not None:
assert isinstance(case_name, str)
if msg.endswith("."):
msg += " "
else:
msg += "\n"
msg += exportdb_error_message(case_name)
super().__init__(msg)
self.error_type = error_type
self.message = msg
|
Type of errors that would be valid in Eager, but not supported in TorchDynamo.
The error message should tell user about next actions.
error_type: Type of user error
msg: Actionable error message
case_name: (Optional) Unique name (snake case) for the usage example in exportdb.
|
python
|
torch/_dynamo/exc.py
| 235
|
[
"self",
"error_type",
"msg",
"case_name"
] |
None
| true
| 4
| 6.72
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
chebadd
|
def chebadd(c1, c2):
"""
Add one Chebyshev series to another.
Returns the sum of two Chebyshev series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Chebyshev series of their sum.
See Also
--------
chebsub, chebmulx, chebmul, chebdiv, chebpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Chebyshev series
is a Chebyshev series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebadd(c1,c2)
array([4., 4., 4.])
"""
return pu._add(c1, c2)
|
Add one Chebyshev series to another.
Returns the sum of two Chebyshev series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Chebyshev series of their sum.
See Also
--------
chebsub, chebmulx, chebmul, chebdiv, chebpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Chebyshev series
is a Chebyshev series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebadd(c1,c2)
array([4., 4., 4.])
|
python
|
numpy/polynomial/chebyshev.py
| 567
|
[
"c1",
"c2"
] | false
| 1
| 6.32
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
dump
|
def dump(obj: t.Any, fp: t.IO[str], **kwargs: t.Any) -> None:
"""Serialize data as JSON and write to a file.
If :data:`~flask.current_app` is available, it will use its
:meth:`app.json.dump() <flask.json.provider.JSONProvider.dump>`
method, otherwise it will use :func:`json.dump`.
:param obj: The data to serialize.
:param fp: A file opened for writing text. Should use the UTF-8
encoding to be valid JSON.
:param kwargs: Arguments passed to the ``dump`` implementation.
.. versionchanged:: 2.3
The ``app`` parameter was removed.
.. versionchanged:: 2.2
Calls ``current_app.json.dump``, allowing an app to override
the behavior.
.. versionchanged:: 2.0
Writing to a binary file, and the ``encoding`` argument, will be
removed in Flask 2.1.
"""
if current_app:
current_app.json.dump(obj, fp, **kwargs)
else:
kwargs.setdefault("default", _default)
_json.dump(obj, fp, **kwargs)
|
Serialize data as JSON and write to a file.
If :data:`~flask.current_app` is available, it will use its
:meth:`app.json.dump() <flask.json.provider.JSONProvider.dump>`
method, otherwise it will use :func:`json.dump`.
:param obj: The data to serialize.
:param fp: A file opened for writing text. Should use the UTF-8
encoding to be valid JSON.
:param kwargs: Arguments passed to the ``dump`` implementation.
.. versionchanged:: 2.3
The ``app`` parameter was removed.
.. versionchanged:: 2.2
Calls ``current_app.json.dump``, allowing an app to override
the behavior.
.. versionchanged:: 2.0
Writing to a binary file, and the ``encoding`` argument, will be
removed in Flask 2.1.
|
python
|
src/flask/json/__init__.py
| 47
|
[
"obj",
"fp"
] |
None
| true
| 3
| 6.4
|
pallets/flask
| 70,946
|
sphinx
| false
|
applyEmptySelectionErrorSelect
|
function applyEmptySelectionErrorSelect(error: EmptySelectionError, argsTree: ArgumentsRenderingTree) {
const outputType = error.outputType
const selection = argsTree.arguments.getDeepSelectionParent(error.selectionPath)?.value
const isEmpty = selection?.isEmpty() ?? false
if (selection) {
// If selection has fields and we get EmptySelection error, it means all fields within the
// selection are false. We have 2 possible ways to handle suggestions here:
//
// 1. Suggest only the fields, not present inside of the selection. Example:
//
// {
// select: {
// id: false
// posts: false,
// ? name?: true
// ? email?: true
// }
// }
    // There are a couple of possible problems here. First, we are assuming that the user needs to
    // add a new field to the selection, rather than change one of the existing ones to true.
    // Second, we might end up in a situation where all fields are already used in the selection and we have nothing left to suggest.
//
    // 2. Completely ignore the user's input and suggest all the fields. Example rendering:
// {
// select: {
// ? id?: true
// ? posts?: true,
// ? name?: true
// ? email?: true
// }
// }
//
// So we will be suggesting to either change one of the fields to true, or add a new one which would be true.
    // This is the approach we are taking, and in order to do it we need to remove all the fields from the selection.
    // The code below will then add them back as a suggestion.
selection.removeAllFields()
addSelectionSuggestions(selection, outputType)
}
argsTree.addErrorMessage((colors) => {
if (isEmpty) {
return `The ${colors.red('`select`')} statement for type ${colors.bold(
outputType.name,
)} must not be empty. ${availableOptionsMessage(colors)}`
}
return `The ${colors.red('`select`')} statement for type ${colors.bold(outputType.name)} needs ${colors.bold(
'at least one truthy value',
)}.`
})
}
|
Given the validation error and arguments rendering tree, applies corresponding
formatting to an error tree and adds all relevant messages.
@param error
@param args
|
typescript
|
packages/client/src/runtime/core/errorRendering/applyValidationError.ts
| 186
|
[
"error",
"argsTree"
] | false
| 3
| 6.08
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
processCodePathToEnter
|
function processCodePathToEnter(analyzer, node) {
let codePath = analyzer.codePath;
let state = codePath && CodePath.getState(codePath);
const parent = node.parent;
/**
* Creates a new code path and trigger the onCodePathStart event
* based on the currently selected node.
* @param {string} origin The reason the code path was started.
* @returns {void}
*/
function startCodePath(origin) {
if (codePath) {
// Emits onCodePathSegmentStart events if updated.
forwardCurrentToHead(analyzer, node);
}
// Create the code path of this scope.
codePath = analyzer.codePath = new CodePath({
id: analyzer.idGenerator.next(),
origin,
upper: codePath,
onLooped: analyzer.onLooped,
});
state = CodePath.getState(codePath);
// Emits onCodePathStart events.
analyzer.emitter.emit('onCodePathStart', codePath, node);
}
/*
* Special case: The right side of class field initializer is considered
* to be its own function, so we need to start a new code path in this
* case.
*/
if (isPropertyDefinitionValue(node)) {
startCodePath('class-field-initializer');
/*
* Intentional fall through because `node` needs to also be
* processed by the code below. For example, if we have:
*
* class Foo {
* a = () => {}
* }
*
* In this case, we also need start a second code path.
*/
}
switch (node.type) {
case 'Program':
startCodePath('program');
break;
case 'FunctionDeclaration':
case 'ComponentDeclaration':
case 'HookDeclaration':
case 'FunctionExpression':
case 'ArrowFunctionExpression':
startCodePath('function');
break;
case 'StaticBlock':
startCodePath('class-static-block');
break;
case 'ChainExpression':
state.pushChainContext();
break;
case 'CallExpression':
if (node.optional === true) {
state.makeOptionalNode();
}
break;
case 'MemberExpression':
if (node.optional === true) {
state.makeOptionalNode();
}
break;
case 'LogicalExpression':
if (isHandledLogicalOperator(node.operator)) {
state.pushChoiceContext(node.operator, isForkingByTrueOrFalse(node));
}
break;
case 'AssignmentExpression':
if (isLogicalAssignmentOperator(node.operator)) {
state.pushChoiceContext(
node.operator.slice(0, -1), // removes `=` from the end
isForkingByTrueOrFalse(node),
);
}
break;
case 'ConditionalExpression':
case 'IfStatement':
state.pushChoiceContext('test', false);
break;
case 'SwitchStatement':
state.pushSwitchContext(node.cases.some(isCaseNode), getLabel(node));
break;
case 'TryStatement':
state.pushTryContext(Boolean(node.finalizer));
break;
case 'SwitchCase':
/*
       * Fork if this node is after the 1st node in `cases`.
* It's similar to `else` blocks.
* The next `test` node is processed in this path.
*/
if (parent.discriminant !== node && parent.cases[0] !== node) {
state.forkPath();
}
break;
case 'WhileStatement':
case 'DoWhileStatement':
case 'ForStatement':
case 'ForInStatement':
case 'ForOfStatement':
state.pushLoopContext(node.type, getLabel(node));
break;
case 'LabeledStatement':
if (!breakableTypePattern.test(node.body.type)) {
state.pushBreakContext(false, node.label.name);
}
break;
default:
break;
}
// Emits onCodePathSegmentStart events if updated.
forwardCurrentToHead(analyzer, node);
}
|
Updates the code path due to the type of a given node in entering.
@param {CodePathAnalyzer} analyzer The instance.
@param {ASTNode} node The current AST node.
@returns {void}
|
javascript
|
packages/eslint-plugin-react-hooks/src/code-path-analysis/code-path-analyzer.js
| 380
|
[
"analyzer",
"node"
] | false
| 11
| 6
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
toArray
|
public static <T> Collector<T, List<T>, T[]> toArray(final Class<T> elementType) {
return new ArrayCollector<>(elementType);
}
|
Returns a {@link Collector} that accumulates the input elements into a new array.
@param <T> the type of the input elements
@param elementType Type of an element in the array.
@return a {@link Collector} which collects all the input elements into an array, in encounter order
|
java
|
src/main/java/org/apache/commons/lang3/stream/Streams.java
| 841
|
[
"elementType"
] | true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
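A small usage sketch of collecting a stream straight into a typed array (demo values are ours):

import java.util.Arrays;
import java.util.stream.Stream;
import org.apache.commons.lang3.stream.Streams;

public class ToArrayDemo {
    public static void main(String[] args) {
        String[] names = Stream.of("ada", "grace", "alan")
                .collect(Streams.toArray(String.class));
        System.out.println(Arrays.toString(names)); // [ada, grace, alan], in encounter order
    }
}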
dates
|
def dates(
self, start_date, end_date, return_name: bool = False
) -> Series | DatetimeIndex:
"""
Calculate holidays observed between start date and end date
Parameters
----------
start_date : starting date, datetime-like, optional
end_date : ending date, datetime-like, optional
return_name : bool, optional, default=False
If True, return a series that has dates and holiday names.
False will only return dates.
Returns
-------
Series or DatetimeIndex
Series if return_name is True
"""
start_date = Timestamp(start_date)
end_date = Timestamp(end_date)
filter_start_date = start_date
filter_end_date = end_date
if self.year is not None:
dt = Timestamp(datetime(self.year, self.month, self.day))
dti = DatetimeIndex([dt])
if return_name:
return Series(self.name, index=dti)
else:
return dti
dates = self._reference_dates(start_date, end_date)
holiday_dates = self._apply_rule(dates)
if self.days_of_week is not None:
holiday_dates = holiday_dates[
np.isin(
# error: "DatetimeIndex" has no attribute "dayofweek"
holiday_dates.dayofweek, # type: ignore[attr-defined]
self.days_of_week,
).ravel()
]
if self.start_date is not None:
filter_start_date = max(
self.start_date.tz_localize(filter_start_date.tz), filter_start_date
)
if self.end_date is not None:
filter_end_date = min(
self.end_date.tz_localize(filter_end_date.tz), filter_end_date
)
holiday_dates = holiday_dates[
(holiday_dates >= filter_start_date) & (holiday_dates <= filter_end_date)
]
if self.exclude_dates is not None:
holiday_dates = holiday_dates.difference(self.exclude_dates)
if return_name:
return Series(self.name, index=holiday_dates)
return holiday_dates
|
Calculate holidays observed between start date and end date
Parameters
----------
start_date : starting date, datetime-like, optional
end_date : ending date, datetime-like, optional
return_name : bool, optional, default=False
If True, return a series that has dates and holiday names.
False will only return dates.
Returns
-------
Series or DatetimeIndex
Series if return_name is True
|
python
|
pandas/tseries/holiday.py
| 299
|
[
"self",
"start_date",
"end_date",
"return_name"
] |
Series | DatetimeIndex
| true
| 9
| 6.16
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
loadBundle
|
protected ResourceBundle loadBundle(InputStream inputStream) throws IOException {
return new PropertyResourceBundle(inputStream);
}
|
Load a property-based resource bundle from the given input stream,
picking up the default properties encoding on JDK 9+.
<p>This will only be called with {@link #setDefaultEncoding "defaultEncoding"}
set to {@code null}, explicitly enforcing the platform default encoding
(which is UTF-8 with an ISO-8859-1 fallback on JDK 9+ but configurable
through the "java.util.PropertyResourceBundle.encoding" system property).
Note that this method can only be called with a {@code ResourceBundle.Control}:
When running on the JDK 9+ module path where such control handles are not
supported, any overrides in custom subclasses will effectively get ignored.
<p>The default implementation returns a {@link PropertyResourceBundle}.
@param inputStream the input stream for the target resource
@return the fully loaded bundle
@throws IOException in case of I/O failure
@since 5.1
@see #loadBundle(Reader)
@see PropertyResourceBundle#PropertyResourceBundle(InputStream)
|
java
|
spring-context/src/main/java/org/springframework/context/support/ResourceBundleMessageSource.java
| 294
|
[
"inputStream"
] |
ResourceBundle
| true
| 1
| 6.16
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
get_backend_context
|
def get_backend_context(backend: str):
"""
Returns a context manager for the specified backend.
Args:
backend (str): The name of the backend to use.
Valid options are 'math', 'efficient', 'cudnn', 'fav2', 'fav3', 'fakv', 'og-eager'.
Returns:
A context manager for the specified backend.
Raises:
ValueError: If an invalid backend is specified.
"""
backends = {
"fav2": sdpa_kernel(SDPBackend.FLASH_ATTENTION),
"cudnn": sdpa_kernel(SDPBackend.CUDNN_ATTENTION),
"math": sdpa_kernel(SDPBackend.MATH),
"efficient": sdpa_kernel(SDPBackend.EFFICIENT_ATTENTION),
"fav3": nullcontext(),
"fakv": nullcontext(),
"og-eager": nullcontext(),
}
if backend not in backends:
raise ValueError(
f"Unknown backend: {backend}. Valid options are: {', '.join(backends.keys())}"
)
return backends[backend]
|
Returns a context manager for the specified backend.
Args:
backend (str): The name of the backend to use.
Valid options are 'math', 'efficient', 'cudnn', 'fav2', 'fav3', 'fakv', 'og-eager'.
Returns:
A context manager for the specified backend.
Raises:
ValueError: If an invalid backend is specified.
|
python
|
benchmarks/transformer/score_mod.py
| 897
|
[
"backend"
] | true
| 2
| 7.92
|
pytorch/pytorch
| 96,034
|
google
| false
|
|
threadNamePrefix
|
public ThreadPoolTaskExecutorBuilder threadNamePrefix(@Nullable String threadNamePrefix) {
return new ThreadPoolTaskExecutorBuilder(this.queueCapacity, this.corePoolSize, this.maxPoolSize,
this.allowCoreThreadTimeOut, this.keepAlive, this.acceptTasksAfterContextClose, this.awaitTermination,
this.awaitTerminationPeriod, threadNamePrefix, this.taskDecorator, this.customizers);
}
|
Set the prefix to use for the names of newly created threads.
@param threadNamePrefix the thread name prefix to set
@return a new builder instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/task/ThreadPoolTaskExecutorBuilder.java
| 217
|
[
"threadNamePrefix"
] |
ThreadPoolTaskExecutorBuilder
| true
| 1
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
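A sketch showing the builder's copy-on-write style: each call, including threadNamePrefix, returns a fresh builder rather than mutating this one (pool sizes are illustrative):

import org.springframework.boot.task.ThreadPoolTaskExecutorBuilder;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;

public class ExecutorBuilderDemo {
    public static void main(String[] args) {
        ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutorBuilder()
                .corePoolSize(2)
                .threadNamePrefix("jobs-") // returns a new builder instance
                .build();
        executor.initialize();
        executor.execute(() -> System.out.println(Thread.currentThread().getName())); // e.g. jobs-1
        executor.shutdown();
    }
}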
wait_for_task_execution
|
def wait_for_task_execution(self, task_execution_arn: str, max_iterations: int = 60) -> bool:
"""
Wait for Task Execution status to be complete (SUCCESS/ERROR).
The ``task_execution_arn`` must exist, or a boto3 ClientError will be raised.
:param task_execution_arn: TaskExecutionArn
:param max_iterations: Maximum number of iterations before timing out.
:return: Result of task execution.
:raises AirflowTaskTimeout: If maximum iterations is exceeded.
:raises AirflowBadRequest: If ``task_execution_arn`` is empty.
"""
if not task_execution_arn:
raise AirflowBadRequest("task_execution_arn not specified")
for _ in range(max_iterations):
task_execution = self.get_conn().describe_task_execution(TaskExecutionArn=task_execution_arn)
status = task_execution["Status"]
self.log.info("status=%s", status)
if status in self.TASK_EXECUTION_SUCCESS_STATES:
return True
if status in self.TASK_EXECUTION_FAILURE_STATES:
return False
if status is None or status in self.TASK_EXECUTION_INTERMEDIATE_STATES:
time.sleep(self.wait_interval_seconds)
else:
raise AirflowException(f"Unknown status: {status}") # Should never happen
time.sleep(self.wait_interval_seconds)
raise AirflowTaskTimeout("Max iterations exceeded!")
|
Wait for Task Execution status to be complete (SUCCESS/ERROR).
The ``task_execution_arn`` must exist, or a boto3 ClientError will be raised.
:param task_execution_arn: TaskExecutionArn
:param max_iterations: Maximum number of iterations before timing out.
:return: Result of task execution.
:raises AirflowTaskTimeout: If maximum iterations is exceeded.
:raises AirflowBadRequest: If ``task_execution_arn`` is empty.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/datasync.py
| 295
|
[
"self",
"task_execution_arn",
"max_iterations"
] |
bool
| true
| 8
| 7.44
|
apache/airflow
| 43,597
|
sphinx
| false
|
render_dag_dependencies
|
def render_dag_dependencies(deps: dict[str, list[DagDependency]]) -> graphviz.Digraph:
"""
Render the DAG dependency to the DOT object.
:param deps: List of DAG dependencies
:return: Graphviz object
"""
if not graphviz:
raise AirflowException(
"Could not import graphviz. Install the graphviz python package to fix this error."
)
dot = graphviz.Digraph(graph_attr={"rankdir": "LR"})
for dag, dependencies in deps.items():
for dep in dependencies:
with dot.subgraph(
name=dag,
graph_attr={
"rankdir": "LR",
"labelloc": "t",
"label": dag,
},
) as dep_subgraph:
leaf_nodes = ("asset", "asset-name-ref", "asset-uri-ref", "asset-alias")
if dep.source not in leaf_nodes:
dep_subgraph.edge(dep.source, dep.dependency_id)
if dep.target not in leaf_nodes:
dep_subgraph.edge(dep.dependency_id, dep.target)
return dot
|
Render the DAG dependency to the DOT object.
:param deps: List of DAG dependencies
:return: Graphviz object
|
python
|
airflow-core/src/airflow/utils/dot_renderer.py
| 164
|
[
"deps"
] |
graphviz.Digraph
| true
| 6
| 8.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
byAnnotation
|
static MethodValidationExcludeFilter byAnnotation(Class<? extends Annotation> annotationType,
SearchStrategy searchStrategy) {
return (type) -> MergedAnnotations.from(type, searchStrategy).isPresent(annotationType);
}
|
Factory method to create a {@link MethodValidationExcludeFilter} that excludes
classes by annotation found using the given search strategy.
@param annotationType the annotation to check
@param searchStrategy the annotation search strategy
@return a {@link MethodValidationExcludeFilter} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/validation/beanvalidation/MethodValidationExcludeFilter.java
| 59
|
[
"annotationType",
"searchStrategy"
] |
MethodValidationExcludeFilter
| true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
readStaticField
|
public static Object readStaticField(final Class<?> cls, final String fieldName) throws IllegalAccessException {
return readStaticField(cls, fieldName, false);
}
|
Reads the named {@code public static} {@link Field}. Superclasses will be considered.
@param cls
the {@link Class} to reflect, must not be {@code null}.
@param fieldName
the field name to obtain.
@return the value of the field.
@throws NullPointerException
if the class is {@code null}, or the field could not be found.
@throws IllegalArgumentException
if the field name is {@code null}, blank or empty, or is not {@code static}.
@throws IllegalAccessException
if the field is not accessible.
@throws SecurityException if an underlying accessible object's method denies the request.
@see SecurityManager#checkPermission
|
java
|
src/main/java/org/apache/commons/lang3/reflect/FieldUtils.java
| 473
|
[
"cls",
"fieldName"
] |
Object
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
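A one-liner usage sketch against a well-known public static field (the JDK class is chosen just for the example):

import org.apache.commons.lang3.reflect.FieldUtils;

public class ReadStaticFieldDemo {
    public static void main(String[] args) throws IllegalAccessException {
        Object max = FieldUtils.readStaticField(Integer.class, "MAX_VALUE");
        System.out.println(max); // 2147483647
    }
}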
toInteger
|
function toInteger(value) {
var result = toFinite(value),
remainder = result % 1;
return result === result ? (remainder ? result - remainder : result) : 0;
}
|
Converts `value` to an integer.
**Note:** This method is loosely based on
[`ToInteger`](http://www.ecma-international.org/ecma-262/7.0/#sec-tointeger).
@static
@memberOf _
@since 4.0.0
@category Lang
@param {*} value The value to convert.
@returns {number} Returns the converted integer.
@example
_.toInteger(3.2);
// => 3
_.toInteger(Number.MIN_VALUE);
// => 0
_.toInteger(Infinity);
// => 1.7976931348623157e+308
_.toInteger('3.2');
// => 3
|
javascript
|
lodash.js
| 12,503
|
[
"value"
] | false
| 3
| 7.04
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
next
|
public String next(final int count) {
return next(count, false, false);
}
|
Creates a random string whose length is the number of characters specified.
<p>
Characters will be chosen from the set of all characters.
</p>
@param count the length of random string to create.
@return the random string.
@throws IllegalArgumentException if {@code count} < 0.
@since 3.16.0
|
java
|
src/main/java/org/apache/commons/lang3/RandomStringUtils.java
| 691
|
[
"count"
] |
String
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
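Per the @since 3.16.0 tag, a usage sketch via the secure() factory (the length is arbitrary; output may contain unprintable characters because the set of all characters is used):

import org.apache.commons.lang3.RandomStringUtils;

public class RandomStringDemo {
    public static void main(String[] args) {
        String s = RandomStringUtils.secure().next(8);
        System.out.println(s.length()); // 8
    }
}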
terminate_instances
|
def terminate_instances(self, instance_ids: list) -> dict:
"""
Terminate instances with given ids.
:param instance_ids: List of instance ids to terminate
:return: Dict with key `TerminatingInstances` and value as list of instances being terminated
"""
self.log.info("Terminating instances: %s", instance_ids)
return self.conn.terminate_instances(InstanceIds=instance_ids)
|
Terminate instances with given ids.
:param instance_ids: List of instance ids to terminate
:return: Dict with key `TerminatingInstances` and value as list of instances being terminated
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/ec2.py
| 122
|
[
"self",
"instance_ids"
] |
dict
| true
| 1
| 6.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
wrapInstances
|
public static <T> List<Plugin<T>> wrapInstances(List<T> instances, Metrics metrics, String key) {
List<Plugin<T>> plugins = new ArrayList<>();
for (T instance : instances) {
plugins.add(wrapInstance(instance, metrics, key));
}
return plugins;
}
|
Wrap a list of instances into Plugins.
@param instances the instances to wrap
@param metrics the metrics
@param key the value for the <code>config</code> tag
@return the list of plugins
|
java
|
clients/src/main/java/org/apache/kafka/common/internals/Plugin.java
| 109
|
[
"instances",
"metrics",
"key"
] | true
| 1
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
subarray
|
public static boolean[] subarray(final boolean[] array, int startIndexInclusive, int endIndexExclusive) {
if (array == null) {
return null;
}
startIndexInclusive = max0(startIndexInclusive);
endIndexExclusive = Math.min(endIndexExclusive, array.length);
final int newSize = endIndexExclusive - startIndexInclusive;
if (newSize <= 0) {
return EMPTY_BOOLEAN_ARRAY;
}
return arraycopy(array, startIndexInclusive, 0, newSize, boolean[]::new);
}
|
Produces a new {@code boolean} array containing the elements between the start and end indices.
<p>
The start index is inclusive, the end index exclusive. Null array input produces null output.
</p>
@param array the input array.
@param startIndexInclusive the starting index. Undervalue (<0) is promoted to 0, overvalue (>array.length) results in an empty array.
@param endIndexExclusive elements up to endIndex-1 are present in the returned subarray. Undervalue (< startIndex) produces empty array, overvalue
(>array.length) is demoted to array length.
@return a new array containing the elements between the start and end indices.
@since 2.1
@see Arrays#copyOfRange(boolean[], int, int)
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 7,762
|
[
"array",
"startIndexInclusive",
"endIndexExclusive"
] | true
| 3
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
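A usage sketch exercising the documented clamping rules (sample data is ours):

import java.util.Arrays;
import org.apache.commons.lang3.ArrayUtils;

public class SubarrayDemo {
    public static void main(String[] args) {
        boolean[] flags = {true, false, true, true};
        System.out.println(Arrays.toString(ArrayUtils.subarray(flags, 1, 3)));   // [false, true]
        System.out.println(Arrays.toString(ArrayUtils.subarray(flags, -5, 99))); // whole array: indices are clamped
        System.out.println(ArrayUtils.subarray(flags, 3, 1).length);             // 0: empty, not null
    }
}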
nanosToMillis
|
private long nanosToMillis(final long nanos) {
return nanos / NANO_2_MILLIS;
}
|
Converts nanoseconds to milliseconds.
@param nanos nanoseconds to convert.
@return milliseconds conversion result.
|
java
|
src/main/java/org/apache/commons/lang3/time/StopWatch.java
| 611
|
[
"nanos"
] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
noMatch
|
public static ConditionOutcome noMatch(ConditionMessage message) {
return new ConditionOutcome(false, message);
}
|
Create a new {@link ConditionOutcome} instance for 'no match'.
@param message the message
@return the {@link ConditionOutcome}
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/ConditionOutcome.java
| 100
|
[
"message"
] |
ConditionOutcome
| true
| 1
| 6.16
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
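A sketch of building a 'no match' outcome inside a custom condition (the condition and reason strings are invented for the example):

import org.springframework.boot.autoconfigure.condition.ConditionMessage;
import org.springframework.boot.autoconfigure.condition.ConditionOutcome;

public class NoMatchDemo {
    public static void main(String[] args) {
        ConditionOutcome outcome = ConditionOutcome.noMatch(
                ConditionMessage.forCondition("DemoCondition").because("feature flag disabled"));
        System.out.println(outcome.isMatch() + ": " + outcome.getMessage());
    }
}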
sinh
|
public static double sinh(double value) {
// sinh(x) = (exp(x)-exp(-x))/2
double h;
if (value < 0.0) {
value = -value;
h = -0.5;
} else {
h = 0.5;
}
if (value < 22.0) {
if (value < TWO_POW_N28) {
return (h < 0.0) ? -value : value;
} else {
double t = Math.expm1(value);
// Might be more accurate, if value < 1: return h*((t+t)-t*t/(t+1.0)).
return h * (t + t / (t + 1.0));
}
} else if (value < LOG_DOUBLE_MAX_VALUE) {
return h * Math.exp(value);
} else {
double t = Math.exp(value * 0.5);
return (h * t) * t;
}
}
|
A faster and less accurate {@link Math#sinh}
@param value A double value.
@return The hyperbolic sine of the value.
|
java
|
libs/core/src/main/java/org/elasticsearch/core/FastMath.java
| 151
|
[
"value"
] | true
| 6
| 8.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
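The expm1 trick used in the mid-range branch can be checked on its own: with t = expm1(x), sinh(x) = 0.5 * (t + t / (t + 1)), which avoids the catastrophic cancellation of (exp(x) - exp(-x)) / 2 for small x. A standalone sketch (not the FastMath constants, just the identity):

public class SinhIdentityDemo {
    static double sinhViaExpm1(double x) {
        double t = Math.expm1(Math.abs(x));
        double r = 0.5 * (t + t / (t + 1.0));
        return x < 0 ? -r : r;
    }

    public static void main(String[] args) {
        System.out.println(sinhViaExpm1(1e-9)); // ~1.0E-9
        System.out.println(Math.sinh(1e-9));    // agrees; the naive formula would lose precision here
    }
}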
table
|
def table(ax: Axes, data: DataFrame | Series, **kwargs) -> Table:
"""
Helper function to convert DataFrame and Series to matplotlib.table.
This method provides an easy way to visualize tabular data within a Matplotlib
figure. It automatically extracts index and column labels from the DataFrame
or Series, unless explicitly specified. This function is particularly useful
when displaying summary tables alongside other plots or when creating static
reports. It utilizes the `matplotlib.pyplot.table` backend and allows
customization through various styling options available in Matplotlib.
Parameters
----------
ax : Matplotlib axes object
The axes on which to draw the table.
data : DataFrame or Series
Data for table contents.
**kwargs
Keyword arguments to be passed to matplotlib.table.table.
If `rowLabels` or `colLabels` is not specified, data index or column
names will be used.
Returns
-------
matplotlib table object
The created table as a matplotlib Table object.
See Also
--------
DataFrame.plot : Make plots of DataFrame using matplotlib.
matplotlib.pyplot.table : Create a table from data in a Matplotlib plot.
Examples
--------
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
>>> fig, ax = plt.subplots()
>>> ax.axis("off")
(np.float64(0.0), np.float64(1.0), np.float64(0.0), np.float64(1.0))
>>> table = pd.plotting.table(
... ax, df, loc="center", cellLoc="center", colWidths=[0.2, 0.2]
... )
"""
plot_backend = _get_plot_backend("matplotlib")
return plot_backend.table(
ax=ax, data=data, rowLabels=None, colLabels=None, **kwargs
)
|
Helper function to convert DataFrame and Series to matplotlib.table.
This method provides an easy way to visualize tabular data within a Matplotlib
figure. It automatically extracts index and column labels from the DataFrame
or Series, unless explicitly specified. This function is particularly useful
when displaying summary tables alongside other plots or when creating static
reports. It utilizes the `matplotlib.pyplot.table` backend and allows
customization through various styling options available in Matplotlib.
Parameters
----------
ax : Matplotlib axes object
The axes on which to draw the table.
data : DataFrame or Series
Data for table contents.
**kwargs
Keyword arguments to be passed to matplotlib.table.table.
If `rowLabels` or `colLabels` is not specified, data index or column
names will be used.
Returns
-------
matplotlib table object
The created table as a matplotlib Table object.
See Also
--------
DataFrame.plot : Make plots of DataFrame using matplotlib.
matplotlib.pyplot.table : Create a table from data in a Matplotlib plot.
Examples
--------
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
>>> fig, ax = plt.subplots()
>>> ax.axis("off")
(np.float64(0.0), np.float64(1.0), np.float64(0.0), np.float64(1.0))
>>> table = pd.plotting.table(
... ax, df, loc="center", cellLoc="center", colWidths=[0.2, 0.2]
... )
|
python
|
pandas/plotting/_misc.py
| 32
|
[
"ax",
"data"
] |
Table
| true
| 1
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
equals
|
@Override
public boolean equals(final Object obj) {
if (obj == this) {
return true;
}
if (obj instanceof Map.Entry<?, ?>) {
final Map.Entry<?, ?> other = (Map.Entry<?, ?>) obj;
return Objects.equals(getKey(), other.getKey())
&& Objects.equals(getValue(), other.getValue());
}
return false;
}
|
Compares this pair to another based on the two elements.
@param obj the object to compare to, null returns false.
@return true if the elements of the pair are equal.
|
java
|
src/main/java/org/apache/commons/lang3/tuple/Pair.java
| 167
|
[
"obj"
] | true
| 4
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
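Because the comparison is written against Map.Entry rather than Pair, a Pair can equal any entry with the same key and value. A short sketch (Java 9+ for Map.entry):

import java.util.Map;
import org.apache.commons.lang3.tuple.Pair;

public class PairEqualsDemo {
    public static void main(String[] args) {
        Pair<String, Integer> pair = Pair.of("a", 1);
        Map.Entry<String, Integer> entry = Map.entry("a", 1);
        System.out.println(pair.equals(entry)); // true: only key and value are compared
    }
}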
getAsText
|
@Override
public String getAsText() {
InputSource value = (InputSource) getValue();
return (value != null ? value.getSystemId() : "");
}
|
Create a new InputSourceEditor,
using the given ResourceEditor underneath.
@param resourceEditor the ResourceEditor to use
|
java
|
spring-beans/src/main/java/org/springframework/beans/propertyeditors/InputSourceEditor.java
| 80
|
[] |
String
| true
| 2
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
_sanity_check
|
def _sanity_check():
"""
Quick sanity checks for common bugs caused by environment.
There are some cases e.g. with wrong BLAS ABI that cause wrong
results under specific runtime conditions that are not necessarily
achieved during test suite runs, and it is useful to catch those early.
See https://github.com/numpy/numpy/issues/8577 and other
similar bug reports.
"""
try:
x = ones(2, dtype=float32)
if not abs(x.dot(x) - float32(2.0)) < 1e-5:
raise AssertionError
except AssertionError:
msg = ("The current Numpy installation ({!r}) fails to "
"pass simple sanity checks. This can be caused for example "
"by incorrect BLAS library being linked in, or by mixing "
"package managers (pip, conda, apt, ...). Search closed "
"numpy issues for similar problems.")
raise RuntimeError(msg.format(__file__)) from None
|
Quick sanity checks for common bugs caused by environment.
There are some cases e.g. with wrong BLAS ABI that cause wrong
results under specific runtime conditions that are not necessarily
achieved during test suite runs, and it is useful to catch those early.
See https://github.com/numpy/numpy/issues/8577 and other
similar bug reports.
|
python
|
numpy/__init__.py
| 794
|
[] | false
| 2
| 6.24
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
convertIfNecessary
|
<T> @Nullable T convertIfNecessary(@Nullable Object value, @Nullable Class<T> requiredType, @Nullable Field field)
throws TypeMismatchException;
|
Convert the value to the required type (if necessary from a String).
<p>Conversions from String to any type will typically use the {@code setAsText}
method of the PropertyEditor class, or a Spring Converter in a ConversionService.
@param value the value to convert
@param requiredType the type we must convert to
(or {@code null} if not known, for example in case of a collection element)
@param field the reflective field that is the target of the conversion
(for analysis of generic types; may be {@code null})
@return the new value, possibly the result of type conversion
@throws TypeMismatchException if type conversion failed
@see java.beans.PropertyEditor#setAsText(String)
@see java.beans.PropertyEditor#getValue()
@see org.springframework.core.convert.ConversionService
@see org.springframework.core.convert.converter.Converter
|
java
|
spring-beans/src/main/java/org/springframework/beans/TypeConverter.java
| 92
|
[
"value",
"requiredType",
"field"
] |
T
| true
| 1
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
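Since this is an interface method, a sketch through SimpleTypeConverter, one concrete TypeConverter implementation (the String-to-Integer conversion is just an example):

import org.springframework.beans.SimpleTypeConverter;

public class ConvertDemo {
    public static void main(String[] args) {
        SimpleTypeConverter converter = new SimpleTypeConverter();
        Integer n = converter.convertIfNecessary("42", Integer.class);
        System.out.println(n + 1); // 43 - the String was converted, not concatenated
    }
}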
getJarEntry
|
private JarEntry getJarEntry(URL jarFileUrl) throws IOException {
if (this.entryName == null) {
return null;
}
JarEntry jarEntry = this.jarFile.getJarEntry(this.entryName);
if (jarEntry == null) {
jarFiles.closeIfNotCached(jarFileUrl, this.jarFile);
throwFileNotFound();
}
return jarEntry;
}
|
The {@link URLClassLoader} connects often to check if a resource exists, we can
save some object allocations by using the cached copy if we have one.
@param jarFileURL the jar file to check
@param entryName the entry name to check
@throws FileNotFoundException on a missing entry
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/jar/JarUrlConnection.java
| 312
|
[
"jarFileUrl"
] |
JarEntry
| true
| 3
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getHelp
|
public String getHelp() {
if (this.help == null) {
getParser().formatHelpWith(new BuiltinHelpFormatter(80, 2));
OutputStream out = new ByteArrayOutputStream();
try {
getParser().printHelpOn(out);
}
catch (IOException ex) {
return "Help not available";
}
this.help = out.toString().replace(" --cp ", " -cp ");
}
return this.help;
}
|
Run the command using the specified parsed {@link OptionSet}.
@param options the parsed option set
@return an ExitStatus
@throws Exception in case of errors
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/options/OptionHandler.java
| 119
|
[] |
String
| true
| 3
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_broadcast_shape
|
def _broadcast_shape(*args):
"""Returns the shape of the arrays that would result from broadcasting the
supplied arrays against each other.
"""
# use the old-iterator because np.nditer does not handle size 0 arrays
# consistently
b = np.broadcast(*args[:64])
# unfortunately, it cannot handle 64 or more arguments directly
for pos in range(64, len(args), 63):
# ironically, np.broadcast does not properly handle np.broadcast
# objects (it treats them as scalars)
# use broadcasting to avoid allocating the full array
b = broadcast_to(0, b.shape)
b = np.broadcast(b, *args[pos:(pos + 63)])
return b.shape
|
Returns the shape of the arrays that would result from broadcasting the
supplied arrays against each other.
|
python
|
numpy/lib/_stride_tricks_impl.py
| 446
|
[] | false
| 2
| 6.4
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
start_crawler
|
def start_crawler(self, crawler_name: str) -> dict:
"""
Triggers the AWS Glue Crawler.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.start_crawler`
:param crawler_name: unique crawler name per AWS account
:return: Empty dictionary
"""
self.log.info("Starting crawler %s", crawler_name)
return self.glue_client.start_crawler(Name=crawler_name)
|
Triggers the AWS Glue Crawler.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.start_crawler`
:param crawler_name: unique crawler name per AWS account
:return: Empty dictionary
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/glue_crawler.py
| 160
|
[
"self",
"crawler_name"
] |
dict
| true
| 1
| 6.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
isOriginIPPotentiallyTrustworthy
|
function isOriginIPPotentiallyTrustworthy (origin) {
// IPv6
if (origin.includes(':')) {
// Remove brackets from IPv6 addresses
if (origin[0] === '[' && origin[origin.length - 1] === ']') {
origin = origin.slice(1, -1)
}
return isPotentiallyTrustworthyIPv6(origin)
}
// IPv4
return isPotentialleTrustworthyIPv4(origin)
}
|
Check if host matches one of the CIDR notations 127.0.0.0/8 or ::1/128.
@param {string} origin
@returns {boolean}
|
javascript
|
deps/undici/src/lib/web/fetch/util.js
| 569
|
[
"origin"
] | false
| 4
| 6.4
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
of
|
public static <T> Bindable<T> of(Class<T> type) {
Assert.notNull(type, "'type' must not be null");
return of(ResolvableType.forClass(type));
}
|
Create a new {@link Bindable} of the specified type.
@param <T> the source type
@param type the type (must not be {@code null})
@return a {@link Bindable} instance
@see #of(ResolvableType)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Bindable.java
| 270
|
[
"type"
] | true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
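A minimal sketch of the factory (the follow-up call just shows what the returned Bindable carries; the type choice is arbitrary):

import org.springframework.boot.context.properties.bind.Bindable;

public class BindableDemo {
    public static void main(String[] args) {
        Bindable<Integer> target = Bindable.of(Integer.class);
        System.out.println(target.getType()); // java.lang.Integer, as a ResolvableType
    }
}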