| column | type | stats |
|---|---|---|
| function_name | string | lengths 1–57 |
| function_code | string | lengths 20–4.99k |
| documentation | string | lengths 50–2k |
| language | string | 5 classes |
| file_path | string | lengths 8–166 |
| line_number | int32 | 4–16.7k |
| parameters | list | lengths 0–20 |
| return_type | string | lengths 0–131 |
| has_type_hints | bool | 2 classes |
| complexity | int32 | 1–51 |
| quality_score | float32 | 6–9.68 |
| repo_name | string | 34 classes |
| repo_stars | int32 | 2.9k–242k |
| docstring_style | string | 7 classes |
| is_async | bool | 2 classes |

Each record below follows this column order: function name, source code, documentation, language, file path, line number, parameter list, return type, type-hint flag, complexity, quality score, repository, star count, docstring style, and async flag.
logDeprecatedBean
|
protected void logDeprecatedBean(String beanName, Class<?> beanType, BeanDefinition beanDefinition) {
StringBuilder builder = new StringBuilder();
builder.append(beanType);
builder.append(" ['");
builder.append(beanName);
builder.append('\'');
String resourceDescription = beanDefinition.getResourceDescription();
if (StringUtils.hasText(resourceDescription)) {
builder.append(" in ");
builder.append(resourceDescription);
}
builder.append("] has been deprecated");
writeToLog(builder.toString());
}
|
Logs a warning for a bean annotated with {@link Deprecated @Deprecated}.
@param beanName the name of the deprecated bean
@param beanType the user-specified type of the deprecated bean
@param beanDefinition the definition of the deprecated bean
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/DeprecatedBeanWarner.java
| 81
|
[
"beanName",
"beanType",
"beanDefinition"
] |
void
| true
| 2
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
transformObjectLiteralMembersSortText
|
function transformObjectLiteralMembersSortText(start: number): void {
for (let i = start; i < symbols.length; i++) {
const symbol = symbols[i];
const symbolId = getSymbolId(symbol);
const origin = symbolToOriginInfoMap?.[i];
const target = getEmitScriptTarget(compilerOptions);
const displayName = getCompletionEntryDisplayNameForSymbol(
symbol,
target,
origin,
CompletionKind.ObjectPropertyDeclaration,
/*jsxIdentifierExpected*/ false,
);
if (displayName) {
const originalSortText = symbolToSortTextMap[symbolId] ?? SortText.LocationPriority;
const { name } = displayName;
symbolToSortTextMap[symbolId] = SortText.ObjectLiteralProperty(originalSortText, name);
}
}
}
|
Filters out completion suggestions for named imports or exports.
@returns Symbols to be suggested in an object binding pattern or object literal expression, barring those whose declarations
do not occur at the current position and have not otherwise been typed.
|
typescript
|
src/services/completions.ts
| 5,215
|
[
"start"
] | true
| 3
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
requireBracketsForIPv6
|
@CanIgnoreReturnValue
public HostAndPort requireBracketsForIPv6() {
checkArgument(!hasBracketlessColons, "Possible bracketless IPv6 literal: %s", host);
return this;
}
|
Generate an error if the host might be a non-bracketed IPv6 literal.
<p>URI formatting requires that IPv6 literals be surrounded by brackets, like "[2001:db8::1]".
Chain this call after {@link #fromString(String)} to increase the strictness of the parser, and
disallow IPv6 literals that don't contain these brackets.
<p>Note that this parser identifies IPv6 literals solely based on the presence of a colon. To
perform actual validation of IP addresses, see the {@link InetAddresses#forString(String)}
method.
@return {@code this}, to enable chaining of calls.
@throws IllegalArgumentException if bracketless IPv6 is detected.
|
java
|
android/guava/src/com/google/common/net/HostAndPort.java
| 271
|
[] |
HostAndPort
| true
| 1
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
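A minimal call-site sketch of the strict-parsing chain described above, using Guava's public `HostAndPort` API; the host and port values are illustrative:

```java
import com.google.common.net.HostAndPort;

public class HostAndPortDemo {
    public static void main(String[] args) {
        // The bracketed form parses; a bare "2001:db8::1:80" would make
        // requireBracketsForIPv6() throw IllegalArgumentException.
        HostAndPort hp = HostAndPort.fromString("[2001:db8::1]:80")
                .requireBracketsForIPv6();
        System.out.println(hp.getHost()); // 2001:db8::1 (brackets stripped)
        System.out.println(hp.getPort()); // 80
    }
}
```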
calculateDeadlineMs
|
static long calculateDeadlineMs(final long currentTimeMs, final long timeoutMs) {
if (currentTimeMs > Long.MAX_VALUE - timeoutMs)
return Long.MAX_VALUE;
else
return currentTimeMs + timeoutMs;
}
|
Calculate the deadline timestamp based on the current time and timeout.
@param currentTimeMs Current time, in milliseconds
@param timeoutMs Timeout, in milliseconds
@return Absolute time by which event should be completed
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEvent.java
| 121
|
[
"currentTimeMs",
"timeoutMs"
] | true
| 2
| 7.44
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
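The guard above exists because a naive `currentTimeMs + timeoutMs` overflows to a negative deadline when the sum would exceed `Long.MAX_VALUE`. A standalone sketch, re-implemented here since the original is package-private:

```java
public class DeadlineDemo {
    // Overflow-safe deadline: clamp to Long.MAX_VALUE instead of wrapping.
    static long calculateDeadlineMs(long currentTimeMs, long timeoutMs) {
        return currentTimeMs > Long.MAX_VALUE - timeoutMs
                ? Long.MAX_VALUE
                : currentTimeMs + timeoutMs;
    }

    public static void main(String[] args) {
        System.out.println(calculateDeadlineMs(1_000L, 500L));            // 1500
        System.out.println(calculateDeadlineMs(Long.MAX_VALUE - 1, 10L)); // Long.MAX_VALUE
    }
}
```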
localeLookupList
|
public static List<Locale> localeLookupList(final Locale locale) {
return localeLookupList(locale, locale);
}
|
Obtains the list of locales to search through when performing a locale search.
<pre>
localeLookupList(Locale("fr", "CA", "xxx"))
= [Locale("fr", "CA", "xxx"), Locale("fr", "CA"), Locale("fr")]
</pre>
@param locale the locale to start from.
@return the unmodifiable list of Locale objects, 0 being locale, not null.
|
java
|
src/main/java/org/apache/commons/lang3/LocaleUtils.java
| 268
|
[
"locale"
] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
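A runnable sketch of the lookup-list behaviour documented above, as typically used to drive resource-bundle fallback; expected output is shown in the comment:

```java
import java.util.Locale;
import org.apache.commons.lang3.LocaleUtils;

public class LocaleLookupDemo {
    public static void main(String[] args) {
        // Most specific locale first, then progressively broader fallbacks.
        System.out.println(LocaleUtils.localeLookupList(new Locale("fr", "CA", "xxx")));
        // [fr_CA_xxx, fr_CA, fr]
    }
}
```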
range
|
public static IntStream range(final int endExclusive) {
return IntStream.range(0, endExclusive);
}
|
Shorthand for {@code IntStream.range(0, i)}.
@param endExclusive the exclusive upper bound.
@return a sequential {@link IntStream} for the range of {@code int} elements.
|
java
|
src/main/java/org/apache/commons/lang3/stream/IntStreams.java
| 49
|
[
"endExclusive"
] |
IntStream
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getExpireAfterAccessNanos
|
@SuppressWarnings("GoodTime") // nanos internally, should be Duration
long getExpireAfterAccessNanos() {
return (expireAfterAccessNanos == UNSET_INT)
? DEFAULT_EXPIRATION_NANOS
: expireAfterAccessNanos;
}
|
Specifies that each entry should be automatically removed from the cache once a fixed duration
has elapsed after the entry's creation, the most recent replacement of its value, or its last
access. Access time is reset by all cache read and write operations (including {@code
Cache.asMap().get(Object)} and {@code Cache.asMap().put(K, V)}), but not by {@code
containsKey(Object)}, nor by operations on the collection-views of {@link Cache#asMap}. So, for
example, iterating through {@code Cache.asMap().entrySet()} does not reset access time for the
entries you retrieve.
<p>When {@code duration} is zero, this method hands off to {@link #maximumSize(long)
maximumSize}{@code (0)}, ignoring any otherwise-specified maximum size or weight. This can be
useful in testing, or to disable caching temporarily without a code change.
<p>Expired entries may be counted in {@link Cache#size}, but will never be visible to read or
write operations. Expired entries are cleaned up as part of the routine maintenance described
in the class javadoc.
<p>If you can represent the duration as a {@link Duration} (which should be preferred when
feasible), use {@link #expireAfterAccess(Duration)} instead.
@param duration the length of time after an entry is last accessed that it should be
automatically removed
@param unit the unit that {@code duration} is expressed in
@return this {@code CacheBuilder} instance (for chaining)
@throws IllegalArgumentException if {@code duration} is negative
@throws IllegalStateException if {@link #expireAfterAccess} was already set
@deprecated Use {@link #expireAfterAccess(Duration)} instead.
|
java
|
android/guava/src/com/google/common/cache/CacheBuilder.java
| 848
|
[] | true
| 2
| 7.6
|
google/guava
| 51,352
|
javadoc
| false
|
|
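The javadoc above belongs to Guava's `expireAfterAccess` configuration, and it points to the `Duration` overload as the preferred form. A small usage sketch:

```java
import java.time.Duration;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class CacheDemo {
    public static void main(String[] args) {
        // Entries are evicted once they have been neither read nor written
        // for ten minutes.
        Cache<String, String> cache = CacheBuilder.newBuilder()
                .maximumSize(1_000)
                .expireAfterAccess(Duration.ofMinutes(10))
                .build();
        cache.put("k", "v");
        System.out.println(cache.getIfPresent("k")); // v
    }
}
```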
getWriteMethodParameter
|
public static MethodParameter getWriteMethodParameter(PropertyDescriptor pd) {
if (pd instanceof GenericTypeAwarePropertyDescriptor gpd) {
return new MethodParameter(gpd.getWriteMethodParameter());
}
else {
Method writeMethod = pd.getWriteMethod();
Assert.state(writeMethod != null, "No write method available");
return new MethodParameter(writeMethod, 0);
}
}
|
Obtain a new MethodParameter object for the write method of the
specified property.
@param pd the PropertyDescriptor for the property
@return a corresponding MethodParameter object
|
java
|
spring-beans/src/main/java/org/springframework/beans/BeanUtils.java
| 633
|
[
"pd"
] |
MethodParameter
| true
| 2
| 7.28
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
run
|
@Override
public void run() {
InterruptibleTask<?> localTask = task;
if (localTask != null) {
localTask.run();
}
/*
* In the Async case, we may have called setFuture(pendingFuture), in which case afterDone()
* won't have been called yet.
*/
this.task = null;
}
|
Creates a {@code ListenableFutureTask} that will upon running, execute the given {@code
Runnable}, and arrange that {@code get} will return the given result on successful completion.
@param runnable the runnable task
@param result the result to return on successful completion. If you don't need a particular
result, consider using constructions of the form: {@code ListenableFuture<?> f =
ListenableFutureTask.create(runnable, null)}
|
java
|
android/guava/src/com/google/common/util/concurrent/TrustedListenableFutureTask.java
| 76
|
[] |
void
| true
| 2
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
buildMessage
|
private static String buildMessage(@Nullable ConfigurationPropertyName name, Bindable<?> target) {
StringBuilder message = new StringBuilder();
message.append("Failed to bind properties");
message.append((name != null) ? " under '" + name + "'" : "");
message.append(" to ").append(target.getType());
return message.toString();
}
|
Return the configuration property name of the item that was being bound.
@return the configuration property name
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/BindException.java
| 78
|
[
"name",
"target"
] |
String
| true
| 2
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
registerMetricForSubscription
|
@Override
public void registerMetricForSubscription(KafkaMetric metric) {
if (!metrics().containsKey(metric.metricName())) {
clientTelemetryReporter.ifPresent(reporter -> reporter.metricChange(metric));
} else {
log.debug("Skipping registration for metric {}. Existing producer metrics cannot be overwritten.", metric.metricName());
}
}
|
Add the provided application metric for subscription.
This metric will be added to this client's metrics
that are available for subscription and sent as
telemetry data to the broker.
The provided metric must map to an OTLP metric data point
type in the OpenTelemetry v1 metrics protobuf message types.
Specifically, the metric should be one of the following:
<ul>
<li>
`Sum`: Monotonic total count meter (Counter). Suitable for metrics like total number of X, e.g., total bytes sent.
</li>
<li>
`Gauge`: Non-monotonic current value meter (UpDownCounter). Suitable for metrics like current value of Y, e.g., current queue count.
</li>
</ul>
Metrics not matching these types are silently ignored.
Executing this method for a previously registered metric is a benign operation and results in updating that metrics entry.
@param metric The application metric to register
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
| 1,406
|
[
"metric"
] |
void
| true
| 2
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
setCount
|
@CanIgnoreReturnValue
@Override
public Builder<E> setCount(E element, int count) {
checkNotNull(element);
CollectPreconditions.checkNonnegative(count, "count");
maintenance();
elements[length] = element;
counts[length] = ~count;
length++;
return this;
}
|
Adds or removes the necessary occurrences of an element such that the element attains the
desired count.
@param element the element to add or remove occurrences of
@param count the desired count of the element in this multiset
@return this {@code Builder} object
@throws NullPointerException if {@code element} is null
@throws IllegalArgumentException if {@code count} is negative
|
java
|
android/guava/src/com/google/common/collect/ImmutableSortedMultiset.java
| 618
|
[
"element",
"count"
] | true
| 1
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
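A usage sketch of the builder method above, via Guava's public `ImmutableSortedMultiset` builder:

```java
import com.google.common.collect.ImmutableSortedMultiset;

public class SetCountDemo {
    public static void main(String[] args) {
        ImmutableSortedMultiset<String> multiset =
                ImmutableSortedMultiset.<String>naturalOrder()
                        .add("a")          // count("a") == 1 so far
                        .setCount("a", 3)  // overrides prior occurrences
                        .setCount("b", 2)
                        .build();
        System.out.println(multiset.count("a")); // 3
        System.out.println(multiset);            // [a x 3, b x 2]
    }
}
```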
canSendMore
|
public boolean canSendMore(String node) {
Deque<NetworkClient.InFlightRequest> queue = requests.get(node);
return queue == null || queue.isEmpty() ||
(queue.peekFirst().send.completed() && queue.size() < this.maxInFlightRequestsPerConnection);
}
|
Can we send more requests to this node?
@param node Node in question
@return true if we can send more requests to the node: either it has no in-flight requests, or the oldest in-flight request has completed sending and the number of in-flight requests is below the per-connection maximum
|
java
|
clients/src/main/java/org/apache/kafka/clients/InFlightRequests.java
| 96
|
[
"node"
] | true
| 4
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
expireDelegationToken
|
default ExpireDelegationTokenResult expireDelegationToken(byte[] hmac) {
return expireDelegationToken(hmac, new ExpireDelegationTokenOptions());
}
|
Expire a Delegation Token.
<p>
This is a convenience method for {@link #expireDelegationToken(byte[], ExpireDelegationTokenOptions)} with default options.
This will expire the token immediately. See the overload for more details.
@param hmac HMAC of the Delegation token
@return The ExpireDelegationTokenResult.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 792
|
[
"hmac"
] |
ExpireDelegationTokenResult
| true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
unstack
|
def unstack(
self,
level: IndexLabel = -1,
fill_value: Hashable | None = None,
sort: bool = True,
) -> DataFrame:
"""
Unstack, also known as pivot, Series with MultiIndex to produce DataFrame.
Parameters
----------
level : int, str, or list of these, default last level
Level(s) to unstack, can pass level name.
fill_value : scalar value, default None
Value to use when replacing NaN values.
sort : bool, default True
Sort the level(s) in the resulting MultiIndex columns.
Returns
-------
DataFrame
Unstacked Series.
See Also
--------
DataFrame.unstack : Pivot the MultiIndex of a DataFrame.
Notes
-----
Reference :ref:`the user guide <reshaping.stacking>` for more examples.
Examples
--------
>>> s = pd.Series(
... [1, 2, 3, 4],
... index=pd.MultiIndex.from_product([["one", "two"], ["a", "b"]]),
... )
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value, sort)
|
Unstack, also known as pivot, Series with MultiIndex to produce DataFrame.
Parameters
----------
level : int, str, or list of these, default last level
Level(s) to unstack, can pass level name.
fill_value : scalar value, default None
Value to use when replacing NaN values.
sort : bool, default True
Sort the level(s) in the resulting MultiIndex columns.
Returns
-------
DataFrame
Unstacked Series.
See Also
--------
DataFrame.unstack : Pivot the MultiIndex of a DataFrame.
Notes
-----
Reference :ref:`the user guide <reshaping.stacking>` for more examples.
Examples
--------
>>> s = pd.Series(
... [1, 2, 3, 4],
... index=pd.MultiIndex.from_product([["one", "two"], ["a", "b"]]),
... )
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
|
python
|
pandas/core/series.py
| 4,346
|
[
"self",
"level",
"fill_value",
"sort"
] |
DataFrame
| true
| 1
| 7.28
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
refactor_levels
|
def refactor_levels(
level: Level | list[Level] | None,
obj: Index,
) -> list[int]:
"""
Returns a consistent levels arg for use in ``hide_index`` or ``hide_columns``.
Parameters
----------
level : int, str, list
Original ``level`` arg supplied to above methods.
obj:
Either ``self.index`` or ``self.columns``
Returns
-------
list : refactored arg with a list of levels to hide
"""
if level is None:
levels_: list[int] = list(range(obj.nlevels))
elif isinstance(level, int):
levels_ = [level]
elif isinstance(level, str):
levels_ = [obj._get_level_number(level)]
elif isinstance(level, list):
levels_ = [
obj._get_level_number(lev) if not isinstance(lev, int) else lev
for lev in level
]
else:
raise ValueError("`level` must be of type `int`, `str` or list of such")
return levels_
|
Returns a consistent levels arg for use in ``hide_index`` or ``hide_columns``.
Parameters
----------
level : int, str, list
Original ``level`` arg supplied to above methods.
obj:
Either ``self.index`` or ``self.columns``
Returns
-------
list : refactored arg with a list of levels to hide
|
python
|
pandas/io/formats/style_render.py
| 2,087
|
[
"level",
"obj"
] |
list[int]
| true
| 7
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
findAllAnnotationsOnBean
|
<A extends Annotation> Set<A> findAllAnnotationsOnBean(
String beanName, Class<A> annotationType, boolean allowFactoryBeanInit)
throws NoSuchBeanDefinitionException;
|
Find all {@link Annotation} instances of {@code annotationType} on the specified
bean, traversing its interfaces and superclasses if no annotation can be found on
the given class itself, as well as checking the bean's factory method (if any).
@param beanName the name of the bean to look for annotations on
@param annotationType the type of annotation to look for
(at class, interface or factory method level of the specified bean)
@param allowFactoryBeanInit whether a {@code FactoryBean} may get initialized
just for the purpose of determining its object type
@return the set of annotations of the given type found (potentially empty)
@throws NoSuchBeanDefinitionException if there is no bean with the given name
@since 6.0
@see #getBeanNamesForAnnotation(Class)
@see #findAnnotationOnBean(String, Class, boolean)
@see #getType(String, boolean)
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/ListableBeanFactory.java
| 418
|
[
"beanName",
"annotationType",
"allowFactoryBeanInit"
] | true
| 1
| 6.32
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
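A hedged sketch of calling this Spring 6 API; the `AuditService` bean and its generated name `auditService` are assumptions for illustration only:

```java
import java.util.Set;
import org.springframework.beans.factory.ListableBeanFactory;
import org.springframework.context.annotation.AnnotationConfigApplicationContext;
import org.springframework.stereotype.Service;

// Hypothetical bean used only for this example.
@Service
class AuditService {}

public class FindAnnotationsDemo {
    public static void main(String[] args) {
        ListableBeanFactory factory =
                new AnnotationConfigApplicationContext(AuditService.class);
        // Collects @Service from the class, its superclasses/interfaces, or the
        // bean's factory method; allowFactoryBeanInit=false avoids initializing
        // FactoryBeans just to determine their object type.
        Set<Service> annotations =
                factory.findAllAnnotationsOnBean("auditService", Service.class, false);
        System.out.println(annotations);
    }
}
```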
findPrimaryConstructor
|
@SuppressWarnings("unchecked")
public static <T> @Nullable Constructor<T> findPrimaryConstructor(Class<T> clazz) {
try {
KClass<T> kClass = JvmClassMappingKt.getKotlinClass(clazz);
KFunction<T> primaryCtor = KClasses.getPrimaryConstructor(kClass);
if (primaryCtor == null) {
return null;
}
if (KotlinDetector.isInlineClass(clazz)) {
Constructor<?>[] constructors = clazz.getDeclaredConstructors();
Assert.state(constructors.length == 1,
"Kotlin value classes annotated with @JvmInline are expected to have a single JVM constructor");
return (Constructor<T>) constructors[0];
}
Constructor<T> constructor = ReflectJvmMapping.getJavaConstructor(primaryCtor);
if (constructor == null) {
throw new IllegalStateException(
"Failed to find Java constructor for Kotlin primary constructor: " + clazz.getName());
}
return constructor;
}
catch (UnsupportedOperationException ex) {
return null;
}
}
|
Retrieve the Java constructor corresponding to the Kotlin primary constructor, if any.
@param clazz the {@link Class} of the Kotlin class
@see <a href="https://kotlinlang.org/docs/reference/classes.html#constructors">
https://kotlinlang.org/docs/reference/classes.html#constructors</a>
|
java
|
spring-beans/src/main/java/org/springframework/beans/BeanUtils.java
| 873
|
[
"clazz"
] | true
| 5
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
__call__
|
def __call__(self, iterable):
"""Dispatch the tasks and return the results.
Parameters
----------
iterable : iterable
Iterable containing tuples of (delayed_function, args, kwargs) that should
be consumed.
Returns
-------
results : list
List of results of the tasks.
"""
# Capture the thread-local scikit-learn configuration at the time
# Parallel.__call__ is issued since the tasks can be dispatched
# in a different thread depending on the backend and on the value of
# pre_dispatch and n_jobs.
config = get_config()
# In free-threading Python >= 3.14, warnings filters are managed through a
# ContextVar and warnings.filters is not modified inside a
# warnings.catch_warnings context. You need to use warnings._get_filters().
# For more details, see
# https://docs.python.org/3.14/whatsnew/3.14.html#concurrent-safe-warnings-control
filters_func = getattr(warnings, "_get_filters", None)
warning_filters = (
filters_func() if filters_func is not None else warnings.filters
)
iterable_with_config_and_warning_filters = (
(
_with_config_and_warning_filters(delayed_func, config, warning_filters),
args,
kwargs,
)
for delayed_func, args, kwargs in iterable
)
return super().__call__(iterable_with_config_and_warning_filters)
|
Dispatch the tasks and return the results.
Parameters
----------
iterable : iterable
Iterable containing tuples of (delayed_function, args, kwargs) that should
be consumed.
Returns
-------
results : list
List of results of the tasks.
|
python
|
sklearn/utils/parallel.py
| 54
|
[
"self",
"iterable"
] | false
| 2
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
all
|
public KafkaFuture<Map<ConfigResource, Config>> all() {
return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture<?>[0])).
thenApply(v -> {
Map<ConfigResource, Config> configs = new HashMap<>(futures.size());
for (Map.Entry<ConfigResource, KafkaFuture<Config>> entry : futures.entrySet()) {
try {
configs.put(entry.getKey(), entry.getValue().get());
} catch (InterruptedException | ExecutionException e) {
// This should be unreachable, because allOf ensured that all the futures
// completed successfully.
throw new RuntimeException(e);
}
}
return configs;
});
}
|
Return a future which succeeds only if all the config descriptions succeed.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/DescribeConfigsResult.java
| 50
|
[] | true
| 2
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
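A call-site sketch for the combined future above, using the standard Kafka `Admin` client; the broker address and topic name are placeholders:

```java
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.common.config.ConfigResource;

public class DescribeConfigsDemo {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
        try (Admin admin = Admin.create(props)) {
            ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "my-topic");
            // all() fails the combined future if any single description failed.
            Map<ConfigResource, Config> configs =
                    admin.describeConfigs(List.of(topic)).all().get();
            configs.forEach((resource, config) ->
                    System.out.println(resource + " -> " + config));
        }
    }
}
```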
equals
|
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (!(other instanceof FeatureUpdate)) {
return false;
}
final FeatureUpdate that = (FeatureUpdate) other;
return this.maxVersionLevel == that.maxVersionLevel && this.upgradeType.equals(that.upgradeType);
}
|
@param maxVersionLevel The new maximum version level for the finalized feature.
a value of zero is special and indicates that the update is intended to
delete the finalized feature, and should be accompanied by setting
the upgradeType to safe or unsafe.
@param upgradeType Indicate what kind of upgrade should be performed in this operation.
- UPGRADE: upgrading the feature level
- SAFE_DOWNGRADE: only downgrades which do not result in metadata loss are permitted
- UNSAFE_DOWNGRADE: any downgrade, including those which may result in metadata loss, are permitted
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/FeatureUpdate.java
| 88
|
[
"other"
] | true
| 4
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
concat
|
function concat() {
var length = arguments.length;
if (!length) {
return [];
}
var args = Array(length - 1),
array = arguments[0],
index = length;
while (index--) {
args[index - 1] = arguments[index];
}
return arrayPush(isArray(array) ? copyArray(array) : [array], baseFlatten(args, 1));
}
|
Creates a new array concatenating `array` with any additional arrays
and/or values.
@static
@memberOf _
@since 4.0.0
@category Array
@param {Array} array The array to concatenate.
@param {...*} [values] The values to concatenate.
@returns {Array} Returns the new concatenated array.
@example
var array = [1];
var other = _.concat(array, 2, [3], [[4]]);
console.log(other);
// => [1, 2, 3, [4]]
console.log(array);
// => [1]
|
javascript
|
lodash.js
| 7,014
|
[] | false
| 4
| 8.56
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
notEmpty
|
public static <T extends Collection<?>> T notEmpty(final T collection) {
return notEmpty(collection, DEFAULT_NOT_EMPTY_COLLECTION_EX_MESSAGE);
}
|
<p>Validates that the specified argument collection is neither {@code null}
nor a size of zero (no elements); otherwise throwing an exception.
<pre>Validate.notEmpty(myCollection);</pre>
<p>The message in the exception is "The validated collection is
empty".
@param <T> the collection type.
@param collection the collection to check, validated not null by this method.
@return the validated collection (never {@code null} method for chaining).
@throws NullPointerException if the collection is {@code null}.
@throws IllegalArgumentException if the collection is empty.
@see #notEmpty(Collection, String, Object...)
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 827
|
[
"collection"
] |
T
| true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
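A short sketch of the validate-and-return-for-chaining pattern described above:

```java
import java.util.List;
import org.apache.commons.lang3.Validate;

public class ValidateDemo {
    public static void main(String[] args) {
        List<String> names = List.of("alice", "bob");
        // Returns the same collection on success, so it can be used inline.
        List<String> checked = Validate.notEmpty(names);
        System.out.println(checked);

        // Throws IllegalArgumentException: "The validated collection is empty".
        Validate.notEmpty(List.of());
    }
}
```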
check_for_prefix
|
def check_for_prefix(self, prefix: str, delimiter: str, bucket_name: str | None = None) -> bool:
"""
Check that a prefix exists in a bucket.
:param bucket_name: the name of the bucket
:param prefix: a key prefix
:param delimiter: the delimiter marks key hierarchy.
:return: False if the prefix does not exist in the bucket and True if it does.
"""
if not prefix.endswith(delimiter):
prefix += delimiter
prefix_split = re.split(rf"(\w+[{delimiter}])$", prefix, maxsplit=1)
previous_level = prefix_split[0]
plist = self.list_prefixes(bucket_name, previous_level, delimiter)
return prefix in plist
|
Check that a prefix exists in a bucket.
:param bucket_name: the name of the bucket
:param prefix: a key prefix
:param delimiter: the delimiter marks key hierarchy.
:return: False if the prefix does not exist in the bucket and True if it does.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/s3.py
| 367
|
[
"self",
"prefix",
"delimiter",
"bucket_name"
] |
bool
| true
| 2
| 8.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
removeRequest
|
@Override
void removeRequest() {
if (!unsentOffsetCommitRequests().remove(this)) {
log.warn("OffsetCommit request to remove not found in the outbound buffer: {}", this);
}
}
|
Handle OffsetCommitResponse. This will complete the request future successfully if no
errors are found in the response. If the response contains errors, this will:
- handle expected errors and fail the future with specific exceptions depending on the error
- fail the future with a non-recoverable KafkaException for all unexpected errors (even if retriable)
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java
| 862
|
[] |
void
| true
| 2
| 6.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
toChar
|
public static char toChar(final Character ch, final char defaultValue) {
return ch != null ? ch.charValue() : defaultValue;
}
|
Converts the Character to a char handling {@code null}.
<pre>
CharUtils.toChar(null, 'X') = 'X'
CharUtils.toChar(' ', 'X') = ' '
CharUtils.toChar('A', 'X') = 'A'
</pre>
@param ch the character to convert
@param defaultValue the value to use if the Character is null
@return the char value of the Character or the default if null
|
java
|
src/main/java/org/apache/commons/lang3/CharUtils.java
| 296
|
[
"ch",
"defaultValue"
] | true
| 2
| 8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
checkIndexBounds
|
private static void checkIndexBounds(long index) {
assert index >= MIN_INDEX && index <= MAX_INDEX : "index must be in range [" + MIN_INDEX + ".." + MAX_INDEX + "]";
}
|
Provides the index of the bucket of the exponential histogram with the given scale that contains the provided value.
@param value the value to find the bucket for
@param scale the scale of the histogram
@return the index of the bucket
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialScaleUtils.java
| 283
|
[
"index"
] |
void
| true
| 2
| 8
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
handleExitCode
|
private void handleExitCode(@Nullable ConfigurableApplicationContext context, Throwable exception) {
int exitCode = getExitCodeFromException(context, exception);
if (exitCode != 0) {
if (context != null) {
context.publishEvent(new ExitCodeEvent(context, exitCode));
}
SpringBootExceptionHandler handler = getSpringBootExceptionHandler();
if (handler != null) {
handler.registerExitCode(exitCode);
}
}
}
|
Register that the given exception has been logged. By default, if the running in
the main thread, this method will suppress additional printing of the stacktrace.
@param exception the exception that was logged
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
| 881
|
[
"context",
"exception"
] |
void
| true
| 4
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
uppercase
|
public static String uppercase(String value) {
return UppercaseProcessor.apply(value);
}
|
Uses {@link UppercaseProcessor} to convert a string to its uppercase
equivalent.
@param value string to convert
@return uppercase equivalent
|
java
|
modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/Processors.java
| 51
|
[
"value"
] |
String
| true
| 1
| 6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
grouped_reduce
|
def grouped_reduce(self, func: Callable) -> Self:
"""
Apply grouped reduction function blockwise, returning a new BlockManager.
Parameters
----------
func : grouped reduction function
Returns
-------
BlockManager
"""
result_blocks: list[Block] = []
for blk in self.blocks:
if blk.is_object:
# split on object-dtype blocks bc some columns may raise
# while others do not.
for sb in blk._split():
applied = sb.apply(func)
result_blocks = extend_blocks(applied, result_blocks)
else:
applied = blk.apply(func)
result_blocks = extend_blocks(applied, result_blocks)
if len(result_blocks) == 0:
nrows = 0
else:
nrows = result_blocks[0].values.shape[-1]
index = default_index(nrows)
return type(self).from_blocks(result_blocks, [self.axes[0], index])
|
Apply grouped reduction function blockwise, returning a new BlockManager.
Parameters
----------
func : grouped reduction function
Returns
-------
BlockManager
|
python
|
pandas/core/internals/managers.py
| 1,597
|
[
"self",
"func"
] |
Self
| true
| 7
| 6.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
closeBeanFactory
|
@Override
protected final void closeBeanFactory() {
DefaultListableBeanFactory beanFactory = this.beanFactory;
if (beanFactory != null) {
beanFactory.setSerializationId(null);
this.beanFactory = null;
}
}
|
This implementation performs an actual refresh of this context's underlying
bean factory, shutting down the previous bean factory (if any) and
initializing a fresh bean factory for the next phase of the context's lifecycle.
|
java
|
spring-context/src/main/java/org/springframework/context/support/AbstractRefreshableApplicationContext.java
| 146
|
[] |
void
| true
| 2
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
_na_for_min_count
|
def _na_for_min_count(values: np.ndarray, axis: AxisInt | None) -> Scalar | np.ndarray:
"""
Return the missing value for `values`.
Parameters
----------
values : ndarray
axis : int or None
axis for the reduction, required if values.ndim > 1.
Returns
-------
result : scalar or ndarray
For 1-D values, returns a scalar of the correct missing type.
For 2-D values, returns a 1-D array where each element is missing.
"""
# we either return np.nan or pd.NaT
if values.dtype.kind in "iufcb":
values = values.astype("float64")
fill_value = na_value_for_dtype(values.dtype)
if values.ndim == 1:
return fill_value
elif axis is None:
return fill_value
else:
result_shape = values.shape[:axis] + values.shape[axis + 1 :]
return np.full(result_shape, fill_value, dtype=values.dtype)
|
Return the missing value for `values`.
Parameters
----------
values : ndarray
axis : int or None
axis for the reduction, required if values.ndim > 1.
Returns
-------
result : scalar or ndarray
For 1-D values, returns a scalar of the correct missing type.
For 2-D values, returns a 1-D array where each element is missing.
|
python
|
pandas/core/nanops.py
| 419
|
[
"values",
"axis"
] |
Scalar | np.ndarray
| true
| 5
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
tokenize_string
|
def tokenize_string(source: str) -> Iterator[tuple[int, str]]:
"""
Tokenize a Python source code string.
Parameters
----------
source : str
The Python source code string.
Returns
-------
tok_generator : Iterator[Tuple[int, str]]
An iterator yielding all tokens with only toknum and tokval (Tuple[int, str]).
"""
# GH 59285
# Escape characters, including backticks
source = "".join(
(
create_valid_python_identifier(substring[1:-1])
if is_backtick_quoted
else substring
)
for is_backtick_quoted, substring in _split_by_backtick(source)
)
line_reader = StringIO(source).readline
token_generator = tokenize.generate_tokens(line_reader)
for toknum, tokval, _, _, _ in token_generator:
yield toknum, tokval
|
Tokenize a Python source code string.
Parameters
----------
source : str
The Python source code string.
Returns
-------
tok_generator : Iterator[Tuple[int, str]]
An iterator yielding all tokens with only toknum and tokval (Tuple[int, str]).
|
python
|
pandas/core/computation/parsing.py
| 223
|
[
"source"
] |
Iterator[tuple[int, str]]
| true
| 3
| 6.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
optBoolean
|
public boolean optBoolean(String name, boolean fallback) {
Object object = opt(name);
Boolean result = JSON.toBoolean(object);
return result != null ? result : fallback;
}
|
Returns the value mapped by {@code name} if it exists and is a boolean or can be
coerced to a boolean. Returns {@code fallback} otherwise.
@param name the name of the property
@param fallback a fallback value
@return the value or {@code fallback}
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONObject.java
| 421
|
[
"name",
"fallback"
] | true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
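The coerce-with-fallback pattern above mirrors Android's `org.json` API, which this class shades. A sketch of the call side; note the shaded `JSONObject` lives in an internal spring-boot-cli package, so the import below is illustrative rather than a supported entry point:

```java
// Illustrative only: this shaded class is internal to spring-boot-cli.
import org.springframework.boot.cli.json.JSONObject;

public class OptBooleanDemo {
    public static void main(String[] args) throws Exception {
        JSONObject json = new JSONObject("{\"enabled\": \"true\", \"retries\": 3}");
        System.out.println(json.optBoolean("enabled", false)); // "true" coerces to true
        System.out.println(json.optBoolean("missing", true));  // absent key: fallback true
        System.out.println(json.optBoolean("retries", false)); // 3 is not coercible: false
    }
}
```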
createServiceLocatorException
|
protected Exception createServiceLocatorException(Constructor<Exception> exceptionConstructor, BeansException cause) {
Class<?>[] paramTypes = exceptionConstructor.getParameterTypes();
@Nullable Object[] args = new Object[paramTypes.length];
for (int i = 0; i < paramTypes.length; i++) {
if (String.class == paramTypes[i]) {
args[i] = cause.getMessage();
}
else if (paramTypes[i].isInstance(cause)) {
args[i] = cause;
}
}
return BeanUtils.instantiateClass(exceptionConstructor, args);
}
|
Create a service locator exception for the given cause.
Only called in case of a custom service locator exception.
<p>The default implementation can handle all variations of
message and exception arguments.
@param exceptionConstructor the constructor to use
@param cause the cause of the service lookup failure
@return the service locator exception to throw
@see #setServiceLocatorExceptionClass
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/ServiceLocatorFactoryBean.java
| 312
|
[
"exceptionConstructor",
"cause"
] |
Exception
| true
| 4
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
updateAutoCommitTimer
|
private void updateAutoCommitTimer(final long currentTimeMs) {
this.autoCommitState.ifPresent(t -> t.updateTimer(currentTimeMs));
}
|
Enqueue a request to fetch committed offsets, that will be sent on the next call to {@link #poll(long)}.
@param partitions Partitions to fetch offsets for.
@param deadlineMs Time until which the request should be retried if it fails
with expected retriable errors.
@return Future that will complete when a successful response is received, or the request
fails and cannot be retried. Note that the request is retried whenever it fails with
retriable expected error and the retry time hasn't expired.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java
| 577
|
[
"currentTimeMs"
] |
void
| true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
read
|
private int read(ByteBuffer dst, int pos) {
int remaining = dst.remaining();
int length = Math.min(this.bytes.length - pos, remaining);
if (this.maxReadSize > 0 && length > this.maxReadSize) {
length = this.maxReadSize;
}
dst.put(this.bytes, pos, length);
return length;
}
|
Create a new {@link ByteArrayDataBlock} backed by the given bytes.
@param bytes the bytes to use
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ByteArrayDataBlock.java
| 56
|
[
"dst",
"pos"
] | true
| 3
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
benchmark_gpu
|
def benchmark_gpu( # type: ignore[override]
self: Self,
_callable: Callable[[], Any],
estimation_iters: int = 5,
memory_warmup_iters: int = 100,
benchmark_iters: int = 100,
max_benchmark_duration: int = 25,
return_mode: str = "min",
grad_to_none: list[torch.Tensor] | None = None,
is_vetted_benchmarking: bool = False,
**kwargs: Any,
) -> float | list[float]:
"""Benchmark a GPU callable using a custom benchmarking implementation.
Arguments:
- _callable: The callable to benchmark.
Keyword Arguments:
- estimation_iters: Optionally, the number of iterations to run `_callable`
during runtime estimation.
- memory_warmup_iters: Optionally, the number of iterations to flush the L2
cache before starting benchmarking.
- benchmark_iters: Optionally, the number of iterations to run `_callable`
during the benchmarking.
- max_benchmark_duration: Optionally, the maximum duration of the benchmarking,
in milliseconds. An estimated duration is calculated based on the values
of `memory_warmup_iters` and `benchmark_iters`, along with the estimated
runtime of `_callable` and various other factors, and we then shrink
`benchmark_iters` to fit in the allotted maximum duration.
- return_mode: Return mode for benchmark results. Options are "min" (default),
"all" (returns all measurements).
- grad_to_none: Optionally, a list of tensors whose gradients should be cleared
before each benchmark iteration.
- is_vetted_benchmarking: in deterministic mode, we only allow
benchmarking in vetted cases.
- **kwargs: Additional kwargs that may be passed to the fallback.
Returns:
- If return_mode="min": The minimum runtime of `_callable`, in milliseconds.
- If return_mode="all": List of all runtime measurements, in milliseconds.
"""
if not is_vetted_benchmarking:
may_ban_benchmarking()
# we don't want any outside errors propagating into benchmarking
torch.cuda.synchronize()
# warmup `_callable` (and catches any failures in the process)
_callable()
torch.cuda.synchronize()
# see https://github.com/triton-lang/triton/pull/840 for why `dtype=torch.int`
buffer = torch.empty(self.L2_cache_size // 4, dtype=torch.int, device="cuda")
buffer.zero_()
# estimate the runtime of `_callable`
event_pairs = self.get_event_pairs(estimation_iters)
for start_event, end_event in event_pairs:
# Clear gradients before timing (matches triton.testing.do_bench)
if grad_to_none is not None:
for x in grad_to_none:
x.grad = None
buffer.zero_()
start_event.record()
_callable()
end_event.record()
torch.cuda.synchronize()
estimated_timing = self.get_event_pairs_min_timing(event_pairs)
# adjust `benchmark_iters` to fit in the maximum benchmarking duration
benchmark_iters = max(
min(benchmark_iters, int(max_benchmark_duration // estimated_timing)), 1
)
# do the memory warmup
for _ in range(memory_warmup_iters):
buffer.zero_()
# benchmark `_callable`
event_pairs = self.get_event_pairs(benchmark_iters)
for start_event, end_event in event_pairs:
# Clear gradients before timing (matches triton.testing.do_bench)
if grad_to_none is not None:
for x in grad_to_none:
x.grad = None
buffer.zero_()
start_event.record()
_callable()
end_event.record()
torch.cuda.synchronize()
# explicitly delete the buffer, sometimes helps memory
# footprint metrics in OSS Inductor performance benchmarks
del buffer
# Return based on the requested mode
if return_mode == "all":
# Get all timings from event pairs
all_timings = [
start_event.elapsed_time(end_event)
for start_event, end_event in event_pairs
]
return all_timings
elif return_mode == "min":
benchmarked_timing = self.get_event_pairs_min_timing(event_pairs)
# return the minimum of `estimated_timing` and `benchmarked_timing`,
# we just want the minimum timing overall so we might as well check both
return min(estimated_timing, benchmarked_timing)
else:
raise ValueError(
f"Unsupported return_mode: {return_mode}. Use 'min' or 'all'."
)
|
Benchmark a GPU callable using a custom benchmarking implementation.
Arguments:
- _callable: The callable to benchmark.
Keyword Arguments:
- estimation_iters: Optionally, the number of iterations to run `_callable`
during runtime estimation.
- memory_warmup_iters: Optionally, the number of iterations to flush the L2
cache before starting benchmarking.
- benchmark_iters: Optionally, the number of iterations to run `_callable`
during the benchmarking.
- max_benchmark_duration: Optionally, the maximum duration of the benchmarking,
in milliseconds. An estimated duration is calculated based on the values
of `memory_warmup_iters` and `benchmark_iters`, along with the estimated
runtime of `_callable` and various other factors, and we then shrink
`benchmark_iters` to fit in the allotted maximum duration.
- return_mode: Return mode for benchmark results. Options are "min" (default),
"all" (returns all measurements).
- grad_to_none: Optionally, a list of tensors whose gradients should be cleared
before each benchmark iteration.
- is_vetted_benchmarking: in deterministic mode, we only allow
benchmarking in vetted cases.
- **kwargs: Additional kwargs that may be passed to the fallback.
Returns:
- If return_mode="min": The minimum runtime of `_callable`, in milliseconds.
- If return_mode="all": List of all runtime measurements, in milliseconds.
|
python
|
torch/_inductor/runtime/benchmarking.py
| 331
|
[
"self",
"_callable",
"estimation_iters",
"memory_warmup_iters",
"benchmark_iters",
"max_benchmark_duration",
"return_mode",
"grad_to_none",
"is_vetted_benchmarking"
] |
float | list[float]
| true
| 12
| 7.84
|
pytorch/pytorch
| 96,034
|
google
| false
|
maybeValidatePositionForCurrentLeader
|
public synchronized boolean maybeValidatePositionForCurrentLeader(ApiVersions apiVersions,
TopicPartition tp,
Metadata.LeaderAndEpoch leaderAndEpoch) {
TopicPartitionState state = assignedStateOrNull(tp);
if (state == null) {
log.debug("Skipping validating position for partition {} which is not currently assigned.", tp);
return false;
}
if (leaderAndEpoch.leader.isPresent()) {
NodeApiVersions nodeApiVersions = apiVersions.get(leaderAndEpoch.leader.get().idString());
if (nodeApiVersions == null || hasUsableOffsetForLeaderEpochVersion(nodeApiVersions)) {
return state.maybeValidatePosition(leaderAndEpoch);
} else {
// If the broker does not support a newer version of OffsetsForLeaderEpoch, we skip validation
state.updatePositionLeaderNoValidation(leaderAndEpoch);
return false;
}
} else {
return state.maybeValidatePosition(leaderAndEpoch);
}
}
|
Enter the offset validation state if the leader for this partition is known to support a usable version of the
OffsetsForLeaderEpoch API. If the leader node does not support the API, simply complete the offset validation.
@param apiVersions supported API versions
@param tp topic partition to validate
@param leaderAndEpoch leader epoch of the topic partition
@return true if we enter the offset validation state
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
| 542
|
[
"apiVersions",
"tp",
"leaderAndEpoch"
] | true
| 5
| 7.76
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
of
|
@SuppressWarnings("unchecked")
static <T> BindResult<T> of(@Nullable T value) {
if (value == null) {
return (BindResult<T>) UNBOUND;
}
return new BindResult<>(value);
}
|
Return the object that was bound, or throw an exception to be created by the
provided supplier if no value has been bound.
@param <X> the type of the exception to be thrown
@param exceptionSupplier the supplier which will return the exception to be thrown
@return the present value
@throws X if there is no value present
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/BindResult.java
| 150
|
[
"value"
] | true
| 2
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
clearInFlightCorrelationId
|
private void clearInFlightCorrelationId() {
inFlightRequestCorrelationId = NO_INFLIGHT_REQUEST_CORRELATION_ID;
}
|
Returns the first inflight sequence for a given partition. This is the base sequence of an inflight batch with
the lowest sequence number.
@return the lowest inflight sequence if the transaction manager is tracking inflight requests for this partition.
If there are no inflight requests being tracked for this partition, this method will return
RecordBatch.NO_SEQUENCE.
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java
| 979
|
[] |
void
| true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
unmodifiableOrNull
|
private static <K extends @Nullable Object, V extends @Nullable Object>
@Nullable Entry<K, V> unmodifiableOrNull(@Nullable Entry<K, ? extends V> entry) {
return (entry == null) ? null : unmodifiableEntry(entry);
}
|
Returns an unmodifiable view of the specified navigable map. Query operations on the returned
map read through to the specified map, and attempts to modify the returned map, whether direct
or via its views, result in an {@code UnsupportedOperationException}.
<p>The returned navigable map will be serializable if the specified navigable map is
serializable.
<p>This method's signature will not permit you to convert a {@code NavigableMap<? extends K,
V>} to a {@code NavigableMap<K, V>}. If it permitted this, the returned map's {@code
comparator()} method might return a {@code Comparator<? extends K>}, which works only on a
particular subtype of {@code K}, but promise that it's a {@code Comparator<? super K>}, which
must work on any type of {@code K}.
@param map the navigable map for which an unmodifiable view is to be returned
@return an unmodifiable view of the specified navigable map
@since 12.0
|
java
|
android/guava/src/com/google/common/collect/Maps.java
| 3,306
|
[
"entry"
] | true
| 2
| 7.84
|
google/guava
| 51,352
|
javadoc
| false
|
|
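The helper above is a private piece of `Maps.unmodifiableNavigableMap`; the read-through behaviour its javadoc describes looks like this in use:

```java
import java.util.NavigableMap;
import java.util.TreeMap;
import com.google.common.collect.Maps;

public class UnmodifiableNavigableMapDemo {
    public static void main(String[] args) {
        NavigableMap<String, Integer> scores = new TreeMap<>();
        scores.put("alice", 3);
        NavigableMap<String, Integer> view = Maps.unmodifiableNavigableMap(scores);

        scores.put("bob", 5);                  // writes to the backing map show through
        System.out.println(view.firstEntry()); // alice=3
        view.put("carol", 7);                  // throws UnsupportedOperationException
    }
}
```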
_try_get_metadata_from_dynamo
|
def _try_get_metadata_from_dynamo(
mod: torch.nn.Module,
param_keys: KeysView[str],
full_args_num: int,
full_args_descs: list[DifferentiableAOTInput],
) -> tuple[Optional[list[torch._guards.Source]], list[int]]:
"""
Metadata is forwarded from Dynamo to AOTDispatch via special fields on GraphModule.
We first verify that `mod` does come from Dynamo, then we handle cases where
metadata might be missing.
Returns:
aot_autograd_arg_pos_to_source: used to dedup params and their guards
static_input_indices: used to identify static inputs for cudagraphs
"""
# Note [Assumption on Dynamo Metadata]
# This function assumes a graph module from dynamo provides `dynamo_compile_id`,
# _param_name_to_source, and every placeholder node has `_dynamo_source` attributes.
# When gm is modified (e.g., DDPOptimizer via split_module), metadata needs to
# be propagated in order to be recognized as a dynamo graph
if not (isinstance(mod, torch.fx.GraphModule) and "dynamo_compile_id" in mod.meta):
# graph was not captured by dynamo
return None, []
if not hasattr(mod, "_param_name_to_source"):
# is from export
static_input_indices = [
i
for i, node in enumerate(full_args_descs)
if isinstance(node, (ParamAOTInput, BufferAOTInput))
]
return None, static_input_indices
# We now know this came from dynamo, and (1) we care about guards,
# so setting up aot_autograd_arg_pos_to_source for downstream dedup guards
# can now be done safely. (2) Dynamo logic protects the 1:1 sizing below.
# Additionally, we mark static indices for cudagraphs.
param_name_to_source = mod._param_name_to_source
seen_sources = set()
aot_autograd_arg_pos_to_source = []
static_input_indices = []
# Collect the new inputs lifted by aotdispatch
for i, name in enumerate(param_keys):
assert name in param_name_to_source, f"{name} not found."
source = param_name_to_source[name]
assert source not in seen_sources, source
seen_sources.add(source)
aot_autograd_arg_pos_to_source.append(source)
static_input_indices.append(i)
# Collect the dynamo graph inputs
# TODO(mlazos): Revisit if this is still needed. With Dynamo install ID
# matched tensors back into the Fx graph, this might not be necessary.
for pos, node in enumerate(mod.graph.find_nodes(op="placeholder")):
assert hasattr(node, "_dynamo_source")
source = node._dynamo_source
# `source` specifies the source from user code. ddp optimizer may have
# intermediate values becoming submodule placeholders which does not
# have a source
assert source is None or source not in seen_sources, source
seen_sources.add(source)
aot_autograd_arg_pos_to_source.append(source)
source_name = source.name if source else str(source)
# input[i] in dynamo is now:
# input[i + len(extra_params)] in AOT,
# where extra_params are the params/buffers that dynamo baked into the
# OutputGraph
actual_pos = pos + len(param_keys)
if "tensor_dict" in node.meta and node.meta["tensor_dict"].get(
"_dynamo_static_input_type", None
):
static_inputs_log.debug(
"Adding static input pos %s for source %s", actual_pos, source_name
)
static_input_indices.append(actual_pos)
else:
static_inputs_log.debug(
"Non-static input pos %s for source %s", actual_pos, source_name
)
assert full_args_num == len(aot_autograd_arg_pos_to_source)
return aot_autograd_arg_pos_to_source, static_input_indices
|
Metadata is forwarded from Dynamo to AOTDispatch via special fields on GraphModule.
We first verify that `mod` does come from Dynamo, then we handle cases where
metadata might be missing.
Returns:
aot_autograd_arg_pos_to_source: used to dedup params and their guards
static_input_indices: used to identify static inputs for cudagraphs
|
python
|
torch/_functorch/_aot_autograd/frontend_utils.py
| 111
|
[
"mod",
"param_keys",
"full_args_num",
"full_args_descs"
] |
tuple[Optional[list[torch._guards.Source]], list[int]]
| true
| 11
| 7.52
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
quantile
|
@Override
public double quantile(double q) {
if (q < 0 || q > 1) {
throw new IllegalArgumentException("q should be in [0,1], got " + q);
}
AVLGroupTree values = summary;
if (values.isEmpty()) {
// no centroids means no data, no way to get a quantile
return Double.NaN;
} else if (values.size() == 1) {
// with one data point, all quantiles lead to Rome
return values.iterator().next().mean();
}
// if values were stored in a sorted array, index would be the offset we are interested in
final double index = q * count;
// deal with min and max as a special case singletons
if (index <= 0) {
return min;
}
if (index >= count) {
return max;
}
int currentNode = values.first();
long currentWeight = values.count(currentNode);
// Total mass to the left of the center of the current node.
double weightSoFar = currentWeight / 2.0;
if (index <= weightSoFar && weightSoFar > 1) {
// Interpolate between min and first mean, if there's no singleton on the left boundary.
return weightedAverage(min, weightSoFar - index, values.mean(currentNode), index);
}
for (int i = 0; i < values.size() - 1; i++) {
int nextNode = values.next(currentNode);
long nextWeight = values.count(nextNode);
// this is the mass between current center and next center
double dw = (currentWeight + nextWeight) / 2.0;
if (index < weightSoFar + dw) {
// index is bracketed between centroids i and i+1
assert dw >= 1;
double w1 = index - weightSoFar;
double w2 = weightSoFar + dw - index;
return weightedAverage(values.mean(currentNode), w2, values.mean(nextNode), w1);
}
weightSoFar += dw;
currentNode = nextNode;
currentWeight = nextWeight;
}
// Index is close or after the last centroid.
assert currentWeight >= 1;
assert index - weightSoFar < count - currentWeight / 2.0;
assert count - weightSoFar >= 0.5;
// Interpolate between the last mean and the max.
double w1 = index - weightSoFar;
double w2 = currentWeight / 2.0 - w1;
return weightedAverage(values.mean(currentNode), w2, max, w1);
}
|
@param q The quantile desired. Can be in the range [0,1].
@return The minimum value x such that we estimate the proportion of samples that are ≤ x to be q.
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/AVLTreeDigest.java
| 294
|
[
"q"
] | true
| 11
| 8.32
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
isEmpty
|
function isEmpty(value) {
if (value == null) {
return true;
}
if (isArrayLike(value) &&
(isArray(value) || typeof value == 'string' || typeof value.splice == 'function' ||
isBuffer(value) || isTypedArray(value) || isArguments(value))) {
return !value.length;
}
var tag = getTag(value);
if (tag == mapTag || tag == setTag) {
return !value.size;
}
if (isPrototype(value)) {
return !baseKeys(value).length;
}
for (var key in value) {
if (hasOwnProperty.call(value, key)) {
return false;
}
}
return true;
}
|
Checks if `value` is an empty object, collection, map, or set.
Objects are considered empty if they have no own enumerable string keyed
properties.
Array-like values such as `arguments` objects, arrays, buffers, strings, or
jQuery-like collections are considered empty if they have a `length` of `0`.
Similarly, maps and sets are considered empty if they have a `size` of `0`.
@static
@memberOf _
@since 0.1.0
@category Lang
@param {*} value The value to check.
@returns {boolean} Returns `true` if `value` is empty, else `false`.
@example
_.isEmpty(null);
// => true
_.isEmpty(true);
// => true
_.isEmpty(1);
// => true
_.isEmpty([1, 2, 3]);
// => false
_.isEmpty({ 'a': 1 });
// => false
|
javascript
|
lodash.js
| 11,586
|
[
"value"
] | false
| 13
| 7.68
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
drainUninterruptibly
|
@CanIgnoreReturnValue
@J2ktIncompatible
@GwtIncompatible // BlockingQueue
@SuppressWarnings("GoodTime") // should accept a java.time.Duration
public static <E> int drainUninterruptibly(
BlockingQueue<E> q,
Collection<? super E> buffer,
int numElements,
long timeout,
TimeUnit unit) {
Preconditions.checkNotNull(buffer);
long deadline = System.nanoTime() + unit.toNanos(timeout);
int added = 0;
boolean interrupted = false;
try {
while (added < numElements) {
// we could rely solely on #poll, but #drainTo might be more efficient when there are
// multiple elements already available (e.g. LinkedBlockingQueue#drainTo locks only once)
added += q.drainTo(buffer, numElements - added);
if (added < numElements) { // not enough elements immediately available; will have to poll
E e; // written exactly once, by a successful (uninterrupted) invocation of #poll
while (true) {
try {
e = q.poll(deadline - System.nanoTime(), NANOSECONDS);
break;
} catch (InterruptedException ex) {
interrupted = true; // note interruption and retry
}
}
if (e == null) {
break; // we already waited enough, and there are no more elements in sight
}
buffer.add(e);
added++;
}
}
} finally {
if (interrupted) {
Thread.currentThread().interrupt();
}
}
return added;
}
|
Drains the queue as {@linkplain #drain(BlockingQueue, Collection, int, long, TimeUnit)}, but
with a different behavior in case it is interrupted while waiting. In that case, the operation
will continue as usual, and in the end the thread's interruption status will be set (no {@code
InterruptedException} is thrown).
@param q the blocking queue to be drained
@param buffer where to add the transferred elements
@param numElements the number of elements to be waited for
@param timeout how long to wait before giving up, in units of {@code unit}
@param unit a {@code TimeUnit} determining how to interpret the timeout parameter
@return the number of elements transferred
|
java
|
android/guava/src/com/google/common/collect/Queues.java
| 384
|
[
"q",
"buffer",
"numElements",
"timeout",
"unit"
] | true
| 7
| 7.76
|
google/guava
| 51,352
|
javadoc
| false
|
|
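A minimal sketch of draining with deferred interruption, as documented above: the call waits out the timeout for the missing elements and restores the thread's interrupt flag instead of throwing `InterruptedException`:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import com.google.common.collect.Queues;

public class DrainDemo {
    public static void main(String[] args) {
        BlockingQueue<String> queue = new LinkedBlockingQueue<>(List.of("a", "b"));
        List<String> buffer = new ArrayList<>();
        // Asks for 5 elements with a 1-second budget; only 2 are available,
        // so after the wait it returns what it got.
        int drained = Queues.drainUninterruptibly(queue, buffer, 5, 1, TimeUnit.SECONDS);
        System.out.println(drained + " -> " + buffer); // 2 -> [a, b]
    }
}
```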
consumingIterator
|
public static <T extends @Nullable Object> Iterator<T> consumingIterator(Iterator<T> iterator) {
checkNotNull(iterator);
return new UnmodifiableIterator<T>() {
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
@ParametricNullness
public T next() {
T next = iterator.next();
iterator.remove();
return next;
}
@Override
public String toString() {
return "Iterators.consumingIterator(...)";
}
};
}
|
Returns a view of the supplied {@code iterator} that removes each element from the supplied
{@code iterator} as it is returned.
<p>The provided iterator must support {@link Iterator#remove()} or else the returned iterator
will fail on the first call to {@code next}. The returned {@link Iterator} is also not
thread-safe.
@param iterator the iterator to remove and return elements from
@return an iterator that removes and returns elements from the supplied iterator
@since 2.0
|
java
|
android/guava/src/com/google/common/collect/Iterators.java
| 997
|
[
"iterator"
] | true
| 1
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
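A small sketch of the remove-on-next view described above; the backing collection's iterator must support `remove()`:

```java
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import com.google.common.collect.Iterators;

public class ConsumingIteratorDemo {
    public static void main(String[] args) {
        List<String> tasks = new ArrayList<>(List.of("a", "b", "c"));
        Iterator<String> it = Iterators.consumingIterator(tasks.iterator());
        System.out.println(it.next()); // a
        System.out.println(tasks);     // [b, c] (each next() also removes)
    }
}
```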
calendarToZonedDateTime
|
private static ZonedDateTime calendarToZonedDateTime(Calendar source) {
if (source instanceof GregorianCalendar gc) {
return gc.toZonedDateTime();
}
else {
return Instant.ofEpochMilli(source.getTimeInMillis()).atZone(source.getTimeZone().toZoneId());
}
}
|
Install the converters into the converter registry.
@param registry the converter registry
|
java
|
spring-context/src/main/java/org/springframework/format/datetime/standard/DateTimeConverters.java
| 79
|
[
"source"
] |
ZonedDateTime
| true
| 2
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
addMetricIfAbsent
|
public KafkaMetric addMetricIfAbsent(MetricName metricName, MetricConfig config, MetricValueProvider<?> metricValueProvider) {
KafkaMetric metric = new KafkaMetric(new Object(),
Objects.requireNonNull(metricName),
Objects.requireNonNull(metricValueProvider),
config == null ? this.config : config,
time);
KafkaMetric existingMetric = registerMetric(metric);
return existingMetric == null ? metric : existingMetric;
}
|
Create or get an existing metric to monitor an object that implements MetricValueProvider.
This metric won't be associated with any sensor. This is a way to expose existing values as metrics.
This method takes care of synchronisation while updating/accessing metrics by concurrent threads.
@param metricName The name of the metric
@param config The metric configuration to use, or {@code null} to fall back to this registry's default config
@param metricValueProvider The metric value provider associated with this metric
@return Existing KafkaMetric if already registered or else a newly created one
|
java
|
clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java
| 531
|
[
"metricName",
"config",
"metricValueProvider"
] |
KafkaMetric
| true
| 3
| 7.76
|
apache/kafka
| 31,560
|
javadoc
| false
|
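A minimal sketch of how addMetricIfAbsent might be used, assuming the public Metrics and Measurable APIs from kafka-clients (the metric name and the constant value are illustrative):

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.metrics.Measurable;
import org.apache.kafka.common.metrics.Metrics;

public class MetricExample {
    public static void main(String[] args) {
        try (Metrics metrics = new Metrics()) {
            MetricName name = metrics.metricName("queue-size", "example-group", "Current queue depth");
            // Expose an existing value as a metric; a second call with the same
            // name would return the already-registered instance.
            Measurable provider = (config, nowMs) -> 42.0;
            KafkaMetric metric = metrics.addMetricIfAbsent(name, null, provider);
            System.out.println(metric.metricValue());
        }
    }
}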
calculateNextColorValue
|
function calculateNextColorValue(
srcValue: ColorValue,
targetValue: ColorValue,
changeRate: number,
): ColorValue {
const nextColor: (string | number)[] = [srcValue.value[0]];
// Skip the first element since it represents the type.
for (let i = 1; i < targetValue.value.length; i++) {
const srcChannel = srcValue.value[i] as number;
const targetChannel = targetValue.value[i] as number;
const delta = calculateValueDelta(srcChannel, targetChannel, changeRate);
nextColor.push(Math.round(srcChannel + delta));
}
return {
type: 'color',
value: nextColor as typeof srcValue.value,
};
}
|
Calculate the next `CssPropertyValue` based on the source and a target one.
@param srcValue The source value
@param targetValue The target values (it's either the final or the initial value)
@param changeRate The change rate relative to the target (i.e. 1 = target value; 0 = source value)
@returns The newly generated value
|
typescript
|
adev/src/app/features/home/animation/calculations/calc-css-value.ts
| 98
|
[
"srcValue",
"targetValue",
"changeRate"
] | true
| 2
| 8.08
|
angular/angular
| 99,544
|
jsdoc
| false
|
|
sum
|
def sum(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the sum of the array elements over the given axis.
Masked elements are set to 0 internally.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.ndarray.sum : corresponding function for ndarrays
numpy.sum : equivalent function
Examples
--------
>>> import numpy as np
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> x
masked_array(
data=[[1, --, 3],
[--, 5, --],
[7, --, 9]],
mask=[[False, True, False],
[ True, False, True],
[False, True, False]],
fill_value=999999)
>>> x.sum()
25
>>> x.sum(axis=1)
masked_array(data=[4, 5, 16],
mask=[False, False, False],
fill_value=999999)
>>> x.sum(axis=0)
masked_array(data=[8, 5, 12],
mask=[False, False, False],
fill_value=999999)
>>> print(type(x.sum(axis=0, dtype=np.int64)[0]))
<class 'numpy.int64'>
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
_mask = self._mask
newmask = _check_mask_axis(_mask, axis, **kwargs)
# No explicit output
if out is None:
result = self.filled(0).sum(axis, dtype=dtype, **kwargs)
rndim = getattr(result, 'ndim', 0)
if rndim:
result = result.view(type(self))
result.__setmask__(newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(0).sum(axis, dtype=dtype, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getmask(out)
if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
|
Return the sum of the array elements over the given axis.
Masked elements are set to 0 internally.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.ndarray.sum : corresponding function for ndarrays
numpy.sum : equivalent function
Examples
--------
>>> import numpy as np
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> x
masked_array(
data=[[1, --, 3],
[--, 5, --],
[7, --, 9]],
mask=[[False, True, False],
[ True, False, True],
[False, True, False]],
fill_value=999999)
>>> x.sum()
25
>>> x.sum(axis=1)
masked_array(data=[4, 5, 16],
mask=[False, False, False],
fill_value=999999)
>>> x.sum(axis=0)
masked_array(data=[8, 5, 12],
mask=[False, False, False],
fill_value=999999)
>>> print(type(x.sum(axis=0, dtype=np.int64)[0]))
<class 'numpy.int64'>
|
python
|
numpy/ma/core.py
| 5,196
|
[
"self",
"axis",
"dtype",
"out",
"keepdims"
] | false
| 7
| 6.08
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
open
|
public static ZipContent open(Path path) throws IOException {
return open(new Source(path.toAbsolutePath(), null));
}
|
Open {@link ZipContent} from the specified path. The resulting {@link ZipContent}
<em>must</em> be {@link #close() closed} by the caller.
@param path the zip path
@return a {@link ZipContent} instance
@throws IOException on I/O error
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipContent.java
| 360
|
[
"path"
] |
ZipContent
| true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
get_waiter
|
def get_waiter(
self,
waiter_name: str,
parameters: dict[str, str] | None = None,
config_overrides: dict[str, Any] | None = None,
deferrable: bool = False,
client=None,
) -> Waiter:
"""
Get a waiter by name.
First checks if there is a custom waiter with the provided waiter_name and
uses that if it exists, otherwise it will check the service client for a
waiter that matches the name and pass that through.
If `deferrable` is True, the waiter will be an AIOWaiter, generated from the
client that is passed as a parameter. If `deferrable` is True, `client` must be
provided.
:param waiter_name: The name of the waiter. The name should exactly match the
name of the key in the waiter model file (typically this is CamelCase).
:param parameters: will scan the waiter config for the keys of that dict,
and replace them with the corresponding value. If a custom waiter has
such keys to be expanded, they need to be provided here.
Note: cannot be used if parameters are included in config_overrides
:param config_overrides: will update values of provided keys in the waiter's
config. Only specified keys will be updated.
:param deferrable: If True, the waiter is going to be an async custom waiter.
An async client must be provided in that case.
:param client: The client to use for the waiter's operations
"""
from airflow.providers.amazon.aws.waiters.base_waiter import BaseBotoWaiter
if deferrable and not client:
raise ValueError("client must be provided for a deferrable waiter.")
if parameters is not None and config_overrides is not None and "acceptors" in config_overrides:
raise ValueError('parameters must be None when "acceptors" is included in config_overrides')
# Currently, the custom waiter doesn't work with resource_type, only client_type is supported.
client = client or self._client
if self.waiter_path and (waiter_name in self._list_custom_waiters()):
# Technically if waiter_name is in custom_waiters then self.waiter_path must
# exist but MyPy doesn't like the fact that self.waiter_path could be None.
with open(self.waiter_path) as config_file:
config: dict = json.loads(config_file.read())
if config_overrides is not None:
config["waiters"][waiter_name].update(config_overrides)
config = self._apply_parameters_value(config, waiter_name, parameters)
return BaseBotoWaiter(client=client, model_config=config, deferrable=deferrable).waiter(
waiter_name
)
# If there is no custom waiter found for the provided name,
# then try checking the service's official waiters.
return client.get_waiter(waiter_name)
|
Get a waiter by name.
First checks if there is a custom waiter with the provided waiter_name and
uses that if it exists, otherwise it will check the service client for a
waiter that matches the name and pass that through.
If `deferrable` is True, the waiter will be an AIOWaiter, generated from the
client that is passed as a parameter. If `deferrable` is True, `client` must be
provided.
:param waiter_name: The name of the waiter. The name should exactly match the
name of the key in the waiter model file (typically this is CamelCase).
:param parameters: will scan the waiter config for the keys of that dict,
and replace them with the corresponding value. If a custom waiter has
such keys to be expanded, they need to be provided here.
Note: cannot be used if parameters are included in config_overrides
:param config_overrides: will update values of provided keys in the waiter's
config. Only specified keys will be updated.
:param deferrable: If True, the waiter is going to be an async custom waiter.
An async client must be provided in that case.
:param client: The client to use for the waiter's operations
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/base_aws.py
| 959
|
[
"self",
"waiter_name",
"parameters",
"config_overrides",
"deferrable",
"client"
] |
Waiter
| true
| 10
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
handshake
|
@Override
public void handshake() throws IOException {
if (state == State.NOT_INITIALIZED) {
try {
startHandshake();
} catch (SSLException e) {
maybeProcessHandshakeFailure(e, false, null);
}
}
if (ready())
throw renegotiationException();
if (state == State.CLOSING)
throw closingException();
int read = 0;
boolean readable = key.isReadable();
try {
// Read any available bytes before attempting any writes to ensure that handshake failures
// reported by the peer are processed even if writes fail (since peer closes connection
// if handshake fails)
if (readable)
read = readFromSocketChannel();
doHandshake();
if (ready())
updateBytesBuffered(true);
} catch (SSLException e) {
maybeProcessHandshakeFailure(e, true, null);
} catch (IOException e) {
maybeThrowSslAuthenticationException();
// This exception could be due to a write. If there is data available to unwrap in the buffer, or data available
// in the socket channel to read and unwrap, process the data so that any SSL handshake exceptions are reported.
try {
do {
log.trace("Process any available bytes from peer, netReadBuffer {} netWriterBuffer {} handshakeStatus {} readable? {}",
netReadBuffer, netWriteBuffer, handshakeStatus, readable);
handshakeWrapAfterFailure(false);
handshakeUnwrap(false, true);
} while (readable && readFromSocketChannel() > 0);
} catch (SSLException e1) {
maybeProcessHandshakeFailure(e1, false, e);
}
// If we get here, this is not a handshake failure, throw the original IOException
throw e;
}
// Read from socket failed, so throw any pending handshake exception or EOF exception.
if (read == -1) {
maybeThrowSslAuthenticationException();
throw new EOFException("EOF during handshake, handshake status is " + handshakeStatus);
}
}
|
Performs the SSL handshake in a non-blocking fashion.
Before application data (Kafka protocols) can be sent, the client and the Kafka broker must
perform an SSL handshake.
During the handshake SSLEngine generates encrypted data that will be transported over socketChannel.
Each SSLEngine operation generates SSLEngineResult , of which SSLEngineResult.handshakeStatus field is used to
determine what operation needs to occur to move handshake along.
A typical handshake might look like this.
+-------------+----------------------------------+-------------+
| client | SSL/TLS message | HSStatus |
+-------------+----------------------------------+-------------+
| wrap() | ClientHello | NEED_UNWRAP |
| unwrap() | ServerHello/Cert/ServerHelloDone | NEED_WRAP |
| wrap() | ClientKeyExchange | NEED_WRAP |
| wrap() | ChangeCipherSpec | NEED_WRAP |
| wrap() | Finished | NEED_UNWRAP |
| unwrap() | ChangeCipherSpec | NEED_UNWRAP |
| unwrap() | Finished | FINISHED |
+-------------+----------------------------------+-------------+
@throws IOException if read/write fails
@throws SslAuthenticationException if handshake fails with an {@link SSLException}
|
java
|
clients/src/main/java/org/apache/kafka/common/network/SslTransportLayer.java
| 279
|
[] |
void
| true
| 12
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
writeBytesTo
|
@CanIgnoreReturnValue
public int writeBytesTo(byte[] dest, int offset, int maxLength) {
maxLength = min(maxLength, bits() / 8);
Preconditions.checkPositionIndexes(offset, offset + maxLength, dest.length);
writeBytesToImpl(dest, offset, maxLength);
return maxLength;
}
|
Copies bytes from this hash code into {@code dest}.
@param dest the byte array into which the hash code will be written
@param offset the start offset in the data
@param maxLength the maximum number of bytes to write
@return the number of bytes written to {@code dest}
@throws IndexOutOfBoundsException if there is not enough room in {@code dest}
|
java
|
android/guava/src/com/google/common/hash/HashCode.java
| 83
|
[
"dest",
"offset",
"maxLength"
] | true
| 1
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
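A short usage sketch for writeBytesTo (the hash input and buffer size are illustrative):

import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;
import java.nio.charset.StandardCharsets;

public class WriteBytesExample {
    public static void main(String[] args) {
        HashCode hash = Hashing.sha256().hashString("hello", StandardCharsets.UTF_8);
        byte[] dest = new byte[8];
        // Copies at most min(maxLength, bits()/8) bytes; here the 32-byte
        // SHA-256 hash is truncated to the 8-byte destination.
        int written = hash.writeBytesTo(dest, 0, dest.length);
        System.out.println(written); // prints 8
    }
}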
construct_from_string
|
def construct_from_string(cls, string: str_type) -> DatetimeTZDtype:
"""
Construct a DatetimeTZDtype from a string.
Parameters
----------
string : str
The string alias for this DatetimeTZDtype.
Should be formatted like ``datetime64[ns, <tz>]``,
where ``<tz>`` is the timezone name.
Examples
--------
>>> DatetimeTZDtype.construct_from_string("datetime64[ns, UTC]")
datetime64[ns, UTC]
"""
if not isinstance(string, str):
raise TypeError(
f"'construct_from_string' expects a string, got {type(string)}"
)
msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'"
match = cls._match.match(string)
if match:
d = match.groupdict()
try:
unit = cast("TimeUnit", d["unit"])
return cls(unit=unit, tz=d["tz"])
except (KeyError, TypeError, ValueError) as err:
# KeyError if maybe_get_tz tries and fails to get a
# zoneinfo timezone (actually zoneinfo.ZoneInfoNotFoundError).
# TypeError if we pass a nonsense tz;
# ValueError if we pass a unit other than "ns"
raise TypeError(msg) from err
raise TypeError(msg)
|
Construct a DatetimeTZDtype from a string.
Parameters
----------
string : str
The string alias for this DatetimeTZDtype.
Should be formatted like ``datetime64[ns, <tz>]``,
where ``<tz>`` is the timezone name.
Examples
--------
>>> DatetimeTZDtype.construct_from_string("datetime64[ns, UTC]")
datetime64[ns, UTC]
|
python
|
pandas/core/dtypes/dtypes.py
| 872
|
[
"cls",
"string"
] |
DatetimeTZDtype
| true
| 3
| 8.16
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
_list_keys_async
|
async def _list_keys_async(
self,
client: AioBaseClient,
bucket_name: str | None = None,
prefix: str | None = None,
delimiter: str | None = None,
page_size: int | None = None,
max_items: int | None = None,
) -> list[str]:
"""
List keys in a bucket under prefix and not containing delimiter.
:param bucket_name: the name of the bucket
:param prefix: a key prefix
:param delimiter: the delimiter marks key hierarchy.
:param page_size: pagination size
:param max_items: maximum items to return
:return: a list of matched keys
"""
prefix = prefix or ""
delimiter = delimiter or ""
config = {
"PageSize": page_size,
"MaxItems": max_items,
}
paginator = client.get_paginator("list_objects_v2")
params = {
"Bucket": bucket_name,
"Prefix": prefix,
"Delimiter": delimiter,
"PaginationConfig": config,
}
if self._requester_pays:
params["RequestPayer"] = "requester"
response = paginator.paginate(**params)
keys = []
async for page in response:
if "Contents" in page:
for k in page["Contents"]:
keys.append(k["Key"])
return keys
|
List keys in a bucket under prefix and not containing delimiter.
:param bucket_name: the name of the bucket
:param prefix: a key prefix
:param delimiter: the delimiter marks key hierarchy.
:param page_size: pagination size
:param max_items: maximum items to return
:return: a list of matched keys
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/s3.py
| 664
|
[
"self",
"client",
"bucket_name",
"prefix",
"delimiter",
"page_size",
"max_items"
] |
list[str]
| true
| 7
| 8.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
of
|
static Option of(String name, String valueDescription, String description) {
return new Option(name, valueDescription, description, false);
}
|
Factory method to create a value option.
@param name the name of the option
@param valueDescription a description of the expected value
@param description a description of the option
@return a new {@link Option} instance
|
java
|
loader/spring-boot-jarmode-tools/src/main/java/org/springframework/boot/jarmode/tools/Command.java
| 369
|
[
"name",
"valueDescription",
"description"
] |
Option
| true
| 1
| 6.48
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_register_lowering
|
def _register_lowering(
aten_fn,
decomp_fn: Callable[..., Any],
broadcast: bool,
type_promotion_kind: Optional[ELEMENTWISE_TYPE_PROMOTION_KIND],
convert_input_to_bool: bool,
lowering_dict: dict[Union[Callable[..., Any], str], Callable[..., Any]],
):
"""
Add a lowering to lowerings dict
Arguments:
aten_fn: torch.ops.aten.* fn we are lowering
decomp_fn: alternate implementation on our IR
broadcast: True to apply broadcasting to tensor inputs
type_promotion_kind: kind of type promotion applied to tensor inputs, `None` means no type promotion
convert_input_to_bool: some logical ops require inputs are converted to bool
"""
@functools.wraps(decomp_fn)
def wrapped(*args, **kwargs):
args: list[Any] = list(args)
kwargs: dict[str, Any] = dict(kwargs)
unpacked = False
# TODO maybe we need to use pytrees here
if len(args) == 1 and isinstance(args[0], (list, tuple)):
unpacked = True
args = list(args[0])
if not all(
(fn in fallbacks or in_namespace(fn, "_c10d_functional")) for fn in aten_fn
):
# explicitly assert for "out=" ops for better error messages
assert not any(x == "out" for x in kwargs), "out= ops aren't yet supported"
args, kwargs = transform_args(
args, kwargs, broadcast, type_promotion_kind, convert_input_to_bool
)
if unpacked:
args = [args]
out = decomp_fn(*args, **kwargs)
validate_ir(out)
return out
aten_fn = get_overloads(aten_fn)
lowering_dict.update(dict.fromkeys(aten_fn, wrapped))
return wrapped
|
Add a lowering to lowerings dict
Arguments:
aten_fn: torch.ops.aten.* fn we are lowering
decomp_fn: alternate implementation on our IR
broadcast: True to apply broadcasting to tensor inputs
type_promotion_kind: kind of type promotion applied to tensor inputs, `None` means no type promotion
convert_input_to_bool: some logical ops require inputs are converted to bool
|
python
|
torch/_inductor/lowering.py
| 474
|
[
"aten_fn",
"decomp_fn",
"broadcast",
"type_promotion_kind",
"convert_input_to_bool",
"lowering_dict"
] | true
| 6
| 6.08
|
pytorch/pytorch
| 96,034
|
google
| false
|
|
_derive_colors
|
def _derive_colors(
*,
color: Color | Collection[Color] | None,
colormap: str | Colormap | None,
color_type: str,
num_colors: int,
) -> list[Color]:
"""
Derive colors from either `colormap`, `color_type` or `color` inputs.
Get a list of colors either from `colormap`, or from `color`,
or from `color_type` (if both `colormap` and `color` are None).
Parameters
----------
color : str or sequence, optional
Color(s) to be used for deriving sequence of colors.
Can either be a single color (single color string, or sequence of floats
representing a single color), or a sequence of colors.
colormap : :py:class:`matplotlib.colors.Colormap`, optional
Matplotlib colormap.
When provided, the resulting colors will be derived from the colormap.
color_type : {"default", "random"}, optional
Type of colors to derive. Used if provided `color` and `colormap` are None.
Ignored if either `color` or `colormap` is not None.
num_colors : int
Number of colors to be extracted.
Returns
-------
list
List of colors extracted.
Warns
-----
UserWarning
If both `colormap` and `color` are provided.
Parameter `color` will override.
"""
if color is None and colormap is not None:
return _get_colors_from_colormap(colormap, num_colors=num_colors)
elif color is not None:
if colormap is not None:
warnings.warn(
"'color' and 'colormap' cannot be used simultaneously. Using 'color'",
stacklevel=find_stack_level(),
)
return _get_colors_from_color(color)
else:
return _get_colors_from_color_type(color_type, num_colors=num_colors)
|
Derive colors from either `colormap`, `color_type` or `color` inputs.
Get a list of colors either from `colormap`, or from `color`,
or from `color_type` (if both `colormap` and `color` are None).
Parameters
----------
color : str or sequence, optional
Color(s) to be used for deriving sequence of colors.
Can either be a single color (single color string, or sequence of floats
representing a single color), or a sequence of colors.
colormap : :py:class:`matplotlib.colors.Colormap`, optional
Matplotlib colormap.
When provided, the resulting colors will be derived from the colormap.
color_type : {"default", "random"}, optional
Type of colors to derive. Used if provided `color` and `colormap` are None.
Ignored if either `color` or `colormap` is not None.
num_colors : int
Number of colors to be extracted.
Returns
-------
list
List of colors extracted.
Warns
-----
UserWarning
If both `colormap` and `color` are provided.
Parameter `color` will override.
|
python
|
pandas/plotting/_matplotlib/style.py
| 111
|
[
"color",
"colormap",
"color_type",
"num_colors"
] |
list[Color]
| true
| 6
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
createValues
|
@SuppressWarnings("unchecked")
@Override
ImmutableCollection<V> createValues() {
return (ImmutableList<V>) new KeysOrValuesAsList(alternatingKeysAndValues, 1, size);
}
|
Returns the values of this map as an {@link ImmutableList} view over the alternating
keys-and-values array, reading the entries stored at the odd indices.
|
java
|
android/guava/src/com/google/common/collect/RegularImmutableMap.java
| 568
|
[] | true
| 1
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
compare
|
public static int compare(byte a, byte b) {
return toUnsignedInt(a) - toUnsignedInt(b);
}
|
Compares the two specified {@code byte} values, treating them as unsigned values between 0 and
255 inclusive. For example, {@code (byte) -127} is considered greater than {@code (byte) 127}
because it is seen as having the value of positive {@code 129}.
@param a the first {@code byte} to compare
@param b the second {@code byte} to compare
@return a negative value if {@code a} is less than {@code b}; a positive value if {@code a} is
greater than {@code b}; or zero if they are equal
|
java
|
android/guava/src/com/google/common/primitives/UnsignedBytes.java
| 130
|
[
"a",
"b"
] | true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
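A brief sketch showing the unsigned ordering (the values are illustrative):

import com.google.common.primitives.UnsignedBytes;

public class UnsignedCompareExample {
    public static void main(String[] args) {
        // As a signed byte, -1 < 1; as an unsigned byte, 0xFF (255) > 0x01.
        System.out.println(UnsignedBytes.compare((byte) -1, (byte) 1) > 0); // prints true
        System.out.println(UnsignedBytes.compare((byte) 1, (byte) 1));      // prints 0
    }
}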
contains
|
@Override
public boolean contains(@Nullable Class<?> exClass) {
if (super.contains(exClass)) {
return true;
}
if (this.relatedCauses != null) {
for (Throwable relatedCause : this.relatedCauses) {
if (relatedCause instanceof NestedRuntimeException nested && nested.contains(exClass)) {
return true;
}
}
}
return false;
}
|
Checks whether this exception, or any of its related causes, contains an exception of the given class.
@param exClass the exception class to look for
@return whether a matching exception is present in the cause chain
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/BeanCreationException.java
| 195
|
[
"exClass"
] | true
| 5
| 7.04
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
isAssignable
|
public static boolean isAssignable(Class<?>[] classArray, Class<?>[] toClassArray, final boolean autoboxing) {
if (!ArrayUtils.isSameLength(classArray, toClassArray)) {
return false;
}
classArray = ArrayUtils.nullToEmpty(classArray);
toClassArray = ArrayUtils.nullToEmpty(toClassArray);
for (int i = 0; i < classArray.length; i++) {
if (!isAssignable(classArray[i], toClassArray[i], autoboxing)) {
return false;
}
}
return true;
}
|
Tests whether an array of Classes can be assigned to another array of Classes.
<p>
This method calls {@link #isAssignable(Class, Class) isAssignable} for each Class pair in the input arrays. It can be
used to check if a set of arguments (the first parameter) are suitably compatible with a set of method parameter
types (the second parameter).
</p>
<p>
Unlike the {@link Class#isAssignableFrom(java.lang.Class)} method, this method takes into account widenings of
primitive classes and {@code null}s.
</p>
<p>
Primitive widenings allow an int to be assigned to a {@code long}, {@code float} or {@code double}. This method
returns the correct result for these cases.
</p>
<p>
{@code null} may be assigned to any reference type. This method will return {@code true} if {@code null} is passed in
and the toClass is non-primitive.
</p>
<p>
Specifically, this method tests whether the type represented by the specified {@link Class} parameter can be
converted to the type represented by this {@link Class} object via an identity conversion widening primitive or
widening reference conversion. See <em><a href="https://docs.oracle.com/javase/specs/">The Java Language
Specification</a></em>, sections 5.1.1, 5.1.2 and 5.1.4 for details.
</p>
@param classArray the array of Classes to check, may be {@code null}
@param toClassArray the array of Classes to try to assign into, may be {@code null}
@param autoboxing whether to use implicit autoboxing/unboxing between primitives and wrappers
@return {@code true} if assignment possible
|
java
|
src/main/java/org/apache/commons/lang3/ClassUtils.java
| 1,460
|
[
"classArray",
"toClassArray",
"autoboxing"
] | true
| 4
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
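A minimal sketch of the array form, assuming the per-pair isAssignable semantics described above (the types are chosen for illustration):

import org.apache.commons.lang3.ClassUtils;

public class AssignableExample {
    public static void main(String[] args) {
        Class<?>[] argTypes = {Integer.class, null};
        Class<?>[] paramTypes = {long.class, String.class};
        // With autoboxing, Integer unboxes to int and widens to long;
        // null is assignable to the non-primitive String.
        System.out.println(ClassUtils.isAssignable(argTypes, paramTypes, true));  // true
        // Without autoboxing, Integer cannot be assigned to the primitive long.
        System.out.println(ClassUtils.isAssignable(argTypes, paramTypes, false)); // false
    }
}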
_get_feature_index
|
def _get_feature_index(fx, feature_names=None):
"""Get feature index.
Parameters
----------
fx : int or str
Feature index or name.
feature_names : list of str, default=None
All feature names from which to search the indices.
Returns
-------
idx : int
Feature index.
"""
if isinstance(fx, str):
if feature_names is None:
raise ValueError(
f"Cannot plot partial dependence for feature {fx!r} since "
"the list of feature names was not provided, neither as "
"column names of a pandas data-frame nor via the feature_names "
"parameter."
)
try:
return feature_names.index(fx)
except ValueError as e:
raise ValueError(f"Feature {fx!r} not in feature_names") from e
return fx
|
Get feature index.
Parameters
----------
fx : int or str
Feature index or name.
feature_names : list of str, default=None
All feature names from which to search the indices.
Returns
-------
idx : int
Feature index.
|
python
|
sklearn/inspection/_pd_utils.py
| 40
|
[
"fx",
"feature_names"
] | false
| 3
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
isCreatable
|
public static boolean isCreatable(final String str) {
if (StringUtils.isEmpty(str)) {
return false;
}
final char[] chars = str.toCharArray();
int sz = chars.length;
boolean hasExp = false;
boolean hasDecPoint = false;
boolean allowSigns = false;
boolean foundDigit = false;
// deal with any possible sign up front
final int start = isSign(chars[0]) ? 1 : 0;
if (sz > start + 1 && chars[start] == '0' && !StringUtils.contains(str, '.')) { // leading 0, skip if is a decimal number
if (chars[start + 1] == 'x' || chars[start + 1] == 'X') { // leading 0x/0X
int i = start + 2;
if (i == sz) {
return false; // str == "0x"
}
// checking hex (it can't be anything else)
for (; i < chars.length; i++) {
if (!CharUtils.isHex(chars[i])) {
return false;
}
}
return true;
}
if (Character.isDigit(chars[start + 1])) {
// leading 0, but not hex, must be octal
int i = start + 1;
for (; i < chars.length; i++) {
if (!CharUtils.isOctal(chars[i])) {
return false;
}
}
return true;
}
}
sz--; // don't want to loop to the last char, check it afterwards
// for type qualifiers
int i = start;
// loop to the next to last char or to the last char if we need another digit to
// make a valid number (e.g. chars[0..5] = "1234E")
while (i < sz || i < sz + 1 && allowSigns && !foundDigit) {
if (CharUtils.isAsciiNumeric(chars[i])) {
foundDigit = true;
allowSigns = false;
} else if (chars[i] == '.') {
if (hasDecPoint || hasExp) {
// two decimal points or dec in exponent
return false;
}
hasDecPoint = true;
} else if (chars[i] == 'e' || chars[i] == 'E') {
// we've already taken care of hex.
if (hasExp) {
// two E's
return false;
}
if (!foundDigit) {
return false;
}
hasExp = true;
allowSigns = true;
} else if (isSign(chars[i])) {
if (!allowSigns) {
return false;
}
allowSigns = false;
foundDigit = false; // we need a digit after the E
} else {
return false;
}
i++;
}
if (i < chars.length) {
if (CharUtils.isAsciiNumeric(chars[i])) {
// no type qualifier, OK
return true;
}
if (chars[i] == 'e' || chars[i] == 'E') {
// can't have an E at the last byte
return false;
}
if (chars[i] == '.') {
if (hasDecPoint || hasExp) {
// two decimal points or dec in exponent
return false;
}
// single trailing decimal point after non-exponent is ok
return foundDigit;
}
if (!allowSigns && (chars[i] == 'd' || chars[i] == 'D' || chars[i] == 'f' || chars[i] == 'F')) {
return foundDigit;
}
if (chars[i] == 'l' || chars[i] == 'L') {
// not allowing L with an exponent or decimal point
return foundDigit && !hasExp && !hasDecPoint;
}
// last character is illegal
return false;
}
// allowSigns is true iff the val ends in 'E'
// foundDigit is to make sure weird stuff like '.' and '1E-' doesn't pass
return !allowSigns && foundDigit;
}
|
Checks whether the String is a valid Java number.
<p>
Valid numbers include hexadecimal marked with the {@code 0x} or {@code 0X} qualifier, octal numbers, scientific notation and numbers marked with a type
qualifier (e.g. 123L).
</p>
<p>
Non-hexadecimal strings beginning with a leading zero are treated as octal values. Thus the string {@code 09} will return {@code false}, since {@code 9}
is not a valid octal value. However, numbers beginning with {@code 0.} are treated as decimal.
</p>
<p>
{@code null} and empty/blank {@link String} will return {@code false}.
</p>
<p>
Note, {@link #createNumber(String)} should return a number for every input resulting in {@code true}.
</p>
@param str the {@link String} to check.
@return {@code true} if the string is a correctly formatted number.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/math/NumberUtils.java
| 555
|
[
"str"
] | true
| 45
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getLastType
|
function getLastType(stack: ProcessorState['measureStack']) {
if (stack.length > 0) {
const {type} = stack[stack.length - 1];
return type;
}
return null;
}
|
Returns the type of the entry at the top of the measure stack, or null if the stack is empty.
|
javascript
|
packages/react-devtools-timeline/src/import-worker/preprocessData.js
| 127
|
[] | false
| 2
| 6.24
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
setSortTextToOptionalMember
|
function setSortTextToOptionalMember() {
symbols.forEach(m => {
if (m.flags & SymbolFlags.Optional) {
const symbolId = getSymbolId(m);
symbolToSortTextMap[symbolId] = symbolToSortTextMap[symbolId] ?? SortText.OptionalMember;
}
});
}
|
Sets the sort text of every optional member symbol to OptionalMember, unless a sort text has already been assigned.
|
typescript
|
src/services/completions.ts
| 5,194
|
[] | false
| 2
| 6.96
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
batchIterator
|
@Override
public AbstractIterator<FileChannelRecordBatch> batchIterator() {
return batchIterator(start);
}
|
Get an iterator over the record batches in the file, beginning at this instance's start position
(i.e. the start of the slice this {@link FileRecords} view was created with).
@return An iterator over batches starting from this instance's start position
|
java
|
clients/src/main/java/org/apache/kafka/common/record/FileRecords.java
| 429
|
[] | true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
get
|
public static Binder get(Environment environment, @Nullable BindHandler defaultBindHandler) {
Iterable<ConfigurationPropertySource> sources = ConfigurationPropertySources.get(environment);
PropertySourcesPlaceholdersResolver placeholdersResolver = new PropertySourcesPlaceholdersResolver(environment);
return new Binder(sources, placeholdersResolver, null, null, defaultBindHandler);
}
|
Create a new {@link Binder} instance from the specified environment.
@param environment the environment source (must have attached
{@link ConfigurationPropertySources})
@param defaultBindHandler the default bind handler to use if none is specified when
binding
@return a {@link Binder} instance
@since 2.2.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Binder.java
| 568
|
[
"environment",
"defaultBindHandler"
] |
Binder
| true
| 1
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
visitVariableDeclaration
|
function visitVariableDeclaration(node: VariableDeclaration): VisitResult<VariableDeclaration> {
const ancestorFacts = enterSubtree(HierarchyFacts.ExportedVariableStatement, HierarchyFacts.None);
let updated: VisitResult<VariableDeclaration>;
if (isBindingPattern(node.name)) {
updated = flattenDestructuringBinding(
node,
visitor,
context,
FlattenLevel.All,
/*rval*/ undefined,
(ancestorFacts & HierarchyFacts.ExportedVariableStatement) !== 0,
);
}
else {
updated = visitEachChild(node, visitor, context);
}
exitSubtree(ancestorFacts, HierarchyFacts.None, HierarchyFacts.None);
return updated;
}
|
Visits a VariableDeclaration node, flattening destructuring bindings when the name is a binding pattern.
@param node A VariableDeclaration node.
|
typescript
|
src/compiler/transformers/es2015.ts
| 2,914
|
[
"node"
] | true
| 3
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
addContextDataPairs
|
private static void addContextDataPairs(ContextPairs.Pairs<ReadOnlyStringMap> contextPairs) {
contextPairs.add((contextData, pairs) -> contextData.forEach(pairs::accept));
}
|
Registers a pair source that copies every entry of the event's context data into the output pairs.
@param contextPairs the pairs builder to add the context data extractor to
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/GraylogExtendedLogFormatStructuredLogFormatter.java
| 142
|
[
"contextPairs"
] |
void
| true
| 1
| 6.8
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
ceiling
|
public static Calendar ceiling(final Calendar calendar, final int field) {
Objects.requireNonNull(calendar, "calendar");
return modify((Calendar) calendar.clone(), field, ModifyType.CEILING);
}
|
Gets a date ceiling, leaving the field specified as the most
significant field.
<p>For example, if you had the date-time of 28 Mar 2002
13:45:01.231, if you passed with HOUR, it would return 28 Mar
2002 14:00:00.000. If this was passed with MONTH, it would
return 1 Apr 2002 0:00:00.000.</p>
@param calendar the date to work with, not null.
@param field the field from {@link Calendar} or {@code SEMI_MONTH}.
@return the ceiling-adjusted date, not null.
@throws NullPointerException if the date is {@code null}.
@throws ArithmeticException if the year is over 280 million.
@since 2.5
|
java
|
src/main/java/org/apache/commons/lang3/time/DateUtils.java
| 348
|
[
"calendar",
"field"
] |
Calendar
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
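A short sketch of the HOUR example from the record's own javadoc (the calendar values are illustrative):

import java.util.Calendar;
import org.apache.commons.lang3.time.DateUtils;

public class CeilingExample {
    public static void main(String[] args) {
        Calendar cal = Calendar.getInstance();
        cal.set(2002, Calendar.MARCH, 28, 13, 45, 1);
        // 28 Mar 2002 13:45:01 rounds up to 28 Mar 2002 14:00:00.
        Calendar ceiled = DateUtils.ceiling(cal, Calendar.HOUR);
        System.out.println(ceiled.getTime());
    }
}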
apply_over_axes
|
def apply_over_axes(func, a, axes):
"""
Apply a function repeatedly over multiple axes.
`func` is called as `res = func(a, axis)`, where `axis` is the first
element of `axes`. The result `res` of the function call must have
either the same dimensions as `a` or one less dimension. If `res`
has one less dimension than `a`, a dimension is inserted before
`axis`. The call to `func` is then repeated for each axis in `axes`,
with `res` as the first argument.
Parameters
----------
func : function
This function must take two arguments, `func(a, axis)`.
a : array_like
Input array.
axes : array_like
Axes over which `func` is applied; the elements must be integers.
Returns
-------
apply_over_axis : ndarray
The output array. The number of dimensions is the same as `a`,
but the shape can be different. This depends on whether `func`
changes the shape of its output with respect to its input.
See Also
--------
apply_along_axis :
Apply a function to 1-D slices of an array along the given axis.
Notes
-----
This function is equivalent to tuple axis arguments to reorderable ufuncs
with keepdims=True. Tuple axis arguments to ufuncs have been available since
version 1.7.0.
Examples
--------
>>> import numpy as np
>>> a = np.arange(24).reshape(2,3,4)
>>> a
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
Sum over axes 0 and 2. The result has same number of dimensions
as the original array:
>>> np.apply_over_axes(np.sum, a, [0,2])
array([[[ 60],
[ 92],
[124]]])
Tuple axis arguments to ufuncs are equivalent:
>>> np.sum(a, axis=(0,2), keepdims=True)
array([[[ 60],
[ 92],
[124]]])
"""
val = asarray(a)
N = a.ndim
if array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError("function is not returning "
"an array of the correct shape")
return val
|
Apply a function repeatedly over multiple axes.
`func` is called as `res = func(a, axis)`, where `axis` is the first
element of `axes`. The result `res` of the function call must have
either the same dimensions as `a` or one less dimension. If `res`
has one less dimension than `a`, a dimension is inserted before
`axis`. The call to `func` is then repeated for each axis in `axes`,
with `res` as the first argument.
Parameters
----------
func : function
This function must take two arguments, `func(a, axis)`.
a : array_like
Input array.
axes : array_like
Axes over which `func` is applied; the elements must be integers.
Returns
-------
apply_over_axis : ndarray
The output array. The number of dimensions is the same as `a`,
but the shape can be different. This depends on whether `func`
changes the shape of its output with respect to its input.
See Also
--------
apply_along_axis :
Apply a function to 1-D slices of an array along the given axis.
Notes
-----
This function is equivalent to tuple axis arguments to reorderable ufuncs
with keepdims=True. Tuple axis arguments to ufuncs have been available since
version 1.7.0.
Examples
--------
>>> import numpy as np
>>> a = np.arange(24).reshape(2,3,4)
>>> a
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
Sum over axes 0 and 2. The result has same number of dimensions
as the original array:
>>> np.apply_over_axes(np.sum, a, [0,2])
array([[[ 60],
[ 92],
[124]]])
Tuple axis arguments to ufuncs are equivalent:
>>> np.sum(a, axis=(0,2), keepdims=True)
array([[[ 60],
[ 92],
[124]]])
|
python
|
numpy/lib/_shape_base_impl.py
| 422
|
[
"func",
"a",
"axes"
] | false
| 8
| 7.6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
hermmulx
|
def hermmulx(c):
"""Multiply a Hermite series by x.
Multiply the Hermite series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
See Also
--------
hermadd, hermsub, hermmul, hermdiv, hermpow
Notes
-----
The multiplication uses the recursion relationship for Hermite
polynomials in the form
.. math::
xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x))
Examples
--------
>>> from numpy.polynomial.hermite import hermmulx
>>> hermmulx([1, 2, 3])
array([2. , 6.5, 1. , 1.5])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0] * 0
prd[1] = c[0] / 2
for i in range(1, len(c)):
prd[i + 1] = c[i] / 2
prd[i - 1] += c[i] * i
return prd
|
Multiply a Hermite series by x.
Multiply the Hermite series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
See Also
--------
hermadd, hermsub, hermmul, hermdiv, hermpow
Notes
-----
The multiplication uses the recursion relationship for Hermite
polynomials in the form
.. math::
xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x))
Examples
--------
>>> from numpy.polynomial.hermite import hermmulx
>>> hermmulx([1, 2, 3])
array([2. , 6.5, 1. , 1.5])
|
python
|
numpy/polynomial/hermite.py
| 392
|
[
"c"
] | false
| 4
| 7.68
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
put
|
private @Nullable V put(@ParametricNullness K key, @ParametricNullness V value, boolean force) {
int keyHash = Hashing.smearedHash(key);
int entryForKey = findEntryByKey(key, keyHash);
if (entryForKey != ABSENT) {
V oldValue = values[entryForKey];
if (Objects.equals(oldValue, value)) {
return value;
} else {
replaceValueInEntry(entryForKey, value, force);
return oldValue;
}
}
int valueHash = Hashing.smearedHash(value);
int valueEntry = findEntryByValue(value, valueHash);
if (force) {
if (valueEntry != ABSENT) {
removeEntryValueHashKnown(valueEntry, valueHash);
}
} else {
checkArgument(valueEntry == ABSENT, "Value already present: %s", value);
}
ensureCapacity(size + 1);
keys[size] = key;
values[size] = value;
insertIntoTableKToV(size, keyHash);
insertIntoTableVToK(size, valueHash);
setSucceeds(lastInInsertionOrder, size);
setSucceeds(size, ENDPOINT);
size++;
modCount++;
return null;
}
|
Associates the given key with the given value. If {@code force} is true, an existing entry with
an equal value is evicted first; otherwise an equal value that is already present causes an
{@code IllegalArgumentException}.
@param key the key to store
@param value the value to store
@param force whether to silently evict an existing entry containing {@code value}
@return the value previously associated with {@code key}, or {@code null} if there was none
|
java
|
android/guava/src/com/google/common/collect/HashBiMap.java
| 287
|
[
"key",
"value",
"force"
] |
V
| true
| 5
| 7.92
|
google/guava
| 51,352
|
javadoc
| false
|
now
|
private static <E extends Throwable> Instant now(final FailableConsumer<Instant, E> nowConsumer) throws E {
final Instant start = Instant.now();
nowConsumer.accept(start);
return start;
}
|
Captures the current {@link Instant}, passes it to the given consumer, and returns it.
@param nowConsumer the consumer that receives the captured instant
@return the captured instant
|
java
|
src/main/java/org/apache/commons/lang3/time/DurationUtils.java
| 153
|
[
"nowConsumer"
] |
Instant
| true
| 1
| 6.88
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
arctanh
|
def arctanh(x):
"""
Compute the inverse hyperbolic tangent of `x`.
Return the "principal value" (for a description of this, see
`numpy.arctanh`) of ``arctanh(x)``. For real `x` such that
``abs(x) < 1``, this is a real number. If `abs(x) > 1`, or if `x` is
complex, the result is complex. Finally, `x = 1` returns ``inf`` and
``x=-1`` returns ``-inf``.
Parameters
----------
x : array_like
The value(s) whose arctanh is (are) required.
Returns
-------
out : ndarray or scalar
The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was
a scalar so is `out`, otherwise an array is returned.
See Also
--------
numpy.arctanh
Notes
-----
For an arctanh() that returns ``NAN`` when real `x` is not in the
interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does
return +/-inf for ``x = +/-1``).
Examples
--------
>>> import numpy as np
>>> np.set_printoptions(precision=4)
>>> np.emath.arctanh(0.5)
0.5493061443340549
>>> import warnings
>>> with warnings.catch_warnings():
... warnings.simplefilter('ignore', RuntimeWarning)
... np.emath.arctanh(np.eye(2))
array([[inf, 0.],
[ 0., inf]])
>>> np.emath.arctanh([1j])
array([0.+0.7854j])
"""
x = _fix_real_abs_gt_1(x)
return nx.arctanh(x)
|
Compute the inverse hyperbolic tangent of `x`.
Return the "principal value" (for a description of this, see
`numpy.arctanh`) of ``arctanh(x)``. For real `x` such that
``abs(x) < 1``, this is a real number. If `abs(x) > 1`, or if `x` is
complex, the result is complex. Finally, `x = 1` returns ``inf`` and
``x=-1`` returns ``-inf``.
Parameters
----------
x : array_like
The value(s) whose arctanh is (are) required.
Returns
-------
out : ndarray or scalar
The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was
a scalar so is `out`, otherwise an array is returned.
See Also
--------
numpy.arctanh
Notes
-----
For an arctanh() that returns ``NAN`` when real `x` is not in the
interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does
return +/-inf for ``x = +/-1``).
Examples
--------
>>> import numpy as np
>>> np.set_printoptions(precision=4)
>>> np.emath.arctanh(0.5)
0.5493061443340549
>>> import warnings
>>> with warnings.catch_warnings():
... warnings.simplefilter('ignore', RuntimeWarning)
... np.emath.arctanh(np.eye(2))
array([[inf, 0.],
[ 0., inf]])
>>> np.emath.arctanh([1j])
array([0.+0.7854j])
|
python
|
numpy/lib/_scimath_impl.py
| 591
|
[
"x"
] | false
| 1
| 6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
visitFunctionDeclaration
|
function visitFunctionDeclaration(node: FunctionDeclaration): FunctionDeclaration {
const savedConvertedLoopState = convertedLoopState;
convertedLoopState = undefined;
const ancestorFacts = enterSubtree(HierarchyFacts.FunctionExcludes, HierarchyFacts.FunctionIncludes);
const parameters = visitParameterList(node.parameters, visitor, context);
const body = transformFunctionBody(node);
const name = hierarchyFacts & HierarchyFacts.NewTarget
? factory.getLocalName(node)
: node.name;
exitSubtree(ancestorFacts, HierarchyFacts.FunctionSubtreeExcludes, HierarchyFacts.None);
convertedLoopState = savedConvertedLoopState;
return factory.updateFunctionDeclaration(
node,
visitNodes(node.modifiers, visitor, isModifier),
node.asteriskToken,
name,
/*typeParameters*/ undefined,
parameters,
/*type*/ undefined,
body,
);
}
|
Visits a FunctionDeclaration node.
@param node a FunctionDeclaration node.
|
typescript
|
src/compiler/transformers/es2015.ts
| 2,481
|
[
"node"
] | true
| 2
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
nullToEmpty
|
public static int[] nullToEmpty(final int[] array) {
return isEmpty(array) ? EMPTY_INT_ARRAY : array;
}
|
Defensive programming technique to change a {@code null}
reference to an empty one.
<p>
This method returns an empty array for a {@code null} input array.
</p>
<p>
As a memory optimizing technique an empty array passed in will be overridden with
the empty {@code public static} references in this class.
</p>
@param array the array to check for {@code null} or empty.
@return the same array, or a shared {@code public static} empty array for {@code null} or empty input.
@since 2.5
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 4,489
|
[
"array"
] | true
| 2
| 8.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
findGulpCommand
|
async function findGulpCommand(rootPath: string): Promise<string> {
const platform = process.platform;
if (platform === 'win32' && await exists(path.join(rootPath, 'node_modules', '.bin', 'gulp.cmd'))) {
const globalGulp = path.join(process.env.APPDATA ? process.env.APPDATA : '', 'npm', 'gulp.cmd');
if (await exists(globalGulp)) {
return `"${globalGulp}"`;
}
return path.join('.', 'node_modules', '.bin', 'gulp.cmd');
}
if ((platform === 'linux' || platform === 'darwin') && await exists(path.join(rootPath, 'node_modules', '.bin', 'gulp'))) {
return path.join('.', 'node_modules', '.bin', 'gulp');
}
return 'gulp';
}
|
Determines the gulp command to run for the given workspace root.
On Windows, when a locally installed gulp.cmd exists, the global npm
gulp.cmd is preferred if present, otherwise the local binary is used;
on Linux and macOS the local binary under node_modules/.bin is used
when present. Falls back to plain 'gulp' on the PATH.
@param rootPath
the workspace root to search for a local gulp installation
@returns
the command (or quoted path) used to invoke gulp.
|
typescript
|
extensions/gulp/src/main.ts
| 89
|
[
"rootPath"
] | true
| 8
| 7.92
|
microsoft/vscode
| 179,840
|
jsdoc
| true
|
|
findUrlsInClasspath
|
private static Enumeration<URL> findUrlsInClasspath(ClassLoader classLoader, String location) {
try {
return classLoader.getResources(location);
}
catch (IOException ex) {
throw new IllegalArgumentException("Failed to load configurations from location [" + location + "]", ex);
}
}
|
Finds all classpath URLs for the given location using the supplied class loader.
@param classLoader the class loader to use for the search
@param location the resource location to look up
@return an enumeration of the matching URLs
@throws IllegalArgumentException if the resources cannot be loaded
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/AutoConfigurationReplacements.java
| 113
|
[
"classLoader",
"location"
] | true
| 2
| 7.44
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
includes
|
def includes(self, x):
"""Test whether all values of x are in interval range.
Parameters
----------
x : ndarray
Array whose elements are tested to be in interval range.
Returns
-------
result : bool
"""
if self.low_inclusive:
low = np.greater_equal(x, self.low)
else:
low = np.greater(x, self.low)
if not np.all(low):
return False
if self.high_inclusive:
high = np.less_equal(x, self.high)
else:
high = np.less(x, self.high)
# Note: np.all returns numpy.bool_
return bool(np.all(high))
|
Test whether all values of x are in interval range.
Parameters
----------
x : ndarray
Array whose elements are tested to be in interval range.
Returns
-------
result : bool
|
python
|
sklearn/_loss/link.py
| 32
|
[
"self",
"x"
] | false
| 6
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
indexesOf
|
public static BitSet indexesOf(final long[] array, final long valueToFind) {
return indexesOf(array, valueToFind, 0);
}
|
Finds the indices of the given value in the array.
<p>This method returns an empty BitSet for a {@code null} input array.</p>
@param array the array to search for the object, may be {@code null}.
@param valueToFind the value to find.
@return a BitSet of all the indices of the value within the array,
an empty BitSet if not found or {@code null} array input.
@since 3.10
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 2,232
|
[
"array",
"valueToFind"
] |
BitSet
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
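A brief usage sketch for indexesOf (the array contents are illustrative):

import java.util.BitSet;
import org.apache.commons.lang3.ArrayUtils;

public class IndexesOfExample {
    public static void main(String[] args) {
        long[] values = {5L, 2L, 5L, 9L};
        // The BitSet has a bit set for every index holding the value.
        BitSet hits = ArrayUtils.indexesOf(values, 5L);
        System.out.println(hits); // prints {0, 2}
    }
}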
toString
|
@Override
public String toString() {
StringBuilder bld = new StringBuilder();
if (sessionId == INVALID_SESSION_ID) {
bld.append("(sessionId=INVALID, ");
} else {
bld.append("(sessionId=").append(sessionId).append(", ");
}
if (epoch == INITIAL_EPOCH) {
bld.append("epoch=INITIAL)");
} else if (epoch == FINAL_EPOCH) {
bld.append("epoch=FINAL)");
} else {
bld.append("epoch=").append(epoch).append(")");
}
return bld.toString();
}
|
Returns a human-readable representation of this fetch metadata, marking invalid session ids and initial/final epochs.
|
java
|
clients/src/main/java/org/apache/kafka/common/requests/FetchMetadata.java
| 146
|
[] |
String
| true
| 4
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
toArray
|
public static long[] toArray(Collection<? extends Number> collection) {
if (collection instanceof LongArrayAsList) {
return ((LongArrayAsList) collection).toLongArray();
}
Object[] boxedArray = collection.toArray();
int len = boxedArray.length;
long[] array = new long[len];
for (int i = 0; i < len; i++) {
// checkNotNull for GWT (do not optimize)
array[i] = ((Number) checkNotNull(boxedArray[i])).longValue();
}
return array;
}
|
Returns an array containing each value of {@code collection}, converted to a {@code long} value
in the manner of {@link Number#longValue}.
<p>Elements are copied from the argument collection as if by {@code collection.toArray()}.
Calling this method is as thread-safe as calling that method.
@param collection a collection of {@code Number} instances
@return an array containing the same values as {@code collection}, in the same order, converted
to primitives
@throws NullPointerException if {@code collection} or any of its elements is null
@since 1.0 (parameter was {@code Collection<Long>} before 12.0)
|
java
|
android/guava/src/com/google/common/primitives/Longs.java
| 677
|
[
"collection"
] | true
| 3
| 8.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
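A minimal sketch for Longs.toArray (the mixed Number collection is illustrative):

import com.google.common.primitives.Longs;
import java.util.List;

public class ToArrayExample {
    public static void main(String[] args) {
        List<Number> numbers = List.of(1, 2.0, 3L);
        // Each element is converted via Number.longValue(), so mixed types are fine.
        long[] array = Longs.toArray(numbers);
        System.out.println(array.length); // prints 3
    }
}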
get_engine
|
def get_engine():
"""Get the configured engine, raising an error if not configured."""
if engine is None:
raise RuntimeError("Engine not configured. Call configure_orm() first.")
return engine
|
Get the configured engine, raising an error if not configured.
|
python
|
airflow-core/src/airflow/settings.py
| 142
|
[] | false
| 2
| 6.08
|
apache/airflow
| 43,597
|
unknown
| false
|
|
sampleVariance
|
public double sampleVariance() {
checkState(count > 1);
if (isNaN(sumOfSquaresOfDeltas)) {
return NaN;
}
return ensureNonNegative(sumOfSquaresOfDeltas) / (count - 1);
}
|
Returns the <a href="http://en.wikipedia.org/wiki/Variance#Sample_variance">unbiased sample
variance</a> of the values. If this dataset is a sample drawn from a population, this is an
unbiased estimator of the population variance of the population. The count must be greater than
one.
<p>This is not guaranteed to return zero when the dataset consists of the same value multiple
times, due to numerical errors. However, it is guaranteed never to return a negative result.
<h3>Non-finite values</h3>
<p>If the dataset contains any non-finite values ({@link Double#POSITIVE_INFINITY}, {@link
Double#NEGATIVE_INFINITY}, or {@link Double#NaN}) then the result is {@link Double#NaN}.
@throws IllegalStateException if the dataset is empty or contains a single value
|
java
|
android/guava/src/com/google/common/math/Stats.java
| 341
|
[] | true
| 2
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
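A short sketch of the sample-variance computation (the data points are illustrative):

import com.google.common.math.Stats;

public class VarianceExample {
    public static void main(String[] args) {
        Stats stats = Stats.of(2.0, 4.0, 6.0);
        // Mean is 4; squared deltas sum to 8; dividing by count - 1 = 2 gives 4.0.
        System.out.println(stats.sampleVariance()); // prints 4.0
    }
}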
load
|
@Nullable ConfigData load(ConfigDataLoaderContext context, R resource)
throws IOException, ConfigDataResourceNotFoundException;
|
Load {@link ConfigData} for the given resource.
@param context the loader context
@param resource the resource to load
@return the loaded config data or {@code null} if the location should be skipped
@throws IOException on IO error
@throws ConfigDataResourceNotFoundException if the resource cannot be found
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataLoader.java
| 66
|
[
"context",
"resource"
] |
ConfigData
| true
| 1
| 6.32
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getBean
|
@Override
public Object getBean(String name, @Nullable Object @Nullable ... args) throws BeansException {
if (!ObjectUtils.isEmpty(args)) {
throw new UnsupportedOperationException(
"StaticListableBeanFactory does not support explicit bean creation arguments");
}
return getBean(name);
}
|
Return the bean instance for the given name, rejecting explicit creation arguments.
<p>StaticListableBeanFactory does not support explicit bean creation arguments.
@param name the name of the bean
@param args arguments for explicit bean creation (must be empty)
@return the registered bean instance
@throws UnsupportedOperationException if any creation arguments are supplied
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/StaticListableBeanFactory.java
| 152
|
[
"name"
] |
Object
| true
| 2
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
groupSubscribe
|
synchronized boolean groupSubscribe(Collection<String> topics) {
if (!hasAutoAssignedPartitions())
throw new IllegalStateException(SUBSCRIPTION_EXCEPTION_MESSAGE);
groupSubscription = new HashSet<>(topics);
return !subscription.containsAll(groupSubscription);
}
|
Set the current group subscription. This is used by the group leader to ensure
that it receives metadata updates for all topics that the group is interested in.
@param topics All topics from the group subscription
@return true if the group subscription contains topics which are not part of the local subscription
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
| 239
|
[
"topics"
] | true
| 2
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
_split_line
|
def _split_line(s: str, parts):
"""
Parameters
----------
s: str
Fixed-length string to split
parts: list of (name, length) pairs
Used to break up string, name '_' will be filtered from output.
Returns
-------
Dict of name:contents of string at given location.
"""
out = {}
start = 0
for name, length in parts:
out[name] = s[start : start + length].strip()
start += length
del out["_"]
return out
|
Parameters
----------
s: str
Fixed-length string to split
parts: list of (name, length) pairs
Used to break up string, name '_' will be filtered from output.
Returns
-------
Dict of name:contents of string at given location.
|
python
|
pandas/io/sas/sas_xport.py
| 138
|
[
"s",
"parts"
] | true
| 2
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
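The `_split_line` row above is a fixed-width record splitter. A Java analogue of the same technique (a sketch; names invented, not pandas API):

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class FixedWidthSplit {
    // Break a fixed-width record into named fields; fields named "_" are dropped.
    static Map<String, String> splitLine(String s, Object[][] parts) {
        Map<String, String> out = new LinkedHashMap<>();
        int start = 0;
        for (Object[] part : parts) {
            String name = (String) part[0];
            int length = (Integer) part[1];
            out.put(name, s.substring(start, Math.min(start + length, s.length())).strip());
            start += length;
        }
        out.remove("_");
        return out;
    }

    public static void main(String[] args) {
        Object[][] parts = { {"alpha", 5}, {"_", 4}, {"gamma", 4} };
        System.out.println(splitLine("AAA  BB  CCCC", parts)); // {alpha=AAA, gamma=CCCC}
    }
}
```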
escapeSlow
|
protected final String escapeSlow(String s, int index) {
int end = s.length();
// Get a destination buffer and setup some loop variables.
char[] dest = Platform.charBufferFromThreadLocal();
int destIndex = 0;
int unescapedChunkStart = 0;
while (index < end) {
int cp = codePointAt(s, index, end);
if (cp < 0) {
throw new IllegalArgumentException("Trailing high surrogate at end of input");
}
// It is possible for this to return null because nextEscapeIndex() may
// (for performance reasons) yield some false positives but it must never
// give false negatives.
char[] escaped = escape(cp);
int nextIndex = index + (Character.isSupplementaryCodePoint(cp) ? 2 : 1);
if (escaped != null) {
int charsSkipped = index - unescapedChunkStart;
// This is the size needed to add the replacement, not the full
// size needed by the string. We only regrow when we absolutely must.
int sizeNeeded = destIndex + charsSkipped + escaped.length;
if (dest.length < sizeNeeded) {
int destLength = sizeNeeded + (end - index) + DEST_PAD;
dest = growBuffer(dest, destIndex, destLength);
}
// If we have skipped any characters, we need to copy them now.
if (charsSkipped > 0) {
s.getChars(unescapedChunkStart, index, dest, destIndex);
destIndex += charsSkipped;
}
if (escaped.length > 0) {
System.arraycopy(escaped, 0, dest, destIndex, escaped.length);
destIndex += escaped.length;
}
// If we dealt with an escaped character, reset the unescaped range.
unescapedChunkStart = nextIndex;
}
index = nextEscapeIndex(s, nextIndex, end);
}
// Process trailing unescaped characters - no need to account for escaped
// length or padding the allocation.
int charsSkipped = end - unescapedChunkStart;
if (charsSkipped > 0) {
int endIndex = destIndex + charsSkipped;
if (dest.length < endIndex) {
dest = growBuffer(dest, destIndex, endIndex);
}
s.getChars(unescapedChunkStart, end, dest, destIndex);
destIndex = endIndex;
}
return new String(dest, 0, destIndex);
}
|
Returns the escaped form of a given literal string, starting at the given index. This method is
called by the {@link #escape(String)} method when it discovers that escaping is required. It is
protected to allow subclasses to override the fastpath escaping function to inline their
escaping test. See {@link CharEscaperBuilder} for an example usage.
<p>This method is not reentrant and may only be invoked by the top level {@link
#escape(String)} method.
@param s the literal string to be escaped
@param index the index to start escaping from
@return the escaped form of {@code string}
@throws NullPointerException if {@code string} is null
@throws IllegalArgumentException if invalid surrogate characters are encountered
|
java
|
android/guava/src/com/google/common/escape/UnicodeEscaper.java
| 157
|
[
"s",
"index"
] |
String
| true
| 10
| 8
|
google/guava
| 51,352
|
javadoc
| false
|
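The `escapeSlow` row above is the slow path behind Guava's `UnicodeEscaper`; subclasses only implement the per-code-point decision. A minimal sketch of such a subclass (class name invented):

```java
import com.google.common.escape.UnicodeEscaper;

public class NonAsciiEscaper extends UnicodeEscaper {
    @Override
    protected char[] escape(int cp) {
        if (cp < 0x80) {
            return null; // ASCII passes through; escapeSlow() copies these chunks verbatim
        }
        return String.format("\\u{%X}", cp).toCharArray();
    }

    public static void main(String[] args) {
        // escape(String) takes the fast path until the first non-ASCII code point,
        // then hands off to escapeSlow().
        System.out.println(new NonAsciiEscaper().escape("café")); // caf\u{E9}
    }
}
```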
_terminate_process_tree
|
def _terminate_process_tree(
process: subprocess.Popen[bytes],
timeout: int = 5,
force_kill_remaining: bool = True,
) -> None:
"""
Terminate a process and all its children recursively.
Uses psutil to ensure all child processes are properly terminated,
which is important for cleaning up subprocesses like serve-log servers.
:param process: The subprocess.Popen process to terminate
:param timeout: Timeout in seconds to wait for graceful termination
:param force_kill_remaining: If True, force kill processes that don't terminate gracefully
"""
import subprocess
import psutil
try:
parent = psutil.Process(process.pid)
# Get all child processes recursively
children = parent.children(recursive=True)
# Terminate all children first
for child in children:
try:
child.terminate()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
# Terminate the parent
parent.terminate()
# Wait for all processes to terminate
gone, alive = psutil.wait_procs(children + [parent], timeout=timeout)
# Force kill any remaining processes if requested
if force_kill_remaining:
for proc in alive:
try:
log.warning("Force killing process %s", proc.pid)
proc.kill()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
except (psutil.NoSuchProcess, psutil.AccessDenied):
# Process already terminated
pass
except Exception as e:
log.warning("Error terminating process tree: %s", e)
# Fallback to simple termination
try:
process.terminate()
process.wait(timeout=timeout)
except subprocess.TimeoutExpired:
if force_kill_remaining:
log.warning("Process did not terminate gracefully, killing...")
process.kill()
process.wait()
|
Terminate a process and all its children recursively.
Uses psutil to ensure all child processes are properly terminated,
which is important for cleaning up subprocesses like serve-log servers.
:param process: The subprocess.Popen process to terminate
:param timeout: Timeout in seconds to wait for graceful termination
:param force_kill_remaining: If True, force kill processes that don't terminate gracefully
|
python
|
airflow-core/src/airflow/cli/hot_reload.py
| 71
|
[
"process",
"timeout",
"force_kill_remaining"
] |
None
| true
| 5
| 6.64
|
apache/airflow
| 43,597
|
sphinx
| false
|
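The `_terminate_process_tree` row above follows a common pattern: terminate children first, wait, then escalate. A Java analogue using `ProcessHandle` (a sketch; not the Airflow implementation):

```java
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

public class ProcessTreeTerminator {
    static void terminateTree(Process process, long timeoutSeconds) throws InterruptedException {
        ProcessHandle parent = process.toHandle();
        List<ProcessHandle> children = parent.descendants().collect(Collectors.toList());
        children.forEach(ProcessHandle::destroy); // polite termination request first
        parent.destroy();
        if (!process.waitFor(timeoutSeconds, TimeUnit.SECONDS)) {
            // Escalate: force-kill anything that ignored the first request.
            children.forEach(ProcessHandle::destroyForcibly);
            parent.destroyForcibly();
        }
    }
}
```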
descendingEntryIterator
|
@Override
Iterator<Entry<Cut<C>, Range<C>>> descendingEntryIterator() {
if (restriction.isEmpty()) {
return emptyIterator();
}
Cut<Cut<C>> upperBoundOnLowerBounds =
Ordering.<Cut<Cut<C>>>natural()
.min(lowerBoundWindow.upperBound, Cut.belowValue(restriction.upperBound));
Iterator<Range<C>> completeRangeItr =
rangesByLowerBound
.headMap(
upperBoundOnLowerBounds.endpoint(),
upperBoundOnLowerBounds.typeAsUpperBound() == BoundType.CLOSED)
.descendingMap()
.values()
.iterator();
return new AbstractIterator<Entry<Cut<C>, Range<C>>>() {
@Override
protected @Nullable Entry<Cut<C>, Range<C>> computeNext() {
if (!completeRangeItr.hasNext()) {
return endOfData();
}
Range<C> nextRange = completeRangeItr.next();
if (restriction.lowerBound.compareTo(nextRange.upperBound) >= 0) {
return endOfData();
}
nextRange = nextRange.intersection(restriction);
if (lowerBoundWindow.contains(nextRange.lowerBound)) {
return immutableEntry(nextRange.lowerBound, nextRange);
} else {
return endOfData();
}
}
};
}
|
restriction is the subRangeSet view; ranges are truncated to their intersection with
restriction.
|
java
|
android/guava/src/com/google/common/collect/TreeRangeSet.java
| 816
|
[] | true
| 5
| 6.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
scanSourceCharacter
|
function scanSourceCharacter(): string {
const size = anyUnicodeMode ? charSize(codePointChecked(pos)) : 1;
pos += size;
return size > 0 ? text.substring(pos - size, pos) : "";
}
|
A stack of scopes for named capturing groups. @see {scanGroupName}
|
typescript
|
src/compiler/scanner.ts
| 3,568
|
[] | true
| 3
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
isArrayIndex
|
function isArrayIndex(key) {
const keyNum = +key;
if (`${keyNum}` !== key) { return false; }
return keyNum >= 0 && keyNum < 0xFFFF_FFFF;
}
|
Checks if the given key is a valid array index.
@param {string} key - The key to check.
@returns {boolean} - Returns `true` if the key is a valid array index, else `false`.
|
javascript
|
lib/internal/modules/esm/resolve.js
| 461
|
[
"key"
] | false
| 3
| 6.24
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
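A Java rendering of the `isArrayIndex` check above, including the round-trip test that rejects forms like `"+1"`, `"01"`, or `"1.0"` (a sketch; the method name is copied for clarity only):

```java
public class ArrayIndexCheck {
    static boolean isArrayIndex(String key) {
        long value;
        try {
            value = Long.parseLong(key);
        } catch (NumberFormatException e) {
            return false; // non-numeric, fractional, or out-of-range input
        }
        // Canonical round-trip: the numeric value must print back to the same string.
        if (!String.valueOf(value).equals(key)) {
            return false;
        }
        return value >= 0 && value < 0xFFFF_FFFFL;
    }

    public static void main(String[] args) {
        System.out.println(isArrayIndex("3"));  // true
        System.out.println(isArrayIndex("03")); // false: does not round-trip
        System.out.println(isArrayIndex("-1")); // false: negative
    }
}
```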
appendSeparator
|
public StrBuilder appendSeparator(final char separator, final int loopIndex) {
if (loopIndex > 0) {
append(separator);
}
return this;
}
|
Appends a separator to the builder if the loop index is greater than zero.
The separator is appended using {@link #append(char)}.
<p>
This method is useful for adding a separator each time around the
loop except the first.
</p>
<pre>{@code
for (int i = 0; i < list.size(); i++) {
appendSeparator(",", i);
append(list.get(i));
}
}
</pre>
Note that for this simple example, you should use
{@link #appendWithSeparators(Iterable, String)}.
@param separator the separator to use
@param loopIndex the loop index
@return {@code this} instance.
@since 2.3
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,268
|
[
"separator",
"loopIndex"
] |
StrBuilder
| true
| 2
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
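The javadoc in the row above sketches the loop; here is a runnable version using the `char` overload documented in this row (class name invented):

```java
import java.util.List;
import org.apache.commons.lang3.text.StrBuilder;

public class SeparatorDemo {
    public static void main(String[] args) {
        List<String> items = List.of("a", "b", "c");
        StrBuilder builder = new StrBuilder();
        for (int i = 0; i < items.size(); i++) {
            builder.appendSeparator(',', i); // no-op on the first iteration (i == 0)
            builder.append(items.get(i));
        }
        System.out.println(builder.toString()); // a,b,c
    }
}
```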
handleCause
|
public static void handleCause(final ExecutionException ex) throws ConcurrentException {
final ConcurrentException cause = extractCause(ex);
if (cause != null) {
throw cause;
}
}
|
Handles the specified {@link ExecutionException}. This method calls
{@link #extractCause(ExecutionException)} for obtaining the cause of the
exception - which might already cause an unchecked exception or an error
being thrown. If the cause is a checked exception however, it is wrapped
in a {@link ConcurrentException}, which is thrown. If the passed in
exception is <strong>null</strong> or has no cause, the method simply returns
without throwing an exception.
@param ex the exception to be handled
@throws ConcurrentException if the cause of the {@code
ExecutionException} is a checked exception
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/ConcurrentUtils.java
| 247
|
[
"ex"
] |
void
| true
| 2
| 6.72
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
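Usage sketch for `handleCause` above: unwrap an `ExecutionException` whose cause is a checked exception (class name invented; commons-lang3 assumed):

```java
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

import org.apache.commons.lang3.concurrent.ConcurrentException;
import org.apache.commons.lang3.concurrent.ConcurrentUtils;

public class HandleCauseDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        Callable<String> task = () -> { throw new Exception("boom"); };
        Future<String> future = pool.submit(task);
        try {
            future.get();
        } catch (ExecutionException ex) {
            try {
                ConcurrentUtils.handleCause(ex); // checked cause -> wrapped and rethrown
            } catch (ConcurrentException ce) {
                System.out.println("wrapped: " + ce.getCause().getMessage()); // wrapped: boom
            }
        } finally {
            pool.shutdown();
        }
    }
}
```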
value
|
@SuppressWarnings("unchecked")
public T value() {
if (!succeeded())
throw new IllegalStateException("Attempt to retrieve value from future which hasn't successfully completed");
return (T) result.get();
}
|
Get the value corresponding to this request (only available if the request succeeded)
@return the value set in {@link #complete(Object)}
@throws IllegalStateException if the future is not complete or failed
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestFuture.java
| 71
|
[] |
T
| true
| 2
| 7.12
|
apache/kafka
| 31,560
|
javadoc
| false
|