| function_name (string, 1–57 chars) | function_code (string, 20–4.99k chars) | documentation (string, 50–2k chars) | language (string, 5 values) | file_path (string, 8–166 chars) | line_number (int32, 4–16.7k) | parameters (list, 0–20 items) | return_type (string, 0–131 chars) | has_type_hints (bool, 2 classes) | complexity (int32, 1–51) | quality_score (float32, 6–9.68) | repo_name (string, 34 values) | repo_stars (int32, 2.9k–242k) | docstring_style (string, 7 values) | is_async (bool, 2 classes) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
removePropertySources
|
private void removePropertySources(MutablePropertySources propertySources, boolean isServletEnvironment) {
Set<String> names = new HashSet<>();
for (PropertySource<?> propertySource : propertySources) {
names.add(propertySource.getName());
}
for (String name : names) {
if (!isServletEnvironment || !SERVLET_ENVIRONMENT_SOURCE_NAMES.contains(name)) {
propertySources.remove(name);
}
}
}
|
Converts the given {@code environment} to the given {@link StandardEnvironment}
type. If the environment is already of the same type, no conversion is performed
and it is returned unchanged.
@param environment the Environment to convert
@param type the type to convert the Environment to
@return the converted Environment
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/EnvironmentConverter.java
| 120
|
[
"propertySources",
"isServletEnvironment"
] |
void
| true
| 3
| 7.6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
of
|
public static <E> Stream<E> of(final Iterable<E> iterable) {
return iterable == null ? Stream.empty() : StreamSupport.stream(iterable.spliterator(), false);
}
|
Creates a sequential stream on the given Iterable.
@param <E> the type of elements in the Iterable.
@param iterable the Iterable to stream or null.
@return a new Stream or {@link Stream#empty()} if the Iterable is null.
@since 3.13.0
|
java
|
src/main/java/org/apache/commons/lang3/stream/Streams.java
| 699
|
[
"iterable"
] | true
| 2
| 8.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
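Editor's illustration (not a dataset row): a minimal Java sketch of the `Streams.of` record above, assuming commons-lang 3.13+ on the classpath, showing the null-safe behavior the docstring describes.

```java
import java.util.List;
import org.apache.commons.lang3.stream.Streams;

class StreamsOfDemo {
    public static void main(String[] args) {
        // Non-null Iterable: an ordinary sequential stream
        long hits = Streams.of(List.of("ant", "bee", "ant")).filter("ant"::equals).count();
        System.out.println(hits); // 2
        // Null Iterable: Stream.empty() instead of a NullPointerException
        System.out.println(Streams.of((Iterable<String>) null).count()); // 0
    }
}
```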
getResourceByPath
|
@Override
protected Resource getResourceByPath(String path) {
if (path.startsWith("/")) {
path = path.substring(1);
}
return new FileSystemResource(path);
}
|
Resolve resource paths as file system paths.
<p>Note: Even if a given path starts with a slash, it will get
interpreted as relative to the current VM working directory.
This is consistent with the semantics in a Servlet container.
@param path the path to the resource
@return the Resource handle
@see org.springframework.web.context.support.XmlWebApplicationContext#getResourceByPath
|
java
|
spring-context/src/main/java/org/springframework/context/support/FileSystemXmlApplicationContext.java
| 157
|
[
"path"
] |
Resource
| true
| 2
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
to_pickle
|
def to_pickle(
self,
path: FilePath | WriteBuffer[bytes],
*,
compression: CompressionOptions = "infer",
protocol: int = pickle.HIGHEST_PROTOCOL,
storage_options: StorageOptions | None = None,
) -> None:
"""
Pickle (serialize) object to file.
Parameters
----------
path : str, path object, or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``write()`` function. File path where
the pickled object will be stored.
{compression_options}
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible
values are 0, 1, 2, 3, 4, 5. A negative value for the protocol
parameter is equivalent to setting its value to HIGHEST_PROTOCOL.
.. [1] https://docs.python.org/3/library/pickle.html.
{storage_options}
See Also
--------
read_pickle : Load pickled pandas object (or any object) from file.
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_sql : Write DataFrame to a SQL database.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Examples
--------
>>> original_df = pd.DataFrame(
... {{"foo": range(5), "bar": range(5, 10)}}
... ) # doctest: +SKIP
>>> original_df # doctest: +SKIP
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> original_df.to_pickle("./dummy.pkl") # doctest: +SKIP
>>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP
>>> unpickled_df # doctest: +SKIP
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
"""
from pandas.io.pickle import to_pickle
to_pickle(
self,
path,
compression=compression,
protocol=protocol,
storage_options=storage_options,
)
|
Pickle (serialize) object to file.
Parameters
----------
path : str, path object, or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``write()`` function. File path where
the pickled object will be stored.
{compression_options}
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible
values are 0, 1, 2, 3, 4, 5. A negative value for the protocol
parameter is equivalent to setting its value to HIGHEST_PROTOCOL.
.. [1] https://docs.python.org/3/library/pickle.html.
{storage_options}
See Also
--------
read_pickle : Load pickled pandas object (or any object) from file.
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_sql : Write DataFrame to a SQL database.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Examples
--------
>>> original_df = pd.DataFrame(
... {{"foo": range(5), "bar": range(5, 10)}}
... ) # doctest: +SKIP
>>> original_df # doctest: +SKIP
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> original_df.to_pickle("./dummy.pkl") # doctest: +SKIP
>>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP
>>> unpickled_df # doctest: +SKIP
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
|
python
|
pandas/core/generic.py
| 3,061
|
[
"self",
"path",
"compression",
"protocol",
"storage_options"
] |
None
| true
| 1
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
isCamelCasePattern
|
function isCamelCasePattern(word: string): boolean {
let upper = 0, lower = 0, code = 0, whitespace = 0;
for (let i = 0; i < word.length; i++) {
code = word.charCodeAt(i);
if (isUpper(code)) { upper++; }
if (isLower(code)) { lower++; }
if (isWhitespace(code)) { whitespace++; }
}
if ((upper === 0 || lower === 0) && whitespace === 0) {
return word.length <= 30;
} else {
return upper <= 5;
}
}
|
Gets alternative codes to the character code passed in. This comes in the
form of an array of character codes, all of which must match _in order_ to
successfully match.
@param code The character code to check.
|
typescript
|
src/vs/base/common/filters.ts
| 274
|
[
"word"
] | true
| 9
| 7.04
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
get6to4IPv4Address
|
public static Inet4Address get6to4IPv4Address(Inet6Address ip) {
checkArgument(is6to4Address(ip), "Address '%s' is not a 6to4 address.", toAddrString(ip));
return getInet4Address(Arrays.copyOfRange(ip.getAddress(), 2, 6));
}
|
Returns the IPv4 address embedded in a 6to4 address.
@param ip {@link Inet6Address} to be examined for embedded IPv4 in 6to4 address
@return {@link Inet4Address} of embedded IPv4 in 6to4 address
@throws IllegalArgumentException if the argument is not a valid IPv6 6to4 address
|
java
|
android/guava/src/com/google/common/net/InetAddresses.java
| 733
|
[
"ip"
] |
Inet4Address
| true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
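Editor's illustration (not a dataset row): a minimal sketch of the `get6to4IPv4Address` record above, assuming Guava on the classpath. The hypothetical address `2002:c000:204::1` embeds the TEST-NET address 192.0.2.4 in bytes 2–5.

```java
import com.google.common.net.InetAddresses;
import java.net.Inet4Address;
import java.net.Inet6Address;

class SixToFourDemo {
    public static void main(String[] args) {
        // 6to4 addresses live in 2002::/16; bytes 2..5 carry the embedded IPv4 address
        Inet6Address sixToFour = (Inet6Address) InetAddresses.forString("2002:c000:204::1");
        Inet4Address embedded = InetAddresses.get6to4IPv4Address(sixToFour);
        System.out.println(embedded.getHostAddress()); // 192.0.2.4
    }
}
```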
getResults
|
Map<String, String> getResults() {
results.clear();
if (simpleCount > 0) {
results.putAll(simpleResults);
}
if (referenceCount > 0) {
referenceResults.forEach((k, v) -> results.put(v.getKey(), v.getValue()));
}
if (appendCount > 0) {
appendResults.forEach((k, v) -> results.put(k, v.getAppendResult()));
}
return results;
}
|
Gets all the current matches. Pass the results of this to isValid to determine if a fully successful match has occurred.
@return the map of the results.
|
java
|
libs/dissect/src/main/java/org/elasticsearch/dissect/DissectMatch.java
| 92
|
[] | true
| 4
| 8.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
substituteExpressionIdentifier
|
function substituteExpressionIdentifier(node: Identifier): Identifier {
if (enabledSubstitutions & ES2015SubstitutionFlags.BlockScopedBindings && !isInternalName(node)) {
const declaration = resolver.getReferencedDeclarationWithCollidingName(node);
if (declaration && !(isClassLike(declaration) && isPartOfClassBody(declaration, node))) {
return setTextRange(factory.getGeneratedNameForNode(getNameOfDeclaration(declaration)), node);
}
}
return node;
}
|
Substitutes an expression identifier.
@param node An Identifier node.
|
typescript
|
src/compiler/transformers/es2015.ts
| 4,977
|
[
"node"
] | true
| 6
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
stampedLockVisitor
|
public static <O> StampedLockVisitor<O> stampedLockVisitor(final O object) {
return new LockingVisitors.StampedLockVisitor<>(object, new StampedLock());
}
|
Creates a new instance of {@link StampedLockVisitor} with the given object.
@param <O> The type of the object to protect.
@param object The object to protect.
@return A new {@link StampedLockVisitor}.
@see LockingVisitors
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/locks/LockingVisitors.java
| 746
|
[
"object"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
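Editor's illustration (not a dataset row): a minimal sketch of the `stampedLockVisitor` record above, assuming commons-lang's `LockingVisitors` and its `acceptReadLocked`/`acceptWriteLocked` consumer methods.

```java
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang3.concurrent.locks.LockingVisitors;
import org.apache.commons.lang3.concurrent.locks.LockingVisitors.StampedLockVisitor;

class StampedLockVisitorDemo {
    public static void main(String[] args) {
        StampedLockVisitor<List<String>> guarded =
                LockingVisitors.stampedLockVisitor(new ArrayList<>());
        guarded.acceptWriteLocked(list -> list.add("event"));       // mutate under the write lock
        guarded.acceptReadLocked(list -> System.out.println(list)); // read under the read lock
    }
}
```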
toBloomFilter
|
@IgnoreJRERequirement // Users will use this only if they're already using streams.
public static <T extends @Nullable Object> Collector<T, ?, BloomFilter<T>> toBloomFilter(
Funnel<? super T> funnel, long expectedInsertions, double fpp) {
checkNotNull(funnel);
checkArgument(
expectedInsertions >= 0, "Expected insertions (%s) must be >= 0", expectedInsertions);
checkArgument(fpp > 0.0, "False positive probability (%s) must be > 0.0", fpp);
checkArgument(fpp < 1.0, "False positive probability (%s) must be < 1.0", fpp);
return Collector.of(
() -> BloomFilter.create(funnel, expectedInsertions, fpp),
BloomFilter::put,
(bf1, bf2) -> {
bf1.putAll(bf2);
return bf1;
},
Collector.Characteristics.UNORDERED,
Collector.Characteristics.CONCURRENT);
}
|
Returns a {@code Collector} expecting the specified number of insertions, and yielding a {@link
BloomFilter} with the specified expected false positive probability.
<p>Note that if the {@code Collector} receives significantly more elements than specified, the
resulting {@code BloomFilter} will suffer a sharp deterioration of its false positive
probability.
<p>The constructed {@code BloomFilter} will be serializable if the provided {@code Funnel<T>}
is.
<p>It is recommended that the funnel be implemented as a Java enum. This has the benefit of
ensuring proper serialization and deserialization, which is important since {@link #equals}
also relies on object identity of funnels.
@param funnel the funnel of T's that the constructed {@code BloomFilter} will use
@param expectedInsertions the number of expected insertions to the constructed {@code
BloomFilter}; must be positive
@param fpp the desired false positive probability (must be positive and less than 1.0)
@return a {@code Collector} generating a {@code BloomFilter} of the received elements
@since 33.4.0 (but since 23.0 in the JRE flavor)
|
java
|
android/guava/src/com/google/common/hash/BloomFilter.java
| 357
|
[
"funnel",
"expectedInsertions",
"fpp"
] | true
| 1
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
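Editor's illustration (not a dataset row): a minimal sketch of the `toBloomFilter` collector above, assuming Guava 33.4.0+ (or the JRE flavor since 23.0, per the row's `@since`).

```java
import com.google.common.hash.BloomFilter;
import com.google.common.hash.Funnels;
import java.nio.charset.StandardCharsets;
import java.util.stream.Stream;

class BloomCollectorDemo {
    public static void main(String[] args) {
        BloomFilter<String> seen = Stream.of("alice", "bob", "carol")
                .collect(BloomFilter.toBloomFilter(
                        Funnels.stringFunnel(StandardCharsets.UTF_8), 3, 0.01));
        System.out.println(seen.mightContain("alice"));   // true
        System.out.println(seen.mightContain("mallory")); // false, up to a ~1% false-positive rate
    }
}
```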
_stripOrigin
|
function _stripOrigin(baseHref: string): string {
// DO NOT REFACTOR! Previously, this check looked like this:
// `/^(https?:)?\/\//.test(baseHref)`, but that resulted in
// syntactically incorrect code after Closure Compiler minification.
// This was likely caused by a bug in Closure Compiler, but
// for now, the check is rewritten to use `new RegExp` instead.
const isAbsoluteUrl = new RegExp('^(https?:)?//').test(baseHref);
if (isAbsoluteUrl) {
const [, pathname] = baseHref.split(/\/\/[^\/]+/);
return pathname;
}
return baseHref;
}
|
@description
A service that applications can use to interact with a browser's URL.
Depending on the `LocationStrategy` used, `Location` persists
to the URL's path or the URL's hash segment.
@usageNotes
It's better to use the `Router.navigate()` service to trigger route changes. Use
`Location` only if you need to interact with or create normalized URLs outside of
routing.
`Location` is responsible for normalizing the URL against the application's base href.
A normalized URL is absolute from the URL host, includes the application's base href, and has no
trailing slash:
- `/my/app/user/123` is normalized
- `my/app/user/123` **is not** normalized
- `/my/app/user/123/` **is not** normalized
### Example
{@example common/location/ts/path_location_component.ts region='LocationComponent'}
@publicApi
|
typescript
|
packages/common/src/location/location.ts
| 325
|
[
"baseHref"
] | true
| 2
| 6.8
|
angular/angular
| 99,544
|
jsdoc
| false
|
|
nextOffsets
|
public Map<TopicPartition, OffsetAndMetadata> nextOffsets() {
return nextOffsets;
}
|
Get the next offsets and metadata corresponding to all topic partitions for which the position has been advanced in this poll call.
@return the next offsets that the consumer will consume
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecords.java
| 70
|
[] | true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
build
|
public Escaper build() {
return new ArrayBasedCharEscaper(replacementMap, safeMin, safeMax) {
private final char @Nullable [] replacementChars =
unsafeReplacement != null ? unsafeReplacement.toCharArray() : null;
@Override
protected char @Nullable [] escapeUnsafe(char c) {
return replacementChars;
}
};
}
|
Returns a new escaper based on the current state of the builder.
|
java
|
android/guava/src/com/google/common/escape/Escapers.java
| 149
|
[] |
Escaper
| true
| 2
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
_infer_selection
|
def _infer_selection(self, key, subset: Series | DataFrame):
"""
Infer the `selection` to pass to our constructor in _gotitem.
"""
# Shared by Rolling and Resample
selection = None
if subset.ndim == 2 and (
(lib.is_scalar(key) and key in subset) or lib.is_list_like(key)
):
selection = key
elif subset.ndim == 1 and lib.is_scalar(key) and key == subset.name:
selection = key
return selection
|
Infer the `selection` to pass to our constructor in _gotitem.
|
python
|
pandas/core/base.py
| 255
|
[
"self",
"key",
"subset"
] | true
| 8
| 6
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
|
categorical_column_to_series
|
def categorical_column_to_series(col: Column) -> tuple[pd.Series, Any]:
"""
Convert a column holding categorical data to a pandas Series.
Parameters
----------
col : Column
Returns
-------
tuple
Tuple of pd.Series holding the data and the memory owner object
that keeps the memory alive.
"""
categorical = col.describe_categorical
if not categorical["is_dictionary"]:
raise NotImplementedError("Non-dictionary categoricals not supported yet")
cat_column = categorical["categories"]
if hasattr(cat_column, "_col"):
# Item "Column" of "Optional[Column]" has no attribute "_col"
# Item "None" of "Optional[Column]" has no attribute "_col"
categories = np.array(cat_column._col) # type: ignore[union-attr]
else:
raise NotImplementedError(
"Interchanging categorical columns isn't supported yet, and our "
"fallback of using the `col._col` attribute (a ndarray) failed."
)
buffers = col.get_buffers()
codes_buff, codes_dtype = buffers["data"]
codes = buffer_to_ndarray(
codes_buff, codes_dtype, offset=col.offset, length=col.size()
)
# Doing module in order to not get ``IndexError`` for
# out-of-bounds sentinel values in `codes`
if len(categories) > 0:
values = categories[codes % len(categories)]
else:
values = codes
cat = pd.Categorical(
values, categories=categories, ordered=categorical["is_ordered"]
)
data = pd.Series(cat)
data = set_nulls(data, col, buffers["validity"])
return data, buffers
|
Convert a column holding categorical data to a pandas Series.
Parameters
----------
col : Column
Returns
-------
tuple
Tuple of pd.Series holding the data and the memory owner object
that keeps the memory alive.
|
python
|
pandas/core/interchange/from_dataframe.py
| 249
|
[
"col"
] |
tuple[pd.Series, Any]
| true
| 6
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
indexOf
|
public int indexOf(final StrMatcher matcher) {
return indexOf(matcher, 0);
}
|
Searches the string builder using the matcher to find the first match.
<p>
Matchers can be used to perform advanced searching behavior.
For example you could write a matcher to find the character 'a'
followed by a number.
</p>
@param matcher the matcher to use, null returns -1
@return the first index matched, or -1 if not found
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 2,053
|
[
"matcher"
] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
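Editor's illustration (not a dataset row): a minimal sketch of the matcher-based `indexOf` record above, assuming the (now-deprecated) `org.apache.commons.lang3.text` package is available.

```java
import org.apache.commons.lang3.text.StrBuilder;
import org.apache.commons.lang3.text.StrMatcher;

class StrBuilderIndexOfDemo {
    public static void main(String[] args) {
        StrBuilder sb = new StrBuilder("id=42;name=x");
        // Find the first digit with a matcher rather than a fixed search string
        int idx = sb.indexOf(StrMatcher.charSetMatcher("0123456789"));
        System.out.println(idx); // 3
    }
}
```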
asList
|
public static List<Integer> asList(int... backingArray) {
if (backingArray.length == 0) {
return Collections.emptyList();
}
return new IntArrayAsList(backingArray);
}
|
Returns a fixed-size list backed by the specified array, similar to {@link
Arrays#asList(Object[])}. The list supports {@link List#set(int, Object)}, but any attempt to
set a value to {@code null} will result in a {@link NullPointerException}.
<p>The returned list maintains the values, but not the identities, of {@code Integer} objects
written to or read from it. For example, whether {@code list.get(0) == list.get(0)} is true for
the returned list is unspecified.
<p>The returned list is serializable.
<p><b>Note:</b> when possible, you should represent your data as an {@link ImmutableIntArray}
instead, which has an {@link ImmutableIntArray#asList asList} view.
@param backingArray the array to back the list
@return a list view of the array
|
java
|
android/guava/src/com/google/common/primitives/Ints.java
| 657
|
[] | true
| 2
| 7.92
|
google/guava
| 51,352
|
javadoc
| false
|
|
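Editor's illustration (not a dataset row): a minimal sketch of the `Ints.asList` record above, assuming Guava on the classpath, showing the write-through view semantics the docstring describes.

```java
import com.google.common.primitives.Ints;
import java.util.List;

class IntsAsListDemo {
    public static void main(String[] args) {
        int[] backing = {1, 2, 3};
        List<Integer> view = Ints.asList(backing);
        view.set(0, 9);                 // writes through to the backing array
        System.out.println(backing[0]); // 9
        // view.add(4)       -> UnsupportedOperationException (fixed size)
        // view.set(1, null) -> NullPointerException (primitives cannot hold null)
    }
}
```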
toStringAll
|
public String toStringAll() {
return "FastDateParser [pattern=" + pattern + ", timeZone=" + timeZone + ", locale=" + locale + ", century=" + century + ", startYear=" + startYear
+ ", patterns=" + patterns + "]";
}
|
Converts all state of this instance to a String, handy for debugging.
@return a string.
@since 3.12.0
|
java
|
src/main/java/org/apache/commons/lang3/time/FastDateParser.java
| 1,125
|
[] |
String
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
revoke
|
def revoke(state, task_id, terminate=False, signal=None, **kwargs):
"""Revoke task by task id (or list of ids).
Keyword Arguments:
terminate (bool): Also terminate the process if the task is active.
signal (str): Name of signal to use for terminate (e.g., ``KILL``).
"""
# pylint: disable=redefined-outer-name
# XXX Note that this redefines `terminate`:
# Outside of this scope that is a function.
# supports list argument since 3.1
task_ids, task_id = set(maybe_list(task_id) or []), None
task_ids = _revoke(state, task_ids, terminate, signal, **kwargs)
if isinstance(task_ids, dict) and 'ok' in task_ids:
return task_ids
return ok(f'tasks {task_ids} flagged as revoked')
|
Revoke task by task id (or list of ids).
Keyword Arguments:
terminate (bool): Also terminate the process if the task is active.
signal (str): Name of signal to use for terminate (e.g., ``KILL``).
|
python
|
celery/worker/control.py
| 138
|
[
"state",
"task_id",
"terminate",
"signal"
] | false
| 4
| 6.24
|
celery/celery
| 27,741
|
unknown
| false
|
|
_read_dump_from_disk
|
def _read_dump_from_disk(self) -> CacheDump | None:
"""Read the cache dump from disk.
Attempts to read and parse the shared cache JSON file.
Returns:
The cache dump if the file exists and is valid JSON, None otherwise.
"""
try:
with open(self._shared_cache_filepath) as f:
data = json.load(f)
return cast(CacheDump, data)
except FileNotFoundError:
return None
except json.JSONDecodeError:
return None
|
Read the cache dump from disk.
Attempts to read and parse the shared cache JSON file.
Returns:
The cache dump if the file exists and is valid JSON, None otherwise.
|
python
|
torch/_inductor/runtime/caching/interfaces.py
| 288
|
[
"self"
] |
CacheDump | None
| true
| 1
| 6.56
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
addAll
|
@CanIgnoreReturnValue
@Override
public Builder<E> addAll(Iterable<? extends E> elements) {
checkNotNull(elements);
if (elements instanceof Collection) {
Collection<?> collection = (Collection<?>) elements;
ensureRoomFor(collection.size());
if (collection instanceof ImmutableCollection) {
ImmutableCollection<?> immutableCollection = (ImmutableCollection<?>) collection;
size = immutableCollection.copyIntoArray(contents, size);
return this;
}
}
super.addAll(elements);
return this;
}
|
Adds each element of {@code elements} to the {@code ImmutableList}.
@param elements the {@code Iterable} to add to the {@code ImmutableList}
@return this {@code Builder} object
@throws NullPointerException if {@code elements} is null or contains a null element
|
java
|
guava/src/com/google/common/collect/ImmutableList.java
| 887
|
[
"elements"
] | true
| 3
| 7.76
|
google/guava
| 51,352
|
javadoc
| false
|
|
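Editor's illustration (not a dataset row): a minimal sketch of the `ImmutableList.Builder#addAll` record above, assuming Guava on the classpath.

```java
import com.google.common.collect.ImmutableList;
import java.util.List;

class BuilderAddAllDemo {
    public static void main(String[] args) {
        ImmutableList<String> letters = ImmutableList.<String>builder()
                .add("a")
                .addAll(List.of("b", "c")) // bulk-adds; pre-sizes when given a Collection
                .build();
        System.out.println(letters); // [a, b, c]
        // addAll(null), or an Iterable containing null -> NullPointerException
    }
}
```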
_get_doc_link
|
def _get_doc_link(self):
"""Generates a link to the API documentation for a given estimator.
This method generates the link to the estimator's documentation page
by using the template defined by the attribute `_doc_link_template`.
Returns
-------
url : str
The URL to the API documentation for this estimator. If the estimator does
not belong to module `_doc_link_module`, the empty string (i.e. `""`) is
returned.
"""
if self.__class__.__module__.split(".")[0] != self._doc_link_module:
return ""
if self._doc_link_url_param_generator is None:
estimator_name = self.__class__.__name__
# Construct the estimator's module name, up to the first private submodule.
# This works because in scikit-learn all public estimators are exposed at
# that level, even if they actually live in a private sub-module.
estimator_module = ".".join(
itertools.takewhile(
lambda part: not part.startswith("_"),
self.__class__.__module__.split("."),
)
)
return self._doc_link_template.format(
estimator_module=estimator_module, estimator_name=estimator_name
)
return self._doc_link_template.format(**self._doc_link_url_param_generator())
|
Generates a link to the API documentation for a given estimator.
This method generates the link to the estimator's documentation page
by using the template defined by the attribute `_doc_link_template`.
Returns
-------
url : str
The URL to the API documentation for this estimator. If the estimator does
not belong to module `_doc_link_module`, the empty string (i.e. `""`) is
returned.
|
python
|
sklearn/utils/_repr_html/base.py
| 85
|
[
"self"
] | false
| 3
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
unknown
| false
|
|
length
|
public int length() {
return this.nameValuePairs.size();
}
|
Returns the number of name/value mappings in this object.
@return the number of name/value mappings in this object
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONObject.java
| 193
|
[] | true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
masked
|
def masked(mask: str, body: Callable[[], str], other: float) -> str:
"""
Computes body, but only uses the result where mask is true.
Where mask is false, uses the 'other' value instead.
"""
result = body()
# Format the 'other' value properly for JAX
if isinstance(other, float):
if math.isnan(other):
other_str = "jnp.nan"
elif math.isinf(other):
other_str = "jnp.inf" if other > 0 else "-jnp.inf"
else:
other_str = repr(other)
else:
other_str = repr(other)
# Use jnp.where to select between result and other based on mask
return f"jnp.where({mask}, {result}, {other_str})"
|
Computes body, but only uses the result where mask is true.
Where mask is false, uses the 'other' value instead.
|
python
|
torch/_inductor/codegen/pallas.py
| 197
|
[
"mask",
"body",
"other"
] |
str
| true
| 7
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
readDouble
|
@CanIgnoreReturnValue // to skip some bytes
@Override
public double readDouble() throws IOException {
return Double.longBitsToDouble(readLong());
}
|
Reads a {@code double} as specified by {@link DataInputStream#readDouble()}, except using
little-endian byte order.
@return the next eight bytes of the input stream, interpreted as a {@code double} in
little-endian byte order
@throws IOException if an I/O error occurs
|
java
|
android/guava/src/com/google/common/io/LittleEndianDataInputStream.java
| 170
|
[] | true
| 1
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
get_status
|
def get_status(self, aws_account_id: str | None, data_set_id: str, ingestion_id: str) -> str:
"""
Get the current status of QuickSight Create Ingestion API.
.. seealso::
- :external+boto3:py:meth:`QuickSight.Client.describe_ingestion`
:param aws_account_id: An AWS Account ID, if set to ``None`` then use associated AWS Account ID.
:param data_set_id: QuickSight Data Set ID
:param ingestion_id: QuickSight Ingestion ID
:return: An QuickSight Ingestion Status
"""
aws_account_id = aws_account_id or self.account_id
try:
describe_ingestion_response = self.conn.describe_ingestion(
AwsAccountId=aws_account_id, DataSetId=data_set_id, IngestionId=ingestion_id
)
return describe_ingestion_response["Ingestion"]["IngestionStatus"]
except KeyError as e:
raise AirflowException(f"Could not get status of the Amazon QuickSight Ingestion: {e}")
except ClientError as e:
raise AirflowException(f"AWS request failed: {e}")
|
Get the current status of QuickSight Create Ingestion API.
.. seealso::
- :external+boto3:py:meth:`QuickSight.Client.describe_ingestion`
:param aws_account_id: An AWS Account ID, if set to ``None`` then use associated AWS Account ID.
:param data_set_id: QuickSight Data Set ID
:param ingestion_id: QuickSight Ingestion ID
:return: An QuickSight Ingestion Status
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/quicksight.py
| 95
|
[
"self",
"aws_account_id",
"data_set_id",
"ingestion_id"
] |
str
| true
| 2
| 7.44
|
apache/airflow
| 43,597
|
sphinx
| false
|
getLocaleSpecificStrategy
|
private Strategy getLocaleSpecificStrategy(final int field, final Calendar definingCalendar) {
final ConcurrentMap<Locale, Strategy> cache = getCache(field);
return cache.computeIfAbsent(locale,
k -> field == Calendar.ZONE_OFFSET ? new TimeZoneStrategy(locale) : new CaseInsensitiveTextStrategy(field, definingCalendar, locale));
}
|
Constructs a Strategy that parses a Text field
@param field The Calendar field
@param definingCalendar The calendar to obtain the short and long values
@return a TextStrategy for the field and Locale
|
java
|
src/main/java/org/apache/commons/lang3/time/FastDateParser.java
| 900
|
[
"field",
"definingCalendar"
] |
Strategy
| true
| 2
| 7.28
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
execute_interactive
|
def execute_interactive(cmd: list[str], **kwargs) -> None:
"""
Run the new command as a subprocess.
Runs the new command as a subprocess and ensures that the terminal's state is restored to its original
state after the process is completed e.g. if the subprocess hides the cursor, it will be restored after
the process is completed.
"""
log.info("Executing cmd: %s", " ".join(shlex.quote(c) for c in cmd))
old_tty = termios.tcgetattr(sys.stdin)
old_sigint_handler = signal.getsignal(signal.SIGINT)
old_winch_handler = signal.getsignal(signal.SIGWINCH)
tty.setcbreak(sys.stdin.fileno())
# open pseudo-terminal to interact with subprocess
primary_fd, secondary_fd = pty.openpty()
try:
with subprocess.Popen(
cmd,
stdin=secondary_fd,
stdout=secondary_fd,
stderr=secondary_fd,
universal_newlines=True,
**kwargs,
) as proc:
# ignore SIGINT in the parent process
def _sighandler(sig, frame):
proc.send_signal(sig)
def _sigwinch(sig, frame):
# On Py3.11+ we could use termios.tcgetwinsize/tcsetwinsize instead
buf = array.array("h", [0, 0, 0, 0])
fcntl.ioctl(pty.STDOUT_FILENO, termios.TIOCGWINSZ, buf, True)
fcntl.ioctl(secondary_fd, termios.TIOCSWINSZ, buf)
# Set the initial size too
_sigwinch(signal.SIGWINCH, None)
signal.signal(signal.SIGINT, _sighandler)
signal.signal(signal.SIGWINCH, _sigwinch)
while proc.poll() is None:
readable_fbs, _, _ = select.select([sys.stdin, primary_fd], [], [], 0)
if sys.stdin in readable_fbs:
input_data = os.read(sys.stdin.fileno(), 10240)
os.write(primary_fd, input_data)
if primary_fd in readable_fbs:
output_data = os.read(primary_fd, 10240)
if output_data:
os.write(pty.STDOUT_FILENO, output_data)
finally:
# restore tty settings back
signal.signal(signal.SIGINT, old_sigint_handler)
signal.signal(signal.SIGWINCH, old_winch_handler)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty)
|
Run the new command as a subprocess.
Runs the new command as a subprocess and ensures that the terminal's state is restored to its original
state after the process is completed e.g. if the subprocess hides the cursor, it will be restored after
the process is completed.
|
python
|
airflow-core/src/airflow/utils/process_utils.py
| 226
|
[
"cmd"
] |
None
| true
| 5
| 6
|
apache/airflow
| 43,597
|
unknown
| false
|
insert
|
public StrBuilder insert(final int index, final char value) {
validateIndex(index);
ensureCapacity(size + 1);
System.arraycopy(buffer, index, buffer, index + 1, size - index);
buffer[index] = value;
size++;
return this;
}
|
Inserts the value into this builder.
@param index the index to add at, must be valid
@param value the value to insert
@return {@code this} instance.
@throws IndexOutOfBoundsException if the index is invalid
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 2,124
|
[
"index",
"value"
] |
StrBuilder
| true
| 1
| 6.56
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
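Editor's illustration (not a dataset row): a minimal sketch of the `StrBuilder#insert` record above, assuming the (now-deprecated) `org.apache.commons.lang3.text` package is available.

```java
import org.apache.commons.lang3.text.StrBuilder;

class StrBuilderInsertDemo {
    public static void main(String[] args) {
        StrBuilder sb = new StrBuilder("ac");
        sb.insert(1, 'b');                 // shifts "c" right and writes 'b' at index 1
        System.out.println(sb.toString()); // abc
        // sb.insert(99, 'x') -> IndexOutOfBoundsException
    }
}
```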
shift
|
public static void shift(final boolean[] array, final int offset) {
if (array != null) {
shift(array, 0, array.length, offset);
}
}
|
Shifts the order of the given boolean array.
<p>There is no special handling for multi-dimensional arrays. This method
does nothing for {@code null} or empty input arrays.</p>
@param array the array to shift, may be {@code null}.
@param offset
The number of positions to rotate the elements. If the offset is larger than the number of elements to
rotate, then the effective offset is modulo the number of elements to rotate.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 6,789
|
[
"array",
"offset"
] |
void
| true
| 2
| 6.88
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
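Editor's illustration (not a dataset row): a minimal sketch of the `ArrayUtils.shift` record above, assuming commons-lang on the classpath; a positive offset rotates elements toward higher indices, wrapping around.

```java
import java.util.Arrays;
import org.apache.commons.lang3.ArrayUtils;

class ShiftDemo {
    public static void main(String[] args) {
        boolean[] flags = {true, false, false};
        ArrayUtils.shift(flags, 1);                 // rotate one position toward higher indices
        System.out.println(Arrays.toString(flags)); // [false, true, false]
        ArrayUtils.shift((boolean[]) null, 2);      // silently does nothing for null input
    }
}
```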
drop_duplicates
|
def drop_duplicates(
self,
*,
keep: DropKeep = "first",
inplace: bool = False,
ignore_index: bool = False,
) -> Series | None:
"""
Return Series with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
Method to handle dropping duplicates:
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
inplace : bool, default ``False``
If ``True``, performs operation inplace and returns None.
ignore_index : bool, default ``False``
If ``True``, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 2.0.0
Returns
-------
Series or None
Series with duplicates dropped or None if ``inplace=True``.
See Also
--------
Index.drop_duplicates : Equivalent method on Index.
DataFrame.drop_duplicates : Equivalent method on DataFrame.
Series.duplicated : Related method on Series, indicating duplicate
Series values.
Series.unique : Return unique values as an array.
Examples
--------
Generate a Series with duplicated entries.
>>> s = pd.Series(
... ["llama", "cow", "llama", "beetle", "llama", "hippo"], name="animal"
... )
>>> s
0 llama
1 cow
2 llama
3 beetle
4 llama
5 hippo
Name: animal, dtype: object
With the 'keep' parameter, the selection behavior of duplicated values
can be changed. The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> s.drop_duplicates()
0 llama
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
The value 'last' for parameter 'keep' keeps the last occurrence for
each set of duplicated entries.
>>> s.drop_duplicates(keep="last")
1 cow
3 beetle
4 llama
5 hippo
Name: animal, dtype: object
The value ``False`` for parameter 'keep' discards all sets of
duplicated entries.
>>> s.drop_duplicates(keep=False)
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
"""
inplace = validate_bool_kwarg(inplace, "inplace")
result = super().drop_duplicates(keep=keep)
if ignore_index:
result.index = default_index(len(result))
if inplace:
self._update_inplace(result)
return None
else:
return result
|
Return Series with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
Method to handle dropping duplicates:
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
inplace : bool, default ``False``
If ``True``, performs operation inplace and returns None.
ignore_index : bool, default ``False``
If ``True``, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 2.0.0
Returns
-------
Series or None
Series with duplicates dropped or None if ``inplace=True``.
See Also
--------
Index.drop_duplicates : Equivalent method on Index.
DataFrame.drop_duplicates : Equivalent method on DataFrame.
Series.duplicated : Related method on Series, indicating duplicate
Series values.
Series.unique : Return unique values as an array.
Examples
--------
Generate a Series with duplicated entries.
>>> s = pd.Series(
... ["llama", "cow", "llama", "beetle", "llama", "hippo"], name="animal"
... )
>>> s
0 llama
1 cow
2 llama
3 beetle
4 llama
5 hippo
Name: animal, dtype: object
With the 'keep' parameter, the selection behavior of duplicated values
can be changed. The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> s.drop_duplicates()
0 llama
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
The value 'last' for parameter 'keep' keeps the last occurrence for
each set of duplicated entries.
>>> s.drop_duplicates(keep="last")
1 cow
3 beetle
4 llama
5 hippo
Name: animal, dtype: object
The value ``False`` for parameter 'keep' discards all sets of
duplicated entries.
>>> s.drop_duplicates(keep=False)
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
|
python
|
pandas/core/series.py
| 2,241
|
[
"self",
"keep",
"inplace",
"ignore_index"
] |
Series | None
| true
| 4
| 8.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
subscribe
|
@Override
public void subscribe(SubscriptionPattern pattern, ConsumerRebalanceListener listener) {
if (listener == null)
throw new IllegalArgumentException("RebalanceListener cannot be null");
subscribeToRegex(pattern, Optional.of(listener));
}
|
This method signals the background thread to {@link CreateFetchRequestsEvent create fetch requests} for the
pre-fetch case, i.e. right before {@link #poll(Duration)} exits. In the pre-fetch case, the application thread
will not wait for confirmation of the request creation before continuing.
<p/>
At the point this method is called, {@link KafkaConsumer#poll(Duration)} has data ready to return to the user,
which means the consumed position was already updated. In order to prevent potential gaps in records, this
method is designed to suppress all exceptions.
@param timer Provides an upper bound for the event and its {@link CompletableFuture future}
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
| 2,048
|
[
"pattern",
"listener"
] |
void
| true
| 2
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
nextToken
|
@Override
public Token nextToken() throws IOException {
try {
return convertToken(parser.nextToken());
} catch (IOException e) {
throw handleParserException(e);
}
}
|
Handle parser exception depending on type.
This converts known exceptions to XContentParseException and rethrows them.
|
java
|
libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java
| 87
|
[] |
Token
| true
| 2
| 6.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
createInlineReturn
|
function createInlineReturn(expression?: Expression, location?: TextRange): ReturnStatement {
return setTextRange(
factory.createReturnStatement(
factory.createArrayLiteralExpression(
expression
? [createInstruction(Instruction.Return), expression]
: [createInstruction(Instruction.Return)],
),
),
location,
);
}
|
Creates a statement that can be used to indicate a Return operation.
@param expression The expression for the return statement.
@param location An optional source map location for the statement.
|
typescript
|
src/compiler/transformers/generators.ts
| 2,575
|
[
"expression?",
"location?"
] | true
| 2
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
readInBuffer
|
private static int readInBuffer(DataBlock dataBlock, long pos, ByteBuffer buffer, int maxLen, int minLen)
throws IOException {
buffer.clear();
if (buffer.remaining() > maxLen) {
buffer.limit(maxLen);
}
int result = 0;
while (result < minLen) {
int count = dataBlock.read(buffer, pos);
if (count <= 0) {
throw new EOFException();
}
result += count;
pos += count;
}
return result;
}
|
Read a string value from the given data block.
@param data the source data
@param pos the position to read from
@param len the number of bytes to read
@return the contents as a string
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipString.java
| 276
|
[
"dataBlock",
"pos",
"buffer",
"maxLen",
"minLen"
] | true
| 4
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
valuesIn
|
function valuesIn(object) {
return object == null ? [] : baseValues(object, keysIn(object));
}
|
Creates an array of the own and inherited enumerable string keyed property
values of `object`.
**Note:** Non-object values are coerced to objects.
@static
@memberOf _
@since 3.0.0
@category Object
@param {Object} object The object to query.
@returns {Array} Returns the array of property values.
@example
function Foo() {
this.a = 1;
this.b = 2;
}
Foo.prototype.c = 3;
_.valuesIn(new Foo);
// => [1, 2, 3] (iteration order is not guaranteed)
|
javascript
|
lodash.js
| 14,063
|
[
"object"
] | false
| 2
| 7.44
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
indexOfIgnoreCase
|
@Deprecated
public static int indexOfIgnoreCase(final CharSequence str, final CharSequence searchStr, final int startPos) {
return Strings.CI.indexOf(str, searchStr, startPos);
}
|
Case-insensitive find of the first index within a CharSequence from the specified position.
<p>
A {@code null} CharSequence will return {@code -1}. A negative start position is treated as zero. An empty ("") search CharSequence always matches. A
start position greater than the string length only matches an empty search CharSequence.
</p>
<pre>
StringUtils.indexOfIgnoreCase(null, *, *) = -1
StringUtils.indexOfIgnoreCase(*, null, *) = -1
StringUtils.indexOfIgnoreCase("", "", 0) = 0
StringUtils.indexOfIgnoreCase("aabaabaa", "A", 0) = 0
StringUtils.indexOfIgnoreCase("aabaabaa", "B", 0) = 2
StringUtils.indexOfIgnoreCase("aabaabaa", "AB", 0) = 1
StringUtils.indexOfIgnoreCase("aabaabaa", "B", 3) = 5
StringUtils.indexOfIgnoreCase("aabaabaa", "B", 9) = -1
StringUtils.indexOfIgnoreCase("aabaabaa", "B", -1) = 2
StringUtils.indexOfIgnoreCase("aabaabaa", "", 2) = 2
StringUtils.indexOfIgnoreCase("abc", "", 9) = -1
</pre>
@param str the CharSequence to check, may be null.
@param searchStr the CharSequence to find, may be null.
@param startPos the start position, negative treated as zero.
@return the first index of the search CharSequence (always ≥ startPos), -1 if no match or {@code null} string input.
@since 2.5
@since 3.0 Changed signature from indexOfIgnoreCase(String, String, int) to indexOfIgnoreCase(CharSequence, CharSequence, int)
@deprecated Use {@link Strings#indexOf(CharSequence, CharSequence, int) Strings.CI.indexOf(CharSequence, CharSequence, int)}.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 3,101
|
[
"str",
"searchStr",
"startPos"
] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
describeMetadataQuorum
|
default DescribeMetadataQuorumResult describeMetadataQuorum() {
return describeMetadataQuorum(new DescribeMetadataQuorumOptions());
}
|
Describes the state of the metadata quorum.
<p>
This is a convenience method for {@link #describeMetadataQuorum(DescribeMetadataQuorumOptions)} with default options.
See the overload for more details.
@return the {@link DescribeMetadataQuorumResult} containing the result
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 1,608
|
[] |
DescribeMetadataQuorumResult
| true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
removeInterface
|
public boolean removeInterface(Class<?> ifc) {
return this.interfaces.remove(ifc);
}
|
Remove a proxied interface.
<p>Does nothing if the given interface isn't proxied.
@param ifc the interface to remove from the proxy
@return {@code true} if the interface was removed; {@code false}
if the interface was not found and hence could not be removed
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/AdvisedSupport.java
| 249
|
[
"ifc"
] | true
| 1
| 6.96
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
ngroup
|
def ngroup(self, ascending: bool = True):
"""
Number each group from 0 to the number of groups - 1.
This is the enumerative complement of cumcount. Note that the
numbers given to the groups match the order in which the groups
would be seen when iterating over the groupby object, not the
order they are first observed.
Groups with missing keys (where `pd.isna()` is True) will be labeled with `NaN`
and will be skipped from the count.
Parameters
----------
ascending : bool, default True
If False, number in reverse, from number of group - 1 to 0.
Returns
-------
Series
Unique numbers for each group.
See Also
--------
.cumcount : Number the rows in each group.
Examples
--------
>>> df = pd.DataFrame({"color": ["red", None, "red", "blue", "blue", "red"]})
>>> df
color
0 red
1 NaN
2 red
3 blue
4 blue
5 red
>>> df.groupby("color").ngroup()
0 1.0
1 NaN
2 1.0
3 0.0
4 0.0
5 1.0
dtype: float64
>>> df.groupby("color", dropna=False).ngroup()
0 1
1 2
2 1
3 0
4 0
5 1
dtype: int64
>>> df.groupby("color", dropna=False).ngroup(ascending=False)
0 1
1 0
2 1
3 2
4 2
5 1
dtype: int64
"""
obj = self._obj_with_exclusions
index = obj.index
comp_ids = self._grouper.ids
dtype: type
if self._grouper.has_dropped_na:
comp_ids = np.where(comp_ids == -1, np.nan, comp_ids)
dtype = np.float64
else:
dtype = np.int64
if any(ping._passed_categorical for ping in self._grouper.groupings):
# comp_ids reflect non-observed groups, we need only observed
comp_ids = rank_1d(comp_ids, ties_method="dense") - 1
result = self._obj_1d_constructor(comp_ids, index, dtype=dtype)
if not ascending:
result = self.ngroups - 1 - result
return result
|
Number each group from 0 to the number of groups - 1.
This is the enumerative complement of cumcount. Note that the
numbers given to the groups match the order in which the groups
would be seen when iterating over the groupby object, not the
order they are first observed.
Groups with missing keys (where `pd.isna()` is True) will be labeled with `NaN`
and will be skipped from the count.
Parameters
----------
ascending : bool, default True
If False, number in reverse, from number of group - 1 to 0.
Returns
-------
Series
Unique numbers for each group.
See Also
--------
.cumcount : Number the rows in each group.
Examples
--------
>>> df = pd.DataFrame({"color": ["red", None, "red", "blue", "blue", "red"]})
>>> df
color
0 red
1 NaN
2 red
3 blue
4 blue
5 red
>>> df.groupby("color").ngroup()
0 1.0
1 NaN
2 1.0
3 0.0
4 0.0
5 1.0
dtype: float64
>>> df.groupby("color", dropna=False).ngroup()
0 1
1 2
2 1
3 0
4 0
5 1
dtype: int64
>>> df.groupby("color", dropna=False).ngroup(ascending=False)
0 1
1 0
2 1
3 2
4 2
5 1
dtype: int64
|
python
|
pandas/core/groupby/groupby.py
| 4,623
|
[
"self",
"ascending"
] | true
| 5
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
insert
|
def insert(self, key: str, value: bytes) -> bool:
"""Insert a key-value pair into the on-disk cache.
Args:
key: The key to insert (must be str).
value: The value to associate with the key (must be bytes).
Returns:
True if successfully inserted, False if the key already exists
with a valid version.
"""
fpath: Path = self._fpath_from_key(key)
fpath.parent.mkdir(parents=True, exist_ok=True)
r_fp, w_fp, inserted = None, None, False
try:
w_fp = open(fpath, "xb") # noqa: SIM115
except FileExistsError:
is_stale: bool = False
with open(fpath, "rb") as r_fp:
is_stale = not self._version_header_matches(r_fp)
if is_stale:
# same story as above, in this case the version header doesn't
# match so we choose to remove the old entry so that the new
# k/v pair can be cached
fpath.unlink()
w_fp = open(fpath, "xb") # noqa: SIM115
else:
w_fp = None
finally:
if w_fp:
try:
self._write_version_header(w_fp)
w_fp.write(value)
inserted = True
finally:
w_fp.close()
return inserted
|
Insert a key-value pair into the on-disk cache.
Args:
key: The key to insert (must be str).
value: The value to associate with the key (must be bytes).
Returns:
True if successfully inserted, False if the key already exists
with a valid version.
|
python
|
torch/_inductor/runtime/caching/implementations.py
| 304
|
[
"self",
"key",
"value"
] |
bool
| true
| 4
| 8.24
|
pytorch/pytorch
| 96,034
|
google
| false
|
getExportEqualsImportKind
|
function getExportEqualsImportKind(importingFile: SourceFile | FutureSourceFile, compilerOptions: CompilerOptions, forceImportKeyword: boolean): ImportKind {
const allowSyntheticDefaults = getAllowSyntheticDefaultImports(compilerOptions);
const isJS = hasJSFileExtension(importingFile.fileName);
// 1. 'import =' will not work in es2015+ TS files, so the decision is between a default
// and a namespace import, based on allowSyntheticDefaultImports/esModuleInterop.
if (!isJS && getEmitModuleKind(compilerOptions) >= ModuleKind.ES2015) {
return allowSyntheticDefaults ? ImportKind.Default : ImportKind.Namespace;
}
// 2. 'import =' will not work in JavaScript, so the decision is between a default import,
// a namespace import, and const/require.
if (isJS) {
return importingFile.externalModuleIndicator || forceImportKeyword
? allowSyntheticDefaults ? ImportKind.Default : ImportKind.Namespace
: ImportKind.CommonJS;
}
// 3. At this point the most correct choice is probably 'import =', but people
// really hate that, so look to see if the importing file has any precedent
// on how to handle it.
for (const statement of importingFile.statements ?? emptyArray) {
// `import foo` parses as an ImportEqualsDeclaration even though it could be an ImportDeclaration
if (isImportEqualsDeclaration(statement) && !nodeIsMissing(statement.moduleReference)) {
return ImportKind.CommonJS;
}
}
// 4. We have no precedent to go on, so just use a default import if
// allowSyntheticDefaultImports/esModuleInterop is enabled.
return allowSyntheticDefaults ? ImportKind.Default : ImportKind.CommonJS;
}
|
@param forceImportKeyword Indicates that the user has already typed `import`, so the result must start with `import`.
(In other words, do not allow `const x = require("...")` for JS files.)
@internal
|
typescript
|
src/services/codefixes/importFixes.ts
| 1,667
|
[
"importingFile",
"compilerOptions",
"forceImportKeyword"
] | true
| 11
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
equals
|
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (!super.equals(obj)) {
return false;
}
if (!(obj instanceof ExtendedMessageFormat)) {
return false;
}
final ExtendedMessageFormat other = (ExtendedMessageFormat) obj;
return Objects.equals(registry, other.registry) && Objects.equals(toPattern, other.toPattern);
}
|
Learn whether the specified Collection contains non-null elements.
@param coll to check
@return {@code true} if some Object was found, {@code false} otherwise.
|
java
|
src/main/java/org/apache/commons/lang3/text/ExtendedMessageFormat.java
| 257
|
[
"obj"
] | true
| 5
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
incrementAndGet
|
public byte incrementAndGet() {
value++;
return value;
}
|
Increments this instance's value by 1; this method returns the value associated with the instance
immediately after the increment operation. This method is not thread safe.
@return the value associated with the instance after it is incremented.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableByte.java
| 302
|
[] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
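Editor's illustration (not a dataset row): a minimal sketch of the `MutableByte#incrementAndGet` record above, assuming commons-lang on the classpath.

```java
import org.apache.commons.lang3.mutable.MutableByte;

class MutableByteDemo {
    public static void main(String[] args) {
        MutableByte counter = new MutableByte((byte) 0);
        byte after = counter.incrementAndGet(); // increments first, then returns the new value
        System.out.println(after);              // 1
        // Not thread-safe: prefer AtomicInteger for concurrent counters
    }
}
```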
getManifest
|
private Manifest getManifest(Archive archive) {
try {
return (archive != null) ? archive.getManifest() : null;
}
catch (IOException ex) {
return null;
}
}
|
Create a new {@link LaunchedClassLoader} instance.
@param exploded if the underlying archive is exploded
@param rootArchive the root archive or {@code null}
@param urls the URLs from which to load classes and resources
@param parent the parent class loader for delegation
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/launch/LaunchedClassLoader.java
| 162
|
[
"archive"
] |
Manifest
| true
| 3
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
remove_docker_networks
|
def remove_docker_networks(networks: list[str] | None = None) -> None:
"""
Removes specified docker networks. If no networks are specified, it removes all networks created by breeze.
    Any network with the label "com.docker.compose.project=breeze" is removed when no networks are specified.
Errors are ignored (not even printed in the output), so you can safely call it without checking
if the networks exist.
:param networks: list of networks to remove
"""
if networks is None:
run_command(
["docker", "network", "prune", "-f", "-a", "--filter", "label=com.docker.compose.project=breeze"],
check=False,
stderr=DEVNULL,
quiet=True,
)
else:
for network in networks:
run_command(
["docker", "network", "rm", network],
check=False,
stderr=DEVNULL,
quiet=True,
)
|
Removes specified docker networks. If no networks are specified, it removes all networks created by breeze.
Any network with the label "com.docker.compose.project=breeze" is removed when no networks are specified.
Errors are ignored (not even printed in the output), so you can safely call it without checking
if the networks exist.
:param networks: list of networks to remove
|
python
|
dev/breeze/src/airflow_breeze/utils/docker_command_utils.py
| 625
|
[
"networks"
] |
None
| true
| 4
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
setCount
|
@CanIgnoreReturnValue
@Override
public Builder<E> setCount(E element, int count) {
super.setCount(element, count);
return this;
}
|
Adds or removes the necessary occurrences of an element such that the element attains the
desired count.
@param element the element to add or remove occurrences of
@param count the desired count of the element in this multiset
@return this {@code Builder} object
@throws NullPointerException if {@code element} is null
@throws IllegalArgumentException if {@code count} is negative
|
java
|
guava/src/com/google/common/collect/ImmutableSortedMultiset.java
| 521
|
[
"element",
"count"
] | true
| 1
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
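Editor's illustration (not a dataset row): a minimal sketch of the `Builder#setCount` record above, assuming Guava's `ImmutableSortedMultiset.naturalOrder()` builder.

```java
import com.google.common.collect.ImmutableSortedMultiset;

class SetCountDemo {
    public static void main(String[] args) {
        ImmutableSortedMultiset<String> bag = ImmutableSortedMultiset.<String>naturalOrder()
                .add("x")
                .setCount("x", 3) // adds two more occurrences so "x" reaches count 3
                .build();
        System.out.println(bag.count("x")); // 3
        // setCount("y", -1) -> IllegalArgumentException; setCount(null, 1) -> NullPointerException
    }
}
```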
equals
|
@Override
public boolean equals(final Object obj) {
if (obj instanceof MutableShort) {
return value == ((MutableShort) obj).shortValue();
}
return false;
}
|
Compares this object to the specified object. The result is {@code true} if and only if the argument
is not {@code null} and is a {@link MutableShort} object that contains the same {@code short}
value as this object.
@param obj the object to compare with, null returns false.
@return {@code true} if the objects are the same; {@code false} otherwise.
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableShort.java
| 180
|
[
"obj"
] | true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
readInt
|
@CanIgnoreReturnValue // to skip some bytes
@Override
public int readInt() throws IOException {
byte b1 = readAndCheckByte();
byte b2 = readAndCheckByte();
byte b3 = readAndCheckByte();
byte b4 = readAndCheckByte();
return Ints.fromBytes(b4, b3, b2, b1);
}
|
Reads an integer as specified by {@link DataInputStream#readInt()}, except using little-endian
byte order.
@return the next four bytes of the input stream, interpreted as an {@code int} in little-endian
byte order
@throws IOException if an I/O error occurs
|
java
|
android/guava/src/com/google/common/io/LittleEndianDataInputStream.java
| 114
|
[] | true
| 1
| 6.32
|
google/guava
| 51,352
|
javadoc
| false
|
|
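Editor's illustration (not a dataset row): a minimal sketch of the little-endian `readInt` record above, assuming Guava on the classpath.

```java
import com.google.common.io.LittleEndianDataInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;

class LittleEndianDemo {
    public static void main(String[] args) throws IOException {
        byte[] bytes = {0x01, 0x00, 0x00, 0x00}; // the int 1 encoded little-endian
        try (LittleEndianDataInputStream in =
                new LittleEndianDataInputStream(new ByteArrayInputStream(bytes))) {
            // A big-endian DataInputStream would read 16777216 from the same bytes
            System.out.println(in.readInt()); // 1
        }
    }
}
```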
get_job_description
|
def get_job_description(self, job_id: str) -> dict:
"""
Get job description (using status_retries).
:param job_id: a Batch job ID
:return: an API response for describe jobs
:raises: AirflowException
"""
for retries in range(self.status_retries):
if retries:
pause = self.exponential_delay(retries)
self.log.info(
"AWS Batch job (%s) description retry (%d of %d) in the next %.2f seconds",
job_id,
retries,
self.status_retries,
pause,
)
self.delay(pause)
try:
response = self.get_conn().describe_jobs(jobs=[job_id])
return self.parse_job_description(job_id, response)
except AirflowException as err:
self.log.warning(err)
except botocore.exceptions.ClientError as err:
# Allow it to retry in case of exceeded quota limit of requests to AWS API
if err.response.get("Error", {}).get("Code") != "TooManyRequestsException":
raise
self.log.warning(
"Ignored TooManyRequestsException error, original message: %r. "
"Please consider to setup retries mode in boto3, "
"check Amazon Provider AWS Connection documentation for more details.",
str(err),
)
raise AirflowException(
f"AWS Batch job ({job_id}) description error: exceeded status_retries ({self.status_retries})"
)
|
Get job description (using status_retries).
:param job_id: a Batch job ID
:return: an API response for describe jobs
:raises: AirflowException
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/batch_client.py
| 391
|
[
"self",
"job_id"
] |
dict
| true
| 4
| 7.76
|
apache/airflow
| 43,597
|
sphinx
| false
|
getResource
|
Resource getResource(String location) {
validateNonPattern(location);
location = StringUtils.cleanPath(location);
if (!ResourceUtils.isUrl(location)) {
location = ResourceUtils.FILE_URL_PREFIX + location;
}
return this.resourceLoader.getResource(location);
}
|
Get a single resource from a non-pattern location.
@param location the location
@return the resource
@see #isPattern(String)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/LocationResourceLoader.java
| 74
|
[
"location"
] |
Resource
| true
| 2
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
indexesOf
|
public static BitSet indexesOf(final short[] array, final short valueToFind) {
return indexesOf(array, valueToFind, 0);
}
|
Finds the indices of the given value in the array.
<p>This method returns an empty BitSet for a {@code null} input array.</p>
@param array the array to search for the object, may be {@code null}.
@param valueToFind the value to find.
@return a BitSet of all the indices of the value within the array,
an empty BitSet if not found or {@code null} array input.
@since 3.10
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 2,324
|
[
"array",
"valueToFind"
] |
BitSet
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
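Editor's illustration (not a dataset row): a minimal sketch of the `ArrayUtils.indexesOf` record above, assuming commons-lang 3.10+ on the classpath.

```java
import java.util.BitSet;
import org.apache.commons.lang3.ArrayUtils;

class IndexesOfDemo {
    public static void main(String[] args) {
        short[] data = {1, 2, 1, 3, 1};
        BitSet hits = ArrayUtils.indexesOf(data, (short) 1);
        System.out.println(hits); // {0, 2, 4}
        System.out.println(ArrayUtils.indexesOf((short[]) null, (short) 1)); // {} (empty BitSet)
    }
}
```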
list_mode_options
|
def list_mode_options(
mode: Optional[str] = None, dynamic: Optional[bool] = None
) -> dict[str, Any]:
r"""Returns a dictionary describing the optimizations that each of the available
modes passed to `torch.compile()` performs.
Args:
mode (str, optional): The mode to return the optimizations for.
If None, returns optimizations for all modes
dynamic (bool, optional): Whether dynamic shape is enabled.
Example::
>>> torch._inductor.list_mode_options()
"""
mode_options: dict[str, dict[str, bool]] = {
"default": {},
# lite backend for opt-in optimizations
"lite": lite_mode_options,
# enable cudagraphs
"reduce-overhead": {
"triton.cudagraphs": True,
},
# enable max-autotune
"max-autotune-no-cudagraphs": {
"max_autotune": True,
"coordinate_descent_tuning": True,
},
# enable max-autotune
# enable cudagraphs
"max-autotune": {
"max_autotune": True,
"triton.cudagraphs": True,
"coordinate_descent_tuning": True,
},
}
try:
return mode_options[mode] if mode else mode_options
except KeyError as e:
raise RuntimeError(
f"Unrecognized mode={mode}, should be one of: {', '.join(mode_options.keys())}"
) from e
|
r"""Returns a dictionary describing the optimizations that each of the available
modes passed to `torch.compile()` performs.
Args:
mode (str, optional): The mode to return the optimizations for.
If None, returns optimizations for all modes
dynamic (bool, optional): Whether dynamic shape is enabled.
Example::
>>> torch._inductor.list_mode_options()
|
python
|
torch/_inductor/__init__.py
| 337
|
[
"mode",
"dynamic"
] |
dict[str, Any]
| true
| 2
| 8
|
pytorch/pytorch
| 96,034
|
google
| false
|
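A quick usage check for the function above (requires a PyTorch install; the exact set of mode keys may differ across versions):

import torch._inductor as inductor

# All modes at once, then a single mode; an unknown mode raises RuntimeError.
print(sorted(inductor.list_mode_options().keys()))
print(inductor.list_mode_options("max-autotune"))
try:
    inductor.list_mode_options("not-a-mode")
except RuntimeError as err:
    print(err)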
substituteExpression
|
function substituteExpression(node: Expression): Expression {
if (isIdentifier(node)) {
return substituteExpressionIdentifier(node);
}
return node;
}
|
Visits an ElementAccessExpression that contains a YieldExpression.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/generators.ts
| 2,060
|
[
"node"
] | true
| 2
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
getNextToProcess
|
private @Nullable ConfigDataEnvironmentContributor getNextToProcess(ConfigDataEnvironmentContributors contributors,
@Nullable ConfigDataActivationContext activationContext, ImportPhase importPhase) {
for (ConfigDataEnvironmentContributor contributor : contributors.getRoot()) {
if (contributor.getKind() == Kind.UNBOUND_IMPORT
|| isActiveWithUnprocessedImports(activationContext, importPhase, contributor)) {
return contributor;
}
}
return null;
}
|
Processes imports from all active contributors and return a new
{@link ConfigDataEnvironmentContributors} instance.
@param importer the importer used to import {@link ConfigData}
@param activationContext the current activation context or {@code null} if the
context has not yet been created
@return a {@link ConfigDataEnvironmentContributors} instance in which all
relevant imports have been processed
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataEnvironmentContributors.java
| 156
|
[
"contributors",
"activationContext",
"importPhase"
] |
ConfigDataEnvironmentContributor
| true
| 3
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
join
|
public static String join(final Object[] array, final String delimiter, final int startIndex, final int endIndex) {
return array != null ? Streams.of(array).skip(startIndex).limit(Math.max(0, endIndex - startIndex))
.collect(LangCollectors.joining(delimiter, EMPTY, EMPTY, ObjectUtils::toString)) : null;
}
|
Joins the elements of the provided array into a single String containing the provided list of elements.
<p>
No delimiter is added before or after the list. A {@code null} separator is the same as an empty String (""). Null objects or empty strings within the
array are represented by empty strings.
</p>
<pre>
StringUtils.join(null, *, *, *) = null
StringUtils.join([], *, *, *) = ""
StringUtils.join([null], *, *, *) = ""
StringUtils.join(["a", "b", "c"], "--", 0, 3) = "a--b--c"
StringUtils.join(["a", "b", "c"], "--", 1, 3) = "b--c"
StringUtils.join(["a", "b", "c"], "--", 2, 3) = "c"
StringUtils.join(["a", "b", "c"], "--", 2, 2) = ""
StringUtils.join(["a", "b", "c"], null, 0, 3) = "abc"
StringUtils.join(["a", "b", "c"], "", 0, 3) = "abc"
StringUtils.join([null, "", "a"], ',', 0, 3) = ",,a"
</pre>
@param array the array of values to join together, may be null.
@param delimiter the separator character to use, null treated as "".
@param startIndex the first index to start joining from.
@param endIndex the index to stop joining from (exclusive).
@return the joined String, {@code null} if null array input; or the empty string if {@code endIndex - startIndex <= 0}. The number of joined entries is
given by {@code endIndex - startIndex}.
@throws ArrayIndexOutOfBoundsException if<br>
{@code startIndex < 0} or <br>
{@code startIndex >= array.length} or <br>
{@code endIndex < 0} or <br>
{@code endIndex > array.length}
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 4,599
|
[
"array",
"delimiter",
"startIndex",
"endIndex"
] |
String
| true
| 2
| 7.84
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
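The same start/end-index join semantics, sketched in Python for comparison; names here are illustrative, and Python slicing clamps out-of-range indexes instead of throwing:

def join(array, delimiter, start, end):
    # None array -> None; None delimiter behaves like ""; None items -> "".
    if array is None:
        return None
    delimiter = delimiter or ""
    return delimiter.join("" if v is None else str(v) for v in array[start:end])

assert join(None, "*", 0, 1) is None
assert join(["a", "b", "c"], "--", 1, 3) == "b--c"
assert join(["a", "b", "c"], "--", 2, 2) == ""
assert join([None, "", "a"], ",", 0, 3) == ",,a"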
polydiv
|
def polydiv(c1, c2):
"""
Divide one polynomial by another.
Returns the quotient-with-remainder of two polynomials `c1` / `c2`.
The arguments are sequences of coefficients, from lowest order term
to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of polynomial coefficients ordered from low to high.
Returns
-------
[quo, rem] : ndarrays
Of coefficient series representing the quotient and remainder.
See Also
--------
polyadd, polysub, polymulx, polymul, polypow
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c1 = (1, 2, 3)
>>> c2 = (3, 2, 1)
>>> P.polydiv(c1, c2)
(array([3.]), array([-8., -4.]))
>>> P.polydiv(c2, c1)
(array([ 0.33333333]), array([ 2.66666667, 1.33333333])) # may vary
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError # FIXME: add message with details to exception
# note: this is more efficient than `pu._div(polymul, c1, c2)`
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1] * 0, c1
elif lc2 == 1:
return c1 / c2[-1], c1[:1] * 0
else:
dlen = lc1 - lc2
scl = c2[-1]
c2 = c2[:-1] / scl
i = dlen
j = lc1 - 1
while i >= 0:
c1[i:j] -= c2 * c1[j]
i -= 1
j -= 1
return c1[j + 1:] / scl, pu.trimseq(c1[:j + 1])
|
Divide one polynomial by another.
Returns the quotient-with-remainder of two polynomials `c1` / `c2`.
The arguments are sequences of coefficients, from lowest order term
to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of polynomial coefficients ordered from low to high.
Returns
-------
[quo, rem] : ndarrays
Of coefficient series representing the quotient and remainder.
See Also
--------
polyadd, polysub, polymulx, polymul, polypow
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c1 = (1, 2, 3)
>>> c2 = (3, 2, 1)
>>> P.polydiv(c1, c2)
(array([3.]), array([-8., -4.]))
>>> P.polydiv(c2, c1)
(array([ 0.33333333]), array([ 2.66666667, 1.33333333])) # may vary
|
python
|
numpy/polynomial/polynomial.py
| 368
|
[
"c1",
"c2"
] | false
| 6
| 6.08
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
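A small check of the defining quotient/remainder identity for the example in the docstring (requires NumPy):

import numpy as np
from numpy.polynomial import polynomial as P

c1, c2 = (1, 2, 3), (3, 2, 1)
quo, rem = P.polydiv(c1, c2)
# Verify c1 == quo*c2 + rem, the identity polydiv is defined by.
recombined = P.polyadd(P.polymul(quo, c2), rem)
assert np.allclose(recombined, c1)
print(quo, rem)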
getNotFoundAction
|
private ConfigDataNotFoundAction getNotFoundAction(ConfigDataLocation location,
@Nullable ConfigDataResource resource) {
if (location.isOptional() || (resource != null && resource.isOptional())) {
return ConfigDataNotFoundAction.IGNORE;
}
return this.notFoundAction;
}
|
Resolve and load the given list of locations, filtering any that have been
previously loaded.
@param activationContext the activation context
@param locationResolverContext the location resolver context
@param loaderContext the loader context
@param locations the locations to resolve
@return a map of the loaded locations and data
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataImporter.java
| 157
|
[
"location",
"resource"
] |
ConfigDataNotFoundAction
| true
| 4
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
createDatabaseLoader
|
private static CheckedSupplier<Reader, IOException> createDatabaseLoader(Path databasePath) {
return () -> {
Reader.FileMode mode = LOAD_DATABASE_ON_HEAP ? Reader.FileMode.MEMORY : Reader.FileMode.MEMORY_MAPPED;
return new Reader(pathToFile(databasePath), mode, NoCache.getInstance());
};
}
|
Prepares the database for lookup by incrementing the usage count.
If the usage count is already negative, it indicates that the database is being closed,
and this method will return false to indicate that no lookup should be performed.
@return true if the database is ready for lookup, false if it is being closed
|
java
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java
| 166
|
[
"databasePath"
] | true
| 2
| 6.88
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
kron
|
def kron(a, b):
"""
Kronecker product of two arrays.
Computes the Kronecker product, a composite array made of blocks of the
second array scaled by the first.
Parameters
----------
a, b : array_like
Returns
-------
out : ndarray
See Also
--------
outer : The outer product
Notes
-----
The function assumes that the number of dimensions of `a` and `b`
are the same, if necessary prepending the smallest with ones.
If ``a.shape = (r0,r1,...,rN)`` and ``b.shape = (s0,s1,...,sN)``,
the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*sN)``.
The elements are products of elements from `a` and `b`, organized
explicitly by::
kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
where::
kt = it * st + jt, t = 0,...,N
In the common 2-D case (N=1), the block structure can be visualized::
[[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
[ ... ... ],
[ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]
Examples
--------
>>> import numpy as np
>>> np.kron([1,10,100], [5,6,7])
array([ 5, 6, 7, ..., 500, 600, 700])
>>> np.kron([5,6,7], [1,10,100])
array([ 5, 50, 500, ..., 7, 70, 700])
>>> np.kron(np.eye(2), np.ones((2,2)))
array([[1., 1., 0., 0.],
[1., 1., 0., 0.],
[0., 0., 1., 1.],
[0., 0., 1., 1.]])
>>> a = np.arange(100).reshape((2,5,2,5))
>>> b = np.arange(24).reshape((2,3,4))
>>> c = np.kron(a,b)
>>> c.shape
(2, 10, 6, 20)
>>> I = (1,3,0,2)
>>> J = (0,2,1)
>>> J1 = (0,) + J # extend to ndim=4
>>> S1 = (1,) + b.shape
>>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))
>>> c[K] == a[I]*b[J]
True
"""
# Working:
# 1. Equalise the shapes by prepending smaller array with 1s
# 2. Expand shapes of both the arrays by adding new axes at
# odd positions for 1st array and even positions for 2nd
# 3. Compute the product of the modified array
# 4. The inner most array elements now contain the rows of
# the Kronecker product
# 5. Reshape the result to kron's shape, which is same as
# product of shapes of the two arrays.
b = asanyarray(b)
a = array(a, copy=None, subok=True, ndmin=b.ndim)
is_any_mat = isinstance(a, matrix) or isinstance(b, matrix)
ndb, nda = b.ndim, a.ndim
nd = max(ndb, nda)
if (nda == 0 or ndb == 0):
return _nx.multiply(a, b)
as_ = a.shape
bs = b.shape
if not a.flags.contiguous:
a = reshape(a, as_)
if not b.flags.contiguous:
b = reshape(b, bs)
# Equalise the shapes by prepending smaller one with 1s
as_ = (1,) * max(0, ndb - nda) + as_
bs = (1,) * max(0, nda - ndb) + bs
# Insert empty dimensions
a_arr = expand_dims(a, axis=tuple(range(ndb - nda)))
b_arr = expand_dims(b, axis=tuple(range(nda - ndb)))
# Compute the product
a_arr = expand_dims(a_arr, axis=tuple(range(1, nd * 2, 2)))
b_arr = expand_dims(b_arr, axis=tuple(range(0, nd * 2, 2)))
# In case of `mat`, convert result to `array`
result = _nx.multiply(a_arr, b_arr, subok=(not is_any_mat))
# Reshape back
result = result.reshape(_nx.multiply(as_, bs))
return result if not is_any_mat else matrix(result, copy=False)
|
Kronecker product of two arrays.
Computes the Kronecker product, a composite array made of blocks of the
second array scaled by the first.
Parameters
----------
a, b : array_like
Returns
-------
out : ndarray
See Also
--------
outer : The outer product
Notes
-----
The function assumes that the number of dimensions of `a` and `b`
are the same, if necessary prepending the smallest with ones.
If ``a.shape = (r0,r1,...,rN)`` and ``b.shape = (s0,s1,...,sN)``,
the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*sN)``.
The elements are products of elements from `a` and `b`, organized
explicitly by::
kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
where::
kt = it * st + jt, t = 0,...,N
In the common 2-D case (N=1), the block structure can be visualized::
[[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
[ ... ... ],
[ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]
Examples
--------
>>> import numpy as np
>>> np.kron([1,10,100], [5,6,7])
array([ 5, 6, 7, ..., 500, 600, 700])
>>> np.kron([5,6,7], [1,10,100])
array([ 5, 50, 500, ..., 7, 70, 700])
>>> np.kron(np.eye(2), np.ones((2,2)))
array([[1., 1., 0., 0.],
[1., 1., 0., 0.],
[0., 0., 1., 1.],
[0., 0., 1., 1.]])
>>> a = np.arange(100).reshape((2,5,2,5))
>>> b = np.arange(24).reshape((2,3,4))
>>> c = np.kron(a,b)
>>> c.shape
(2, 10, 6, 20)
>>> I = (1,3,0,2)
>>> J = (0,2,1)
>>> J1 = (0,) + J # extend to ndim=4
>>> S1 = (1,) + b.shape
>>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))
>>> c[K] == a[I]*b[J]
True
|
python
|
numpy/lib/_shape_base_impl.py
| 1,039
|
[
"a",
"b"
] | false
| 7
| 7.76
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
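A check of the shape rule stated in the Notes above, using the docstring's own arrays (requires NumPy):

import numpy as np

a = np.arange(100).reshape((2, 5, 2, 5))
b = np.arange(24).reshape((2, 3, 4))
c = np.kron(a, b)
# b is prepended with a singleton dim, then shapes multiply elementwise.
assert c.shape == tuple(np.multiply(a.shape, (1,) + b.shape))
print(c.shape)  # (2, 10, 6, 20)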
toString
|
@Override
public String toString() {
return "Deprecation{level='" + this.level + '\'' + ", reason='" + this.reason + '\'' + ", replacement='"
+ this.replacement + '\'' + '}';
}
|
The full name of the property that replaces the related deprecated property, if
any.
@return the replacement property name
|
java
|
configuration-metadata/spring-boot-configuration-metadata/src/main/java/org/springframework/boot/configurationmetadata/Deprecation.java
| 91
|
[] |
String
| true
| 1
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
next
|
private ParsePosition next(final ParsePosition pos) {
pos.setIndex(pos.getIndex() + 1);
return pos;
}
|
Convenience method to advance parse position by 1
@param pos ParsePosition
@return {@code pos}
|
java
|
src/main/java/org/apache/commons/lang3/text/ExtendedMessageFormat.java
| 362
|
[
"pos"
] |
ParsePosition
| true
| 1
| 6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
sniffOnFailure
|
public void sniffOnFailure() {
// sniffOnFailure does nothing until the initial sniffing round has been completed
if (initialized.get()) {
/*
* If sniffing is already running, there is no point in scheduling another round right after the current one.
* Concurrent calls may be checking the same task state, but only the first skip call on the same task returns true.
* The task may also get replaced while we check its state, in which case calling skip on it returns false.
*/
if (this.nextScheduledTask.skip()) {
/*
* We do not keep track of this future as the task will immediately run and we don't intend to cancel it
* due to concurrent sniffOnFailure runs. Effectively the previous (now cancelled or skipped) task will stay
* assigned to nextTask till this onFailure round gets run and schedules its corresponding afterFailure round.
*/
scheduler.schedule(new Task(sniffAfterFailureDelayMillis), 0L);
}
}
}
|
Schedule sniffing to run as soon as possible if it isn't already running. Once such a sniffing round runs,
it will also schedule a new round after sniffAfterFailureDelay ms.
|
java
|
client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java
| 96
|
[] |
void
| true
| 3
| 6.72
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
isRegistered
|
private boolean isRegistered(@Nullable Throwable ex) {
if (ex == null) {
return false;
}
if (this.loggedExceptions.contains(ex)) {
return true;
}
if (ex instanceof InvocationTargetException) {
return isRegistered(ex.getCause());
}
return false;
}
|
Check if the exception is a log configuration message, i.e. the log call might not
have actually output anything.
@param ex the source exception
@return {@code true} if the exception contains a log configuration message
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringBootExceptionHandler.java
| 108
|
[
"ex"
] | true
| 4
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
__init__
|
def __init__(self: Self, name: str | None = None) -> None:
"""
Initialize an on-disk cache instance.
Args:
name (str | None, optional): The name of the cache directory. If None,
defaults to "on_disk_cache".
"""
self.name = name or "on_disk_cache"
|
Initialize an on-disk cache instance.
Args:
name (str | None, optional): The name of the cache directory. If None,
defaults to "on_disk_cache".
|
python
|
torch/_inductor/cache.py
| 261
|
[
"self",
"name"
] |
None
| true
| 2
| 6.88
|
pytorch/pytorch
| 96,034
|
google
| false
|
printoptions
|
def printoptions(*args, **kwargs):
"""Context manager for setting print options.
Set print options for the scope of the `with` block, and restore the old
options at the end. See `set_printoptions` for the full description of
available options.
Examples
--------
>>> import numpy as np
>>> from numpy.testing import assert_equal
>>> with np.printoptions(precision=2):
... np.array([2.0]) / 3
array([0.67])
The `as`-clause of the `with`-statement gives the current print options:
>>> with np.printoptions(precision=2) as opts:
... assert_equal(opts, np.get_printoptions())
See Also
--------
set_printoptions, get_printoptions
Notes
-----
These print options apply only to NumPy ndarrays, not to scalars.
**Concurrency note:** see :ref:`text_formatting_options`
"""
token = _set_printoptions(*args, **kwargs)
try:
yield get_printoptions()
finally:
format_options.reset(token)
|
Context manager for setting print options.
Set print options for the scope of the `with` block, and restore the old
options at the end. See `set_printoptions` for the full description of
available options.
Examples
--------
>>> import numpy as np
>>> from numpy.testing import assert_equal
>>> with np.printoptions(precision=2):
... np.array([2.0]) / 3
array([0.67])
The `as`-clause of the `with`-statement gives the current print options:
>>> with np.printoptions(precision=2) as opts:
... assert_equal(opts, np.get_printoptions())
See Also
--------
set_printoptions, get_printoptions
Notes
-----
These print options apply only to NumPy ndarrays, not to scalars.
**Concurrency note:** see :ref:`text_formatting_options`
|
python
|
numpy/_core/arrayprint.py
| 398
|
[] | false
| 1
| 6
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
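A short demonstration that the context manager nests and restores defaults on exit (requires NumPy):

import numpy as np

x = np.array([2.0]) / 3
with np.printoptions(precision=2):
    print(x)              # [0.67]
    with np.printoptions(precision=5):
        print(x)          # [0.66667] -- contexts nest
print(x)                  # defaults restored on exit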
make_functional_deprecated_v1
|
def make_functional_deprecated_v1(model: nn.Module):
"""make_functional_deprecated_v1(model) -> weights, func, weight_names
Given an nn.Module, make_functional_deprecated_v1 extracts the state (weights)
and returns a functional version of the model, `func`. This makes
it so that it is possible use transforms over the parameters of
`model`.
`func` can be invoked as follows:
```
x = torch.randn(4, 3)
model = nn.Linear(3, 3)
weights, func, _ = make_functional_deprecated_v1(model)
func(weights, (x,))
```
And here is an example of applying the grad transform:
```
x = torch.randn(4, 3)
model = nn.Linear(3, 3)
weights, _, func = make_functional_deprecated_v1(model)
grad_weights = grad(func)(weights, (x,))
```
To put the state back into a model, use `load_state`.
"""
buffers = list(model.buffers())
if len(buffers) > 0:
raise RuntimeError(
"make_functional_deprecated_v1(model): `model` has buffers. Please use "
"make_functional_with_buffers_deprecated_v1(model) instead."
)
weights, descriptors, _ = extract_weights(model)
def fun(weights, data):
mutable_model = copy.deepcopy(model)
load_weights(mutable_model, descriptors, weights)
return mutable_model(*data)
return weights, fun, descriptors
|
make_functional_deprecated_v1(model) -> weights, func, weight_names
Given an nn.Module, make_functional_deprecated_v1 extracts the state (weights)
and returns a functional version of the model, `func`. This makes
it so that it is possible to use transforms over the parameters of
`model`.
`func` can be invoked as follows:
```
x = torch.randn(4, 3)
model = nn.Linear(3, 3)
weights, func, _ = make_functional_deprecated_v1(model)
func(weights, (x,))
```
And here is an example of applying the grad transform:
```
x = torch.randn(4, 3)
model = nn.Linear(3, 3)
weights, _, func = make_functional_deprecated_v1(model)
grad_weights = grad(func)(weights, (x,))
```
To put the state back into a model, use `load_state`.
|
python
|
torch/_functorch/make_functional.py
| 171
|
[
"model"
] | true
| 2
| 7.12
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
|
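A self-contained re-implementation of the same deepcopy-and-reload idea using only public torch APIs; this is a sketch of the technique, not the functorch helper itself, and the function name is illustrative (requires PyTorch):

import copy
import torch
import torch.nn as nn

def naive_make_functional(model):
    # Pull the parameters out and return a pure function of (weights, data),
    # mirroring the trick in the deprecated helper above.
    names = [n for n, _ in model.named_parameters()]
    weights = tuple(p.detach().clone() for p in model.parameters())
    def func(weights, data):
        m = copy.deepcopy(model)
        m.load_state_dict(dict(zip(names, weights)))
        return m(*data)
    return weights, func, names

model = nn.Linear(3, 3)
weights, func, _ = naive_make_functional(model)
x = torch.randn(4, 3)
print(func(weights, (x,)).shape)  # torch.Size([4, 3])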
of
|
static SslManagerBundle of(KeyManagerFactory keyManagerFactory, TrustManagerFactory trustManagerFactory) {
Assert.notNull(keyManagerFactory, "'keyManagerFactory' must not be null");
Assert.notNull(trustManagerFactory, "'trustManagerFactory' must not be null");
return new SslManagerBundle() {
@Override
public KeyManagerFactory getKeyManagerFactory() {
return keyManagerFactory;
}
@Override
public TrustManagerFactory getTrustManagerFactory() {
return trustManagerFactory;
}
};
}
|
Factory method to create a new {@link SslManagerBundle} instance.
@param keyManagerFactory the key manager factory
@param trustManagerFactory the trust manager factory
@return a new {@link SslManagerBundle} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/SslManagerBundle.java
| 100
|
[
"keyManagerFactory",
"trustManagerFactory"
] |
SslManagerBundle
| true
| 1
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
ensureOpen
|
private void ensureOpen() {
if (this.closed) {
throw new IllegalStateException("Zip file closed");
}
if (this.resources.zipContent() == null) {
throw new IllegalStateException("The object is not initialized.");
}
}
|
Return if an entry with the given name exists.
@param name the name to check
@return if the entry exists
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/jar/NestedJarFile.java
| 412
|
[] |
void
| true
| 3
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
containsAny
|
@Deprecated
public static boolean containsAny(final CharSequence cs, final CharSequence... searchCharSequences) {
return Strings.CS.containsAny(cs, searchCharSequences);
}
|
Tests if the CharSequence contains any of the CharSequences in the given array.
<p>
A {@code null} {@code cs} CharSequence will return {@code false}. A {@code null} or zero length search array will
return {@code false}.
</p>
<pre>
StringUtils.containsAny(null, *) = false
StringUtils.containsAny("", *) = false
StringUtils.containsAny(*, null) = false
StringUtils.containsAny(*, []) = false
StringUtils.containsAny("abcd", "ab", null) = true
StringUtils.containsAny("abcd", "ab", "cd") = true
StringUtils.containsAny("abc", "d", "abc") = true
</pre>
@param cs The CharSequence to check, may be null.
@param searchCharSequences The array of CharSequences to search for, may be null. Individual CharSequences may be
null as well.
@return {@code true} if any of the search CharSequences are found, {@code false} otherwise.
@since 3.4
@deprecated Use {@link Strings#containsAny(CharSequence, CharSequence...) Strings.CS.containsAny(CharSequence, CharSequence...)}.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 1,125
|
[
"cs"
] | true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
limit
|
public static <T extends @Nullable Object> Iterator<T> limit(
Iterator<T> iterator, int limitSize) {
checkNotNull(iterator);
checkArgument(limitSize >= 0, "limit is negative");
return new Iterator<T>() {
private int count;
@Override
public boolean hasNext() {
return count < limitSize && iterator.hasNext();
}
@Override
@ParametricNullness
public T next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
count++;
return iterator.next();
}
@Override
public void remove() {
iterator.remove();
}
};
}
|
Returns a view containing the first {@code limitSize} elements of {@code iterator}. If {@code
iterator} contains fewer than {@code limitSize} elements, the returned view contains all of its
elements. The returned iterator supports {@code remove()} if {@code iterator} does.
@param iterator the iterator to limit
@param limitSize the maximum number of elements in the returned iterator
@throws IllegalArgumentException if {@code limitSize} is negative
@since 3.0
|
java
|
android/guava/src/com/google/common/collect/Iterators.java
| 956
|
[
"iterator",
"limitSize"
] | true
| 3
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
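For comparison, Python's itertools.islice gives the same "first N elements" view over an iterator, consuming only what it yields:

from itertools import islice

it = iter(range(10))
print(list(islice(it, 3)))  # [0, 1, 2] -- only three elements consumed
print(next(it))             # 3 -- the underlying iterator resumes here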
toString
|
@Override
public String toString() {
return Objects.toString(value);
}
|
Returns the String value of this mutable.
@return the mutable value as a string.
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableObject.java
| 123
|
[] |
String
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
checkedCast
|
@CanIgnoreReturnValue
public static byte checkedCast(long value) {
checkArgument(value >> Byte.SIZE == 0, "out of range: %s", value);
return (byte) value;
}
|
Returns the {@code byte} value that, when treated as unsigned, is equal to {@code value}, if
possible.
@param value a value between 0 and 255 inclusive
@return the {@code byte} value that, when treated as unsigned, equals {@code value}
@throws IllegalArgumentException if {@code value} is negative or greater than 255
|
java
|
android/guava/src/com/google/common/primitives/UnsignedBytes.java
| 96
|
[
"value"
] | true
| 1
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
iterator
|
@Override
public Iterator<ConsumerRecord<K, V>> iterator() {
return new ConcatenatedIterable<>(records.values()).iterator();
}
|
Get the partitions which have records contained in this record set.
@return the set of partitions with data in this record set (may be empty if no data was returned)
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecords.java
| 96
|
[] | true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
namePredicate
|
private static <T> Predicate<T> namePredicate(final String name, final Function<T, String> nameGetter) {
return (Predicate<T>) t -> t != null && Objects.equals(nameGetter.apply(t), Objects.requireNonNull(name));
}
|
Waits for the given thread to die for the given duration. Implemented using {@link Thread#join(long, int)}.
@param thread The thread to join.
@param duration How long to wait.
@throws InterruptedException if any thread has interrupted the current thread.
@see Thread#join(long, int)
@since 3.12.0
|
java
|
src/main/java/org/apache/commons/lang3/ThreadUtils.java
| 483
|
[
"name",
"nameGetter"
] | true
| 2
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
isEnabled
|
protected boolean isEnabled(AnnotationMetadata metadata) {
if (getClass() == AutoConfigurationImportSelector.class) {
return getEnvironment().getProperty(EnableAutoConfiguration.ENABLED_OVERRIDE_PROPERTY, Boolean.class, true);
}
return true;
}
|
Return the {@link AutoConfigurationEntry} based on the {@link AnnotationMetadata}
of the importing {@link Configuration @Configuration} class.
@param annotationMetadata the annotation metadata of the configuration class
@return the auto-configurations that should be imported
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/AutoConfigurationImportSelector.java
| 162
|
[
"metadata"
] | true
| 2
| 7.12
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
withNewLineAtEnd
|
default JsonWriter<T> withNewLineAtEnd() {
return withSuffix("\n");
}
|
Return a new {@link JsonWriter} instance that appends a new line after the JSON has
been written.
@return a new {@link JsonWriter} instance that appends a new line after the JSON
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
| 114
|
[] | true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
getNewRequires
|
function getNewRequires(moduleSpecifier: string, quotePreference: QuotePreference, defaultImport: Import | undefined, namedImports: readonly Import[] | undefined, namespaceLikeImport: Import | undefined): RequireVariableStatement | readonly RequireVariableStatement[] {
const quotedModuleSpecifier = makeStringLiteral(moduleSpecifier, quotePreference);
let statements: RequireVariableStatement | readonly RequireVariableStatement[] | undefined;
// const { default: foo, bar, etc } = require('./mod');
if (defaultImport || namedImports?.length) {
const bindingElements = namedImports?.map(({ name, propertyName }) => factory.createBindingElement(/*dotDotDotToken*/ undefined, propertyName, name)) || [];
if (defaultImport) {
bindingElements.unshift(factory.createBindingElement(/*dotDotDotToken*/ undefined, "default", defaultImport.name));
}
const declaration = createConstEqualsRequireDeclaration(factory.createObjectBindingPattern(bindingElements), quotedModuleSpecifier);
statements = combine(statements, declaration);
}
// const foo = require('./mod');
if (namespaceLikeImport) {
const declaration = createConstEqualsRequireDeclaration(namespaceLikeImport.name, quotedModuleSpecifier);
statements = combine(statements, declaration);
}
return Debug.checkDefined(statements);
}
|
@param forceImportKeyword Indicates that the user has already typed `import`, so the result must start with `import`.
(In other words, do not allow `const x = require("...")` for JS files.)
@internal
|
typescript
|
src/services/codefixes/importFixes.ts
| 2,096
|
[
"moduleSpecifier",
"quotePreference",
"defaultImport",
"namedImports",
"namespaceLikeImport"
] | true
| 6
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
_deserialize_operator_extra_links
|
def _deserialize_operator_extra_links(
cls, encoded_op_links: dict[str, str]
) -> dict[str, XComOperatorLink]:
"""
Deserialize Operator Links if the Classes are registered in Airflow Plugins.
Error is raised if the OperatorLink is not found in Plugins too.
:param encoded_op_links: Serialized Operator Link
:return: De-Serialized Operator Link
"""
from airflow import plugins_manager
plugins_manager.initialize_extra_operators_links_plugins()
if plugins_manager.registered_operator_link_classes is None:
raise AirflowException("Can't load plugins")
op_predefined_extra_links = {}
for name, xcom_key in encoded_op_links.items():
# Get the name and xcom_key of the encoded operator and use it to create a XComOperatorLink object
# during deserialization.
#
# Example:
# enc_operator['_operator_extra_links'] =
# {
# 'airflow': 'airflow_link_key',
# 'foo-bar': 'link-key',
# 'no_response': 'key',
# 'raise_error': 'key'
# }
op_predefined_extra_link = XComOperatorLink(name=name, xcom_key=xcom_key)
op_predefined_extra_links.update({op_predefined_extra_link.name: op_predefined_extra_link})
return op_predefined_extra_links
|
Deserialize Operator Links if the Classes are registered in Airflow Plugins.
Error is raised if the OperatorLink is not found in Plugins too.
:param encoded_op_links: Serialized Operator Link
:return: De-Serialized Operator Link
|
python
|
airflow-core/src/airflow/serialization/serialized_objects.py
| 1,782
|
[
"cls",
"encoded_op_links"
] |
dict[str, XComOperatorLink]
| true
| 3
| 7.44
|
apache/airflow
| 43,597
|
sphinx
| false
|
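A stand-alone illustration of the name/xcom_key mapping built above. FakeLink is a hypothetical stand-in for Airflow's XComOperatorLink, used only to show the shape of the deserialized dict:

from dataclasses import dataclass

@dataclass
class FakeLink:
    name: str
    xcom_key: str

encoded = {"airflow": "airflow_link_key", "foo-bar": "link-key"}
links = {name: FakeLink(name, key) for name, key in encoded.items()}
print(links["foo-bar"].xcom_key)  # link-key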
toString
|
public static String toString(final Character ch) {
return ch != null ? toString(ch.charValue()) : null;
}
|
Converts the character to a String that contains the one character.
<p>For ASCII 7 bit characters, this uses a cache that will return the
same String object each time.</p>
<p>If {@code null} is passed in, {@code null} will be returned.</p>
<pre>
CharUtils.toString(null) = null
CharUtils.toString(' ') = " "
CharUtils.toString('A') = "A"
</pre>
@param ch the character to convert
@return a String containing the one specified character
|
java
|
src/main/java/org/apache/commons/lang3/CharUtils.java
| 493
|
[
"ch"
] |
String
| true
| 2
| 8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
locateGradleResourcesDirectory
|
private File locateGradleResourcesDirectory(File standardAdditionalMetadataLocation) throws FileNotFoundException {
String path = standardAdditionalMetadataLocation.getPath();
int index = path.lastIndexOf(CLASSES_DIRECTORY);
if (index < 0) {
throw new FileNotFoundException();
}
String buildDirectoryPath = path.substring(0, index);
File classOutputLocation = standardAdditionalMetadataLocation.getParentFile().getParentFile();
return new File(buildDirectoryPath, RESOURCES_DIRECTORY + '/' + classOutputLocation.getName());
}
|
Read additional {@link ConfigurationMetadata} for the {@link TypeElement} or
{@code null}.
@param typeElement the type to get additional metadata for
@return additional metadata for the given type or {@code null} if none is present
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/MetadataStore.java
| 224
|
[
"standardAdditionalMetadataLocation"
] |
File
| true
| 2
| 7.6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
whenComplete
|
public abstract KafkaFuture<T> whenComplete(BiConsumer<? super T, ? super Throwable> action);
|
Returns a new KafkaFuture with the same result or exception as this future, that executes the given action
when this future completes.
When this future is done, the given action is invoked with the result (or null if none) and the exception
(or null if none) of this future as arguments.
The returned future is completed when the action returns.
The supplied action should not throw an exception. However, if it does, the following rules apply:
if this future completed normally but the supplied action throws an exception, then the returned future completes
exceptionally with the supplied action's exception.
Or, if this future completed exceptionally and the supplied action throws an exception, then the returned future
completes exceptionally with this future's exception.
The action may be invoked by the thread that calls {@code whenComplete} or it may be invoked by the thread that
completes the future.
@param action the action to perform
@return the new future
|
java
|
clients/src/main/java/org/apache/kafka/common/KafkaFuture.java
| 141
|
[
"action"
] | true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
addAll
|
@CanIgnoreReturnValue
@Override
public Builder<E> addAll(Iterator<? extends E> elements) {
checkNotNull(elements);
while (elements.hasNext()) {
add(elements.next());
}
return this;
}
|
Adds each element of {@code elements} to the {@code ImmutableSet}, ignoring duplicate
elements (only the first duplicate element is added).
@param elements the elements to add to the {@code ImmutableSet}
@return this {@code Builder} object
@throws NullPointerException if {@code elements} is null or contains a null element
|
java
|
android/guava/src/com/google/common/collect/ImmutableSet.java
| 567
|
[
"elements"
] | true
| 2
| 7.6
|
google/guava
| 51,352
|
javadoc
| false
|
|
poll
|
public boolean poll(Timer timer, boolean waitForJoinGroup) {
maybeUpdateSubscriptionMetadata();
invokeCompletedOffsetCommitCallbacks();
if (subscriptions.hasAutoAssignedPartitions()) {
if (protocol == null) {
throw new IllegalStateException("User configured " + ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG +
" to empty while trying to subscribe for group protocol to auto assign partitions");
}
// Always update the heartbeat last poll time so that the heartbeat thread does not leave the
// group proactively due to application inactivity even if (say) the coordinator cannot be found.
pollHeartbeat(timer.currentTimeMs());
if (coordinatorUnknownAndUnreadySync(timer)) {
return false;
}
if (rejoinNeededOrPending()) {
// due to a race condition between the initial metadata fetch and the initial rebalance,
// we need to ensure that the metadata is fresh before joining initially. This ensures
// that we have matched the pattern against the cluster's topics at least once before joining.
if (subscriptions.hasPatternSubscription()) {
// For consumer group that uses pattern-based subscription, after a topic is created,
// any consumer that discovers the topic after metadata refresh can trigger rebalance
// across the entire consumer group. Multiple rebalances can be triggered after one topic
// creation if consumers refresh metadata at vastly different times. We can significantly
// reduce the number of rebalances caused by single topic creation by asking consumer to
// refresh metadata before re-joining the group as long as the refresh backoff time has
// passed.
if (this.metadata.timeToAllowUpdate(timer.currentTimeMs()) == 0) {
this.metadata.requestUpdate(true);
}
if (!client.ensureFreshMetadata(timer)) {
return false;
}
maybeUpdateSubscriptionMetadata();
}
// if not wait for join group, we would just use a timer of 0
if (!ensureActiveGroup(waitForJoinGroup ? timer : time.timer(0L))) {
// since we may use a different timer in the callee, we'd still need
// to update the original timer's current time after the call
timer.update(time.milliseconds());
return false;
}
}
} else {
// For manually assigned partitions, we do not try to pro-actively lookup coordinator;
// instead we only try to refresh metadata when necessary.
// If connections to all nodes fail, wakeups triggered while attempting to send fetch
// requests result in polls returning immediately, causing a tight loop of polls. Without
// the wakeup, poll() with no channels would block for the timeout, delaying re-connection.
// awaitMetadataUpdate() in ensureCoordinatorReady initiates new connections with configured backoff and avoids the busy loop.
if (metadata.updateRequested() && !client.hasReadyNodes(timer.currentTimeMs())) {
client.awaitMetadataUpdate(timer);
}
// if there is pending coordinator requests, ensure they have a chance to be transmitted.
client.pollNoWakeup();
}
maybeAutoCommitOffsetsAsync(timer.currentTimeMs());
return true;
}
|
Poll for coordinator events. This ensures that the coordinator is known and that the consumer
has joined the group (if it is using group management). This also handles periodic offset commits
if they are enabled.
<p>
Returns early if the timeout expires or if waiting on rejoin is not required
@param timer Timer bounding how long this method can block
@param waitForJoinGroup Boolean flag indicating if we should wait until re-join group completes
@throws KafkaException if the rebalance callback throws an exception
@return true iff the operation succeeded
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java
| 511
|
[
"timer",
"waitForJoinGroup"
] | true
| 12
| 8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
newHashSetWithExpectedSize
|
@SuppressWarnings("NonApiType") // acts as a direct substitute for a constructor call
public static <E extends @Nullable Object> HashSet<E> newHashSetWithExpectedSize(
int expectedSize) {
return new HashSet<>(Maps.capacity(expectedSize));
}
|
Returns a new hash set using the smallest initial table size that can hold {@code expectedSize}
elements without resizing. Note that this is not what {@link HashSet#HashSet(int)} does, but it
is what most users want and expect it to do.
<p>This behavior can't be broadly guaranteed, but has been tested with OpenJDK 1.7 and 1.8.
@param expectedSize the number of elements you expect to add to the returned set
@return a new, empty hash set with enough capacity to hold {@code expectedSize} elements
without resizing
@throws IllegalArgumentException if {@code expectedSize} is negative
|
java
|
android/guava/src/com/google/common/collect/Sets.java
| 263
|
[
"expectedSize"
] | true
| 1
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
compose
|
default FailableLongUnaryOperator<E> compose(final FailableLongUnaryOperator<E> before) {
Objects.requireNonNull(before);
return (final long v) -> applyAsLong(before.applyAsLong(v));
}
|
Returns a composed {@link FailableLongUnaryOperator} like {@link LongUnaryOperator#compose(LongUnaryOperator)}.
@param before the operator to apply before this one.
@return a composed {@link FailableLongUnaryOperator} like {@link LongUnaryOperator#compose(LongUnaryOperator)}.
@throws NullPointerException if before is null.
@see #andThen(FailableLongUnaryOperator)
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableLongUnaryOperator.java
| 86
|
[
"before"
] | true
| 1
| 6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
lastIndexOf
|
public static int lastIndexOf(final byte[] array, final byte valueToFind, int startIndex) {
if (array == null || startIndex < 0) {
return INDEX_NOT_FOUND;
}
if (startIndex >= array.length) {
startIndex = array.length - 1;
}
for (int i = startIndex; i >= 0; i--) {
if (valueToFind == array[i]) {
return i;
}
}
return INDEX_NOT_FOUND;
}
|
Finds the last index of the given value in the array starting at the given index.
<p>
This method returns {@link #INDEX_NOT_FOUND} ({@code -1}) for a {@code null} input array.
</p>
<p>
A negative startIndex will return {@link #INDEX_NOT_FOUND} ({@code -1}). A startIndex larger than the array length will search from the end of the array.
</p>
@param array the array to traverse for looking for the object, may be {@code null}.
@param valueToFind the value to find.
@param startIndex the start index to traverse backwards from.
@return the last index of the value within the array, {@link #INDEX_NOT_FOUND} ({@code -1}) if not found or {@code null} array input.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 3,833
|
[
"array",
"valueToFind",
"startIndex"
] | true
| 6
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
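The same search semantics sketched in Python, matching the javadoc's rules for null input, negative start, and a start index past the end; the function name is illustrative:

INDEX_NOT_FOUND = -1

def last_index_of(array, value, start):
    # None array or negative start -> not found; a start past the end
    # is clamped to the last index, as in the Java version.
    if array is None or start < 0:
        return INDEX_NOT_FOUND
    for i in range(min(start, len(array) - 1), -1, -1):
        if array[i] == value:
            return i
    return INDEX_NOT_FOUND

assert last_index_of([1, 2, 1], 1, 99) == 2
assert last_index_of([1, 2, 1], 1, 1) == 0
assert last_index_of(None, 1, 0) == INDEX_NOT_FOUND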
sigmoid_kernel
|
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""Compute the sigmoid kernel between X and Y.
.. code-block:: text
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
A feature array.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
gamma : float, default=None
Coefficient of the vector inner product. If None, defaults to 1.0 / n_features.
coef0 : float, default=1
Constant offset added to scaled inner product.
Returns
-------
kernel : ndarray of shape (n_samples_X, n_samples_Y)
Sigmoid kernel between two arrays.
Examples
--------
>>> from sklearn.metrics.pairwise import sigmoid_kernel
>>> X = [[0, 0, 0], [1, 1, 1]]
>>> Y = [[1, 0, 0], [1, 1, 0]]
>>> sigmoid_kernel(X, Y)
array([[0.76, 0.76],
[0.87, 0.93]])
"""
xp, _ = get_namespace(X, Y)
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
# compute tanh in-place for numpy
K = _modify_in_place_if_numpy(xp, xp.tanh, K, out=K)
return K
|
Compute the sigmoid kernel between X and Y.
.. code-block:: text
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
A feature array.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
gamma : float, default=None
Coefficient of the vector inner product. If None, defaults to 1.0 / n_features.
coef0 : float, default=1
Constant offset added to scaled inner product.
Returns
-------
kernel : ndarray of shape (n_samples_X, n_samples_Y)
Sigmoid kernel between two arrays.
Examples
--------
>>> from sklearn.metrics.pairwise import sigmoid_kernel
>>> X = [[0, 0, 0], [1, 1, 1]]
>>> Y = [[1, 0, 0], [1, 1, 0]]
>>> sigmoid_kernel(X, Y)
array([[0.76, 0.76],
[0.87, 0.93]])
|
python
|
sklearn/metrics/pairwise.py
| 1,509
|
[
"X",
"Y",
"gamma",
"coef0"
] | false
| 2
| 7.68
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
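Recomputing the documented formula K(X, Y) = tanh(gamma <X, Y> + coef0) directly with NumPy and checking it against the library call (requires scikit-learn):

import numpy as np
from sklearn.metrics.pairwise import sigmoid_kernel

X = np.array([[0, 0, 0], [1, 1, 1]], dtype=float)
Y = np.array([[1, 0, 0], [1, 1, 0]], dtype=float)
gamma = 1.0 / X.shape[1]  # the documented default
K_manual = np.tanh(gamma * (X @ Y.T) + 1.0)
assert np.allclose(K_manual, sigmoid_kernel(X, Y))
print(np.round(K_manual, 2))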
append
|
public StrBuilder append(final double value) {
return append(String.valueOf(value));
}
|
Appends a double value to the string builder using {@code String.valueOf}.
@param value the value to append
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 517
|
[
"value"
] |
StrBuilder
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
requestRunOnDemand
|
public void requestRunOnDemand() {
if (isCancelled() || isCompleted()) {
logger.debug("Not requesting downloader to run on demand because task is cancelled or completed");
return;
}
logger.trace("Requesting downloader run on demand");
// If queuedRuns was greater than 0, then either a run is in progress and it will fire off another run when it finishes,
// or a run is scheduled to run as soon as possible and it will include the latest cluster state.
// If it was 0, we set it to 1 to indicate that a run is scheduled to run as soon as possible and schedule it now.
if (queuedRuns.getAndIncrement() == 0) {
logger.trace("Scheduling downloader run on demand");
threadPool.generic().submit(this::runOnDemand);
}
}
|
This method requests that the downloader runs on the latest cluster state, which likely contains a change in the GeoIP metadata.
This method does nothing if this task is cancelled or completed.
|
java
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/AbstractGeoIpDownloader.java
| 110
|
[] |
void
| true
| 4
| 6.88
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
get_import_status
|
def get_import_status(self, import_arn: str) -> tuple[str, str | None, str | None]:
"""
Get import status from Dynamodb.
:param import_arn: The Amazon Resource Name (ARN) for the import.
:return: Import status, Error code and Error message
"""
self.log.info("Poking for Dynamodb import %s", import_arn)
try:
describe_import = self.client.describe_import(ImportArn=import_arn)
status = describe_import["ImportTableDescription"]["ImportStatus"]
error_code = describe_import["ImportTableDescription"].get("FailureCode")
error_msg = describe_import["ImportTableDescription"].get("FailureMessage")
return status, error_code, error_msg
except ClientError as e:
error_code = e.response.get("Error", {}).get("Code")
if error_code == "ImportNotFoundException":
raise AirflowException("S3 import into Dynamodb job not found.")
raise e
|
Get import status from Dynamodb.
:param import_arn: The Amazon Resource Name (ARN) for the import.
:return: Import status, Error code and Error message
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/dynamodb.py
| 86
|
[
"self",
"import_arn"
] |
tuple[str, str | None, str | None]
| true
| 2
| 8.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
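A generic polling loop around a (status, error_code, error_msg) tuple such as the one the hook above returns. The status provider is stubbed here so the sketch runs without AWS; names and terminal states are assumptions:

import time

def wait_for_import(get_status, import_arn, poll=0.01, max_polls=100):
    for _ in range(max_polls):
        status, code, msg = get_status(import_arn)
        if status in ("COMPLETED", "FAILED", "CANCELLED"):
            return status, code, msg
        time.sleep(poll)
    raise TimeoutError(f"import {import_arn} still in {status}")

states = iter([("IN_PROGRESS", None, None), ("COMPLETED", None, None)])
print(wait_for_import(lambda arn: next(states), "arn:aws:dynamodb:...:import/x"))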
setActiveTask
|
public <T> CompletableFuture<T> setActiveTask(final CompletableFuture<T> currentTask) {
Objects.requireNonNull(currentTask, "currentTask cannot be null");
pendingTask.getAndUpdate(task -> {
if (task == null) {
return new ActiveFuture(currentTask);
} else if (task instanceof WakeupFuture) {
currentTask.completeExceptionally(new WakeupException());
return null;
} else if (task instanceof DisabledWakeups) {
return task;
}
// last active state is still active
throw new KafkaException("Last active task is still active");
});
return currentTask;
}
|
If there is no pending task, set the pending task active.
If wakeup was called before setting an active task, the current task will complete exceptionally with
WakeupException right away.
If there is an active task, throw exception.
@param currentTask
@param <T>
@return
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/WakeupTrigger.java
| 75
|
[
"currentTask"
] | true
| 4
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
_concat_same_type
|
def _concat_same_type(cls, to_concat) -> Self:
"""
Concatenate multiple ArrowExtensionArrays.
Parameters
----------
to_concat : sequence of ArrowExtensionArrays
Returns
-------
ArrowExtensionArray
"""
chunks = [array for ea in to_concat for array in ea._pa_array.iterchunks()]
if to_concat[0].dtype == "string":
# StringDtype has no attribute pyarrow_dtype
pa_dtype = pa.large_string()
else:
pa_dtype = to_concat[0].dtype.pyarrow_dtype
arr = pa.chunked_array(chunks, type=pa_dtype)
return to_concat[0]._from_pyarrow_array(arr)
|
Concatenate multiple ArrowExtensionArrays.
Parameters
----------
to_concat : sequence of ArrowExtensionArrays
Returns
-------
ArrowExtensionArray
|
python
|
pandas/core/arrays/arrow/array.py
| 1,812
|
[
"cls",
"to_concat"
] |
Self
| true
| 3
| 6.08
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
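Concatenating two pyarrow-backed Series through the public API exercises this _concat_same_type path under the hood (requires pandas with pyarrow installed):

import pandas as pd

s1 = pd.Series([1, 2], dtype="int64[pyarrow]")
s2 = pd.Series([3, None], dtype="int64[pyarrow]")
out = pd.concat([s1, s2], ignore_index=True)
print(out.dtype)    # int64[pyarrow] -- the arrow dtype is preserved
print(out.tolist())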
_get_task_creator
|
def _get_task_creator(
self,
created_counts: dict[str, int],
ti_mutation_hook: Callable,
hook_is_noop: Literal[True, False],
dag_version_id: UUIDType,
) -> Callable[[Operator, Iterable[int]], Iterator[dict[str, Any]] | Iterator[TI]]:
"""
Get the task creator function.
This function also updates the created_counts dictionary with the number of tasks created.
:param created_counts: Dictionary of task_type -> count of created TIs
:param ti_mutation_hook: task_instance_mutation_hook function
:param hook_is_noop: Whether the task_instance_mutation_hook is a noop
"""
if hook_is_noop:
def create_ti_mapping(task: Operator, indexes: Iterable[int]) -> Iterator[dict[str, Any]]:
created_counts[task.task_type] += 1
for map_index in indexes:
yield TI.insert_mapping(
self.run_id, task, map_index=map_index, dag_version_id=dag_version_id
)
creator = create_ti_mapping
else:
def create_ti(task: Operator, indexes: Iterable[int]) -> Iterator[TI]:
for map_index in indexes:
ti = TI(task, run_id=self.run_id, map_index=map_index, dag_version_id=dag_version_id)
ti_mutation_hook(ti)
if ti.operator:
created_counts[ti.operator] += 1
yield ti
creator = create_ti
return creator
|
Get the task creator function.
This function also updates the created_counts dictionary with the number of tasks created.
:param created_counts: Dictionary of task_type -> count of created TIs
:param ti_mutation_hook: task_instance_mutation_hook function
:param hook_is_noop: Whether the task_instance_mutation_hook is a noop
|
python
|
airflow-core/src/airflow/models/dagrun.py
| 1,838
|
[
"self",
"created_counts",
"ti_mutation_hook",
"hook_is_noop",
"dag_version_id"
] |
Callable[[Operator, Iterable[int]], Iterator[dict[str, Any]] | Iterator[TI]]
| true
| 6
| 6.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
APOS_ESCAPE
|
public static String[][] APOS_ESCAPE() {
return APOS_ESCAPE.clone();
}
|
Mapping to escape the apostrophe character to its XML character entity.
@return the mapping table.
|
java
|
src/main/java/org/apache/commons/lang3/text/translate/EntityArrays.java
| 362
|
[] | true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
ScopedEventBaseThread
|
ScopedEventBaseThread(ScopedEventBaseThread&& other) = delete;
|
Runs the passed-in function on the event base of the thread.
@param func Function to be run on event base of the thread.
@methodset Operations
|
cpp
|
folly/io/async/ScopedEventBaseThread.h
| 116
|
[] | true
| 2
| 6.8
|
facebook/folly
| 30,157
|
doxygen
| false
|
|
asList
|
public static List<Short> asList(short... backingArray) {
if (backingArray.length == 0) {
return Collections.emptyList();
}
return new ShortArrayAsList(backingArray);
}
|
Returns a fixed-size list backed by the specified array, similar to {@link
Arrays#asList(Object[])}. The list supports {@link List#set(int, Object)}, but any attempt to
set a value to {@code null} will result in a {@link NullPointerException}.
<p>The returned list maintains the values, but not the identities, of {@code Short} objects
written to or read from it. For example, whether {@code list.get(0) == list.get(0)} is true for
the returned list is unspecified.
<p>The returned list is serializable.
@param backingArray the array to back the list
@return a list view of the array
|
java
|
android/guava/src/com/google/common/primitives/Shorts.java
| 618
|
[] | true
| 2
| 8.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
copyOf
|
@IgnoreJRERequirement // Users will use this only if they're already using streams.
public static ImmutableIntArray copyOf(IntStream stream) {
// Note this uses very different growth behavior from copyOf(Iterable) and the builder.
int[] array = stream.toArray();
return (array.length == 0) ? EMPTY : new ImmutableIntArray(array);
}
|
Returns an immutable array containing all the values from {@code stream}, in order.
@since 33.4.0 (but since 22.0 in the JRE flavor)
|
java
|
android/guava/src/com/google/common/primitives/ImmutableIntArray.java
| 173
|
[
"stream"
] |
ImmutableIntArray
| true
| 2
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
newCall
|
private <K, V> Call newCall(AdminApiDriver<K, V> driver, AdminApiDriver.RequestSpec<K> spec) {
NodeProvider nodeProvider = spec.scope.destinationBrokerId().isPresent() ?
new ConstantNodeIdProvider(spec.scope.destinationBrokerId().getAsInt()) :
new LeastLoadedNodeProvider();
return new Call(spec.name, spec.nextAllowedTryMs, spec.tries, spec.deadlineMs, nodeProvider) {
@Override
AbstractRequest.Builder<?> createRequest(int timeoutMs) {
return spec.request;
}
@Override
void handleResponse(AbstractResponse response) {
long currentTimeMs = time.milliseconds();
driver.onResponse(currentTimeMs, spec, response, this.curNode());
maybeSendRequests(driver, currentTimeMs);
}
@Override
void handleFailure(Throwable throwable) {
long currentTimeMs = time.milliseconds();
driver.onFailure(currentTimeMs, spec, throwable);
maybeSendRequests(driver, currentTimeMs);
}
@Override
void maybeRetry(long currentTimeMs, Throwable throwable) {
if (throwable instanceof DisconnectException) {
// Disconnects are a special case. We want to give the driver a chance
// to retry lookup rather than getting stuck on a node which is down.
// For example, if a partition leader shuts down after our metadata query,
// then we might get a disconnect. We want to try to find the new partition
// leader rather than retrying on the same node.
driver.onFailure(currentTimeMs, spec, throwable);
maybeSendRequests(driver, currentTimeMs);
} else {
super.maybeRetry(currentTimeMs, throwable);
}
}
};
}
|
Forcefully terminates an ongoing transaction for a given transactional ID.
<p>
This API is intended for well-formed but long-running transactions that are known to the
transaction coordinator. It is primarily designed for supporting 2PC (two-phase commit) workflows,
where a coordinator may need to unilaterally terminate a participant transaction that hasn't completed.
</p>
@param transactionalId The transactional ID whose active transaction should be forcefully terminated.
@return a {@link TerminateTransactionResult} that can be used to await the operation result.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
| 5,100
|
[
"driver",
"spec"
] |
Call
| true
| 3
| 7.6
|
apache/kafka
| 31,560
|
javadoc
| false
|
updateLastAckedOffset
|
private void updateLastAckedOffset(ProduceResponse.PartitionResponse response, ProducerBatch batch) {
if (response.baseOffset == ProduceResponse.INVALID_OFFSET)
return;
long lastOffset = response.baseOffset + batch.recordCount - 1;
txnPartitionMap.updateLastAckedOffset(batch.topicPartition, isTransactional(), lastOffset);
}
|
Returns the first inflight sequence for a given partition. This is the base sequence of an inflight batch with
the lowest sequence number.
@return the lowest inflight sequence if the transaction manager is tracking inflight requests for this partition.
If there are no inflight requests being tracked for this partition, this method will return
RecordBatch.NO_SEQUENCE.
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java
| 740
|
[
"response",
"batch"
] |
void
| true
| 2
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|