| function_name (string, 1-57 chars) | function_code (string, 20-4.99k chars) | documentation (string, 50-2k chars) | language (string, 5 values) | file_path (string, 8-166 chars) | line_number (int32, 4-16.7k) | parameters (list, 0-20 items) | return_type (string, 0-131 chars) | has_type_hints (bool, 2 classes) | complexity (int32, 1-51) | quality_score (float32, 6-9.68) | repo_name (string, 34 values) | repo_stars (int32, 2.9k-242k) | docstring_style (string, 7 values) | is_async (bool, 2 classes) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
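The records below are easier to explore programmatically than to read inline. Here is a minimal sketch using the Hugging Face `datasets` library; the dataset ID is a hypothetical placeholder (substitute the actual Hub path of this dataset), and the filter values are illustrative only.

```python
from datasets import load_dataset

# "your-org/code-documentation-pairs" is a hypothetical placeholder ID;
# substitute the actual Hub path of this dataset.
ds = load_dataset("your-org/code-documentation-pairs", split="train")

# Each record carries the fields listed in the header row above.
python_rows = ds.filter(lambda r: r["language"] == "python")
high_quality = python_rows.filter(lambda r: r["quality_score"] >= 7.5)

for row in high_quality.select(range(min(3, len(high_quality)))):
    print(row["repo_name"], row["function_name"], row["quality_score"])
```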
init
|
private static void init() {
init_X86_32Bit();
init_X86_64Bit();
init_IA64_32Bit();
init_IA64_64Bit();
init_PPC_32Bit();
init_PPC_64Bit();
init_Aarch_64Bit();
init_RISCV_32Bit();
init_RISCV_64Bit();
}
|
Gets a {@link Processor} object the given value {@link String}. The {@link String} must be like a value returned by the {@code "os.arch"} system
property.
@param value A {@link String} like a value returned by the {@code os.arch} System Property.
@return A {@link Processor} when it exists, else {@code null}.
|
java
|
src/main/java/org/apache/commons/lang3/ArchUtils.java
| 91
|
[] |
void
| true
| 1
| 7.04
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
formatter
|
DocValueFormat formatter() {
return format;
}
|
Returns the normalized value. If no normalised factor has been specified
this method will return {@link #value()}
@return the normalized value
|
java
|
modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/Derivative.java
| 60
|
[] |
DocValueFormat
| true
| 1
| 6.48
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
complement
|
@Override
public RangeSet<C> complement() {
RangeSet<C> result = complement;
return (result == null) ? complement = new Complement() : result;
}
|
Returns a {@code TreeRangeSet} representing the union of the specified ranges.
<p>This is the smallest {@code RangeSet} which encloses each of the specified ranges. An
element will be contained in this {@code RangeSet} if and only if it is contained in at least
one {@code Range} in {@code ranges}.
@since 21.0
|
java
|
android/guava/src/com/google/common/collect/TreeRangeSet.java
| 278
|
[] | true
| 2
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
write
|
private void write(JarFile sourceJar, AbstractJarWriter writer, PackagedLibraries libraries) throws IOException {
if (isLayered()) {
writer.useLayers(this.layers, this.layersIndex);
}
writer.writeManifest(buildManifest(sourceJar));
writeLoaderClasses(writer);
writer.writeEntries(sourceJar, getEntityTransformer(), libraries.getUnpackHandler(),
libraries.getLibraryLookup());
Map<String, Library> writtenLibraries = libraries.write(writer);
writeNativeImageArgFile(writer, sourceJar, writtenLibraries);
if (isLayered()) {
writeLayerIndex(writer);
}
writeSignatureFileIfNecessary(writtenLibraries, writer);
}
|
Sets if jarmode jars relevant for the packaging should be automatically included.
@param includeRelevantJarModeJars if relevant jars are included
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/Packager.java
| 202
|
[
"sourceJar",
"writer",
"libraries"
] |
void
| true
| 3
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
groups
|
def groups(self) -> list:
"""
Return a list of all the top-level nodes.
Each node returned is not a pandas storage object.
Returns
-------
list
List of objects.
See Also
--------
HDFStore.get_node : Returns the node with the key.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
>>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
>>> store.put("data", df) # doctest: +SKIP
>>> print(store.groups()) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
[/data (Group) ''
children := ['axis0' (Array), 'axis1' (Array), 'block0_values' (Array),
'block0_items' (Array)]]
"""
_tables()
self._check_if_open()
assert self._handle is not None # for mypy
assert _table_mod is not None # for mypy
return [
g
for g in self._handle.walk_groups()
if (
not isinstance(g, _table_mod.link.Link)
and (
getattr(g._v_attrs, "pandas_type", None)
or getattr(g, "table", None)
or (isinstance(g, _table_mod.table.Table) and g._v_name != "table")
)
)
]
|
Return a list of all the top-level nodes.
Each node returned is not a pandas storage object.
Returns
-------
list
List of objects.
See Also
--------
HDFStore.get_node : Returns the node with the key.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
>>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
>>> store.put("data", df) # doctest: +SKIP
>>> print(store.groups()) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
[/data (Group) ''
children := ['axis0' (Array), 'axis1' (Array), 'block0_values' (Array),
'block0_items' (Array)]]
|
python
|
pandas/io/pytables.py
| 1,555
|
[
"self"
] |
list
| true
| 5
| 7.28
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
read_spss
|
def read_spss(
path: str | Path,
usecols: Sequence[str] | None = None,
convert_categoricals: bool = True,
dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
**kwargs: Any,
) -> DataFrame:
"""
Load an SPSS file from the file path, returning a DataFrame.
Parameters
----------
path : str or Path
File path.
usecols : list-like, optional
Return a subset of the columns. If None, return all columns.
convert_categoricals : bool, default is True
Convert categorical columns into pd.Categorical.
dtype_backend : {'numpy_nullable', 'pyarrow'}
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). If not specified, the default behavior
is to not use nullable data types. If specified, the behavior
is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
* ``"pyarrow"``: returns pyarrow-backed
nullable :class:`ArrowDtype` :class:`DataFrame`
.. versionadded:: 2.0
**kwargs
Additional keyword arguments that can be passed to :func:`pyreadstat.read_sav`.
.. versionadded:: 3.0
Returns
-------
DataFrame
DataFrame based on the SPSS file.
See Also
--------
read_csv : Read a comma-separated values (csv) file into a pandas DataFrame.
read_excel : Read an Excel file into a pandas DataFrame.
read_sas : Read an SAS file into a pandas DataFrame.
read_orc : Load an ORC object into a pandas DataFrame.
read_feather : Load a feather-format object into a pandas DataFrame.
Examples
--------
>>> df = pd.read_spss("spss_data.sav") # doctest: +SKIP
"""
pyreadstat = import_optional_dependency("pyreadstat")
check_dtype_backend(dtype_backend)
if usecols is not None:
if not is_list_like(usecols):
raise TypeError("usecols must be list-like.")
usecols = list(usecols) # pyreadstat requires a list
df, metadata = pyreadstat.read_sav(
stringify_path(path),
usecols=usecols,
apply_value_formats=convert_categoricals,
**kwargs,
)
df.attrs = metadata.__dict__
if dtype_backend is not lib.no_default:
df = df.convert_dtypes(dtype_backend=dtype_backend)
return df
|
Load an SPSS file from the file path, returning a DataFrame.
Parameters
----------
path : str or Path
File path.
usecols : list-like, optional
Return a subset of the columns. If None, return all columns.
convert_categoricals : bool, default is True
Convert categorical columns into pd.Categorical.
dtype_backend : {'numpy_nullable', 'pyarrow'}
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). If not specified, the default behavior
is to not use nullable data types. If specified, the behavior
is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
* ``"pyarrow"``: returns pyarrow-backed
nullable :class:`ArrowDtype` :class:`DataFrame`
.. versionadded:: 2.0
**kwargs
Additional keyword arguments that can be passed to :func:`pyreadstat.read_sav`.
.. versionadded:: 3.0
Returns
-------
DataFrame
DataFrame based on the SPSS file.
See Also
--------
read_csv : Read a comma-separated values (csv) file into a pandas DataFrame.
read_excel : Read an Excel file into a pandas DataFrame.
read_sas : Read an SAS file into a pandas DataFrame.
read_orc : Load an ORC object into a pandas DataFrame.
read_feather : Load a feather-format object into a pandas DataFrame.
Examples
--------
>>> df = pd.read_spss("spss_data.sav") # doctest: +SKIP
|
python
|
pandas/io/spss.py
| 27
|
[
"path",
"usecols",
"convert_categoricals",
"dtype_backend"
] |
DataFrame
| true
| 4
| 7.92
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
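As a complement to the `read_spss` record above, the following is a small hedged sketch exercising the parameters its docstring documents; the file name and column names are placeholders, and the call assumes pandas 2.0+ with the optional `pyreadstat` dependency installed.

```python
import pandas as pd

# "survey.sav" and the column names are placeholders.
df = pd.read_spss(
    "survey.sav",
    usecols=["age", "income"],          # return only a subset of columns
    convert_categoricals=True,          # labelled values -> pd.Categorical
    dtype_backend="numpy_nullable",     # nullable dtypes (pandas >= 2.0)
)
print(df.dtypes)
```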
nextTokenCanFollowModifier
|
function nextTokenCanFollowModifier() {
switch (token()) {
case SyntaxKind.ConstKeyword:
// 'const' is only a modifier if followed by 'enum'.
return nextToken() === SyntaxKind.EnumKeyword;
case SyntaxKind.ExportKeyword:
nextToken();
if (token() === SyntaxKind.DefaultKeyword) {
return lookAhead(nextTokenCanFollowDefaultKeyword);
}
if (token() === SyntaxKind.TypeKeyword) {
return lookAhead(nextTokenCanFollowExportModifier);
}
return canFollowExportModifier();
case SyntaxKind.DefaultKeyword:
return nextTokenCanFollowDefaultKeyword();
case SyntaxKind.StaticKeyword:
nextToken();
return canFollowModifier();
case SyntaxKind.GetKeyword:
case SyntaxKind.SetKeyword:
nextToken();
return canFollowGetOrSetKeyword();
default:
return nextTokenIsOnSameLineAndCanFollowModifier();
}
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 2,766
|
[] | false
| 3
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
mean
|
function mean(array) {
return baseMean(array, identity);
}
|
Computes the mean of the values in `array`.
@static
@memberOf _
@since 4.0.0
@category Math
@param {Array} array The array to iterate over.
@returns {number} Returns the mean.
@example
_.mean([4, 2, 8, 6]);
// => 5
|
javascript
|
lodash.js
| 16,464
|
[
"array"
] | false
| 1
| 6.4
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
loadBeanDefinitions
|
public int loadBeanDefinitions(String location, @Nullable Set<Resource> actualResources) throws BeanDefinitionStoreException {
ResourceLoader resourceLoader = getResourceLoader();
if (resourceLoader == null) {
throw new BeanDefinitionStoreException(
"Cannot load bean definitions from location [" + location + "]: no ResourceLoader available");
}
if (resourceLoader instanceof ResourcePatternResolver resourcePatternResolver) {
// Resource pattern matching available.
try {
Resource[] resources = resourcePatternResolver.getResources(location);
int count = loadBeanDefinitions(resources);
if (actualResources != null) {
Collections.addAll(actualResources, resources);
}
if (logger.isTraceEnabled()) {
logger.trace("Loaded " + count + " bean definitions from location pattern [" + location + "]");
}
return count;
}
catch (IOException ex) {
throw new BeanDefinitionStoreException(
"Could not resolve bean definition resource pattern [" + location + "]", ex);
}
}
else {
// Can only load single resources by absolute URL.
Resource resource = resourceLoader.getResource(location);
int count = loadBeanDefinitions(resource);
if (actualResources != null) {
actualResources.add(resource);
}
if (logger.isTraceEnabled()) {
logger.trace("Loaded " + count + " bean definitions from location [" + location + "]");
}
return count;
}
}
|
Load bean definitions from the specified resource location.
<p>The location can also be a location pattern, provided that the
ResourceLoader of this bean definition reader is a ResourcePatternResolver.
@param location the resource location, to be loaded with the ResourceLoader
(or ResourcePatternResolver) of this bean definition reader
@param actualResources a Set to be filled with the actual Resource objects
that have been resolved during the loading process. May be {@code null}
to indicate that the caller is not interested in those Resource objects.
@return the number of bean definitions found
@throws BeanDefinitionStoreException in case of loading or parsing errors
@see #getResourceLoader()
@see #loadBeanDefinitions(org.springframework.core.io.Resource)
@see #loadBeanDefinitions(org.springframework.core.io.Resource[])
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanDefinitionReader.java
| 205
|
[
"location",
"actualResources"
] | true
| 8
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
getNestedLibraryTime
|
private long getNestedLibraryTime(Library library) {
try {
try (JarInputStream jarStream = new JarInputStream(library.openStream())) {
JarEntry entry = jarStream.getNextJarEntry();
while (entry != null) {
if (!entry.isDirectory()) {
return entry.getTime();
}
entry = jarStream.getNextJarEntry();
}
}
}
catch (Exception ex) {
// Ignore and just use the library timestamp
}
return library.getLastModified();
}
|
Write a simple index file containing the specified UTF-8 lines.
@param location the location of the index file
@param lines the lines to write
@throws IOException if the write fails
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/AbstractJarWriter.java
| 185
|
[
"library"
] | true
| 4
| 7.2
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
estimateMax
|
public static OptionalDouble estimateMax(
ZeroBucket zeroBucket,
ExponentialHistogram.Buckets negativeBuckets,
ExponentialHistogram.Buckets positiveBuckets
) {
int scale = negativeBuckets.iterator().scale();
assert scale == positiveBuckets.iterator().scale();
OptionalLong positiveMaxIndex = positiveBuckets.maxBucketIndex();
if (positiveMaxIndex.isPresent()) {
return OptionalDouble.of(ExponentialScaleUtils.getUpperBucketBoundary(positiveMaxIndex.getAsLong(), scale));
}
if (zeroBucket.count() > 0) {
return OptionalDouble.of(zeroBucket.zeroThreshold());
}
BucketIterator negativeBucketsIt = negativeBuckets.iterator();
if (negativeBucketsIt.hasNext()) {
return OptionalDouble.of(-ExponentialScaleUtils.getLowerBucketBoundary(negativeBucketsIt.peekIndex(), scale));
}
return OptionalDouble.empty();
}
|
Estimates the maximum value of the histogram based on the populated buckets.
The returned value is guaranteed to be greater than or equal to the exact maximum value of the histogram values.
If the histogram is empty, an empty Optional is returned.
<p>
Note that this method can return +-Infinity if the histogram bucket boundaries are not representable in a double.
@param zeroBucket the zero bucket of the histogram
@param negativeBuckets the negative buckets of the histogram
@param positiveBuckets the positive buckets of the histogram
@return the estimated minimum
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramUtils.java
| 118
|
[
"zeroBucket",
"negativeBuckets",
"positiveBuckets"
] |
OptionalDouble
| true
| 4
| 7.76
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
downgrade
|
def downgrade():
"""Unapply Add run_id to Log and increase Log event name length."""
with op.batch_alter_table("log") as batch_op:
batch_op.drop_column("run_id")
conn = op.get_bind()
if conn.dialect.name == "mssql":
with op.batch_alter_table("log") as batch_op:
batch_op.drop_index("idx_log_event")
batch_op.alter_column("event", type_=sa.String(30))
batch_op.create_index("idx_log_event", ["event"])
else:
with op.batch_alter_table("log") as batch_op:
batch_op.alter_column("event", type_=sa.String(30))
|
Unapply Add run_id to Log and increase Log event name length.
|
python
|
airflow-core/src/airflow/migrations/versions/0010_2_9_0_add_run_id_to_audit_log_table_and_change_event_name_length.py
| 53
|
[] | false
| 3
| 6.08
|
apache/airflow
| 43,597
|
unknown
| false
|
|
_accumulate
|
def _accumulate(
self, name: str, *, skipna: bool = True, **kwargs
) -> ArrowExtensionArray | ExtensionArray:
"""
Return an ExtensionArray performing an accumulation operation.
The underlying data type might change.
Parameters
----------
name : str
Name of the function, supported values are:
- cummin
- cummax
- cumsum
- cumprod
skipna : bool, default True
If True, skip NA values.
**kwargs
Additional keyword arguments passed to the accumulation function.
Currently, there is no supported kwarg.
Returns
-------
array
Raises
------
NotImplementedError : subclass does not define accumulations
"""
if is_string_dtype(self):
return self._str_accumulate(name=name, skipna=skipna, **kwargs)
pyarrow_name = {
"cummax": "cumulative_max",
"cummin": "cumulative_min",
"cumprod": "cumulative_prod_checked",
"cumsum": "cumulative_sum_checked",
}.get(name, name)
pyarrow_meth = getattr(pc, pyarrow_name, None)
if pyarrow_meth is None:
return super()._accumulate(name, skipna=skipna, **kwargs)
data_to_accum = self._pa_array
pa_dtype = data_to_accum.type
convert_to_int = (
pa.types.is_temporal(pa_dtype) and name in ["cummax", "cummin"]
) or (pa.types.is_duration(pa_dtype) and name == "cumsum")
if convert_to_int:
if pa_dtype.bit_width == 32:
data_to_accum = data_to_accum.cast(pa.int32())
else:
data_to_accum = data_to_accum.cast(pa.int64())
try:
result = pyarrow_meth(data_to_accum, skip_nulls=skipna, **kwargs)
except pa.ArrowNotImplementedError as err:
msg = f"operation '{name}' not supported for dtype '{self.dtype}'"
raise TypeError(msg) from err
if convert_to_int:
result = result.cast(pa_dtype)
return self._from_pyarrow_array(result)
|
Return an ExtensionArray performing an accumulation operation.
The underlying data type might change.
Parameters
----------
name : str
Name of the function, supported values are:
- cummin
- cummax
- cumsum
- cumprod
skipna : bool, default True
If True, skip NA values.
**kwargs
Additional keyword arguments passed to the accumulation function.
Currently, there is no supported kwarg.
Returns
-------
array
Raises
------
NotImplementedError : subclass does not define accumulations
|
python
|
pandas/core/arrays/arrow/array.py
| 1,833
|
[
"self",
"name",
"skipna"
] |
ArrowExtensionArray | ExtensionArray
| true
| 10
| 6.48
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
iter_file_metadata
|
def iter_file_metadata(
self,
prefix: str,
bucket_name: str | None = None,
page_size: int | None = None,
max_items: int | None = None,
) -> Iterator:
"""
Yield metadata objects from a bucket under a prefix.
.. seealso::
- :external+boto3:py:class:`S3.Paginator.ListObjectsV2`
:param prefix: a key prefix
:param bucket_name: the name of the bucket
:param page_size: pagination size
:param max_items: maximum items to return
:return: an Iterator of metadata of objects
"""
config = {
"PageSize": page_size,
"MaxItems": max_items,
}
paginator = self.get_conn().get_paginator("list_objects_v2")
params = {
"Bucket": bucket_name,
"Prefix": prefix,
"PaginationConfig": config,
}
if self._requester_pays:
params["RequestPayer"] = "requester"
response = paginator.paginate(**params)
for page in response:
if "Contents" in page:
yield from page["Contents"]
|
Yield metadata objects from a bucket under a prefix.
.. seealso::
- :external+boto3:py:class:`S3.Paginator.ListObjectsV2`
:param prefix: a key prefix
:param bucket_name: the name of the bucket
:param page_size: pagination size
:param max_items: maximum items to return
:return: an Iterator of metadata of objects
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/s3.py
| 957
|
[
"self",
"prefix",
"bucket_name",
"page_size",
"max_items"
] |
Iterator
| true
| 4
| 7.44
|
apache/airflow
| 43,597
|
sphinx
| false
|
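The `iter_file_metadata` record above documents its parameters but includes no usage example. A minimal sketch, assuming an Airflow environment with the Amazon provider installed and a configured AWS connection (connection ID, bucket, and prefix are placeholders):

```python
from airflow.providers.amazon.aws.hooks.s3 import S3Hook

# Connection ID, bucket, and prefix are placeholders.
hook = S3Hook(aws_conn_id="aws_default")
for meta in hook.iter_file_metadata(
    prefix="logs/2024/",
    bucket_name="my-data-bucket",
    page_size=1000,
    max_items=5000,
):
    # Each yielded item is a ListObjectsV2 "Contents" entry.
    print(meta["Key"], meta["Size"], meta["LastModified"])
```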
newLinkedHashSet
|
@SuppressWarnings("NonApiType") // acts as a direct substitute for a constructor call
public static <E extends @Nullable Object> LinkedHashSet<E> newLinkedHashSet() {
return new LinkedHashSet<>();
}
|
Creates a <i>mutable</i>, empty {@code LinkedHashSet} instance.
<p><b>Note:</b> if mutability is not required, use {@link ImmutableSet#of()} instead.
<p><b>Note:</b> this method is now unnecessary and should be treated as deprecated. Instead,
use the {@code LinkedHashSet} constructor directly, taking advantage of <a
href="https://docs.oracle.com/javase/tutorial/java/generics/genTypeInference.html#type-inference-instantiation">"diamond"
syntax</a>.
@return a new, empty {@code LinkedHashSet}
|
java
|
android/guava/src/com/google/common/collect/Sets.java
| 316
|
[] | true
| 1
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
|
toJsonString
|
default String toJsonString() {
try {
StringBuilder stringBuilder = new StringBuilder();
to(stringBuilder);
return stringBuilder.toString();
}
catch (IOException ex) {
throw new UncheckedIOException(ex);
}
}
|
Write the JSON to a {@link String}.
@return the JSON string
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/json/WritableJson.java
| 53
|
[] |
String
| true
| 2
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
pickle_flatten
|
def pickle_flatten(
obj: object, cls: type[T] | tuple[type[T], ...]
) -> tuple[list[T], FlattenRest]:
"""
Use the pickle machinery to extract objects out of an arbitrary container.
Unlike regular ``pickle.dumps``, this function always succeeds.
Parameters
----------
obj : object
The object to pickle.
cls : type | tuple[type, ...]
One or multiple classes to extract from the object.
The instances of these classes inside ``obj`` will not be pickled.
Returns
-------
instances : list[cls]
All instances of ``cls`` found inside ``obj`` (not pickled).
rest
Opaque object containing the pickled bytes plus all other objects where
``__reduce__`` / ``__reduce_ex__`` is either not implemented or raised.
These are unpickleable objects, types, modules, and functions.
This object is *typically* hashable save for fairly exotic objects
that are neither pickleable nor hashable.
This object is pickleable if everything except ``instances`` was pickleable
in the input object.
See Also
--------
pickle_unflatten : Reverse function.
Examples
--------
>>> class A:
... def __repr__(self):
... return "<A>"
>>> class NS:
... def __repr__(self):
... return "<NS>"
... def __reduce__(self):
... assert False, "not serializable"
>>> obj = {1: A(), 2: [A(), NS(), A()]}
>>> instances, rest = pickle_flatten(obj, A)
>>> instances
[<A>, <A>, <A>]
>>> pickle_unflatten(instances, rest)
{1: <A>, 2: [<A>, <NS>, <A>]}
This can be also used to swap inner objects; the only constraint is that
the number of objects in and out must be the same:
>>> pickle_unflatten(["foo", "bar", "baz"], rest)
{1: "foo", 2: ["bar", <NS>, "baz"]}
"""
instances: list[T] = []
rest: list[object] = []
class Pickler(pickle.Pickler): # numpydoc ignore=GL08
"""
Use the `pickle.Pickler.persistent_id` hook to extract objects.
"""
@override
def persistent_id(
self, obj: object
) -> Literal[0, 1, None]: # numpydoc ignore=GL08
if isinstance(obj, cls):
instances.append(obj) # type: ignore[arg-type]
return 0
typ_ = type(obj)
if typ_ in _BASIC_PICKLED_TYPES: # No subclasses!
# If obj is a collection, recursively descend inside it
return None
if typ_ in _BASIC_REST_TYPES:
rest.append(obj)
return 1
try:
# Note: a class that defines __slots__ without defining __getstate__
# cannot be pickled with __reduce__(), but can with __reduce_ex__(5)
_ = obj.__reduce_ex__(pickle.HIGHEST_PROTOCOL)
except Exception: # pylint: disable=broad-exception-caught
rest.append(obj)
return 1
# Object can be pickled. Let the Pickler recursively descend inside it.
return None
f = io.BytesIO()
p = Pickler(f, protocol=pickle.HIGHEST_PROTOCOL)
p.dump(obj)
return instances, (f.getvalue(), *rest)
|
Use the pickle machinery to extract objects out of an arbitrary container.
Unlike regular ``pickle.dumps``, this function always succeeds.
Parameters
----------
obj : object
The object to pickle.
cls : type | tuple[type, ...]
One or multiple classes to extract from the object.
The instances of these classes inside ``obj`` will not be pickled.
Returns
-------
instances : list[cls]
All instances of ``cls`` found inside ``obj`` (not pickled).
rest
Opaque object containing the pickled bytes plus all other objects where
``__reduce__`` / ``__reduce_ex__`` is either not implemented or raised.
These are unpickleable objects, types, modules, and functions.
This object is *typically* hashable save for fairly exotic objects
that are neither pickleable nor hashable.
This object is pickleable if everything except ``instances`` was pickleable
in the input object.
See Also
--------
pickle_unflatten : Reverse function.
Examples
--------
>>> class A:
... def __repr__(self):
... return "<A>"
>>> class NS:
... def __repr__(self):
... return "<NS>"
... def __reduce__(self):
... assert False, "not serializable"
>>> obj = {1: A(), 2: [A(), NS(), A()]}
>>> instances, rest = pickle_flatten(obj, A)
>>> instances
[<A>, <A>, <A>]
>>> pickle_unflatten(instances, rest)
{1: <A>, 2: [<A>, <NS>, <A>]}
This can be also used to swap inner objects; the only constraint is that
the number of objects in and out must be the same:
>>> pickle_unflatten(["foo", "bar", "baz"], rest)
{1: "foo", 2: ["bar", <NS>, "baz"]}
|
python
|
sklearn/externals/array_api_extra/_lib/_utils/_helpers.py
| 362
|
[
"obj",
"cls"
] |
tuple[list[T], FlattenRest]
| true
| 4
| 8.4
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
is_dataclass
|
def is_dataclass(item: object) -> bool:
"""
Checks if the object is a data-class instance
Parameters
----------
item : object
Returns
--------
is_dataclass : bool
True if the item is an instance of a data-class,
will return false if you pass the data class itself
Examples
--------
>>> from dataclasses import dataclass
>>> @dataclass
... class Point:
... x: int
... y: int
>>> is_dataclass(Point)
False
>>> is_dataclass(Point(0, 2))
True
"""
try:
import dataclasses
return dataclasses.is_dataclass(item) and not isinstance(item, type)
except ImportError:
return False
|
Checks if the object is a data-class instance
Parameters
----------
item : object
Returns
--------
is_dataclass : bool
True if the item is an instance of a data-class,
will return false if you pass the data class itself
Examples
--------
>>> from dataclasses import dataclass
>>> @dataclass
... class Point:
... x: int
... y: int
>>> is_dataclass(Point)
False
>>> is_dataclass(Point(0, 2))
True
|
python
|
pandas/core/dtypes/inference.py
| 485
|
[
"item"
] |
bool
| true
| 2
| 7.84
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
commitAsyncExceptionForError
|
private Throwable commitAsyncExceptionForError(Throwable error) {
if (error instanceof RetriableException) {
return new RetriableCommitFailedException(error);
}
return error;
}
|
Commit offsets, retrying on expected retriable errors while the retry timeout hasn't expired.
@param offsets Offsets to commit
@param deadlineMs Time until which the request will be retried if it fails with
an expected retriable error.
@return Future that will complete when a successful response
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java
| 496
|
[
"error"
] |
Throwable
| true
| 2
| 7.76
|
apache/kafka
| 31,560
|
javadoc
| false
|
afterPropertiesSet
|
@Override
public void afterPropertiesSet() {
if (this.fallbackToNoOpCache) {
this.cacheManagers.add(new NoOpCacheManager());
}
}
|
Indicate whether a {@link NoOpCacheManager} should be added at the end of the delegate list.
In this case, any {@code getCache} requests not handled by the configured CacheManagers will
be automatically handled by the {@link NoOpCacheManager} (and hence never return {@code null}).
|
java
|
spring-context/src/main/java/org/springframework/cache/support/CompositeCacheManager.java
| 94
|
[] |
void
| true
| 2
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
orderedPermutations
|
public static <E extends Comparable<? super E>> Collection<List<E>> orderedPermutations(
Iterable<E> elements) {
return orderedPermutations(elements, Ordering.natural());
}
|
Returns a {@link Collection} of all the permutations of the specified {@link Iterable}.
<p><i>Notes:</i> This is an implementation of the algorithm for Lexicographical Permutations
Generation, described in Knuth's "The Art of Computer Programming", Volume 4, Chapter 7,
Section 7.2.1.2. The iteration order follows the lexicographical order. This means that the
first permutation will be in ascending order, and the last will be in descending order.
<p>Duplicate elements are considered equal. For example, the list [1, 1] will have only one
permutation, instead of two. This is why the elements have to implement {@link Comparable}.
<p>An empty iterable has only one permutation, which is an empty list.
<p>This method is equivalent to {@code Collections2.orderedPermutations(list,
Ordering.natural())}.
@param elements the original iterable whose elements have to be permuted.
@return an immutable {@link Collection} containing all the different permutations of the
original iterable.
@throws NullPointerException if the specified iterable is null or has any null elements.
@since 12.0
|
java
|
android/guava/src/com/google/common/collect/Collections2.java
| 356
|
[
"elements"
] | true
| 1
| 6.16
|
google/guava
| 51,352
|
javadoc
| false
|
|
isTrue
|
public static void isTrue(final boolean expression) {
if (!expression) {
throw new IllegalArgumentException(DEFAULT_IS_TRUE_EX_MESSAGE);
}
}
|
Validate that the argument condition is {@code true}; otherwise
throwing an exception. This method is useful when validating according
to an arbitrary boolean expression, such as validating a
primitive number or using your own custom validation expression.
<pre>
Validate.isTrue(i > 0);
Validate.isTrue(myObject.isOk());</pre>
<p>The message of the exception is "The validated expression is
false".</p>
@param expression the boolean expression to check.
@throws IllegalArgumentException if expression is {@code false}.
@see #isTrue(boolean, String, long)
@see #isTrue(boolean, String, double)
@see #isTrue(boolean, String, Object...)
@see #isTrue(boolean, Supplier)
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 496
|
[
"expression"
] |
void
| true
| 2
| 6.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
nullToEmpty
|
public static boolean[] nullToEmpty(final boolean[] array) {
return isEmpty(array) ? EMPTY_BOOLEAN_ARRAY : array;
}
|
Defensive programming technique to change a {@code null}
reference to an empty one.
<p>
This method returns an empty array for a {@code null} input array.
</p>
<p>
As a memory optimizing technique an empty array passed in will be overridden with
the empty {@code public static} references in this class.
</p>
@param array the array to check for {@code null} or empty.
@return the same array, {@code public static} empty array if {@code null} or empty input.
@since 2.5
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 4,280
|
[
"array"
] | true
| 2
| 8.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
ignore
|
public void ignore(String key) {
used.add(key);
}
|
Called directly after user configs got parsed (and thus default values got set).
This allows to change default values for "secondary defaults" if required.
@param parsedValues unmodifiable map of current configuration
@return a map of updates that should be applied to the configuration (will be validated to prevent bad updates)
|
java
|
clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
| 181
|
[
"key"
] |
void
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
register_dataframe_accessor
|
def register_dataframe_accessor(name: str) -> Callable[[TypeT], TypeT]:
"""
Register a custom accessor on DataFrame objects.
Parameters
----------
name : str
Name under which the accessor should be registered. A warning is issued
if this name conflicts with a preexisting attribute.
Returns
-------
callable
A class decorator.
See Also
--------
register_dataframe_accessor : Register a custom accessor on DataFrame objects.
register_series_accessor : Register a custom accessor on Series objects.
register_index_accessor : Register a custom accessor on Index objects.
Notes
-----
This function allows you to register a custom-defined accessor class for DataFrame.
The requirements for the accessor class are as follows:
* Must contain an init method that:
* accepts a single DataFrame object
* raises an AttributeError if the DataFrame object does not have correctly
matching inputs for the accessor
* Must contain a method for each access pattern.
* The methods should be able to take any argument signature.
* Accessible using the @property decorator if no additional arguments are
needed.
Examples
--------
An accessor that only accepts integers could
have a class defined like this:
>>> @pd.api.extensions.register_dataframe_accessor("int_accessor")
... class IntAccessor:
... def __init__(self, pandas_obj):
... if not all(
... pandas_obj[col].dtype == "int64" for col in pandas_obj.columns
... ):
... raise AttributeError("All columns must contain integer values only")
... self._obj = pandas_obj
...
... def sum(self):
... return self._obj.sum()
>>> df = pd.DataFrame([[1, 2], ["x", "y"]])
>>> df.int_accessor
Traceback (most recent call last):
...
AttributeError: All columns must contain integer values only.
>>> df = pd.DataFrame([[1, 2], [3, 4]])
>>> df.int_accessor.sum()
0 4
1 6
dtype: int64
"""
from pandas import DataFrame
return _register_accessor(name, DataFrame)
|
Register a custom accessor on DataFrame objects.
Parameters
----------
name : str
Name under which the accessor should be registered. A warning is issued
if this name conflicts with a preexisting attribute.
Returns
-------
callable
A class decorator.
See Also
--------
register_dataframe_accessor : Register a custom accessor on DataFrame objects.
register_series_accessor : Register a custom accessor on Series objects.
register_index_accessor : Register a custom accessor on Index objects.
Notes
-----
This function allows you to register a custom-defined accessor class for DataFrame.
The requirements for the accessor class are as follows:
* Must contain an init method that:
* accepts a single DataFrame object
* raises an AttributeError if the DataFrame object does not have correctly
matching inputs for the accessor
* Must contain a method for each access pattern.
* The methods should be able to take any argument signature.
* Accessible using the @property decorator if no additional arguments are
needed.
Examples
--------
An accessor that only accepts integers could
have a class defined like this:
>>> @pd.api.extensions.register_dataframe_accessor("int_accessor")
... class IntAccessor:
... def __init__(self, pandas_obj):
... if not all(
... pandas_obj[col].dtype == "int64" for col in pandas_obj.columns
... ):
... raise AttributeError("All columns must contain integer values only")
... self._obj = pandas_obj
...
... def sum(self):
... return self._obj.sum()
>>> df = pd.DataFrame([[1, 2], ["x", "y"]])
>>> df.int_accessor
Traceback (most recent call last):
...
AttributeError: All columns must contain integer values only.
>>> df = pd.DataFrame([[1, 2], [3, 4]])
>>> df.int_accessor.sum()
0 4
1 6
dtype: int64
|
python
|
pandas/core/accessor.py
| 326
|
[
"name"
] |
Callable[[TypeT], TypeT]
| true
| 1
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
toString
|
public static String toString(final Object obj) {
return Objects.toString(obj, StringUtils.EMPTY);
}
|
Gets the {@code toString()} of an {@link Object} or the empty string ({@code ""}) if the input is {@code null}.
<pre>
ObjectUtils.toString(null) = ""
ObjectUtils.toString("") = ""
ObjectUtils.toString("bat") = "bat"
ObjectUtils.toString(Boolean.TRUE) = "true"
</pre>
@param obj the Object to {@code toString()}, may be {@code null}.
@return the input's {@code toString()}, or {@code ""} if the input is {@code null}.
@see Objects#toString(Object)
@see Objects#toString(Object, String)
@see StringUtils#defaultString(String)
@see String#valueOf(Object)
@since 2.0
|
java
|
src/main/java/org/apache/commons/lang3/ObjectUtils.java
| 1,236
|
[
"obj"
] |
String
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
skew
|
def skew(
self,
axis: Axis | None = 0,
skipna: bool = True,
numeric_only: bool = False,
**kwargs,
) -> Series | Any:
"""
Return unbiased skew over requested axis.
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
For `Series` this parameter is unused and defaults to 0.
For DataFrames, specifying ``axis=None`` will apply the aggregation
across both axes.
.. versionadded:: 2.0.0
skipna : bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
Include only float, int, boolean columns.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
Series or scalar
Unbiased skew over requested axis.
See Also
--------
Dataframe.kurt : Returns unbiased kurtosis over requested axis.
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.skew()
0.0
With a DataFrame
>>> df = pd.DataFrame(
... {"a": [1, 2, 3], "b": [2, 3, 4], "c": [1, 3, 5]},
... index=["tiger", "zebra", "cow"],
... )
>>> df
a b c
tiger 1 2 1
zebra 2 3 3
cow 3 4 5
>>> df.skew()
a 0.0
b 0.0
c 0.0
dtype: float64
Using axis=1
>>> df.skew(axis=1)
tiger 1.732051
zebra -1.732051
cow 0.000000
dtype: float64
In this case, `numeric_only` should be set to `True` to avoid
getting an error.
>>> df = pd.DataFrame(
... {"a": [1, 2, 3], "b": ["T", "Z", "X"]}, index=["tiger", "zebra", "cow"]
... )
>>> df.skew(numeric_only=True)
a 0.0
dtype: float64
"""
result = super().skew(
axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs
)
if isinstance(result, Series):
result = result.__finalize__(self, method="skew")
return result
|
Return unbiased skew over requested axis.
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
For `Series` this parameter is unused and defaults to 0.
For DataFrames, specifying ``axis=None`` will apply the aggregation
across both axes.
.. versionadded:: 2.0.0
skipna : bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
Include only float, int, boolean columns.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
Series or scalar
Unbiased skew over requested axis.
See Also
--------
Dataframe.kurt : Returns unbiased kurtosis over requested axis.
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.skew()
0.0
With a DataFrame
>>> df = pd.DataFrame(
... {"a": [1, 2, 3], "b": [2, 3, 4], "c": [1, 3, 5]},
... index=["tiger", "zebra", "cow"],
... )
>>> df
a b c
tiger 1 2 1
zebra 2 3 3
cow 3 4 5
>>> df.skew()
a 0.0
b 0.0
c 0.0
dtype: float64
Using axis=1
>>> df.skew(axis=1)
tiger 1.732051
zebra -1.732051
cow 0.000000
dtype: float64
In this case, `numeric_only` should be set to `True` to avoid
getting an error.
>>> df = pd.DataFrame(
... {"a": [1, 2, 3], "b": ["T", "Z", "X"]}, index=["tiger", "zebra", "cow"]
... )
>>> df.skew(numeric_only=True)
a 0.0
dtype: float64
|
python
|
pandas/core/frame.py
| 13,790
|
[
"self",
"axis",
"skipna",
"numeric_only"
] |
Series | Any
| true
| 2
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
initializerNames
|
public Set<String> initializerNames() {
return Collections.unmodifiableSet(initializers.keySet());
}
|
Returns a set with the names of all {@link BackgroundInitializer} objects managed by the {@link MultiBackgroundInitializer}.
@return an (unmodifiable) set with the names of the managed {@code BackgroundInitializer} objects.
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/MultiBackgroundInitializer.java
| 196
|
[] | true
| 1
| 6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
bindAggregate
|
protected abstract @Nullable Object bindAggregate(ConfigurationPropertyName name, Bindable<?> target,
AggregateElementBinder elementBinder);
|
Perform the actual aggregate binding.
@param name the configuration property name to bind
@param target the target to bind
@param elementBinder an element binder
@return the bound result
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/AggregateBinder.java
| 74
|
[
"name",
"target",
"elementBinder"
] |
Object
| true
| 1
| 6.32
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
registerFormatters
|
@Override
public void registerFormatters(FormatterRegistry registry) {
DateTimeConverters.registerConverters(registry);
DateTimeFormatter df = getFormatter(Type.DATE);
DateTimeFormatter tf = getFormatter(Type.TIME);
DateTimeFormatter dtf = getFormatter(Type.DATE_TIME);
// Efficient ISO_LOCAL_* variants for printing since they are twice as fast...
registry.addFormatterForFieldType(LocalDate.class,
new TemporalAccessorPrinter(
df == DateTimeFormatter.ISO_DATE ? DateTimeFormatter.ISO_LOCAL_DATE : df),
new TemporalAccessorParser(LocalDate.class, df));
registry.addFormatterForFieldType(LocalTime.class,
new TemporalAccessorPrinter(
tf == DateTimeFormatter.ISO_TIME ? DateTimeFormatter.ISO_LOCAL_TIME : tf),
new TemporalAccessorParser(LocalTime.class, tf));
registry.addFormatterForFieldType(LocalDateTime.class,
new TemporalAccessorPrinter(
dtf == DateTimeFormatter.ISO_DATE_TIME ? DateTimeFormatter.ISO_LOCAL_DATE_TIME : dtf),
new TemporalAccessorParser(LocalDateTime.class, dtf));
registry.addFormatterForFieldType(ZonedDateTime.class,
new TemporalAccessorPrinter(dtf),
new TemporalAccessorParser(ZonedDateTime.class, dtf));
registry.addFormatterForFieldType(OffsetDateTime.class,
new TemporalAccessorPrinter(dtf),
new TemporalAccessorParser(OffsetDateTime.class, dtf));
registry.addFormatterForFieldType(OffsetTime.class,
new TemporalAccessorPrinter(tf),
new TemporalAccessorParser(OffsetTime.class, tf));
registry.addFormatterForFieldType(Instant.class, new InstantFormatter());
registry.addFormatterForFieldType(Period.class, new PeriodFormatter());
registry.addFormatterForFieldType(Duration.class, new DurationFormatter());
registry.addFormatterForFieldType(Year.class, new YearFormatter());
registry.addFormatterForFieldType(Month.class, new MonthFormatter());
registry.addFormatterForFieldType(YearMonth.class, new YearMonthFormatter());
registry.addFormatterForFieldType(MonthDay.class, new MonthDayFormatter());
registry.addFormatterForFieldAnnotation(new Jsr310DateTimeFormatAnnotationFormatterFactory());
registry.addFormatterForFieldAnnotation(new DurationFormatAnnotationFormatterFactory());
}
|
Set the formatter that will be used for objects representing date and time values.
<p>This formatter will be used for {@link LocalDateTime}, {@link ZonedDateTime},
and {@link OffsetDateTime} types. When specified, the
{@link #setDateTimeStyle dateTimeStyle} and
{@link #setUseIsoFormat useIsoFormat} properties will be ignored.
@param formatter the formatter to use
@see #setDateFormatter
@see #setTimeFormatter
|
java
|
spring-context/src/main/java/org/springframework/format/datetime/standard/DateTimeFormatterRegistrar.java
| 155
|
[
"registry"
] |
void
| true
| 4
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
generateRandomString
|
function generateRandomString(length: number): string {
const array = new Uint8Array(length);
crypto.getRandomValues(array);
return Array.from(array)
.map(b => b.toString(16).padStart(2, '0'))
.join('')
.substring(0, length);
}
|
Generates a cryptographically secure random string for PKCE code verifier.
@param length The length of the string to generate
@returns A random hex string
|
typescript
|
extensions/github-authentication/src/flows.ts
| 121
|
[
"length"
] | true
| 1
| 6.88
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
stubObject
|
function stubObject() {
return {};
}
|
This method returns a new empty object.
@static
@memberOf _
@since 4.13.0
@category Util
@returns {Object} Returns the new empty object.
@example
var objects = _.times(2, _.stubObject);
console.log(objects);
// => [{}, {}]
console.log(objects[0] === objects[1]);
// => false
|
javascript
|
lodash.js
| 16,190
|
[] | false
| 1
| 6.96
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
request_url
|
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through a HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
:rtype: str
"""
proxy = select_proxy(request.url, proxies)
scheme = urlparse(request.url).scheme
is_proxied_http_request = proxy and scheme != "https"
using_socks_proxy = False
if proxy:
proxy_scheme = urlparse(proxy).scheme.lower()
using_socks_proxy = proxy_scheme.startswith("socks")
url = request.path_url
if url.startswith("//"): # Don't confuse urllib3
url = f"/{url.lstrip('/')}"
if is_proxied_http_request and not using_socks_proxy:
url = urldefragauth(request.url)
return url
|
Obtain the url to use when making the final request.
If the message is being sent through a HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
:rtype: str
|
python
|
src/requests/adapters.py
| 523
|
[
"self",
"request",
"proxies"
] | false
| 6
| 6.08
|
psf/requests
| 53,586
|
sphinx
| false
|
|
size
|
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
See Also
--------
numpy.ndarray.size : Number of elements in the array.
Examples
--------
>>> s = pd.Series({"a": 1, "b": 2, "c": 3})
>>> s.size
3
>>> df = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
>>> df.size
4
"""
return int(np.prod(self.shape))
|
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
See Also
--------
numpy.ndarray.size : Number of elements in the array.
Examples
--------
>>> s = pd.Series({"a": 1, "b": 2, "c": 3})
>>> s.size
3
>>> df = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
>>> df.size
4
|
python
|
pandas/core/generic.py
| 669
|
[
"self"
] |
int
| true
| 1
| 6.08
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
validate
|
def validate(self, names, defaultfmt="f%i", nbfields=None):
"""
Validate a list of strings as field names for a structured array.
Parameters
----------
names : sequence of str
Strings to be validated.
defaultfmt : str, optional
Default format string, used if validating a given string
reduces its length to zero.
nbfields : integer, optional
Final number of validated names, used to expand or shrink the
initial list of names.
Returns
-------
validatednames : list of str
The list of validated field names.
Notes
-----
A `NameValidator` instance can be called directly, which is the
same as calling `validate`. For examples, see `NameValidator`.
"""
# Initial checks ..............
if (names is None):
if (nbfields is None):
return None
names = []
if isinstance(names, str):
names = [names, ]
if nbfields is not None:
nbnames = len(names)
if (nbnames < nbfields):
names = list(names) + [''] * (nbfields - nbnames)
elif (nbnames > nbfields):
names = names[:nbfields]
# Set some shortcuts ...........
deletechars = self.deletechars
excludelist = self.excludelist
case_converter = self.case_converter
replace_space = self.replace_space
# Initializes some variables ...
validatednames = []
seen = {}
nbempty = 0
for item in names:
item = case_converter(item).strip()
if replace_space:
item = item.replace(' ', replace_space)
item = ''.join([c for c in item if c not in deletechars])
if item == '':
item = defaultfmt % nbempty
while item in names:
nbempty += 1
item = defaultfmt % nbempty
nbempty += 1
elif item in excludelist:
item += '_'
cnt = seen.get(item, 0)
if cnt > 0:
validatednames.append(item + '_%d' % cnt)
else:
validatednames.append(item)
seen[item] = cnt + 1
return tuple(validatednames)
|
Validate a list of strings as field names for a structured array.
Parameters
----------
names : sequence of str
Strings to be validated.
defaultfmt : str, optional
Default format string, used if validating a given string
reduces its length to zero.
nbfields : integer, optional
Final number of validated names, used to expand or shrink the
initial list of names.
Returns
-------
validatednames : list of str
The list of validated field names.
Notes
-----
A `NameValidator` instance can be called directly, which is the
same as calling `validate`. For examples, see `NameValidator`.
|
python
|
numpy/lib/_iotools.py
| 312
|
[
"self",
"names",
"defaultfmt",
"nbfields"
] | false
| 14
| 6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
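The `validate` record above defers its examples to the `NameValidator` class docs. A small hedged illustration follows; it imports from an internal NumPy module (as the record's file path indicates), the field names are arbitrary, and the exact output depends on the validator's default settings, so it is not shown.

```python
from numpy.lib._iotools import NameValidator  # internal NumPy helper module

validator = NameValidator()
# Spaces are replaced, duplicate names get a numeric suffix, empty names fall
# back to the default format, and the result is padded or truncated to nbfields.
names = validator.validate(["sensor id", "sensor id", ""], nbfields=4)
print(names)
```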
cos
|
public static double cos(double angle) {
angle = Math.abs(angle);
if (angle > SIN_COS_MAX_VALUE_FOR_INT_MODULO) {
// Faster than using normalizeZeroTwoPi.
angle = remainderTwoPi(angle);
if (angle < 0.0) {
angle += Constants.M_2PI;
}
}
// index: possibly outside tables range.
int index = (int) (angle * SIN_COS_INDEXER + 0.5);
double delta = (angle - index * SIN_COS_DELTA_HI) - index * SIN_COS_DELTA_LO;
// Making sure index is within tables range.
// Last value of each table is the same than first, so we ignore it (tabs size minus one) for modulo.
index &= (SIN_COS_TABS_SIZE - 2); // index % (SIN_COS_TABS_SIZE-1)
double indexCos = cosTab[index];
double indexSin = sinTab[index];
return indexCos + delta * (-indexSin + delta * (-indexCos * ONE_DIV_F2 + delta * (indexSin * ONE_DIV_F3 + delta * indexCos
* ONE_DIV_F4)));
}
|
@param angle Angle in radians.
@return Angle cosine.
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/FastMath.java
| 341
|
[
"angle"
] | true
| 3
| 8.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
_json_to_tensor_description
|
def _json_to_tensor_description(
cls,
json_dict: Optional[str],
tensor_name: Optional[str] = None,
) -> Optional["TensorDescription"]: # type: ignore[name-defined] # noqa: F821
"""Convert JSON string to TensorDescription object.
Args:
json_dict: JSON string representation
tensor_name: Name of the tensor to avoid cache in the same op
Returns:
Optional[TensorDescription]: Reconstructed object or None
"""
if json_dict is None:
return None
tensor_dict = json.loads(json_dict)
from cutlass_library import DataType
from cutlass_library.library import (
ComplexTransform,
LayoutType,
TensorDescription,
)
element = cls._json_to_enum(tensor_dict["element"], DataType)
layout = cls._json_to_enum(tensor_dict["layout"], LayoutType)
alignment = tensor_dict["alignment"]
complex_transform = cls._json_to_enum(
tensor_dict["complex_transform"], ComplexTransform
)
return TensorDescription(element, layout, alignment, complex_transform)
|
Convert JSON string to TensorDescription object.
Args:
json_dict: JSON string representation
tensor_name: Name of the tensor to avoid cache in the same op
Returns:
Optional[TensorDescription]: Reconstructed object or None
|
python
|
torch/_inductor/codegen/cuda/serialization.py
| 425
|
[
"cls",
"json_dict",
"tensor_name"
] |
Optional["TensorDescription"]
| true
| 2
| 7.28
|
pytorch/pytorch
| 96,034
|
google
| false
|
drop
|
def drop(
self,
labels: Index | np.ndarray | Iterable[Hashable],
errors: IgnoreRaise = "raise",
) -> Index:
"""
Make new Index with passed list of labels deleted.
Parameters
----------
labels : array-like or scalar
Array-like object or a scalar value, representing the labels to be removed
from the Index.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
Index
Will be same type as self, except for RangeIndex.
Raises
------
KeyError
If not all of the labels are found in the selected axis
See Also
--------
Index.dropna : Return Index without NA/NaN values.
Index.drop_duplicates : Return Index with duplicate values removed.
Examples
--------
>>> idx = pd.Index(["a", "b", "c"])
>>> idx.drop(["a"])
Index(['b', 'c'], dtype='object')
"""
if not isinstance(labels, Index):
# avoid materializing e.g. RangeIndex
arr_dtype = "object" if self.dtype == "object" else None
labels = com.index_labels_to_array(labels, dtype=arr_dtype)
indexer = self.get_indexer_for(labels)
mask = indexer == -1
if mask.any():
if errors != "ignore":
raise KeyError(f"{labels[mask].tolist()} not found in axis")
indexer = indexer[~mask]
return self.delete(indexer)
|
Make new Index with passed list of labels deleted.
Parameters
----------
labels : array-like or scalar
Array-like object or a scalar value, representing the labels to be removed
from the Index.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
Index
Will be same type as self, except for RangeIndex.
Raises
------
KeyError
If not all of the labels are found in the selected axis
See Also
--------
Index.dropna : Return Index without NA/NaN values.
Index.drop_duplicates : Return Index with duplicate values removed.
Examples
--------
>>> idx = pd.Index(["a", "b", "c"])
>>> idx.drop(["a"])
Index(['b', 'c'], dtype='object')
|
python
|
pandas/core/indexes/base.py
| 7,130
|
[
"self",
"labels",
"errors"
] |
Index
| true
| 5
| 8.48
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
generateGetInstanceSupplierMethod
|
private GeneratedMethod generateGetInstanceSupplierMethod(Consumer<MethodSpec.Builder> method) {
return this.generatedMethods.add("getInstanceSupplier", method);
}
|
Generate the instance supplier code.
@param registeredBean the bean to handle
@param instantiationDescriptor the executable to use to create the bean
@return the generated code
@since 6.1.7
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/InstanceSupplierCodeGenerator.java
| 398
|
[
"method"
] |
GeneratedMethod
| true
| 1
| 6.16
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
isCommandInstanceOf
|
private boolean isCommandInstanceOf(Command command, Class<?>[] commandClasses) {
for (Class<?> commandClass : commandClasses) {
if (commandClass.isInstance(command)) {
return true;
}
}
return false;
}
|
Returns if the specified command is an option command.
@param command the command to test
@return {@code true} if the command is an option command
@see #setOptionCommands(Class...)
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/CommandRunner.java
| 128
|
[
"command",
"commandClasses"
] | true
| 2
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
declaredInSameAspect
|
private boolean declaredInSameAspect(Advisor advisor1, Advisor advisor2) {
return (hasAspectName(advisor1) && hasAspectName(advisor2) &&
getAspectName(advisor1).equals(getAspectName(advisor2)));
}
|
Create an {@code AspectJPrecedenceComparator}, using the given {@link Comparator}
for comparing {@link org.springframework.aop.Advisor} instances.
@param advisorComparator the {@code Comparator} to use for advisors
|
java
|
spring-aop/src/main/java/org/springframework/aop/aspectj/autoproxy/AspectJPrecedenceComparator.java
| 125
|
[
"advisor1",
"advisor2"
] | true
| 3
| 6.16
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
andThen
|
default FailableIntConsumer<E> andThen(final FailableIntConsumer<E> after) {
Objects.requireNonNull(after);
return (final int t) -> {
accept(t);
after.accept(t);
};
}
|
Returns a composed {@link FailableIntConsumer} like {@link IntConsumer#andThen(IntConsumer)}.
@param after the operation to perform after this one.
@return a composed {@link FailableIntConsumer} like {@link IntConsumer#andThen(IntConsumer)}.
@throws NullPointerException if {@code after} is null
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableIntConsumer.java
| 62
|
[
"after"
] | true
| 1
| 6.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getConfiguredCertificates
|
@Override
public Collection<? extends StoredCertificate> getConfiguredCertificates() {
final Path path = resolvePath();
final KeyStore trustStore = readKeyStore(path);
return KeyStoreUtil.stream(trustStore, ex -> keystoreException(path, ex)).map(entry -> {
final X509Certificate certificate = entry.getX509Certificate();
if (certificate != null) {
final boolean hasKey = entry.isKeyEntry();
return new StoredCertificate(certificate, this.truststorePath, this.type, entry.getAlias(), hasKey);
} else {
return null;
}
}).filter(Objects::nonNull).toList();
}
|
@param path The path to the keystore file
@param password The password for the keystore
@param type The {@link KeyStore#getType() type} of the keystore (typically "PKCS12" or "jks").
See {@link KeyStoreUtil#inferKeyStoreType}.
@param algorithm The algorithm to use for the Trust Manager (see {@link javax.net.ssl.TrustManagerFactory#getAlgorithm()}).
@param requireTrustAnchors If true, the truststore will be checked to ensure that it contains at least one valid trust anchor.
@param configBasePath The base path for the configuration directory
|
java
|
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/StoreTrustConfig.java
| 63
|
[] | true
| 2
| 6.4
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
replaceAdvisor
|
boolean replaceAdvisor(Advisor a, Advisor b) throws AopConfigException;
|
Replace the given advisor.
<p><b>Note:</b> If the advisor is an {@link org.springframework.aop.IntroductionAdvisor}
and the replacement is not or implements different interfaces, the proxy will need
to be re-obtained or the old interfaces won't be supported and the new interface
won't be implemented.
@param a the advisor to replace
@param b the advisor to replace it with
@return whether it was replaced. If the advisor wasn't found in the
list of advisors, this method returns {@code false} and does nothing.
@throws AopConfigException in case of invalid advice
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/Advised.java
| 180
|
[
"a",
"b"
] | true
| 1
| 6.48
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
compute
|
public double compute(double... dataset) {
return computeInPlace(dataset.clone());
}
|
Computes the quantile value of the given dataset.
@param dataset the dataset to do the calculation on, which must be non-empty, which will not
be mutated by this call (it is copied instead)
@return the quantile value
|
java
|
android/guava/src/com/google/common/math/Quantiles.java
| 253
|
[] | true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
clean_unused
|
def clean_unused(cls, session: Session = NEW_SESSION) -> None:
"""
Delete all triggers that have no tasks dependent on them and are not associated to an asset.
Triggers have a one-to-many relationship to task instances, so we need to clean those up first.
Afterward we can drop the triggers not referenced by anyone.
"""
# Update all task instances with trigger IDs that are not DEFERRED to remove them
for attempt in run_with_db_retries():
with attempt:
session.execute(
update(TaskInstance)
.where(
TaskInstance.state != TaskInstanceState.DEFERRED, TaskInstance.trigger_id.is_not(None)
)
.values(trigger_id=None)
)
# Get all triggers that have no task instances, assets, or callbacks depending on them and delete them
ids = (
select(cls.id)
.where(~cls.assets.any(), ~cls.callback.has())
.join(TaskInstance, cls.id == TaskInstance.trigger_id, isouter=True)
.group_by(cls.id)
.having(func.count(TaskInstance.trigger_id) == 0)
)
if get_dialect_name(session) == "mysql":
# MySQL doesn't support DELETE with JOIN, so we need to do it in two steps
ids_list = list(session.scalars(ids).all())
session.execute(
delete(Trigger).where(Trigger.id.in_(ids_list)).execution_options(synchronize_session=False)
)
else:
session.execute(
delete(Trigger).where(Trigger.id.in_(ids)).execution_options(synchronize_session=False)
)
|
Delete all triggers that have no tasks dependent on them and are not associated to an asset.
Triggers have a one-to-many relationship to task instances, so we need to clean those up first.
Afterward we can drop the triggers not referenced by anyone.
|
python
|
airflow-core/src/airflow/models/trigger.py
| 209
|
[
"cls",
"session"
] |
None
| true
| 4
| 6
|
apache/airflow
| 43,597
|
unknown
| false
|
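A minimal sketch of the dialect branch used in clean_unused above: on MySQL the id subquery is materialized into a list before the DELETE, elsewhere the subquery is passed straight through. The names delete_unreferenced and ids_subquery are illustrative, not Airflow's.

from sqlalchemy import delete

def delete_unreferenced(session, Trigger, ids_subquery, dialect_name):
    # MySQL can't run the DELETE against a subquery over the same table, so materialize the ids first
    if dialect_name == "mysql":
        ids = list(session.scalars(ids_subquery).all())
        stmt = delete(Trigger).where(Trigger.id.in_(ids))
    else:
        stmt = delete(Trigger).where(Trigger.id.in_(ids_subquery))
    session.execute(stmt.execution_options(synchronize_session=False))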
toString
|
public static String toString(final Object array, final String stringIfNull) {
if (array == null) {
return stringIfNull;
}
return new ToStringBuilder(array, ToStringStyle.SIMPLE_STYLE).append(array).toString();
}
|
Outputs an array as a String handling {@code null}s.
<p>
Multi-dimensional arrays are handled correctly, including
multi-dimensional primitive arrays.
</p>
<p>
The format is that of Java source code, for example {@code {a,b}}.
</p>
@param array the array to get a toString for, may be {@code null}.
@param stringIfNull the String to return if the array is {@code null}.
@return a String representation of the array.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 9,266
|
[
"array",
"stringIfNull"
] |
String
| true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
fillna
|
def fillna(self, value, limit: int | None = None, copy: bool = True) -> Self:
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should be either Interval objects or NA/NaN.
limit : int, default None
(Not implemented yet for IntervalArray)
The maximum number of entries where NA values will be filled.
copy : bool, default True
Whether to make a copy of the data before filling. If False, then
the original should be modified and no new memory should be allocated.
For ExtensionArray subclasses that cannot do this, it is at the
author's discretion whether to ignore "copy=False" or to raise.
Returns
-------
filled : IntervalArray with NA/NaN filled
"""
if copy is False:
raise NotImplementedError
if limit is not None:
raise ValueError("limit must be None")
value_left, value_right = self._validate_scalar(value)
left = self.left.fillna(value=value_left)
right = self.right.fillna(value=value_right)
return self._shallow_copy(left, right)
|
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should be either Interval objects or NA/NaN.
limit : int, default None
(Not implemented yet for IntervalArray)
The maximum number of entries where NA values will be filled.
copy : bool, default True
Whether to make a copy of the data before filling. If False, then
the original should be modified and no new memory should be allocated.
For ExtensionArray subclasses that cannot do this, it is at the
author's discretion whether to ignore "copy=False" or to raise.
Returns
-------
filled : IntervalArray with NA/NaN filled
|
python
|
pandas/core/arrays/interval.py
| 1,020
|
[
"self",
"value",
"limit",
"copy"
] |
Self
| true
| 3
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
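A short hedged sketch of filling missing intervals through the public IntervalArray API; it assumes from_tuples accepts NaN for missing entries, and the fill value must itself be an Interval.

import numpy as np
import pandas as pd

arr = pd.arrays.IntervalArray.from_tuples([(0, 1), np.nan, (2, 3)])
filled = arr.fillna(pd.Interval(0, 1))   # the missing slot becomes (0, 1]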
defaultString
|
public static String defaultString(final String str) {
return Objects.toString(str, EMPTY);
}
|
Returns either the passed in String, or if the String is {@code null}, an empty String ("").
<pre>
StringUtils.defaultString(null) = ""
StringUtils.defaultString("") = ""
StringUtils.defaultString("bat") = "bat"
</pre>
@param str the String to check, may be null.
@return the passed in String, or the empty String if it was {@code null}.
@see Objects#toString(Object, String)
@see String#valueOf(Object)
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 1,570
|
[
"str"
] |
String
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
add_template_global
|
def add_template_global(
self, f: ft.TemplateGlobalCallable, name: str | None = None
) -> None:
"""Register a function to use as a custom Jinja global.
The :meth:`template_global` decorator can be used to register a function
by decorating instead.
:param f: The function to register.
:param name: The name to register the global as. If not given, uses the
function's name.
.. versionadded:: 0.10
"""
self.jinja_env.globals[name or f.__name__] = f
|
Register a function to use as a custom Jinja global.
The :meth:`template_global` decorator can be used to register a function
by decorating instead.
:param f: The function to register.
:param name: The name to register the global as. If not given, uses the
function's name.
.. versionadded:: 0.10
|
python
|
src/flask/sansio/app.py
| 807
|
[
"self",
"f",
"name"
] |
None
| true
| 2
| 6.72
|
pallets/flask
| 70,946
|
sphinx
| false
|
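A minimal usage sketch for add_template_global with a throwaway Flask app; the helper name is illustrative.

from flask import Flask

app = Flask(__name__)

def site_name() -> str:
    return "Example Site"

app.add_template_global(site_name)                 # usable in templates as {{ site_name() }}
app.add_template_global(site_name, name="brand")   # or registered under a custom name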
add
|
public void add(Layer layer, String name) {
String[] segments = name.split("/");
Node node = this.root;
for (int i = 0; i < segments.length; i++) {
boolean isDirectory = i < (segments.length - 1);
node = node.updateOrAddNode(segments[i], isDirectory, layer);
}
}
|
Add an item to the index.
@param layer the layer of the item
@param name the name of the item
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/LayersIndex.java
| 79
|
[
"layer",
"name"
] |
void
| true
| 2
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
derive
|
private <U extends @Nullable Object> ClosingFuture<U> derive(FluentFuture<U> future) {
ClosingFuture<U> derived = new ClosingFuture<>(future);
becomeSubsumedInto(derived.closeables);
return derived;
}
|
Attempts to cancel execution of this step. This attempt will fail if the step has already
completed, has already been cancelled, or could not be cancelled for some other reason. If
successful, and this step has not started when {@code cancel} is called, this step should never
run.
<p>If successful, causes the objects captured by this step (if already started) and its input
step(s) for later closing to be closed on their respective {@link Executor}s. If any such calls
specified {@link MoreExecutors#directExecutor()}, those objects will be closed synchronously.
@param mayInterruptIfRunning {@code true} if the thread executing this task should be
interrupted; otherwise, in-progress tasks are allowed to complete, but the step will be
cancelled regardless
@return {@code false} if the step could not be cancelled, typically because it has already
completed normally; {@code true} otherwise
|
java
|
android/guava/src/com/google/common/util/concurrent/ClosingFuture.java
| 1,102
|
[
"future"
] | true
| 1
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
opj_uint_min
|
static INLINE OPJ_UINT32 opj_uint_min(OPJ_UINT32 a, OPJ_UINT32 b)
{
return a < b ? a : b;
}
|
Get the minimum of two integers
@return Returns a if a < b else b
|
cpp
|
3rdparty/openjpeg/openjp2/opj_intmath.h
| 65
|
[
"a",
"b"
] | true
| 2
| 6.48
|
opencv/opencv
| 85,374
|
doxygen
| false
|
|
slice
|
def slice(a, start=None, stop=np._NoValue, step=None, /):
"""
Slice the strings in `a` by slices specified by `start`, `stop`, `step`.
Like in the regular Python `slice` object, if only `start` is
specified then it is interpreted as the `stop`.
Parameters
----------
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
Input array
start : None, an integer or an array of integers
The start of the slice, broadcasted to `a`'s shape
stop : None, an integer or an array of integers
The end of the slice, broadcasted to `a`'s shape
step : None, an integer or an array of integers
The step for the slice, broadcasted to `a`'s shape
Returns
-------
out : ndarray
Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
depending on input type
Examples
--------
>>> import numpy as np
>>> a = np.array(['hello', 'world'])
>>> np.strings.slice(a, 2)
array(['he', 'wo'], dtype='<U5')
>>> np.strings.slice(a, 2, None)
array(['llo', 'rld'], dtype='<U5')
>>> np.strings.slice(a, 1, 5, 2)
array(['el', 'ol'], dtype='<U5')
One can specify different start/stop/step for different array entries:
>>> np.strings.slice(a, np.array([1, 2]), np.array([4, 5]))
array(['ell', 'rld'], dtype='<U5')
Negative slices have the same meaning as in regular Python:
>>> b = np.array(['hello world', 'γεια σου κόσμε', '你好世界', '👋 🌍'],
... dtype=np.dtypes.StringDType())
>>> np.strings.slice(b, -2)
array(['hello wor', 'γεια σου κόσ', '你好', '👋'], dtype=StringDType())
>>> np.strings.slice(b, -2, None)
array(['ld', 'με', '世界', ' 🌍'], dtype=StringDType())
>>> np.strings.slice(b, [3, -10, 2, -3], [-1, -2, -1, 3])
array(['lo worl', ' σου κόσ', '世', '👋 🌍'], dtype=StringDType())
>>> np.strings.slice(b, None, None, -1)
array(['dlrow olleh', 'εμσόκ υοσ αιεγ', '界世好你', '🌍 👋'],
dtype=StringDType())
"""
# Just like in the construction of a regular slice object, if only start
# is specified then start will become stop, see logic in slice_new.
if stop is np._NoValue:
stop = start
start = None
# adjust start, stop, step to be integers, see logic in PySlice_Unpack
if step is None:
step = 1
step = np.asanyarray(step)
if not np.issubdtype(step.dtype, np.integer):
raise TypeError(f"unsupported type {step.dtype} for operand 'step'")
if np.any(step == 0):
raise ValueError("slice step cannot be zero")
if start is None:
start = np.where(step < 0, np.iinfo(np.intp).max, 0)
if stop is None:
stop = np.where(step < 0, np.iinfo(np.intp).min, np.iinfo(np.intp).max)
return _slice(a, start, stop, step)
|
Slice the strings in `a` by slices specified by `start`, `stop`, `step`.
Like in the regular Python `slice` object, if only `start` is
specified then it is interpreted as the `stop`.
Parameters
----------
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
Input array
start : None, an integer or an array of integers
The start of the slice, broadcasted to `a`'s shape
stop : None, an integer or an array of integers
The end of the slice, broadcasted to `a`'s shape
step : None, an integer or an array of integers
The step for the slice, broadcasted to `a`'s shape
Returns
-------
out : ndarray
Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
depending on input type
Examples
--------
>>> import numpy as np
>>> a = np.array(['hello', 'world'])
>>> np.strings.slice(a, 2)
array(['he', 'wo'], dtype='<U5')
>>> np.strings.slice(a, 2, None)
array(['llo', 'rld'], dtype='<U5')
>>> np.strings.slice(a, 1, 5, 2)
array(['el', 'ol'], dtype='<U5')
One can specify different start/stop/step for different array entries:
>>> np.strings.slice(a, np.array([1, 2]), np.array([4, 5]))
array(['ell', 'rld'], dtype='<U5')
Negative slices have the same meaning as in regular Python:
>>> b = np.array(['hello world', 'γεια σου κόσμε', '你好世界', '👋 🌍'],
... dtype=np.dtypes.StringDType())
>>> np.strings.slice(b, -2)
array(['hello wor', 'γεια σου κόσ', '你好', '👋'], dtype=StringDType())
>>> np.strings.slice(b, -2, None)
array(['ld', 'με', '世界', ' 🌍'], dtype=StringDType())
>>> np.strings.slice(b, [3, -10, 2, -3], [-1, -2, -1, 3])
array(['lo worl', ' σου κόσ', '世', '👋 🌍'], dtype=StringDType())
>>> np.strings.slice(b, None, None, -1)
array(['dlrow olleh', 'εμσόκ υοσ αιεγ', '界世好你', '🌍 👋'],
dtype=StringDType())
|
python
|
numpy/_core/strings.py
| 1,730
|
[
"a",
"start",
"stop",
"step"
] | false
| 7
| 7.76
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
touch
|
@SuppressWarnings("GoodTime") // reading system time without TimeSource
public static void touch(File file) throws IOException {
checkNotNull(file);
if (!file.createNewFile() && !file.setLastModified(System.currentTimeMillis())) {
throw new IOException("Unable to update modification time of " + file);
}
}
|
Creates an empty file or updates the last-modified timestamp, the same as the unix command of
the same name.
@param file the file to create or update
@throws IOException if an I/O error occurs
|
java
|
android/guava/src/com/google/common/io/Files.java
| 444
|
[
"file"
] |
void
| true
| 3
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
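For comparison, the same create-or-bump-mtime behaviour sketched with Python's pathlib (the filename is illustrative).

from pathlib import Path

Path("marker.txt").touch(exist_ok=True)   # creates the file, or updates its modification time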
equals
|
@Override
public boolean equals(final Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof CharSet)) {
return false;
}
final CharSet other = (CharSet) obj;
return set.equals(other.set);
}
|
Compares two {@link CharSet} objects, returning true if they represent
exactly the same set of characters defined in the same way.
<p>The two sets {@code abc} and {@code a-c} are <em>not</em>
equal according to this method.</p>
@param obj the object to compare to
@return true if equal
@since 2.0
|
java
|
src/main/java/org/apache/commons/lang3/CharSet.java
| 238
|
[
"obj"
] | true
| 3
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
setOutput
|
void setOutput(@Nullable String output) {
if (output != null && output.endsWith("/")) {
this.output = output.substring(0, output.length() - 1);
this.extract = true;
}
else {
this.output = output;
}
}
|
The location of the generated project.
@return the location of the generated project
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/init/ProjectGenerationRequest.java
| 98
|
[
"output"
] |
void
| true
| 3
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
as_unit
|
def as_unit(self, unit: TimeUnit) -> Self:
"""
Convert to a dtype with the given unit resolution.
This method is for converting the dtype of a ``DatetimeIndex`` or
``TimedeltaIndex`` to a new dtype with the given unit
resolution/precision.
Parameters
----------
unit : {'s', 'ms', 'us', 'ns'}
Returns
-------
same type as self
Converted to the specified unit.
See Also
--------
Timestamp.as_unit : Convert to the given unit.
Timedelta.as_unit : Convert to the given unit.
DatetimeIndex.as_unit : Convert to the given unit.
TimedeltaIndex.as_unit : Convert to the given unit.
Examples
--------
For :class:`pandas.DatetimeIndex`:
>>> idx = pd.DatetimeIndex(["2020-01-02 01:02:03.004005006"])
>>> idx
DatetimeIndex(['2020-01-02 01:02:03.004005006'],
dtype='datetime64[ns]', freq=None)
>>> idx.as_unit("s")
DatetimeIndex(['2020-01-02 01:02:03'], dtype='datetime64[s]', freq=None)
For :class:`pandas.TimedeltaIndex`:
>>> tdelta_idx = pd.to_timedelta(["1 day 3 min 2 us 42 ns"])
>>> tdelta_idx
TimedeltaIndex(['1 days 00:03:00.000002042'],
dtype='timedelta64[ns]', freq=None)
>>> tdelta_idx.as_unit("s")
TimedeltaIndex(['1 days 00:03:00'], dtype='timedelta64[s]', freq=None)
"""
arr = self._data.as_unit(unit)
return type(self)._simple_new(arr, name=self.name)
|
Convert to a dtype with the given unit resolution.
This method is for converting the dtype of a ``DatetimeIndex`` or
``TimedeltaIndex`` to a new dtype with the given unit
resolution/precision.
Parameters
----------
unit : {'s', 'ms', 'us', 'ns'}
Returns
-------
same type as self
Converted to the specified unit.
See Also
--------
Timestamp.as_unit : Convert to the given unit.
Timedelta.as_unit : Convert to the given unit.
DatetimeIndex.as_unit : Convert to the given unit.
TimedeltaIndex.as_unit : Convert to the given unit.
Examples
--------
For :class:`pandas.DatetimeIndex`:
>>> idx = pd.DatetimeIndex(["2020-01-02 01:02:03.004005006"])
>>> idx
DatetimeIndex(['2020-01-02 01:02:03.004005006'],
dtype='datetime64[ns]', freq=None)
>>> idx.as_unit("s")
DatetimeIndex(['2020-01-02 01:02:03'], dtype='datetime64[s]', freq=None)
For :class:`pandas.TimedeltaIndex`:
>>> tdelta_idx = pd.to_timedelta(["1 day 3 min 2 us 42 ns"])
>>> tdelta_idx
TimedeltaIndex(['1 days 00:03:00.000002042'],
dtype='timedelta64[ns]', freq=None)
>>> tdelta_idx.as_unit("s")
TimedeltaIndex(['1 days 00:03:00'], dtype='timedelta64[s]', freq=None)
|
python
|
pandas/core/indexes/datetimelike.py
| 559
|
[
"self",
"unit"
] |
Self
| true
| 1
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
validateAndGetRecordBatch
|
private RecordBatch validateAndGetRecordBatch() {
MemoryRecords memoryRecords = recordsBuilder.build();
Iterator<MutableRecordBatch> recordBatchIter = memoryRecords.batches().iterator();
if (!recordBatchIter.hasNext())
throw new IllegalStateException("Cannot split an empty producer batch.");
RecordBatch recordBatch = recordBatchIter.next();
if (recordBatch.magic() < MAGIC_VALUE_V2 && !recordBatch.isCompressed())
throw new IllegalArgumentException("Batch splitting cannot be used with non-compressed messages " +
"with version v0 and v1");
if (recordBatchIter.hasNext())
throw new IllegalArgumentException("A producer batch should only have one record batch.");
return recordBatch;
}
|
Finalize the state of a batch. Final state, once set, is immutable. This function may be called
once or twice on a batch. It may be called twice if
1. An inflight batch expires before a response from the broker is received. The batch's final
state is set to FAILED. But it could succeed on the broker and second time around batch.done() may
try to set SUCCEEDED final state.
2. If a transaction abortion happens or if the producer is closed forcefully, the final state is
ABORTED but again it could succeed if broker responds with a success.
Attempted transitions from [FAILED | ABORTED] --> SUCCEEDED are logged.
Attempted transitions from one failure state to the same or a different failed state are ignored.
Attempted transitions from SUCCEEDED to the same or a failed state throw an exception.
@param baseOffset The base offset of the messages assigned by the server
@param logAppendTime The log append time or -1 if CreateTime is being used
@param topLevelException The exception that occurred (or null if the request was successful)
@param recordExceptions Record exception function mapping batchIndex to the respective record exception
@return true if the batch was completed successfully and false if the batch was previously aborted
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerBatch.java
| 330
|
[] |
RecordBatch
| true
| 5
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
toSupplier
|
private static Supplier<String> toSupplier(final String message, final Object... values) {
return () -> getMessage(message, values);
}
|
Validate that the specified argument is not {@code null};
otherwise throwing an exception with the specified message.
<pre>Validate.notNull(myObject, "The object must not be null");</pre>
@param <T> the object type.
@param object the object to check.
@param message the {@link String#format(String, Object...)} exception message if invalid, not null.
@param values the optional values for the formatted exception message.
@return the validated object (never {@code null} for method chaining).
@throws NullPointerException if the object is {@code null}.
@see Objects#requireNonNull(Object)
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 1,064
|
[
"message"
] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
readFrom
|
long readFrom(ScatteringByteChannel channel) throws IOException;
|
Read bytes into this receive from the given channel
@param channel The channel to read from
@return The number of bytes read
@throws IOException If the reading fails
|
java
|
clients/src/main/java/org/apache/kafka/common/network/Receive.java
| 44
|
[
"channel"
] | true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
join
|
public StringBuilder join(final StringBuilder stringBuilder, @SuppressWarnings("unchecked") final T... elements) {
return joinSB(stringBuilder, prefix, suffix, delimiter, appender, elements);
}
|
Joins stringified objects from the given array into a StringBuilder.
@param stringBuilder The target.
@param elements The source.
@return the given target StringBuilder.
|
java
|
src/main/java/org/apache/commons/lang3/AppendableJoiner.java
| 283
|
[
"stringBuilder"
] |
StringBuilder
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
unpark
|
void unpark() {
// This is racy with removeWaiter. The consequence of the race is that we may spuriously call
// unpark even though the thread has already removed itself from the list. But even if we did
// use a CAS, that race would still exist (it would just be ever so slightly smaller).
Thread w = thread;
if (w != null) {
thread = null;
LockSupport.unpark(w);
}
}
|
Constructor for the TOMBSTONE, avoids use of ATOMIC_HELPER in case this class is loaded
before the ATOMIC_HELPER. Apparently this is possible on some android platforms.
|
java
|
android/guava/src/com/google/common/util/concurrent/AbstractFutureState.java
| 324
|
[] |
void
| true
| 2
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
_set_name
|
def _set_name(self, name, inplace: bool = False) -> Series:
"""
Set the Series name.
Parameters
----------
name : str
inplace : bool
Whether to modify `self` directly or return a copy.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
ser = self if inplace else self.copy(deep=False)
ser.name = name
return ser
|
Set the Series name.
Parameters
----------
name : str
inplace : bool
Whether to modify `self` directly or return a copy.
|
python
|
pandas/core/series.py
| 1,902
|
[
"self",
"name",
"inplace"
] |
Series
| true
| 2
| 6.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
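_set_name is private; a hedged sketch of the equivalent public calls follows.

import pandas as pd

s = pd.Series([1, 2, 3])
renamed = s.rename("counts")   # returns a copy with the new name (the inplace=False path)
s.name = "counts"              # sets the name directly, like the inplace=True path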
_despine
|
def _despine(ax):
"""Remove the top and right spines of the plot.
Parameters
----------
ax : matplotlib.axes.Axes
The axes of the plot to despine.
"""
for s in ["top", "right"]:
ax.spines[s].set_visible(False)
for s in ["bottom", "left"]:
ax.spines[s].set_bounds(0, 1)
|
Remove the top and right spines of the plot.
Parameters
----------
ax : matplotlib.axes.Axes
The axes of the plot to despine.
|
python
|
sklearn/utils/_plotting.py
| 360
|
[
"ax"
] | false
| 3
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
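A self-contained matplotlib sketch of the same despine pattern (hide the top/right spines, bound bottom/left to [0, 1]); the plotted data is illustrative.

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 0.5, 1], [0, 0.8, 1])
for side in ["top", "right"]:
    ax.spines[side].set_visible(False)
for side in ["bottom", "left"]:
    ax.spines[side].set_bounds(0, 1)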
withHook
|
public static <T> T withHook(SpringApplicationHook hook, ThrowingSupplier<T> action) {
applicationHook.set(hook);
try {
return action.get();
}
finally {
applicationHook.remove();
}
}
|
Perform the given action with the given {@link SpringApplicationHook} attached if
the action triggers an {@link SpringApplication#run(String...) application run}.
@param <T> the result type
@param hook the hook to apply
@param action the action to run
@return the result of the action
@since 3.0.0
@see #withHook(SpringApplicationHook, Runnable)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
| 1,462
|
[
"hook",
"action"
] |
T
| true
| 1
| 6.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
format
|
@Deprecated
StringBuffer format(long millis, StringBuffer buf);
|
Formats a millisecond {@code long} value into the
supplied {@link StringBuffer}.
@param millis the millisecond value to format.
@param buf the buffer to format into.
@return the specified string buffer.
@deprecated Use {@link #format(long, Appendable)}.
|
java
|
src/main/java/org/apache/commons/lang3/time/DatePrinter.java
| 139
|
[
"millis",
"buf"
] |
StringBuffer
| true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
_extract_multi_indexer_columns
|
def _extract_multi_indexer_columns(
self,
header,
index_names: Sequence[Hashable] | None,
passed_names: bool = False,
) -> tuple[
Sequence[Hashable], Sequence[Hashable] | None, Sequence[Hashable] | None, bool
]:
"""
Extract and return the names, index_names, col_names if the column
names are a MultiIndex.
Parameters
----------
header: list of lists
The header rows
index_names: list, optional
The names of the future index
passed_names: bool, default False
A flag specifying if names were passed
"""
if len(header) < 2:
return header[0], index_names, None, passed_names
# the names are the tuples of the header that are not the index cols
# 0 is the name of the index, assuming index_col is a list of column
# numbers
ic = self.index_col
if ic is None:
ic = []
if not isinstance(ic, (list, tuple, np.ndarray)):
ic = [ic]
sic = set(ic)
# clean the index_names
index_names = header.pop(-1)
index_names, _, _ = self._clean_index_names(index_names, self.index_col)
# extract the columns
field_count = len(header[0])
# check if header lengths are equal
if not all(len(header_iter) == field_count for header_iter in header[1:]):
raise ParserError("Header rows must have an equal number of columns.")
def extract(r):
return tuple(r[i] for i in range(field_count) if i not in sic)
columns = list(zip(*(extract(r) for r in header), strict=True))
names = columns.copy()
for single_ic in sorted(ic):
names.insert(single_ic, single_ic)
# Clean the column names (if we have an index_col).
if ic:
col_names = [
r[ic[0]]
if ((r[ic[0]] is not None) and r[ic[0]] not in self.unnamed_cols)
else None
for r in header
]
else:
col_names = [None] * len(header)
passed_names = True
return names, index_names, col_names, passed_names
|
Extract and return the names, index_names, col_names if the column
names are a MultiIndex.
Parameters
----------
header: list of lists
The header rows
index_names: list, optional
The names of the future index
passed_names: bool, default False
A flag specifying if names were passed
|
python
|
pandas/io/parsers/base_parser.py
| 190
|
[
"self",
"header",
"index_names",
"passed_names"
] |
tuple[
Sequence[Hashable], Sequence[Hashable] | None, Sequence[Hashable] | None, bool
]
| true
| 10
| 6.64
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
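The parser helper above is internal; its observable effect is easiest to see through read_csv with a two-row header, as in this hedged sketch (CSV contents illustrative).

import io
import pandas as pd

csv = "A,A,B\nx,y,z\n1,2,3\n"
df = pd.read_csv(io.StringIO(csv), header=[0, 1])
print(df.columns)   # MultiIndex([('A', 'x'), ('A', 'y'), ('B', 'z')], ...)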
finish
|
protected void finish() throws IOException {
if (sawReturn || line.length() > 0) {
finishLine(false);
}
}
|
Subclasses must call this method after finishing character processing, in order to ensure that
any unterminated line in the buffer is passed to {@link #handleLine}.
@throws IOException if an I/O error occurs
|
java
|
android/guava/src/com/google/common/io/LineBuffer.java
| 104
|
[] |
void
| true
| 3
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
find_valid_index
|
def find_valid_index(how: str, is_valid: npt.NDArray[np.bool_]) -> int | None:
"""
Retrieves the positional index of the first valid value.
Parameters
----------
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
is_valid: np.ndarray
Mask to find na_values.
Returns
-------
int or None
"""
assert how in ["first", "last"]
if len(is_valid) == 0: # early stop
return None
if is_valid.ndim == 2:
# reduce axis 1
is_valid = is_valid.any(axis=1) # type: ignore[assignment]
if how == "first":
idxpos = is_valid[::].argmax()
elif how == "last":
idxpos = len(is_valid) - 1 - is_valid[::-1].argmax()
chk_notna = is_valid[idxpos]
if not chk_notna:
return None
# Incompatible return value type (got "signedinteger[Any]",
# expected "Optional[int]")
return idxpos # type: ignore[return-value]
|
Retrieves the positional index of the first valid value.
Parameters
----------
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
is_valid: np.ndarray
Mask to find na_values.
Returns
-------
int or None
|
python
|
pandas/core/missing.py
| 239
|
[
"how",
"is_valid"
] |
int | None
| true
| 6
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
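find_valid_index is private, but its core logic can be sketched directly on a boolean mask; the mask values are illustrative.

import numpy as np

is_valid = np.array([False, True, True, False])
first = int(is_valid.argmax()) if is_valid.any() else None                             # -> 1
last = len(is_valid) - 1 - int(is_valid[::-1].argmax()) if is_valid.any() else None    # -> 2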
resetPositionsIfNeeded
|
public void resetPositionsIfNeeded() {
Map<TopicPartition, AutoOffsetResetStrategy> partitionAutoOffsetResetStrategyMap =
offsetFetcherUtils.getOffsetResetStrategyForPartitions();
if (partitionAutoOffsetResetStrategyMap.isEmpty())
return;
resetPositionsAsync(partitionAutoOffsetResetStrategyMap);
}
|
Reset offsets for all assigned partitions that require it.
@throws org.apache.kafka.clients.consumer.NoOffsetForPartitionException If no offset reset strategy is defined
and one or more partitions aren't awaiting a seekToBeginning() or seekToEnd().
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcher.java
| 103
|
[] |
void
| true
| 2
| 6.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
isSupportedType
|
public static boolean isSupportedType(EventListener listener) {
for (Class<?> type : SUPPORTED_TYPES) {
if (ClassUtils.isAssignableValue(type, listener)) {
return true;
}
}
return false;
}
|
Returns {@code true} if the specified listener is one of the supported types.
@param listener the listener to test
@return if the listener is of a supported type
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/web/servlet/ServletListenerRegistrationBean.java
| 132
|
[
"listener"
] | true
| 2
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
toString
|
@Override
public String toString() {
return "ListTransactionsOptions(" +
"filteredStates=" + filteredStates +
", filteredProducerIds=" + filteredProducerIds +
", filteredDuration=" + filteredDuration +
", filteredTransactionalIdPattern=" + filteredTransactionalIdPattern +
", timeoutMs=" + timeoutMs +
')';
}
|
Returns transactional ID being filtered.
@return the current transactional ID pattern filter (empty means no transactional IDs are filtered and all
transactions will be returned)
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsOptions.java
| 126
|
[] |
String
| true
| 1
| 6.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
rebucket
|
private void rebucket() {
rebucketCount++;
LongKeyedBucketOrds oldOrds = bucketOrds;
boolean success = false;
try {
long[] mergeMap = new long[Math.toIntExact(oldOrds.size())];
bucketOrds = new LongKeyedBucketOrds.FromMany(bigArrays());
success = true;
long maxOwning = oldOrds.maxOwningBucketOrd();
for (long owningBucketOrd = 0; owningBucketOrd <= maxOwning; owningBucketOrd++) {
/*
* Check for cancellation during this tight loop as it can take a while and the standard
* cancellation checks don't run during the loop, because it's a tight loop.
*/
if (context.isCancelled()) {
throw new TaskCancelledException("cancelled");
}
LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = oldOrds.ordsEnum(owningBucketOrd);
Rounding.Prepared preparedRounding = preparedRoundings[roundingIndexFor(owningBucketOrd)];
while (ordsEnum.next()) {
long oldKey = ordsEnum.value();
long newKey = preparedRounding.round(oldKey);
long newBucketOrd = bucketOrds.add(owningBucketOrd, newKey);
mergeMap[(int) ordsEnum.ord()] = newBucketOrd >= 0 ? newBucketOrd : -1 - newBucketOrd;
}
liveBucketCountUnderestimate = bigArrays().grow(liveBucketCountUnderestimate, owningBucketOrd + 1);
liveBucketCountUnderestimate.set(owningBucketOrd, Math.toIntExact(bucketOrds.bucketsInOrd(owningBucketOrd)));
}
merge(mergeMap, bucketOrds.size());
} finally {
if (success) {
oldOrds.close();
}
}
}
|
Increase the rounding of {@code owningBucketOrd} using
estimated bucket counts, {@link FromMany#rebucket() rebucketing} all the
buckets if the estimated number of wasted buckets is too high.
|
java
|
modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregator.java
| 568
|
[] |
void
| true
| 6
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
getNonSingletonFactoryBeanForTypeCheck
|
private @Nullable FactoryBean<?> getNonSingletonFactoryBeanForTypeCheck(String beanName, RootBeanDefinition mbd) {
if (isPrototypeCurrentlyInCreation(beanName)) {
return null;
}
Object instance;
try {
// Mark this bean as currently in creation, even if just partially.
beforePrototypeCreation(beanName);
// Give BeanPostProcessors a chance to return a proxy instead of the target bean instance.
instance = resolveBeforeInstantiation(beanName, mbd);
if (instance == null) {
BeanWrapper bw = createBeanInstance(beanName, mbd, null);
instance = bw.getWrappedInstance();
}
}
catch (UnsatisfiedDependencyException ex) {
// Don't swallow, probably misconfiguration...
throw ex;
}
catch (BeanCreationException ex) {
// Instantiation failure, maybe too early...
if (logger.isDebugEnabled()) {
logger.debug("Bean creation exception on non-singleton FactoryBean type check: " + ex);
}
onSuppressedException(ex);
return null;
}
finally {
// Finished partial creation of this bean.
afterPrototypeCreation(beanName);
}
return getFactoryBean(beanName, instance);
}
|
Obtain a "shortcut" non-singleton FactoryBean instance to use for a
{@code getObjectType()} call, without full initialization of the FactoryBean.
@param beanName the name of the bean
@param mbd the bean definition for the bean
@return the FactoryBean instance, or {@code null} to indicate
that we couldn't obtain a shortcut FactoryBean instance
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractAutowireCapableBeanFactory.java
| 1,068
|
[
"beanName",
"mbd"
] | true
| 6
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
visitImportEqualsDeclaration
|
function visitImportEqualsDeclaration(node: ImportEqualsDeclaration): VisitResult<Statement | undefined> {
Debug.assert(isExternalModuleImportEqualsDeclaration(node), "import= for internal module references should be handled in an earlier transformer.");
let statements: Statement[] | undefined;
statements = append(
statements,
setOriginalNode(
setTextRange(
factory.createVariableStatement(
/*modifiers*/ undefined,
factory.createVariableDeclarationList(
[
factory.createVariableDeclaration(
factory.cloneNode(node.name),
/*exclamationToken*/ undefined,
/*type*/ undefined,
createRequireCall(node),
),
],
/*flags*/ languageVersion >= ScriptTarget.ES2015 ? NodeFlags.Const : NodeFlags.None,
),
),
node,
),
node,
),
);
statements = appendExportsOfImportEqualsDeclaration(statements, node);
return singleOrMany(statements);
}
|
Visits an ImportEqualsDeclaration node.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/module/esnextAnd2015.ts
| 261
|
[
"node"
] | true
| 2
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
partitions
|
public Set<TopicPartition> partitions() {
return Collections.unmodifiableSet(records.keySet());
}
|
Get the partitions which have records contained in this record set.
@return the set of partitions with data in this record set (may be empty if no data was returned)
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecords.java
| 92
|
[] | true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
partial_fit
|
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns the instance itself.
"""
first_call = not hasattr(self, "classes_")
X, y = self._check_X_y(X, y, reset=first_call)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_classes = len(classes)
self._init_counters(n_classes, n_features)
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
if len(self.classes_) == 2:
Y = np.concatenate((1 - Y, Y), axis=1)
else: # degenerate case: just one class
Y = np.ones_like(Y)
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64, copy=False)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
sample_weight = np.atleast_2d(sample_weight)
Y *= sample_weight.T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
alpha = self._check_alpha()
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
return self
|
Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns the instance itself.
|
python
|
sklearn/naive_bayes.py
| 664
|
[
"self",
"X",
"y",
"classes",
"sample_weight"
] | false
| 7
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
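A minimal out-of-core sketch with MultinomialNB, one concrete subclass exposing this partial_fit; classes must be supplied on the first call, and the toy chunks are illustrative.

import numpy as np
from sklearn.naive_bayes import MultinomialNB

clf = MultinomialNB()
clf.partial_fit(np.array([[1, 0], [0, 2]]), np.array([0, 1]), classes=np.array([0, 1]))
clf.partial_fit(np.array([[3, 1]]), np.array([0]))   # later chunks omit classes
print(clf.predict(np.array([[2, 0]])))               # -> [0]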
insertMACSeparators
|
private static String insertMACSeparators(String v) {
// Check that the length is correct for a MAC address without separators.
// And check that there isn't already a separator in the string.
if ((v.length() != EUI48_HEX_LENGTH && v.length() != EUI64_HEX_LENGTH)
|| v.charAt(2) == ':'
|| v.charAt(2) == '-'
|| v.charAt(4) == '.') {
return v;
}
StringBuilder sb = new StringBuilder(EUI64_HEX_WITH_SEPARATOR_MAX_LENGTH);
for (int i = 0; i < v.length(); i++) {
sb.append(v.charAt(i));
if (i < v.length() - 1 && i % 2 != 0) {
sb.append(':');
}
}
return sb.toString();
}
|
A utility method for determining whether a string contains only digits, possibly with a leading '+' or '-'.
That is, does this string have any hope of being parse-able as a Long?
|
java
|
modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/CefParser.java
| 580
|
[
"v"
] |
String
| true
| 9
| 6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
translateDisplayDataOutput
|
function translateDisplayDataOutput(
output: nbformat.IDisplayData | nbformat.IDisplayUpdate | nbformat.IExecuteResult
): NotebookCellOutput {
// Metadata could be as follows:
// We'll have metadata specific to each mime type as well as generic metadata.
/*
IDisplayData = {
output_type: 'display_data',
data: {
'image/jpg': '/////'
'image/png': '/////'
'text/plain': '/////'
},
metadata: {
'image/png': '/////',
'background': true,
'xyz': '///
}
}
*/
const metadata = getOutputMetadata(output);
const items: NotebookCellOutputItem[] = [];
if (output.data) {
for (const key in output.data) {
items.push(convertJupyterOutputToBuffer(key, output.data[key]));
}
}
return new NotebookCellOutput(sortOutputItemsBasedOnDisplayOrder(items), metadata);
}
|
Concatenates a multiline string or an array of strings into a single string.
Also normalizes line endings to use LF (`\n`) instead of CRLF (`\r\n`).
Same is done in serializer as well.
|
typescript
|
extensions/ipynb/src/deserializers.ts
| 206
|
[
"output"
] | true
| 2
| 6
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
readFully
|
public static int readFully(InputStream reader, byte[] dest, int offset, int len) throws IOException {
int read = 0;
while (read < len) {
final int r = reader.read(dest, offset + read, len - read);
if (r == -1) {
break;
}
read += r;
}
return read;
}
|
Read up to {code count} bytes from {@code input} and store them into {@code buffer}.
The buffers position will be incremented by the number of bytes read from the stream.
@param input stream to read from
@param buffer buffer to read into
@param count maximum number of bytes to read
@return number of bytes read from the stream
@throws IOException in case of I/O errors
|
java
|
libs/core/src/main/java/org/elasticsearch/core/Streams.java
| 127
|
[
"reader",
"dest",
"offset",
"len"
] | true
| 3
| 7.92
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
lagcompanion
|
def lagcompanion(c):
"""
Return the companion matrix of c.
The usual companion matrix of the Laguerre polynomials is already
symmetric when `c` is a basis Laguerre polynomial, so no scaling is
applied.
Parameters
----------
c : array_like
1-D array of Laguerre series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Companion matrix of dimensions (deg, deg).
Examples
--------
>>> from numpy.polynomial.laguerre import lagcompanion
>>> lagcompanion([1, 2, 3])
array([[ 1. , -0.33333333],
[-1. , 4.33333333]])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[1 + c[0] / c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
top = mat.reshape(-1)[1::n + 1]
mid = mat.reshape(-1)[0::n + 1]
bot = mat.reshape(-1)[n::n + 1]
top[...] = -np.arange(1, n)
mid[...] = 2. * np.arange(n) + 1.
bot[...] = top
mat[:, -1] += (c[:-1] / c[-1]) * n
return mat
|
Return the companion matrix of c.
The usual companion matrix of the Laguerre polynomials is already
symmetric when `c` is a basis Laguerre polynomial, so no scaling is
applied.
Parameters
----------
c : array_like
1-D array of Laguerre series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Companion matrix of dimensions (deg, deg).
Examples
--------
>>> from numpy.polynomial.laguerre import lagcompanion
>>> lagcompanion([1, 2, 3])
array([[ 1. , -0.33333333],
[-1. , 4.33333333]])
|
python
|
numpy/polynomial/laguerre.py
| 1,421
|
[
"c"
] | false
| 3
| 7.52
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
mean_variance_axis
|
def mean_variance_axis(X, axis, weights=None, return_sum_weights=False):
"""Compute mean and variance along an axis on a CSR or CSC matrix.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Input data. It can be of CSR or CSC format.
axis : {0, 1}
Axis along which the axis should be computed.
weights : ndarray of shape (n_samples,) or (n_features,), default=None
If axis is set to 0 shape is (n_samples,) or
if axis is set to 1 shape is (n_features,).
If it is set to None, then samples are equally weighted.
.. versionadded:: 0.24
return_sum_weights : bool, default=False
If True, returns the sum of weights seen for each feature
if `axis=0` or each sample if `axis=1`.
.. versionadded:: 0.24
Returns
-------
means : ndarray of shape (n_features,), dtype=floating
Feature-wise means.
variances : ndarray of shape (n_features,), dtype=floating
Feature-wise variances.
sum_weights : ndarray of shape (n_features,), dtype=floating
Returned if `return_sum_weights` is `True`.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 3, 4, 4, 4])
>>> indices = np.array([0, 1, 2, 2])
>>> data = np.array([8, 1, 2, 5])
>>> scale = np.array([2, 3, 2])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 1, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
>>> sparsefuncs.mean_variance_axis(csr, axis=0)
(array([2. , 0.25, 1.75]), array([12. , 0.1875, 4.1875]))
"""
_raise_error_wrong_axis(axis)
if sp.issparse(X) and X.format == "csr":
if axis == 0:
return _csr_mean_var_axis0(
X, weights=weights, return_sum_weights=return_sum_weights
)
else:
return _csc_mean_var_axis0(
X.T, weights=weights, return_sum_weights=return_sum_weights
)
elif sp.issparse(X) and X.format == "csc":
if axis == 0:
return _csc_mean_var_axis0(
X, weights=weights, return_sum_weights=return_sum_weights
)
else:
return _csr_mean_var_axis0(
X.T, weights=weights, return_sum_weights=return_sum_weights
)
else:
_raise_typeerror(X)
|
Compute mean and variance along an axis on a CSR or CSC matrix.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Input data. It can be of CSR or CSC format.
axis : {0, 1}
Axis along which the axis should be computed.
weights : ndarray of shape (n_samples,) or (n_features,), default=None
If axis is set to 0 shape is (n_samples,) or
if axis is set to 1 shape is (n_features,).
If it is set to None, then samples are equally weighted.
.. versionadded:: 0.24
return_sum_weights : bool, default=False
If True, returns the sum of weights seen for each feature
if `axis=0` or each sample if `axis=1`.
.. versionadded:: 0.24
Returns
-------
means : ndarray of shape (n_features,), dtype=floating
Feature-wise means.
variances : ndarray of shape (n_features,), dtype=floating
Feature-wise variances.
sum_weights : ndarray of shape (n_features,), dtype=floating
Returned if `return_sum_weights` is `True`.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 3, 4, 4, 4])
>>> indices = np.array([0, 1, 2, 2])
>>> data = np.array([8, 1, 2, 5])
>>> scale = np.array([2, 3, 2])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 1, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
>>> sparsefuncs.mean_variance_axis(csr, axis=0)
(array([2. , 0.25, 1.75]), array([12. , 0.1875, 4.1875]))
|
python
|
sklearn/utils/sparsefuncs.py
| 101
|
[
"X",
"axis",
"weights",
"return_sum_weights"
] | false
| 10
| 7.6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
assignOwnedPartitions
|
private void assignOwnedPartitions() {
for (Map.Entry<String, List<TopicPartition>> consumerEntry : currentAssignment.entrySet()) {
String consumer = consumerEntry.getKey();
List<TopicPartition> ownedPartitions = consumerEntry.getValue().stream()
.filter(tp -> {
boolean mismatch = rackInfo.racksMismatch(consumer, tp);
if (mismatch) {
maybeRevokedPartitions.put(tp, consumer);
}
return !mismatch;
})
.sorted(Comparator.comparing(TopicPartition::partition).thenComparing(TopicPartition::topic))
.collect(Collectors.toList());
List<TopicPartition> consumerAssignment = assignment.get(consumer);
for (TopicPartition doublyClaimedPartition : partitionsWithMultiplePreviousOwners) {
if (ownedPartitions.contains(doublyClaimedPartition)) {
log.error("Found partition {} still claimed as owned by consumer {}, despite being claimed by multiple "
+ "consumers already in the same generation. Removing it from the ownedPartitions",
doublyClaimedPartition, consumer);
ownedPartitions.remove(doublyClaimedPartition);
}
}
if (ownedPartitions.size() < minQuota) {
// the expected assignment size is more than this consumer has now, so keep all the owned partitions
// and put this member into the unfilled member list
if (ownedPartitions.size() > 0) {
consumerAssignment.addAll(ownedPartitions);
assignedPartitions.addAll(ownedPartitions);
}
unfilledMembersWithUnderMinQuotaPartitions.add(consumer);
} else if (ownedPartitions.size() >= maxQuota && currentNumMembersWithOverMinQuotaPartitions < expectedNumMembersWithOverMinQuotaPartitions) {
// consumer owned the "maxQuota" of partitions or more, and we're still under the number of expected members
// with more than the minQuota partitions, so keep "maxQuota" of the owned partitions, and revoke the rest of the partitions
currentNumMembersWithOverMinQuotaPartitions++;
if (currentNumMembersWithOverMinQuotaPartitions == expectedNumMembersWithOverMinQuotaPartitions) {
unfilledMembersWithExactlyMinQuotaPartitions.clear();
}
List<TopicPartition> maxQuotaPartitions = ownedPartitions.subList(0, maxQuota);
consumerAssignment.addAll(maxQuotaPartitions);
assignedPartitions.addAll(maxQuotaPartitions);
for (TopicPartition topicPartition : ownedPartitions.subList(maxQuota, ownedPartitions.size())) {
maybeRevokedPartitions.put(topicPartition, consumer);
}
} else {
// consumer owned at least "minQuota" of partitions
// so keep "minQuota" of the owned partitions, and revoke the rest of the partitions
List<TopicPartition> minQuotaPartitions = ownedPartitions.subList(0, minQuota);
consumerAssignment.addAll(minQuotaPartitions);
assignedPartitions.addAll(minQuotaPartitions);
for (TopicPartition topicPartition : ownedPartitions.subList(minQuota, ownedPartitions.size())) {
maybeRevokedPartitions.put(topicPartition, consumer);
}
// this consumer is potential maxQuota candidate since we're still under the number of expected members
// with more than the minQuota partitions. Note, if the number of expected members with more than
// the minQuota partitions is 0, it means minQuota == maxQuota, and there are no potentially unfilled members.
if (currentNumMembersWithOverMinQuotaPartitions < expectedNumMembersWithOverMinQuotaPartitions) {
unfilledMembersWithExactlyMinQuotaPartitions.add(consumer);
}
}
}
}
|
Constructs a constrained assignment builder.
@param partitionsPerTopic The partitions for each subscribed topic
@param rackInfo Rack information for consumers and racks
@param consumerToOwnedPartitions Each consumer's previously owned and still-subscribed partitions
@param partitionsWithMultiplePreviousOwners The partitions being claimed in the previous assignment of multiple consumers
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java
| 666
|
[] |
void
| true
| 9
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
_translate_header
|
def _translate_header(self, sparsify_cols: bool, max_cols: int):
"""
Build each <tr> within table <head> as a list
Using the structure:
+----------------------------+---------------+---------------------------+
| index_blanks ... | column_name_0 | column_headers (level_0) |
1) | .. | .. | .. |
| index_blanks ... | column_name_n | column_headers (level_n) |
+----------------------------+---------------+---------------------------+
2) | index_names (level_0 to level_n) ... | column_blanks ... |
+----------------------------+---------------+---------------------------+
Parameters
----------
sparsify_cols : bool
Whether column_headers section will add colspan attributes (>1) to elements.
max_cols : int
Maximum number of columns to render. If exceeded will contain `...` filler.
Returns
-------
head : list
The associated HTML elements needed for template rendering.
"""
# for sparsifying a MultiIndex
col_lengths = _get_level_lengths(
self.columns, sparsify_cols, max_cols, self.hidden_columns
)
clabels = self.data.columns.tolist()
if self.data.columns.nlevels == 1:
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels, strict=True))
head = []
# 1) column headers
for r, hide in enumerate(self.hide_columns_):
if hide or not clabels:
continue
header_row = self._generate_col_header_row(
(r, clabels), max_cols, col_lengths
)
head.append(header_row)
# 2) index names
if (
self.data.index.names
and com.any_not_none(*self.data.index.names)
and not all(self.hide_index_)
and not self.hide_index_names
):
index_names_row = self._generate_index_names_row(
clabels, max_cols, col_lengths
)
head.append(index_names_row)
return head
|
Build each <tr> within table <head> as a list
Using the structure:
+----------------------------+---------------+---------------------------+
| index_blanks ... | column_name_0 | column_headers (level_0) |
1) | .. | .. | .. |
| index_blanks ... | column_name_n | column_headers (level_n) |
+----------------------------+---------------+---------------------------+
2) | index_names (level_0 to level_n) ... | column_blanks ... |
+----------------------------+---------------+---------------------------+
Parameters
----------
sparsify_cols : bool
Whether column_headers section will add colspan attributes (>1) to elements.
max_cols : int
Maximum number of columns to render. If exceeded will contain `...` filler.
Returns
-------
head : list
The associated HTML elements needed for template rendering.
|
python
|
pandas/io/formats/style_render.py
| 399
|
[
"self",
"sparsify_cols",
"max_cols"
] | true
| 9
| 6.32
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
_bisect_failing_config_helper
|
def _bisect_failing_config_helper(
self, results: ResultType, failing_config: list[tuple[str, Any]]
) -> Optional[ConfigType]:
"""
Bisect a failing configuration to find minimal set of configs that cause failure.
Splits it into halves, then fourths, then tries dropping configs one-by-one.
"""
print(f"bisecting config: {failing_config}")
if not failing_config:
return None
def test(x: list[tuple[str, Any]]) -> Status:
d = dict(x)
result = self.test_config(results, d)
return result
if len(failing_config) <= 1:
return dict(failing_config) if test(failing_config).failing() else None
random.shuffle(failing_config)
mid = len(failing_config) // 2
first_half = failing_config[:mid]
second_half = failing_config[mid:]
if test(first_half).failing():
return self._bisect_failing_config_helper(results, first_half)
if test(second_half).failing():
return self._bisect_failing_config_helper(results, second_half)
if len(failing_config) >= 8:
low = len(failing_config) // 4
high = mid + low
quart1 = failing_config[low:]
if test(quart1).failing():
return self._bisect_failing_config_helper(results, quart1)
quart2 = failing_config[:low] + second_half
if test(quart2).failing():
return self._bisect_failing_config_helper(results, quart2)
quart3 = first_half + failing_config[:high]
if test(quart3).failing():
return self._bisect_failing_config_helper(results, quart3)
quart4 = failing_config[high:]
if test(quart4).failing():
return self._bisect_failing_config_helper(results, quart4)
# try dropping one value at a time
for i in range(len(failing_config)):
new_list = [x for j, x in enumerate(failing_config) if j != i]
if test(new_list).failing():
return self._bisect_failing_config_helper(results, new_list)
# we have the minimal set
return dict(failing_config)
|
Bisect a failing configuration to find minimal set of configs that cause failure.
Splits it into halves, then fourths, then tries dropping configs one-by-one.
|
language: python | file_path: torch/_inductor/fuzzer.py | line_number: 849
parameters: ["self", "results", "failing_config"] | return_type: Optional[ConfigType] | has_type_hints: true
complexity: 13 | quality_score: 6 | repo_name: pytorch/pytorch | repo_stars: 96,034 | docstring_style: unknown | is_async: false
|
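The bisection above is a generic delta-debugging pattern: test each half of the failing set, recurse into whichever half still fails, and finish by dropping entries one at a time. A minimal, self-contained sketch of that pattern, assuming only a caller-supplied is_failing predicate (minimize_failing_set and is_failing are illustrative names, not pytorch's API):

import random
from typing import Any, Callable, Optional


def minimize_failing_set(
    items: list[tuple[str, Any]],
    is_failing: Callable[[list[tuple[str, Any]]], bool],
) -> Optional[dict[str, Any]]:
    # Return a (locally) minimal subset of `items` that still fails, or None.
    if not items:
        return None
    if len(items) == 1:
        return dict(items) if is_failing(items) else None
    random.shuffle(items)
    mid = len(items) // 2
    for half in (items[:mid], items[mid:]):
        if is_failing(half):
            return minimize_failing_set(half, is_failing)
    # Neither half fails on its own: try dropping one entry at a time.
    for i in range(len(items)):
        rest = items[:i] + items[i + 1:]
        if is_failing(rest):
            return minimize_failing_set(rest, is_failing)
    return dict(items)  # nothing can be removed; this set is minimal


# Toy failure that needs both "a" and "c" to be present.
print(minimize_failing_set(
    [("a", 1), ("b", 2), ("c", 3), ("d", 4)],
    lambda subset: {"a", "c"} <= {k for k, _ in subset},
))  # {'a': 1, 'c': 3} (key order may vary)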
valueIterator
|
@Override
UnmodifiableIterator<V> valueIterator() {
return new UnmodifiableIterator<V>() {
final Iterator<? extends ImmutableCollection<V>> valueCollectionItr = map.values().iterator();
Iterator<V> valueItr = emptyIterator();
@Override
public boolean hasNext() {
return valueItr.hasNext() || valueCollectionItr.hasNext();
}
@Override
public V next() {
if (!valueItr.hasNext()) {
valueItr = valueCollectionItr.next().iterator();
}
return valueItr.next();
}
};
}
|
Returns an immutable collection of the values in this multimap. Its iterator traverses the
values for the first key, the values for the second key, and so on.
|
language: java | file_path: android/guava/src/com/google/common/collect/ImmutableMultimap.java | line_number: 770
parameters: [] | has_type_hints: true
complexity: 3 | quality_score: 6.88 | repo_name: google/guava | repo_stars: 51,352 | docstring_style: javadoc | is_async: false
|
|
of
|
static SslBundleKey of(@Nullable String password, @Nullable String alias) {
return new SslBundleKey() {
@Override
public @Nullable String getPassword() {
return password;
}
@Override
public @Nullable String getAlias() {
return alias;
}
@Override
public String toString() {
ToStringCreator creator = new ToStringCreator(this);
creator.append("alias", alias);
creator.append("password", (password != null) ? "******" : null);
return creator.toString();
}
};
}
|
Factory method to create a new {@link SslBundleKey} instance.
@param password the password used to access the key
@param alias the alias of the key
@return a new {@link SslBundleKey} instance
|
language: java | file_path: core/spring-boot/src/main/java/org/springframework/boot/ssl/SslBundleKey.java | line_number: 87
parameters: ["password", "alias"] | return_type: SslBundleKey | has_type_hints: true
complexity: 2 | quality_score: 7.92 | repo_name: spring-projects/spring-boot | repo_stars: 79,428 | docstring_style: javadoc | is_async: false
|
chain_as_binary_tree
|
def chain_as_binary_tree(*tasks):
# type: (BaseOperator) -> None
"""
Chain tasks as a binary tree where task i is child of task (i - 1) // 2.
Example:
t0 -> t1 -> t3 -> t7
| \
| -> t4 -> t8
|
-> t2 -> t5 -> t9
\
-> t6
"""
for i in range(1, len(tasks)):
tasks[i].set_upstream(tasks[(i - 1) // 2])
|
Chain tasks as a binary tree where task i is child of task (i - 1) // 2.
Example:
t0 -> t1 -> t3 -> t7
| \
| -> t4 -> t8
|
-> t2 -> t5 -> t9
\
-> t6
|
language: python | file_path: performance/src/performance_dags/performance_dag/performance_dag.py | line_number: 130
parameters: [] | has_type_hints: false
complexity: 2 | quality_score: 7.12 | repo_name: apache/airflow | repo_stars: 43,597 | docstring_style: unknown | is_async: false
|
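The only machinery behind the tree above is the array-heap index relation parent(i) = (i - 1) // 2. A runnable sketch with a stand-in Task class (a hypothetical stub that merely records upstream links, not Airflow's BaseOperator):

class Task:
    # Minimal stand-in for an operator that tracks its upstream tasks.
    def __init__(self, name: str) -> None:
        self.name = name
        self.upstream: list["Task"] = []

    def set_upstream(self, other: "Task") -> None:
        self.upstream.append(other)


def chain_as_binary_tree(*tasks: Task) -> None:
    # Task i becomes a child of task (i - 1) // 2, exactly as in the DAG sketch above.
    for i in range(1, len(tasks)):
        tasks[i].set_upstream(tasks[(i - 1) // 2])


tasks = [Task(f"t{i}") for i in range(10)]
chain_as_binary_tree(*tasks)
for t in tasks[1:]:
    print(t.name, "<-", t.upstream[0].name)  # t1 <- t0, t2 <- t0, t3 <- t1, ...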
|
stream
|
public Stream<O> stream() {
return stream;
}
|
Converts the FailableStream into an equivalent stream.
@return A stream, which will return the same elements, which this FailableStream would return.
|
language: java | file_path: src/main/java/org/apache/commons/lang3/Streams.java | line_number: 450
parameters: [] | has_type_hints: true
complexity: 1 | quality_score: 6.32 | repo_name: apache/commons-lang | repo_stars: 2,896 | docstring_style: javadoc | is_async: false
|
|
get
|
@Override
public ImmutableList<V> get(K key) {
// This cast is safe as its type is known in constructor.
ImmutableList<V> list = (ImmutableList<V>) map.get(key);
return (list == null) ? ImmutableList.of() : list;
}
|
Returns an immutable list of the values for the given key. If no mappings in the multimap have
the provided key, an empty immutable list is returned. The values are in the same order as the
parameters used to build this multimap.
|
language: java | file_path: android/guava/src/com/google/common/collect/ImmutableListMultimap.java | line_number: 456
parameters: ["key"] | has_type_hints: true
complexity: 2 | quality_score: 6 | repo_name: google/guava | repo_stars: 51,352 | docstring_style: javadoc | is_async: false
|
|
_extend_region
|
def _extend_region(steep_point, xward_point, start, min_samples):
"""Extend the area until it's maximal.
It's the same function for both upward and downward reagions, depending on
the given input parameters. Assuming:
- steep_{upward/downward}: bool array indicating whether a point is a
steep {upward/downward};
- upward/downward: bool array indicating whether a point is
upward/downward;
To extend an upward reagion, ``steep_point=steep_upward`` and
``xward_point=downward`` are expected, and to extend a downward region,
``steep_point=steep_downward`` and ``xward_point=upward``.
Parameters
----------
steep_point : ndarray of shape (n_samples,), dtype=bool
True if the point is steep downward (upward).
xward_point : ndarray of shape (n_samples,), dtype=bool
True if the point is an upward (respectively downward) point.
start : int
The start of the xward region.
min_samples : int
The same as the min_samples given to OPTICS. Up and down steep
regions can't have more then ``min_samples`` consecutive non-steep
points.
Returns
-------
index : int
The current index iterating over all the samples, i.e. where we are up
to in our search.
end : int
The end of the region, which can be behind the index. The region
includes the ``end`` index.
"""
n_samples = len(steep_point)
non_xward_points = 0
index = start
end = start
# find a maximal area
while index < n_samples:
if steep_point[index]:
non_xward_points = 0
end = index
elif not xward_point[index]:
# it's not a steep point, but still goes up.
non_xward_points += 1
# region should include no more than min_samples consecutive
# non steep xward points.
if non_xward_points > min_samples:
break
else:
return end
index += 1
return end
|
Extend the area until it's maximal.
It's the same function for both upward and downward reagions, depending on
the given input parameters. Assuming:
- steep_{upward/downward}: bool array indicating whether a point is a
steep {upward/downward};
- upward/downward: bool array indicating whether a point is
upward/downward;
To extend an upward reagion, ``steep_point=steep_upward`` and
``xward_point=downward`` are expected, and to extend a downward region,
``steep_point=steep_downward`` and ``xward_point=upward``.
Parameters
----------
steep_point : ndarray of shape (n_samples,), dtype=bool
True if the point is steep downward (upward).
xward_point : ndarray of shape (n_samples,), dtype=bool
True if the point is an upward (respectively downward) point.
start : int
The start of the xward region.
min_samples : int
The same as the min_samples given to OPTICS. Up and down steep
regions can't have more then ``min_samples`` consecutive non-steep
points.
Returns
-------
index : int
The current index iterating over all the samples, i.e. where we are up
to in our search.
end : int
The end of the region, which can be behind the index. The region
includes the ``end`` index.
|
language: python | file_path: sklearn/cluster/_optics.py | line_number: 922
parameters: ["steep_point", "xward_point", "start", "min_samples"] | has_type_hints: false
complexity: 6 | quality_score: 6 | repo_name: scikit-learn/scikit-learn | repo_stars: 64,340 | docstring_style: numpy | is_async: false
|
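To make the return value concrete, the same loop can be run on a toy input: with min_samples=2 the walk tolerates up to two consecutive flat points, moves `end` forward to the last steep index, and stops at the first opposite-direction point. The arrays below are made up for illustration; the function body restates the private helper above so the snippet stands alone:

import numpy as np


def extend_region(steep_point, xward_point, start, min_samples):
    # Same loop as the helper above, restated for a standalone demo.
    n_samples = len(steep_point)
    non_xward_points = 0
    index = end = start
    while index < n_samples:
        if steep_point[index]:
            non_xward_points = 0
            end = index
        elif not xward_point[index]:
            non_xward_points += 1
            if non_xward_points > min_samples:
                break
        else:
            return end
        index += 1
    return end


steep_upward = np.array([1, 1, 0, 1, 0, 0, 0, 1], dtype=bool)
downward = np.array([0, 0, 0, 0, 0, 1, 1, 0], dtype=bool)
# Indices 0-3 extend the region (index 2 is a tolerated flat point); index 5 is a
# downward point, so the walk stops and the region ends at index 3.
print(extend_region(steep_upward, downward, start=0, min_samples=2))  # 3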
|
_cached_transform
|
def _cached_transform(
sub_pipeline, *, cache, param_name, param_value, transform_params
):
"""Transform a parameter value using a sub-pipeline and cache the result.
Parameters
----------
sub_pipeline : Pipeline
The sub-pipeline to be used for transformation.
cache : dict
The cache dictionary to store the transformed values.
param_name : str
The name of the parameter to be transformed.
param_value : object
The value of the parameter to be transformed.
transform_params : dict
The metadata to be used for transformation. This passed to the
`transform` method of the sub-pipeline.
Returns
-------
transformed_value : object
The transformed value of the parameter.
"""
if param_name not in cache:
# If the parameter is a tuple, transform each element of the
# tuple. This is needed to support the pattern present in
# `lightgbm` and `xgboost` where users can pass multiple
# validation sets.
if isinstance(param_value, tuple):
cache[param_name] = tuple(
sub_pipeline.transform(element, **transform_params)
for element in param_value
)
else:
cache[param_name] = sub_pipeline.transform(param_value, **transform_params)
return cache[param_name]
|
Transform a parameter value using a sub-pipeline and cache the result.
Parameters
----------
sub_pipeline : Pipeline
The sub-pipeline to be used for transformation.
cache : dict
The cache dictionary to store the transformed values.
param_name : str
The name of the parameter to be transformed.
param_value : object
The value of the parameter to be transformed.
transform_params : dict
The metadata to be used for transformation. This passed to the
`transform` method of the sub-pipeline.
Returns
-------
transformed_value : object
The transformed value of the parameter.
|
language: python | file_path: sklearn/pipeline.py | line_number: 51
parameters: ["sub_pipeline", "cache", "param_name", "param_value", "transform_params"] | has_type_hints: false
complexity: 4 | quality_score: 6.08 | repo_name: scikit-learn/scikit-learn | repo_stars: 64,340 | docstring_style: numpy | is_async: false
|
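The helper is plain memoization keyed by parameter name, with the tuple branch ensuring that several validation sets are each pushed through the preprocessing only once. A self-contained sketch of the same pattern (the Scaler class is a made-up stand-in for a fitted sub-pipeline, and metadata routing is omitted; this is not scikit-learn's API):

class Scaler:
    # Toy stand-in for a fitted sub-pipeline; counts how often transform runs.
    def __init__(self) -> None:
        self.calls = 0

    def transform(self, X):
        self.calls += 1
        return [x * 2 for x in X]


def cached_transform(sub_pipeline, *, cache, param_name, param_value):
    if param_name not in cache:
        if isinstance(param_value, tuple):
            cache[param_name] = tuple(sub_pipeline.transform(v) for v in param_value)
        else:
            cache[param_name] = sub_pipeline.transform(param_value)
    return cache[param_name]


scaler, cache = Scaler(), {}
cached_transform(scaler, cache=cache, param_name="eval_set", param_value=([1, 2], [3, 4]))
cached_transform(scaler, cache=cache, param_name="eval_set", param_value=([1, 2], [3, 4]))
print(cache["eval_set"])  # ([2, 4], [6, 8])
print(scaler.calls)       # 2 -- one transform per tuple element, nothing recomputed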
|
fill
|
public static float[] fill(final float[] a, final float val) {
if (a != null) {
Arrays.fill(a, val);
}
return a;
}
|
Fills and returns the given array, assigning the given {@code float} value to each element of the array.
@param a the array to be filled (may be null).
@param val the value to be stored in all elements of the array.
@return the given array.
@see Arrays#fill(float[],float)
|
language: java | file_path: src/main/java/org/apache/commons/lang3/ArrayFill.java | line_number: 101
parameters: ["a", "val"] | has_type_hints: true
complexity: 2 | quality_score: 8.08 | repo_name: apache/commons-lang | repo_stars: 2,896 | docstring_style: javadoc | is_async: false
|
|
_default
|
def _default(self, name: str, args: tuple[Any, ...], kwargs: dict[str, Any]) -> Any:
"""
Default implementation for all ops. Override in a subclass to
provide generic op behavior.
Args:
name: name of the op, see OpHandler.{name}
args: positional args passed to the op
kwargs: keyword args passed to the op
Returns:
return value of the op
"""
raise NotImplementedError
|
Default implementation for all ops. Override in a subclass to
provide generic op behavior.
Args:
name: name of the op, see OpHandler.{name}
args: positional args passed to the op
kwargs: keyword args passed to the op
Returns:
return value of the op
|
language: python | file_path: torch/_inductor/ops_handler.py | line_number: 751
parameters: ["self", "name", "args", "kwargs"] | return_type: Any | has_type_hints: true
complexity: 1 | quality_score: 6.88 | repo_name: pytorch/pytorch | repo_stars: 96,034 | docstring_style: google | is_async: false
|
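The _default hook is a catch-all dispatch pattern: every op method funnels its name and arguments into a single overridable method, so one subclass override can supply generic behavior for all ops. A hedged sketch of that shape (BaseHandler and TracingHandler are hypothetical names, not the actual OpsHandler interface):

from typing import Any


class BaseHandler:
    # Every op forwards to _default so a subclass can intercept all ops in one place.
    def _default(self, name: str, args: tuple[Any, ...], kwargs: dict[str, Any]) -> Any:
        raise NotImplementedError

    def add(self, a, b):
        return self._default("add", (a, b), {})

    def mul(self, a, b):
        return self._default("mul", (a, b), {})


class TracingHandler(BaseHandler):
    # Generic behavior for every op: record the call and return a symbolic string.
    def __init__(self) -> None:
        self.trace: list[str] = []

    def _default(self, name, args, kwargs):
        call = f"{name}({', '.join(map(str, args))})"
        self.trace.append(call)
        return call


h = TracingHandler()
print(h.mul(h.add("x", "y"), 2))  # mul(add(x, y), 2)
print(h.trace)                    # ['add(x, y)', 'mul(add(x, y), 2)']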
containsNamedArgument
|
public boolean containsNamedArgument() {
for (ValueHolder valueHolder : this.indexedArgumentValues.values()) {
if (valueHolder.getName() != null) {
return true;
}
}
for (ValueHolder valueHolder : this.genericArgumentValues) {
if (valueHolder.getName() != null) {
return true;
}
}
return false;
}
|
Determine whether at least one argument value refers to a name.
@since 6.0.3
@see ValueHolder#getName()
|
language: java | file_path: spring-beans/src/main/java/org/springframework/beans/factory/config/ConstructorArgumentValues.java | line_number: 361
parameters: [] | has_type_hints: true
complexity: 3 | quality_score: 6.72 | repo_name: spring-projects/spring-framework | repo_stars: 59,386 | docstring_style: javadoc | is_async: false
|
|
format
|
@Override
public <B extends Appendable> B format(final Calendar calendar, final B buf) {
// Don't edit the given Calendar, clone it only if needed.
Calendar actual = calendar;
if (!calendar.getTimeZone().equals(timeZone)) {
actual = (Calendar) calendar.clone();
actual.setTimeZone(timeZone);
}
return applyRules(actual, buf);
}
|
Compares two objects for equality.
@param obj the object to compare to.
@return {@code true} if equal.
|
language: java | file_path: src/main/java/org/apache/commons/lang3/time/FastDatePrinter.java | line_number: 1,129
parameters: ["calendar", "buf"] | return_type: B | has_type_hints: true
complexity: 2 | quality_score: 8.4 | repo_name: apache/commons-lang | repo_stars: 2,896 | docstring_style: javadoc | is_async: false
|
wrap
|
public static String wrap(final String str, final char wrapWith) {
if (isEmpty(str) || wrapWith == CharUtils.NUL) {
return str;
}
return wrapWith + str + wrapWith;
}
|
Wraps a string with a char.
<pre>
StringUtils.wrap(null, *) = null
StringUtils.wrap("", *) = ""
StringUtils.wrap("ab", '\0') = "ab"
StringUtils.wrap("ab", 'x') = "xabx"
StringUtils.wrap("ab", '\'') = "'ab'"
StringUtils.wrap("\"ab\"", '\"') = "\"\"ab\"\""
</pre>
@param str the string to be wrapped, may be {@code null}.
@param wrapWith the char that will wrap {@code str}.
@return the wrapped string, or {@code null} if {@code str == null}.
@since 3.4
|
language: java | file_path: src/main/java/org/apache/commons/lang3/StringUtils.java | line_number: 9,070
parameters: ["str", "wrapWith"] | return_type: String | has_type_hints: true
complexity: 3 | quality_score: 7.76 | repo_name: apache/commons-lang | repo_stars: 2,896 | docstring_style: javadoc | is_async: false
|
createDecompressInterceptor
|
function createDecompressInterceptor (options = {}) {
// Emit experimental warning only once
if (!warningEmitted) {
process.emitWarning(
'DecompressInterceptor is experimental and subject to change',
'ExperimentalWarning'
)
warningEmitted = true
}
return (dispatch) => {
return (opts, handler) => {
const decompressHandler = new DecompressHandler(handler, options)
return dispatch(opts, decompressHandler)
}
}
}
|
Creates a decompression interceptor for HTTP responses
@param {DecompressHandlerOptions} [options] - Options for the interceptor
@returns {Function} - Interceptor function
|
language: javascript | file_path: deps/undici/src/lib/interceptor/decompress.js | line_number: 235
parameters: [] | has_type_hints: false
complexity: 2 | quality_score: 6 | repo_name: nodejs/node | repo_stars: 114,839 | docstring_style: jsdoc | is_async: false
|
|
shouldRecord
|
public boolean shouldRecord() {
return this.recordingLevel.shouldRecord(config.recordLevel().id);
}
|
@return true if the sensor's record level indicates that the metric will be recorded, false otherwise
|
language: java | file_path: clients/src/main/java/org/apache/kafka/common/metrics/Sensor.java | line_number: 176
parameters: [] | has_type_hints: true
complexity: 1 | quality_score: 6.32 | repo_name: apache/kafka | repo_stars: 31,560 | docstring_style: javadoc | is_async: false
|