| function_name (string, 1-57 chars) | function_code (string, 20-4.99k chars) | documentation (string, 50-2k chars) | language (string, 5 classes) | file_path (string, 8-166 chars) | line_number (int32, 4-16.7k) | parameters (list, 0-20 items) | return_type (string, 0-131 chars) | has_type_hints (bool, 2 classes) | complexity (int32, 1-51) | quality_score (float32, 6-9.68) | repo_name (string, 34 classes) | repo_stars (int32, 2.9k-242k) | docstring_style (string, 7 classes) | is_async (bool, 2 classes) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
load
|
List<PropertySource<?>> load(String name, Resource resource) throws IOException;
|
Load the resource into one or more property sources. Implementations may either
return a list containing a single source, or in the case of a multi-document format
such as yaml a source for each document in the resource.
@param name the root name of the property source. If multiple documents are loaded
an additional suffix should be added to the name for each source loaded.
@param resource the resource to load
@return a list of property sources
@throws IOException if the source cannot be loaded
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/env/PropertySourceLoader.java
| 52
|
[
"name",
"resource"
] | true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
asfreq
|
def asfreq(self, freq=None, how: str = "E") -> Self:
"""
Convert the {klass} to the specified frequency `freq`.
Equivalent to applying :meth:`pandas.Period.asfreq` with the given arguments
to each :class:`~pandas.Period` in this {klass}.
Parameters
----------
freq : str
A frequency.
how : str {{'E', 'S'}}, default 'E'
Whether the elements should be aligned to the end
or start within the period.
* 'E', 'END', or 'FINISH' for end,
* 'S', 'START', or 'BEGIN' for start.
January 31st ('END') vs. January 1st ('START') for example.
Returns
-------
{klass}
The transformed {klass} with the new frequency.
See Also
--------
{other}.asfreq: Convert each Period in a {other_name} to the given frequency.
Period.asfreq : Convert a :class:`~pandas.Period` object to the given frequency.
Examples
--------
>>> pidx = pd.period_range("2010-01-01", "2015-01-01", freq="Y")
>>> pidx
PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'],
dtype='period[Y-DEC]')
>>> pidx.asfreq("M")
PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12',
'2015-12'], dtype='period[M]')
>>> pidx.asfreq("M", how="S")
PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01',
'2015-01'], dtype='period[M]')
"""
how = libperiod.validate_end_alias(how)
if isinstance(freq, BaseOffset) and hasattr(freq, "_period_dtype_code"):
freq = PeriodDtype(freq)._freqstr
freq = Period._maybe_convert_freq(freq)
base1 = self._dtype._dtype_code
base2 = freq._period_dtype_code
asi8 = self.asi8
# self.freq.n can't be negative or 0
end = how == "E"
if end:
ordinal = asi8 + self.dtype._n - 1
else:
ordinal = asi8
new_data = period_asfreq_arr(ordinal, base1, base2, end)
if self._hasna:
new_data[self._isnan] = iNaT
dtype = PeriodDtype(freq)
return type(self)(new_data, dtype=dtype)
|
Convert the {klass} to the specified frequency `freq`.
Equivalent to applying :meth:`pandas.Period.asfreq` with the given arguments
to each :class:`~pandas.Period` in this {klass}.
Parameters
----------
freq : str
A frequency.
how : str {{'E', 'S'}}, default 'E'
Whether the elements should be aligned to the end
or start within the period.
* 'E', 'END', or 'FINISH' for end,
* 'S', 'START', or 'BEGIN' for start.
January 31st ('END') vs. January 1st ('START') for example.
Returns
-------
{klass}
The transformed {klass} with the new frequency.
See Also
--------
{other}.asfreq: Convert each Period in a {other_name} to the given frequency.
Period.asfreq : Convert a :class:`~pandas.Period` object to the given frequency.
Examples
--------
>>> pidx = pd.period_range("2010-01-01", "2015-01-01", freq="Y")
>>> pidx
PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'],
dtype='period[Y-DEC]')
>>> pidx.asfreq("M")
PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12',
'2015-12'], dtype='period[M]')
>>> pidx.asfreq("M", how="S")
PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01',
'2015-01'], dtype='period[M]')
|
python
|
pandas/core/arrays/period.py
| 866
|
[
"self",
"freq",
"how"
] |
Self
| true
| 6
| 8.08
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
iat
|
def iat(self) -> _iAtIndexer:
"""
Access a single value for a row/column pair by integer position.
Similar to ``iloc``, in that both provide integer-based lookups. Use
``iat`` if you only need to get or set a single value in a DataFrame
or Series.
Raises
------
IndexError
When integer position is out of bounds.
See Also
--------
DataFrame.at : Access a single value for a row/column label pair.
DataFrame.loc : Access a group of rows and columns by label(s).
DataFrame.iloc : Access a group of rows and columns by integer position(s).
Examples
--------
>>> df = pd.DataFrame(
... [[0, 2, 3], [0, 4, 1], [10, 20, 30]], columns=["A", "B", "C"]
... )
>>> df
A B C
0 0 2 3
1 0 4 1
2 10 20 30
Get value at specified row/column pair
>>> df.iat[1, 2]
np.int64(1)
Set value at specified row/column pair
>>> df.iat[1, 2] = 10
>>> df.iat[1, 2]
np.int64(10)
Get value within a series
>>> df.loc[0].iat[1]
np.int64(2)
"""
return _iAtIndexer("iat", self)
|
Access a single value for a row/column pair by integer position.
Similar to ``iloc``, in that both provide integer-based lookups. Use
``iat`` if you only need to get or set a single value in a DataFrame
or Series.
Raises
------
IndexError
When integer position is out of bounds.
See Also
--------
DataFrame.at : Access a single value for a row/column label pair.
DataFrame.loc : Access a group of rows and columns by label(s).
DataFrame.iloc : Access a group of rows and columns by integer position(s).
Examples
--------
>>> df = pd.DataFrame(
... [[0, 2, 3], [0, 4, 1], [10, 20, 30]], columns=["A", "B", "C"]
... )
>>> df
A B C
0 0 2 3
1 0 4 1
2 10 20 30
Get value at specified row/column pair
>>> df.iat[1, 2]
np.int64(1)
Set value at specified row/column pair
>>> df.iat[1, 2] = 10
>>> df.iat[1, 2]
np.int64(10)
Get value within a series
>>> df.loc[0].iat[1]
np.int64(2)
|
python
|
pandas/core/indexing.py
| 704
|
[
"self"
] |
_iAtIndexer
| true
| 1
| 6.08
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
_get_dtype
|
def _get_dtype(arr_or_dtype) -> DtypeObj:
"""
Get the dtype instance associated with an array
or dtype object.
Parameters
----------
arr_or_dtype : array-like or dtype
The array-like or dtype object whose dtype we want to extract.
Returns
-------
obj_dtype : The extracted dtype instance from the
passed-in array or dtype object.
Raises
------
TypeError : The passed in object is None.
"""
if arr_or_dtype is None:
raise TypeError("Cannot deduce dtype from null object")
# fastpath
if isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype
elif isinstance(arr_or_dtype, type):
return np.dtype(arr_or_dtype)
# if we have an array-like
elif hasattr(arr_or_dtype, "dtype"):
arr_or_dtype = arr_or_dtype.dtype
return pandas_dtype(arr_or_dtype)
|
Get the dtype instance associated with an array
or dtype object.
Parameters
----------
arr_or_dtype : array-like or dtype
The array-like or dtype object whose dtype we want to extract.
Returns
-------
obj_dtype : The extracted dtype instance from the
passed-in array or dtype object.
Raises
------
TypeError : The passed in object is None.
|
python
|
pandas/core/dtypes/common.py
| 1,624
|
[
"arr_or_dtype"
] |
DtypeObj
| true
| 5
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
is_nonnegative_int
|
def is_nonnegative_int(value: object) -> None:
"""
Verify that value is None or a nonnegative int.
Parameters
----------
value : None or int
The `value` to be checked.
Raises
------
ValueError
When the value is neither None nor a nonnegative integer
"""
if value is None:
return
elif isinstance(value, int):
if value >= 0:
return
msg = "Value must be a nonnegative integer or None"
raise ValueError(msg)
|
Verify that value is None or a nonnegative int.
Parameters
----------
value : None or int
The `value` to be checked.
Raises
------
ValueError
When the value is neither None nor a nonnegative integer
|
python
|
pandas/_config/config.py
| 897
|
[
"value"
] |
None
| true
| 4
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
getFormat
|
private Format getFormat(final String desc) {
if (registry != null) {
String name = desc;
String args = null;
final int i = desc.indexOf(START_FMT);
if (i > 0) {
name = desc.substring(0, i).trim();
args = desc.substring(i + 1).trim();
}
final FormatFactory factory = registry.get(name);
if (factory != null) {
return factory.getFormat(name, args, getLocale());
}
}
return null;
}
|
Gets a custom format from a format description.
@param desc String
@return Format
|
java
|
src/main/java/org/apache/commons/lang3/text/ExtendedMessageFormat.java
| 278
|
[
"desc"
] |
Format
| true
| 4
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
sort
|
public static void sort(List<?> source, SortDefinition sortDefinition) throws BeansException {
if (StringUtils.hasText(sortDefinition.getProperty())) {
source.sort(new PropertyComparator<>(sortDefinition));
}
}
|
Sort the given List according to the given sort definition.
<p>Note: Contained objects have to provide the given property
in the form of a bean property, i.e. a getXXX method.
@param source the input List
@param sortDefinition the parameters to sort by
@throws java.lang.IllegalArgumentException in case of a missing propertyName
|
java
|
spring-beans/src/main/java/org/springframework/beans/support/PropertyComparator.java
| 135
|
[
"source",
"sortDefinition"
] |
void
| true
| 2
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
ts_compile
|
def ts_compile(fx_g: fx.GraphModule, inps) -> Callable:
"""
Compiles the :attr:`fx_g` with Torchscript compiler.
.. warning::
This API is experimental and likely to change.
Args:
fx_g(fx.GraphModule): The input Fx graph module to be compiled.
Returns:
Torch scripted model.
"""
with _disable_jit_autocast():
strip_overloads(fx_g)
for node in fx_g.graph.find_nodes(
op="call_function", target=torch.ops.aten._to_copy
):
if len(node.args) == 1 and len(node.kwargs) == 1 and "dtype" in node.kwargs:
node.target = torch.ops.aten.to
for node in fx_g.graph.nodes:
new_kwargs = {}
for k, v in node.kwargs.items():
if isinstance(v, torch.device):
v = v.type
new_kwargs[k] = v
node.kwargs = new_kwargs
fx_g.graph.lint()
fx_g.recompile()
f = torch.jit.script(fx_g)
torch._C._jit_pass_remove_mutation(f.graph)
f = torch.jit.freeze(f.eval())
f = torch.jit.optimize_for_inference(f)
if not any(isinstance(t, torch._subclasses.FakeTensor) for t in inps):
f(*inps)
return f
|
Compiles the :attr:`fx_g` with Torchscript compiler.
.. warning::
This API is experimental and likely to change.
Args:
fx_g(fx.GraphModule): The input Fx graph module to be compiled.
Returns:
Torch scripted model.
|
python
|
torch/_functorch/compilers.py
| 56
|
[
"fx_g",
"inps"
] |
Callable
| true
| 9
| 7.6
|
pytorch/pytorch
| 96,034
|
google
| false
|
values
|
@Override
public Collection<V> values() {
return (valuesView == null) ? valuesView = createValues() : valuesView;
}
|
Updates the index an iterator is pointing to after a call to remove: returns the index of the
entry that should be looked at after a removal on indexRemoved, with indexBeforeRemove as the
index that *was* the next entry that would be looked at.
|
java
|
android/guava/src/com/google/common/collect/CompactHashMap.java
| 905
|
[] | true
| 2
| 6.32
|
google/guava
| 51,352
|
javadoc
| false
|
|
ifNotEmpty
|
public static void ifNotEmpty(@Nullable Map<String, Object> source,
@Nullable Consumer<DefaultPropertiesPropertySource> action) {
if (!CollectionUtils.isEmpty(source) && action != null) {
action.accept(new DefaultPropertiesPropertySource(source));
}
}
|
Create a new {@link DefaultPropertiesPropertySource} instance if the provided
source is not empty.
@param source the {@code Map} source
@param action the action used to consume the
{@link DefaultPropertiesPropertySource}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/env/DefaultPropertiesPropertySource.java
| 72
|
[
"source",
"action"
] |
void
| true
| 3
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
addBucketToResult
|
private void addBucketToResult(long index, long count, boolean isPositive) {
if (resultAlreadyReturned) {
// we cannot modify the result anymore, create a new one
reallocateResultWithCapacity(result.getCapacity(), true);
}
assert resultAlreadyReturned == false;
boolean sufficientCapacity = result.tryAddBucket(index, count, isPositive);
if (sufficientCapacity == false) {
int newCapacity = Math.max(result.getCapacity() * 2, DEFAULT_ESTIMATED_BUCKET_COUNT);
reallocateResultWithCapacity(newCapacity, true);
boolean bucketAdded = result.tryAddBucket(index, count, isPositive);
assert bucketAdded : "Output histogram should have enough capacity";
}
}
|
Sets the given bucket of the negative buckets. If the bucket already exists, it will be replaced.
Buckets may be set in arbitrary order. However, for best performance and minimal allocations,
buckets should be set in order of increasing index and all negative buckets should be set before positive buckets.
@param index the index of the bucket
@param count the count of the bucket, must be at least 1
@return the builder
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramBuilder.java
| 206
|
[
"index",
"count",
"isPositive"
] |
void
| true
| 3
| 8.4
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
_get_context_fn_cache_hash
|
def _get_context_fn_cache_hash(context_fn):
"""
Extract a cache hash from a context_fn used for selective activation checkpointing (SAC).
The context_fn determines which ops are saved vs recomputed in the SAC region.
Since context_fn can be an arbitrary Python function, we cannot reliably pickle
it for cache key generation (pickle only captures the function name, not the code).
Users must provide a stable hash by setting a `cache_hash` attribute on the context_fn.
For functools.partial objects, set the cache_hash on the partial object itself, not on
the underlying function.
Returns:
The cache hash if found
None: If no hash is provided (caller should bypass caching)
"""
if hasattr(context_fn, "cache_hash"):
return context_fn.cache_hash
return None
|
Extract a cache hash from a context_fn used for selective activation checkpointing (SAC).
The context_fn determines which ops are saved vs recomputed in the SAC region.
Since context_fn can be an arbitrary Python function, we cannot reliably pickle
it for cache key generation (pickle only captures the function name, not the code).
Users must provide a stable hash by setting a `cache_hash` attribute on the context_fn.
For functools.partial objects, set the cache_hash on the partial object itself, not on
the underlying function.
Returns:
The cache hash if found
None: If no hash is provided (caller should bypass caching)
|
python
|
torch/_functorch/_aot_autograd/autograd_cache.py
| 283
|
[
"context_fn"
] | false
| 2
| 7.12
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
|
getCircularDependencies
|
function getCircularDependencies(packages: Packages): string[][] {
const circularDeps = [] as string[][]
for (const pkg of Object.values(packages)) {
const uses = [...pkg.uses, ...pkg.usesDev]
const usedBy = [...pkg.usedBy, ...pkg.usedByDev]
const circles = intersection(uses, usedBy)
if (circles.length > 0) {
circularDeps.push(circles)
}
}
return circularDeps
}
|
Runs a command and pipes the stdout & stderr to the current process.
@param cwd cwd for running the command
@param cmd command to run
|
typescript
|
scripts/ci/publish.ts
| 164
|
[
"packages"
] | true
| 2
| 6.56
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
getActualIndentationForListStartLine
|
function getActualIndentationForListStartLine(list: NodeArray<Node>, sourceFile: SourceFile, options: EditorSettings): number {
if (!list) {
return Value.Unknown;
}
return findColumnForFirstNonWhitespaceCharacterInLine(sourceFile.getLineAndCharacterOfPosition(list.pos), sourceFile, options);
}
|
@param assumeNewLineBeforeCloseBrace
`false` when called on text from a real source file.
`true` when we need to assume `position` is on a newline.
This is useful for codefixes. Consider
```
function f() {
|}
```
with `position` at `|`.
When inserting some text after an open brace, we would like to get indentation as if a newline was already there.
By default indentation at `position` will be 0 so 'assumeNewLineBeforeCloseBrace' overrides this behavior.
|
typescript
|
src/services/formatting/smartIndenter.ts
| 545
|
[
"list",
"sourceFile",
"options"
] | true
| 2
| 8.32
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
value
|
public XContentBuilder value(Double value) throws IOException {
return (value == null) ? nullValue() : value(value.doubleValue());
}
|
@return the value of the "human readable" flag. When the value is equal to true,
some types of values are written in a format easier to read for a human.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
| 494
|
[
"value"
] |
XContentBuilder
| true
| 2
| 6.96
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
all
|
public KafkaFuture<Collection<ConfigResource>> all() {
final KafkaFutureImpl<Collection<ConfigResource>> result = new KafkaFutureImpl<>();
future.whenComplete((resources, throwable) -> {
if (throwable != null) {
result.completeExceptionally(throwable);
} else {
result.complete(resources);
}
});
return result;
}
|
Returns a future that yields either an exception, or the full set of config resources.
In the event of a failure, the future yields nothing but the first exception which
occurred.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/ListConfigResourcesResult.java
| 42
|
[] | true
| 2
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
filter
|
def filter(self, func, dropna: bool = True, *args, **kwargs):
"""
Filter elements from groups that don't satisfy a criterion.
Elements from groups are filtered if they do not satisfy the
boolean criterion specified by func.
Parameters
----------
func : function
Criterion to apply to each group. Should return True or False.
dropna : bool, optional
Drop groups that do not pass the filter. True by default; if False,
groups that evaluate False are filled with NaNs.
*args : tuple
Optional positional arguments to pass to `func`.
**kwargs : dict
Optional keyword arguments to pass to `func`.
Returns
-------
Series
The filtered subset of the original Series.
See Also
--------
Series.filter: Filter elements of ungrouped Series.
DataFrameGroupBy.filter : Filter elements from groups based on a criterion.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
for more details.
Examples
--------
>>> df = pd.DataFrame(
... {
... "A": ["foo", "bar", "foo", "bar", "foo", "bar"],
... "B": [1, 2, 3, 4, 5, 6],
... "C": [2.0, 5.0, 8.0, 1.0, 2.0, 9.0],
... }
... )
>>> grouped = df.groupby("A")
>>> df.groupby("A").B.filter(lambda x: x.mean() > 3.0)
1 2
3 4
5 6
Name: B, dtype: int64
"""
if isinstance(func, str):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x) -> bool:
b = wrapper(x)
return notna(b) and b
try:
indices = [
self._get_index(name)
for name, group in self._grouper.get_iterator(self._obj_with_exclusions)
if true_and_notna(group)
]
except (ValueError, TypeError) as err:
raise TypeError("the filter must return a boolean result") from err
filtered = self._apply_filter(indices, dropna)
return filtered
|
Filter elements from groups that don't satisfy a criterion.
Elements from groups are filtered if they do not satisfy the
boolean criterion specified by func.
Parameters
----------
func : function
Criterion to apply to each group. Should return True or False.
dropna : bool, optional
Drop groups that do not pass the filter. True by default; if False,
groups that evaluate False are filled with NaNs.
*args : tuple
Optional positional arguments to pass to `func`.
**kwargs : dict
Optional keyword arguments to pass to `func`.
Returns
-------
Series
The filtered subset of the original Series.
See Also
--------
Series.filter: Filter elements of ungrouped Series.
DataFrameGroupBy.filter : Filter elements from groups based on a criterion.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
for more details.
Examples
--------
>>> df = pd.DataFrame(
... {
... "A": ["foo", "bar", "foo", "bar", "foo", "bar"],
... "B": [1, 2, 3, 4, 5, 6],
... "C": [2.0, 5.0, 8.0, 1.0, 2.0, 9.0],
... }
... )
>>> grouped = df.groupby("A")
>>> df.groupby("A").B.filter(lambda x: x.mean() > 3.0)
1 2
3 4
5 6
Name: B, dtype: int64
|
python
|
pandas/core/groupby/generic.py
| 889
|
[
"self",
"func",
"dropna"
] | true
| 4
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
nextEscapeIndex
|
@Override
protected final int nextEscapeIndex(CharSequence csq, int index, int end) {
while (index < end) {
char c = csq.charAt(index);
if ((c < replacementsLength && replacements[c] != null)
|| c > safeMaxChar
|| c < safeMinChar) {
break;
}
index++;
}
return index;
}
|
Escapes a single Unicode code point using the replacement array and safe range values. If the
given character does not have an explicit replacement and lies outside the safe range then
{@link #escapeUnsafe} is called.
@return the replacement characters, or {@code null} if no escaping was required
|
java
|
android/guava/src/com/google/common/escape/ArrayBasedUnicodeEscaper.java
| 178
|
[
"csq",
"index",
"end"
] | true
| 6
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
intCastExpression
|
static BindableMatcher<clang::Stmt>
intCastExpression(bool IsSigned, StringRef CastBindName = {}) {
// std::cmp_{} functions trigger a compile-time error if either LHS or RHS
// is a non-integer type, char, enum or bool
// (unsigned char/ signed char are Ok and can be used).
auto IntTypeExpr = expr(hasType(hasCanonicalType(qualType(
IsSigned ? isSignedInteger() : isUnsignedInteger(),
unless(isActualChar()), unless(booleanType()), unless(enumType())))));
const auto ImplicitCastExpr =
CastBindName.empty() ? implicitCastExpr(hasSourceExpression(IntTypeExpr))
: implicitCastExpr(hasSourceExpression(IntTypeExpr))
.bind(CastBindName);
const auto CStyleCastExpr = cStyleCastExpr(has(ImplicitCastExpr));
const auto StaticCastExpr = cxxStaticCastExpr(has(ImplicitCastExpr));
const auto FunctionalCastExpr = cxxFunctionalCastExpr(has(ImplicitCastExpr));
return expr(anyOf(ImplicitCastExpr, CStyleCastExpr, StaticCastExpr,
FunctionalCastExpr));
}
|
not applicable to explicit "signed char" or "unsigned char" types.
|
cpp
|
clang-tools-extra/clang-tidy/modernize/UseIntegerSignComparisonCheck.cpp
| 36
|
[
"IsSigned"
] | true
| 3
| 6
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
removeAdvisor
|
boolean removeAdvisor(Advisor advisor);
|
Remove the given advisor.
@param advisor the advisor to remove
@return {@code true} if the advisor was removed; {@code false}
if the advisor was not found and hence could not be removed
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/Advised.java
| 150
|
[
"advisor"
] | true
| 1
| 6.8
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
start
|
public synchronized boolean start() {
// Not yet started?
if (!isStarted()) {
// Determine the executor to use and whether a temporary one has to be created.
final ExecutorService tempExec;
executor = getExternalExecutor();
if (executor == null) {
executor = tempExec = createExecutor();
} else {
tempExec = null;
}
future = executor.submit(createTask(tempExec));
return true;
}
return false;
}
|
Starts the background initialization. With this method the initializer
becomes active and invokes the {@link #initialize()} method in a
background task. A {@link BackgroundInitializer} can be started exactly
once. The return value of this method determines whether the start was
successful: only the first invocation of this method returns <strong>true</strong>,
following invocations will return <strong>false</strong>.
@return a flag whether the initializer could be started successfully.
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/BackgroundInitializer.java
| 394
|
[] | true
| 3
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
read
|
private static String read(Path path) {
try {
return Files.readString(path);
} catch (IOException e) {
log.error("Could not read file {} for property {}", path, path.getFileName(), e);
throw new ConfigException("Could not read file " + path + " for property " + path.getFileName());
}
}
|
Retrieves the data contained in the regular files named by {@code keys} in the directory given by {@code path}.
Non-regular files (such as directories) in the given directory are silently ignored.
@param path the directory where data files reside.
@param keys the keys whose values will be retrieved.
@return the configuration data.
|
java
|
clients/src/main/java/org/apache/kafka/common/config/provider/DirectoryConfigProvider.java
| 117
|
[
"path"
] |
String
| true
| 2
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
findHomeDir
|
private File findHomeDir(@Nullable File source) {
File homeDir = source;
homeDir = (homeDir != null) ? homeDir : findDefaultHomeDir();
if (homeDir.isFile()) {
homeDir = homeDir.getParentFile();
}
homeDir = homeDir.exists() ? homeDir : new File(".");
return homeDir.getAbsoluteFile();
}
|
Create a new {@link ApplicationHome} instance for the specified source class.
@param sourceClass the source class or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/system/ApplicationHome.java
| 141
|
[
"source"
] |
File
| true
| 4
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
parseLeadingNumber
|
static size_t parseLeadingNumber(const std::string& line) {
auto raw = line.c_str();
char* end;
unsigned long val = strtoul(raw, &end, 10);
if (end == raw || (*end != ',' && *end != '-' && *end != '\n' && *end != 0)) {
throw std::runtime_error(fmt::format("error parsing list '{}'", line));
}
return val;
}
|
line does not start with a number terminated by ',', '-', '\n', or EOS.
|
cpp
|
folly/concurrency/CacheLocality.cpp
| 173
|
[] | true
| 6
| 7.04
|
facebook/folly
| 30,157
|
doxygen
| false
|
|
lint_file
|
def lint_file(
filename: str,
line_remainders: list[str],
allowlist_pattern: str,
replace_pattern: str,
error_name: str,
) -> None:
"""
Lint a file with one or more pattern matches, printing LintMessages as they're created.
Args:
filename: Path to the file being linted
line_remainders: List of line remainders (format: "line:content" without filename prefix)
allowlist_pattern: Pattern to check for allowlisting
replace_pattern: Pattern for sed replacement
error_name: Human-readable error name
"""
if not line_remainders:
return
should_skip = check_allowlist(filename, allowlist_pattern)
if should_skip:
return
# Check if file is too large to compute replacement
file_size = os.path.getsize(filename)
compute_replacement = replace_pattern and file_size <= MAX_ORIGINAL_SIZE
# Apply replacement to entire file if pattern is specified and file is not too large
original = None
replacement = None
if compute_replacement:
# When we have a replacement, report a single message with line=None
try:
with open(filename) as f:
original = f.read()
proc = run_command(["sed", "-r", replace_pattern, filename])
replacement = proc.stdout.decode("utf-8")
except Exception as err:
print_lint_message(
name="command-failed",
description=(
f"Failed due to {err.__class__.__name__}:\n{err}"
if not isinstance(err, subprocess.CalledProcessError)
else (
"COMMAND (exit code {returncode})\n"
"{command}\n\n"
"STDERR\n{stderr}\n\n"
"STDOUT\n{stdout}"
).format(
returncode=err.returncode,
command=" ".join(as_posix(x) for x in err.cmd),
stderr=err.stderr.decode("utf-8").strip() or "(empty)",
stdout=err.stdout.decode("utf-8").strip() or "(empty)",
)
),
)
return
print_lint_message(
path=filename,
name=error_name,
original=original,
replacement=replacement,
)
else:
# When no replacement, report each matching line (up to MAX_MATCHES_PER_FILE)
total_matches = len(line_remainders)
matches_to_report = min(total_matches, MAX_MATCHES_PER_FILE)
for line_remainder in line_remainders[:matches_to_report]:
# line_remainder format: "line_number:content"
split = line_remainder.split(":", 1)
line_number = int(split[0]) if split[0] else None
print_lint_message(
path=filename,
line=line_number,
name=error_name,
)
# If there are more matches than the limit, print an error
if total_matches > MAX_MATCHES_PER_FILE:
print_lint_message(
path=filename,
name="too-many-matches",
description=f"File has {total_matches} matches, only showing first {MAX_MATCHES_PER_FILE}",
)
|
Lint a file with one or more pattern matches, printing LintMessages as they're created.
Args:
filename: Path to the file being linted
line_remainders: List of line remainders (format: "line:content" without filename prefix)
allowlist_pattern: Pattern to check for allowlisting
replace_pattern: Pattern for sed replacement
error_name: Human-readable error name
|
python
|
tools/linter/adapters/grep_linter.py
| 163
|
[
"filename",
"line_remainders",
"allowlist_pattern",
"replace_pattern",
"error_name"
] |
None
| true
| 12
| 6.16
|
pytorch/pytorch
| 96,034
|
google
| false
|
type
|
public byte type() {
return this.type;
}
|
@return the type indicator for this SASL SCRAM mechanism
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/ScramMechanism.java
| 81
|
[] | true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
assertBeanFactoryActive
|
protected void assertBeanFactoryActive() {
if (!this.active.get()) {
if (this.closed.get()) {
throw new IllegalStateException(getDisplayName() + " has been closed already");
}
else {
throw new IllegalStateException(getDisplayName() + " has not been refreshed yet");
}
}
}
|
Assert that this context's BeanFactory is currently active,
throwing an {@link IllegalStateException} if it isn't.
<p>Invoked by all {@link BeanFactory} delegation methods that depend
on an active context, i.e. in particular all bean accessor methods.
<p>The default implementation checks the {@link #isActive() 'active'} status
of this context overall. May be overridden for more specific checks, or for a
no-op if {@link #getBeanFactory()} itself throws an exception in such a case.
|
java
|
spring-context/src/main/java/org/springframework/context/support/AbstractApplicationContext.java
| 1,259
|
[] |
void
| true
| 3
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getReferences
|
private Set<StandardConfigDataReference> getReferences(ConfigDataLocationResolverContext context,
ConfigDataLocation configDataLocation) {
String resourceLocation = getResourceLocation(context, configDataLocation);
try {
if (isDirectory(resourceLocation)) {
return getReferencesForDirectory(configDataLocation, resourceLocation, NO_PROFILE);
}
return getReferencesForFile(configDataLocation, resourceLocation, NO_PROFILE);
}
catch (RuntimeException ex) {
throw new IllegalStateException("Unable to load config data from '" + configDataLocation + "'", ex);
}
}
|
Create a new {@link StandardConfigDataLocationResolver} instance.
@param logFactory the factory for loggers to use
@param binder a binder backed by the initial {@link Environment}
@param resourceLoader a {@link ResourceLoader} used to load resources
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/StandardConfigDataLocationResolver.java
| 133
|
[
"context",
"configDataLocation"
] | true
| 3
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
_get_backward_memory_from_topologically_sorted_graph
|
def _get_backward_memory_from_topologically_sorted_graph(
self,
node_graph: nx.DiGraph,
node_memories: dict[str, float],
saved_nodes_set: set[str],
peak_memory_after_forward_pass: float,
) -> list[tuple[float, str]]:
"""
Simulates the backward pass and keeps track of the peak memory usage.
High Level Steps:
1. Set Initial Peak/Current Memory
Allows you to set the peak memory after the forward pass, but typically this is
the sum of the estimated memory of the saved nodes.
2. Perform a reverse topological sort of the node_graph.
If full graph is defined then will sort the full graph and only process the subset
of nodes in the node_graph.
3. Iterate through the sorted graph nodes.
If the node is saved, just drop its memory from current memory.
If the node is not saved, add its memory to current memory and then traverse its
predecessors to simulate the recomputation chain. Check for a new peak memory after all
predecessors are processed.
Args:
node_graph (nx.DiGraph): A directed graph representing the recomputable forward nodes.
saved_nodes_set (Set[str]): A set of node names that are saved.
peak_memory_after_forward_pass (float): The peak memory usage after the forward pass.
"""
current_memory = [
(peak_memory_after_forward_pass, "Initial Peak/Current Memory")
]
already_computed = set()
sorted_nodes = list(reversed(list(nx.topological_sort(node_graph))))
dependencies_computed = set()
for node in sorted_nodes:
if node in saved_nodes_set or node in already_computed:
current_memory.append(
(
current_memory[-1][0] - node_memories[node],
f"Dropping Node(already saved): {node}",
)
)
continue
already_computed.add(node)
current_memory.append(
(
current_memory[-1][0] + node_memories[node],
f"Recomputing Node: {node}",
)
)
# Create a queue of dependencies required for recomputation
predecessor_queue = deque(
[
dependency
for dependency, v in node_graph.in_edges(node)
if dependency not in already_computed
]
)
while predecessor_queue:
dep = predecessor_queue.popleft()
already_computed.add(dep)
dependencies_computed.add(dep)
current_memory.append(
(
current_memory[-1][0] + node_memories[dep],
f"Recomputing Predecessor of {node}: {dep}",
)
)
# Add predecessors of the predecessor to the queue if they haven't been recomputed yet
for dependency_of_dependency, _ in node_graph.in_edges(dep):
if (
dependency_of_dependency in already_computed
or dependency_of_dependency in saved_nodes_set
or dependency_of_dependency in predecessor_queue
):
continue
predecessor_queue.append(dependency_of_dependency)
dependencies_computed.clear()
current_memory.append(
(current_memory[-1][0] - node_memories[node], f"Dropping Node: {node}")
)
return current_memory
|
Simulates the backward pass and keeps track of the peak memory usage.
High Level Steps:
1. Set Initial Peak/Current Memory
Allows you to set the peak memory after the forward pass, but typically this is
the sum of the estimated memory of the saved nodes.
2. Perform a reverse topological sort of the node_graph.
If full graph is defined then will sort the full graph and only process the subset
of nodes in the node_graph.
3. Iterate through the sorted graph nodes.
If the node is saved, just drop its memory from current memory.
If the node is not saved, add its memory to current memory and then traverse its
predecessors to simulate the recomputation chain. Check for a new peak memory after all
predecessors are processed.
Args:
node_graph (nx.DiGraph): A directed graph representing the recomputable forward nodes.
saved_nodes_set (Set[str]): A set of node names that are saved.
peak_memory_after_forward_pass (float): The peak memory usage after the forward pass.
|
python
|
torch/_functorch/_activation_checkpointing/knapsack_evaluator.py
| 25
|
[
"self",
"node_graph",
"node_memories",
"saved_nodes_set",
"peak_memory_after_forward_pass"
] |
list[tuple[float, str]]
| true
| 9
| 6.96
|
pytorch/pytorch
| 96,034
|
google
| false
|
hashCode
|
@Override
public int hashCode() {
int code = this.clazz.hashCode();
code = 37 * code + this.methodNamePatterns.hashCode();
return code;
}
|
Determine if the given method name matches the method name pattern.
<p>This method is invoked by {@link #isMatch(String, int)}.
<p>The default implementation checks for direct equality as well as
{@code xxx*}, {@code *xxx}, {@code *xxx*}, and {@code xxx*yyy} matches.
<p>Can be overridden in subclasses — for example, to support a
different style of simple pattern matching.
@param methodName the method name to check
@param methodNamePattern the method name pattern
@return {@code true} if the method name matches the pattern
@since 6.1
@see #isMatch(String, int)
@see PatternMatchUtils#simpleMatch(String, String)
|
java
|
spring-aop/src/main/java/org/springframework/aop/support/ControlFlowPointcut.java
| 243
|
[] | true
| 1
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
get_paused_dag_ids
|
def get_paused_dag_ids(dag_ids: list[str], session: Session = NEW_SESSION) -> set[str]:
"""
Given a list of dag_ids, get a set of Paused Dag Ids.
:param dag_ids: List of Dag ids
:param session: ORM Session
:return: Paused Dag_ids
"""
paused_dag_ids = session.scalars(
select(DagModel.dag_id)
.where(DagModel.is_paused == expression.true())
.where(DagModel.dag_id.in_(dag_ids))
)
return set(paused_dag_ids)
|
Given a list of dag_ids, get a set of Paused Dag Ids.
:param dag_ids: List of Dag ids
:param session: ORM Session
:return: Paused Dag_ids
|
python
|
airflow-core/src/airflow/models/dag.py
| 530
|
[
"dag_ids",
"session"
] |
set[str]
| true
| 1
| 6.72
|
apache/airflow
| 43,597
|
sphinx
| false
|
asList
|
public static List<Double> asList(double... backingArray) {
if (backingArray.length == 0) {
return Collections.emptyList();
}
return new DoubleArrayAsList(backingArray);
}
|
Returns a fixed-size list backed by the specified array, similar to {@link
Arrays#asList(Object[])}. The list supports {@link List#set(int, Object)}, but any attempt to
set a value to {@code null} will result in a {@link NullPointerException}.
<p>The returned list maintains the values, but not the identities, of {@code Double} objects
written to or read from it. For example, whether {@code list.get(0) == list.get(0)} is true for
the returned list is unspecified.
<p>The returned list may have unexpected behavior if it contains {@code NaN}, or if {@code NaN}
is used as a parameter to any of its methods.
<p>The returned list is serializable.
<p><b>Note:</b> when possible, you should represent your data as an {@link
ImmutableDoubleArray} instead, which has an {@link ImmutableDoubleArray#asList asList} view.
@param backingArray the array to back the list
@return a list view of the array
|
java
|
android/guava/src/com/google/common/primitives/Doubles.java
| 575
|
[] | true
| 2
| 7.92
|
google/guava
| 51,352
|
javadoc
| false
|
|
getStackFrameList
|
static List<String> getStackFrameList(final Throwable throwable) {
final String stackTrace = getStackTrace(throwable);
final String linebreak = System.lineSeparator();
final StringTokenizer frames = new StringTokenizer(stackTrace, linebreak);
final List<String> list = new ArrayList<>();
boolean traceStarted = false;
while (frames.hasMoreTokens()) {
final String token = frames.nextToken();
// Determine if the line starts with "<whitespace>at"
final int at = token.indexOf("at");
if (at != NOT_FOUND && token.substring(0, at).trim().isEmpty()) {
traceStarted = true;
list.add(token);
} else if (traceStarted) {
break;
}
}
return list;
}
|
Gets a {@link List} of stack frames, the message
is not included. Only the trace of the specified exception is
returned, any caused by trace is stripped.
<p>This works in most cases and will only fail if the exception
message contains a line that starts with: {@code "<whitespace>at"}.</p>
@param throwable is any throwable.
@return List of stack frames.
|
java
|
src/main/java/org/apache/commons/lang3/exception/ExceptionUtils.java
| 401
|
[
"throwable"
] | true
| 5
| 8.4
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
mintypecode
|
def mintypecode(typechars, typeset='GDFgdf', default='d'):
"""
Return the character for the minimum-size type to which given types can
be safely cast.
The returned type character must represent the smallest size dtype such
that an array of the returned type can handle the data from an array of
all types in `typechars` (or if `typechars` is an array, then its
dtype.char).
Parameters
----------
typechars : list of str or array_like
If a list of strings, each string should represent a dtype.
If array_like, the character representation of the array dtype is used.
typeset : str or list of str, optional
The set of characters that the returned character is chosen from.
The default set is 'GDFgdf'.
default : str, optional
The default character, this is returned if none of the characters in
`typechars` matches a character in `typeset`.
Returns
-------
typechar : str
The character representing the minimum-size type that was found.
See Also
--------
dtype
Examples
--------
>>> import numpy as np
>>> np.mintypecode(['d', 'f', 'S'])
'd'
>>> x = np.array([1.1, 2-3.j])
>>> np.mintypecode(x)
'D'
>>> np.mintypecode('abceh', default='G')
'G'
"""
typecodes = ((isinstance(t, str) and t) or asarray(t).dtype.char
for t in typechars)
intersection = {t for t in typecodes if t in typeset}
if not intersection:
return default
if 'F' in intersection and 'd' in intersection:
return 'D'
return min(intersection, key=_typecodes_by_elsize.index)
|
Return the character for the minimum-size type to which given types can
be safely cast.
The returned type character must represent the smallest size dtype such
that an array of the returned type can handle the data from an array of
all types in `typechars` (or if `typechars` is an array, then its
dtype.char).
Parameters
----------
typechars : list of str or array_like
If a list of strings, each string should represent a dtype.
If array_like, the character representation of the array dtype is used.
typeset : str or list of str, optional
The set of characters that the returned character is chosen from.
The default set is 'GDFgdf'.
default : str, optional
The default character, this is returned if none of the characters in
`typechars` matches a character in `typeset`.
Returns
-------
typechar : str
The character representing the minimum-size type that was found.
See Also
--------
dtype
Examples
--------
>>> import numpy as np
>>> np.mintypecode(['d', 'f', 'S'])
'd'
>>> x = np.array([1.1, 2-3.j])
>>> np.mintypecode(x)
'D'
>>> np.mintypecode('abceh', default='G')
'G'
|
python
|
numpy/lib/_type_check_impl.py
| 26
|
[
"typechars",
"typeset",
"default"
] | false
| 6
| 7.68
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
assert_run_python_script_without_output
|
def assert_run_python_script_without_output(source_code, pattern=".+", timeout=60):
"""Utility to check assertions in an independent Python subprocess.
The script provided in the source code should return 0 and the stdout +
stderr should not match the pattern `pattern`.
This is a port from cloudpickle https://github.com/cloudpipe/cloudpickle
Parameters
----------
source_code : str
The Python source code to execute.
pattern : str
Pattern that the stdout + stderr should not match. By default, unless
stdout + stderr are both empty, an error will be raised.
timeout : int, default=60
Time in seconds before timeout.
"""
fd, source_file = tempfile.mkstemp(suffix="_src_test_sklearn.py")
os.close(fd)
try:
with open(source_file, "wb") as f:
f.write(source_code.encode("utf-8"))
cmd = [sys.executable, source_file]
cwd = op.normpath(op.join(op.dirname(sklearn_path), ".."))
env = os.environ.copy()
try:
env["PYTHONPATH"] = os.pathsep.join([cwd, env["PYTHONPATH"]])
except KeyError:
env["PYTHONPATH"] = cwd
kwargs = {"cwd": cwd, "stderr": STDOUT, "env": env}
# If coverage is running, pass the config file to the subprocess
coverage_rc = os.environ.get("COVERAGE_PROCESS_START")
if coverage_rc:
kwargs["env"]["COVERAGE_PROCESS_START"] = coverage_rc
kwargs["timeout"] = timeout
try:
try:
out = check_output(cmd, **kwargs)
except CalledProcessError as e:
raise RuntimeError(
"script errored with output:\n%s" % e.output.decode("utf-8")
)
out = out.decode("utf-8")
if re.search(pattern, out):
if pattern == ".+":
expectation = "Expected no output"
else:
expectation = f"The output was not supposed to match {pattern!r}"
message = f"{expectation}, got the following output instead: {out!r}"
raise AssertionError(message)
except TimeoutExpired as e:
raise RuntimeError(
"script timeout, output so far:\n%s" % e.output.decode("utf-8")
)
finally:
os.unlink(source_file)
|
Utility to check assertions in an independent Python subprocess.
The script provided in the source code should return 0 and the stdout +
stderr should not match the pattern `pattern`.
This is a port from cloudpickle https://github.com/cloudpipe/cloudpickle
Parameters
----------
source_code : str
The Python source code to execute.
pattern : str
Pattern that the stdout + stderr should not match. By default, unless
stdout + stderr are both empty, an error will be raised.
timeout : int, default=60
Time in seconds before timeout.
|
python
|
sklearn/utils/_testing.py
| 902
|
[
"source_code",
"pattern",
"timeout"
] | false
| 5
| 6.16
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
scale
|
@Override
public int scale() {
return bucketScale;
}
|
Attempts to add a bucket to the positive or negative range of this histogram.
<br>
Callers must adhere to the following rules:
<ul>
<li>All buckets for the negative values range must be provided before the first one from the positive values range.</li>
<li>For both the negative and positive ranges, buckets must be provided with their indices in ascending order.</li>
<li>It is not allowed to provide the same bucket more than once.</li>
<li>It is not allowed to add empty buckets ({@code count <= 0}).</li>
</ul>
If any of these rules are violated, this call will fail with an exception.
If the bucket cannot be added because the maximum capacity has been reached, the call will not modify the state
of this histogram and will return {@code false}.
@param index the index of the bucket to add
@param count the count to associate with the given bucket
@param isPositive {@code true} if the bucket belongs to the positive range, {@code false} if it belongs to the negative range
@return {@code true} if the bucket was added, {@code false} if it could not be added due to insufficient capacity
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/FixedCapacityExponentialHistogram.java
| 186
|
[] | true
| 1
| 6.64
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
renderKey
|
function renderKey(path, key, dots) {
if (!path) return key;
return path.concat(key).map(function each(token, i) {
// eslint-disable-next-line no-param-reassign
token = removeBrackets(token);
return !dots && i ? '[' + token + ']' : token;
}).join(dots ? '.' : '');
}
|
It takes a path, a key, and a boolean, and returns a string
@param {string} path - The path to the current key.
@param {string} key - The key of the current object being iterated over.
@param {string} dots - If true, the key will be rendered with dots instead of brackets.
@returns {string} The path to the current key.
|
javascript
|
lib/helpers/toFormData.js
| 39
|
[
"path",
"key",
"dots"
] | false
| 5
| 6.4
|
axios/axios
| 108,381
|
jsdoc
| false
|
|
toCharacterObject
|
public static Character toCharacterObject(final String str) {
return StringUtils.isEmpty(str) ? null : Character.valueOf(str.charAt(0));
}
|
Converts the String to a Character using the first character, returning
null for empty Strings.
<p>For ASCII 7 bit characters, this uses a cache that will return the
same Character object each time.</p>
<pre>
CharUtils.toCharacterObject(null) = null
CharUtils.toCharacterObject("") = null
CharUtils.toCharacterObject("A") = 'A'
CharUtils.toCharacterObject("BA") = 'B'
</pre>
@param str the character to convert
@return the Character value of the first letter of the String
|
java
|
src/main/java/org/apache/commons/lang3/CharUtils.java
| 369
|
[
"str"
] |
Character
| true
| 2
| 7.52
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
APOS_UNESCAPE
|
public static String[][] APOS_UNESCAPE() {
return APOS_UNESCAPE.clone();
}
|
Reverse of {@link #APOS_ESCAPE()} for unescaping purposes.
@return the mapping table.
|
java
|
src/main/java/org/apache/commons/lang3/text/translate/EntityArrays.java
| 371
|
[] | true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
onRestart
|
@Override
public void onRestart() {
this.stoppedBeans = null;
if (this.running) {
stopBeans(true);
}
startBeans(true);
this.running = true;
}
|
Stop all registered beans that implement {@link Lifecycle} and <i>are</i>
currently running. Any bean that implements {@link SmartLifecycle} will be
stopped within its 'phase', and all phases will be ordered from highest to
lowest value. All beans that do not implement {@link SmartLifecycle} will be
stopped in the default phase 0. A bean declared as dependent on another bean
will be stopped before the dependency bean regardless of the declared phase.
|
java
|
spring-context/src/main/java/org/springframework/context/support/DefaultLifecycleProcessor.java
| 317
|
[] |
void
| true
| 2
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
_build_candidate_buffer_map
|
def _build_candidate_buffer_map(
buf_to_snode_last_use: dict,
) -> dict[BaseSchedulerNode, OrderedSet]:
"""
Build inverted index: node -> set of buffers where node appears in successors.
This optimization reduces buffer iteration from O(total_buffers) to O(buffers_per_node).
Since buffer successors are immutable during reordering, this map doesn't need updates.
Returns:
dict mapping each node to the set of buffers that have this node in their successors
"""
node_to_candidate_bufs: dict[BaseSchedulerNode, OrderedSet] = defaultdict(
OrderedSet
)
for buf in buf_to_snode_last_use:
# Add to every successor node's buffer set
for succ_node in buf.mpi_buffer.succ_nodes:
node_to_candidate_bufs[succ_node].add(buf)
return dict(node_to_candidate_bufs)
|
Build inverted index: node -> set of buffers where node appears in successors.
This optimization reduces buffer iteration from O(total_buffers) to O(buffers_per_node).
Since buffer successors are immutable during reordering, this map doesn't need updates.
Returns:
dict mapping each node to the set of buffers that have this node in their successors
|
python
|
torch/_inductor/comms.py
| 388
|
[
"buf_to_snode_last_use"
] |
dict[BaseSchedulerNode, OrderedSet]
| true
| 3
| 8.08
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
endsWith
|
private static boolean endsWith(CharSequence charSequence, char ch) {
return !charSequence.isEmpty() && charSequence.charAt(charSequence.length() - 1) == ch;
}
|
Returns if the bytes read from a {@link DataBlock} starts with the given
{@link CharSequence}.
@param buffer the buffer to use or {@code null}
@param dataBlock the source data block
@param pos the position in the data block where the string starts
@param len the number of bytes to read from the block
@param charSequence the required starting chars
@return {@code -1} if the data block does not start with the char sequence, or a
positive number indicating the number of bytes that contain the starting chars
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipString.java
| 246
|
[
"charSequence",
"ch"
] | true
| 2
| 7.68
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
getDocumentationComment
|
function getDocumentationComment(declarations: readonly Declaration[] | undefined, checker: TypeChecker | undefined): SymbolDisplayPart[] {
if (!declarations) return emptyArray;
let doc = JsDoc.getJsDocCommentsFromDeclarations(declarations, checker);
if (checker && (doc.length === 0 || declarations.some(hasJSDocInheritDocTag))) {
const seenSymbols = new Set<Symbol>();
for (const declaration of declarations) {
const inheritedDocs = findBaseOfDeclaration(checker, declaration, symbol => {
if (!seenSymbols.has(symbol)) {
seenSymbols.add(symbol);
if (declaration.kind === SyntaxKind.GetAccessor || declaration.kind === SyntaxKind.SetAccessor) {
return symbol.getContextualDocumentationComment(declaration, checker);
}
return symbol.getDocumentationComment(checker);
}
});
// TODO: GH#16312 Return a ReadonlyArray, avoid copying inheritedDocs
if (inheritedDocs) doc = doc.length === 0 ? inheritedDocs.slice() : inheritedDocs.concat(lineBreakPart(), doc);
}
}
return doc;
}
|
Returns whether or not the given node has a JSDoc "inheritDoc" tag on it.
@param node the Node in question.
@returns `true` if `node` has a JSDoc "inheritDoc" tag on it, otherwise `false`.
|
typescript
|
src/services/services.ts
| 1,028
|
[
"declarations",
"checker"
] | true
| 10
| 8.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
create
|
public static StopWatch create() {
return new StopWatch();
}
|
Creates a StopWatch.
@return StopWatch a StopWatch.
@since 3.10
|
java
|
src/main/java/org/apache/commons/lang3/time/StopWatch.java
| 232
|
[] |
StopWatch
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
cloneDataView
|
function cloneDataView(dataView, isDeep) {
var buffer = isDeep ? cloneArrayBuffer(dataView.buffer) : dataView.buffer;
return new dataView.constructor(buffer, dataView.byteOffset, dataView.byteLength);
}
|
Creates a clone of `dataView`.
@private
@param {Object} dataView The data view to clone.
@param {boolean} [isDeep] Specify a deep clone.
@returns {Object} Returns the cloned data view.
|
javascript
|
lodash.js
| 4,639
|
[
"dataView",
"isDeep"
] | false
| 2
| 6
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
deserialize_value
|
def deserialize_value(result: Any) -> Any:
"""
Deserialize XCom value from a database result.
If deserialization fails, the raw value is returned, which must still be a valid Python JSON-compatible
type (e.g., ``dict``, ``list``, ``str``, ``int``, ``float``, or ``bool``).
XCom values are stored as JSON in the database, and SQLAlchemy automatically handles
serialization (``json.dumps``) and deserialization (``json.loads``). However, we
use a custom encoder for serialization (``serialize_value``) and deserialization to handle special
cases, such as encoding tuples via the Airflow Serialization module. These must be decoded
using ``XComDecoder`` to restore original types.
Some XCom values, such as those set via the Task Execution API, bypass ``serialize_value``
and are stored directly in JSON format. Since these values are already deserialized
by SQLAlchemy, they are returned as-is.
**Example: Handling a tuple**:
.. code-block:: python
original_value = (1, 2, 3)
serialized_value = XComModel.serialize_value(original_value)
print(serialized_value)
# '{"__classname__": "builtins.tuple", "__version__": 1, "__data__": [1, 2, 3]}'
This serialized value is stored in the database. When deserialized, the value is restored to the original tuple.
:param result: The XCom database row or object containing a ``value`` attribute.
:return: The deserialized Python object.
"""
if result.value is None:
return None
try:
return json.loads(result.value, cls=XComDecoder)
except (ValueError, TypeError):
# Already deserialized (e.g., set via Task Execution API)
return result.value
|
Deserialize XCom value from a database result.
If deserialization fails, the raw value is returned, which must still be a valid Python JSON-compatible
type (e.g., ``dict``, ``list``, ``str``, ``int``, ``float``, or ``bool``).
XCom values are stored as JSON in the database, and SQLAlchemy automatically handles
serialization (``json.dumps``) and deserialization (``json.loads``). However, we
use a custom encoder for serialization (``serialize_value``) and deserialization to handle special
cases, such as encoding tuples via the Airflow Serialization module. These must be decoded
using ``XComDecoder`` to restore original types.
Some XCom values, such as those set via the Task Execution API, bypass ``serialize_value``
and are stored directly in JSON format. Since these values are already deserialized
by SQLAlchemy, they are returned as-is.
**Example: Handling a tuple**:
.. code-block:: python
original_value = (1, 2, 3)
serialized_value = XComModel.serialize_value(original_value)
print(serialized_value)
# '{"__classname__": "builtins.tuple", "__version__": 1, "__data__": [1, 2, 3]}'
This serialized value is stored in the database. When deserialized, the value is restored to the original tuple.
:param result: The XCom database row or object containing a ``value`` attribute.
:return: The deserialized Python object.
|
python
|
airflow-core/src/airflow/models/xcom.py
| 354
|
[
"result"
] |
Any
| true
| 2
| 7.6
|
apache/airflow
| 43,597
|
sphinx
| false
|
replaceParameters
|
private String replaceParameters(String message, Locale locale, Set<String> visitedParameters) {
StringBuilder buf = new StringBuilder(message);
int parentheses = 0;
int startIndex = -1;
int endIndex = -1;
for (int i = 0; i < buf.length(); i++) {
if (buf.charAt(i) == ESCAPE) {
i++;
}
else if (buf.charAt(i) == PREFIX) {
if (startIndex == -1) {
startIndex = i;
}
parentheses++;
}
else if (buf.charAt(i) == SUFFIX) {
if (parentheses > 0) {
parentheses--;
}
endIndex = i;
}
if (parentheses == 0 && startIndex < endIndex) {
String parameter = buf.substring(startIndex + 1, endIndex);
if (!visitedParameters.add(parameter)) {
throw new IllegalArgumentException("Circular reference '{" + String.join(" -> ", visitedParameters)
+ " -> " + parameter + "}'");
}
String value = replaceParameter(parameter, locale, visitedParameters);
if (value != null) {
buf.replace(startIndex, endIndex + 1, value);
i = startIndex + value.length() - 1;
}
visitedParameters.remove(parameter);
startIndex = -1;
endIndex = -1;
}
}
return buf.toString();
}
|
Recursively replaces all message parameters.
<p>
The message parameter prefix <code>{</code> and suffix <code>}</code> can
be escaped using {@code \}, e.g. <code>\{escaped\}</code>.
@param message the message containing the parameters to be replaced
@param locale the locale to use when resolving replacements
@param visitedParameters the parameters already being replaced, used to detect circular references
@return the message with parameters replaced
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/validation/MessageSourceMessageInterpolator.java
| 79
|
[
"message",
"locale",
"visitedParameters"
] |
String
| true
| 11
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
containsNone
|
public static boolean containsNone(final CharSequence cs, final String invalidChars) {
if (invalidChars == null) {
return true;
}
return containsNone(cs, invalidChars.toCharArray());
}
|
Tests that the CharSequence does not contain certain characters.
<p>
A {@code null} CharSequence will return {@code true}. A {@code null} invalid character array will return {@code true}. An empty String ("") always
returns true.
</p>
<pre>
StringUtils.containsNone(null, *) = true
StringUtils.containsNone(*, null) = true
StringUtils.containsNone("", *) = true
StringUtils.containsNone("ab", "") = true
StringUtils.containsNone("abab", "xyz") = true
StringUtils.containsNone("ab1", "xyz") = true
StringUtils.containsNone("abz", "xyz") = false
</pre>
@param cs the CharSequence to check, may be null.
@param invalidChars a String of invalid chars, may be null.
@return true if it contains none of the invalid chars, or is null.
@since 2.0
@since 3.0 Changed signature from containsNone(String, String) to containsNone(CharSequence, String)
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 1,261
|
[
"cs",
"invalidChars"
] | true
| 2
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
decorateCache
|
protected Cache decorateCache(Cache cache) {
return cache;
}
|
Decorate the given Cache object if necessary.
@param cache the Cache object to be added to this CacheManager
@return the decorated Cache object to be used instead,
or simply the passed-in Cache object by default
|
java
|
spring-context/src/main/java/org/springframework/cache/support/AbstractCacheManager.java
| 164
|
[
"cache"
] |
Cache
| true
| 1
| 6.48
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
validate_percentile
|
def validate_percentile(q: float | Iterable[float]) -> np.ndarray:
"""
Validate percentiles (used by describe and quantile).
This function checks if the given float or iterable of floats is a valid percentile
otherwise raises a ValueError.
Parameters
----------
q: float or iterable of floats
A single percentile or an iterable of percentiles.
Returns
-------
ndarray
An ndarray of the percentiles if valid.
Raises
------
ValueError if percentiles are not in the interval [0, 1].
"""
q_arr = np.asarray(q)
# Don't change this to an f-string. The string formatting
# is too expensive for cases where we don't need it.
msg = "percentiles should all be in the interval [0, 1]"
if q_arr.ndim == 0:
if not 0 <= q_arr <= 1:
raise ValueError(msg)
elif not all(0 <= qs <= 1 for qs in q_arr):
raise ValueError(msg)
return q_arr
|
Validate percentiles (used by describe and quantile).
This function checks if the given float or iterable of floats is a valid percentile
otherwise raises a ValueError.
Parameters
----------
q: float or iterable of floats
A single percentile or an iterable of percentiles.
Returns
-------
ndarray
An ndarray of the percentiles if valid.
Raises
------
ValueError if percentiles are not in the interval [0, 1].
|
python
|
pandas/util/_validators.py
| 339
|
[
"q"
] |
np.ndarray
| true
| 4
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
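A quick usage check of the scalar, array, and error branches above (the helper is private; importing it from pandas.util._validators per the path shown is an assumption about the installed version):

from pandas.util._validators import validate_percentile

print(validate_percentile(0.5))            # array(0.5)
print(validate_percentile([0.25, 0.75]))   # array([0.25, 0.75])
try:
    validate_percentile([0.5, 1.5])
except ValueError as exc:
    print(exc)                             # percentiles should all be in the interval [0, 1]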
transform
|
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_queries, n_features)
If neighbors_algorithm='precomputed', X is assumed to be a
distance matrix or a sparse graph of shape
(n_queries, n_samples_fit).
Returns
-------
X_new : array-like, shape (n_queries, n_components)
X transformed in the new space.
"""
check_is_fitted(self)
if self.n_neighbors is not None:
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
else:
distances, indices = self.nbrs_.radius_neighbors(X, return_distance=True)
# Create the graph of shortest distances from X to
# training data via the nearest neighbors of X.
# This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
n_samples_fit = self.nbrs_.n_samples_fit_
n_queries = distances.shape[0]
if hasattr(X, "dtype") and X.dtype == np.float32:
dtype = np.float32
else:
dtype = np.float64
G_X = np.zeros((n_queries, n_samples_fit), dtype)
for i in range(n_queries):
G_X[i] = np.min(self.dist_matrix_[indices[i]] + distances[i][:, None], 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
|
Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_queries, n_features)
If neighbors_algorithm='precomputed', X is assumed to be a
distance matrix or a sparse graph of shape
(n_queries, n_samples_fit).
Returns
-------
X_new : array-like, shape (n_queries, n_components)
X transformed in the new space.
|
python
|
sklearn/manifold/_isomap.py
| 387
|
[
"self",
"X"
] | false
| 7
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
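A hedged end-to-end sketch of the out-of-sample projection described above, using scikit-learn's public Isomap API; the data and parameter values are illustrative only:

import numpy as np
from sklearn.manifold import Isomap

rng = np.random.RandomState(0)
X_train = rng.rand(100, 5)
X_query = rng.rand(10, 5)

iso = Isomap(n_neighbors=5, n_components=2)
iso.fit(X_train)                      # builds the neighbor graph and geodesic distances
X_embedded = iso.transform(X_query)   # links X_query into the graph and projects it
print(X_embedded.shape)               # (10, 2)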
nunique
|
def nunique(self):
"""
Return number of unique elements in the group.
Returns
-------
Series
Number of unique values within each group.
See Also
--------
core.groupby.SeriesGroupBy.nunique : Method nunique for SeriesGroupBy.
Examples
--------
>>> ser = pd.Series(
... [1, 2, 3, 3],
... index=pd.DatetimeIndex(
... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
... ),
... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 3
dtype: int64
>>> ser.resample("MS").nunique()
2023-01-01 2
2023-02-01 1
Freq: MS, dtype: int64
"""
return self._downsample("nunique")
|
Return number of unique elements in the group.
Returns
-------
Series
Number of unique values within each group.
See Also
--------
core.groupby.SeriesGroupBy.nunique : Method nunique for SeriesGroupBy.
Examples
--------
>>> ser = pd.Series(
... [1, 2, 3, 3],
... index=pd.DatetimeIndex(
... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
... ),
... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 3
dtype: int64
>>> ser.resample("MS").nunique()
2023-01-01 2
2023-02-01 1
Freq: MS, dtype: int64
|
python
|
pandas/core/resample.py
| 1,761
|
[
"self"
] | false
| 1
| 6.16
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
|
getJsonParser
|
public static JsonParser getJsonParser() {
if (ClassUtils.isPresent("tools.jackson.databind.ObjectMapper", null)) {
return new JacksonJsonParser();
}
if (ClassUtils.isPresent("com.google.gson.Gson", null)) {
return new GsonJsonParser();
}
return new BasicJsonParser();
}
|
Static factory for the "best" JSON parser available on the classpath. Tries
Jackson, then Gson, and then falls back to the {@link BasicJsonParser}.
@return a {@link JsonParser}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/json/JsonParserFactory.java
| 37
|
[] |
JsonParser
| true
| 3
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
throwableOfThrowable
|
public static <T extends Throwable> T throwableOfThrowable(final Throwable throwable, final Class<T> clazz) {
return throwableOf(throwable, clazz, 0, false);
}
|
Returns the first {@link Throwable}
that matches the specified class (exactly) in the exception chain.
Subclasses of the specified class do not match - see
{@link #throwableOfType(Throwable, Class)} for the opposite.
<p>A {@code null} throwable returns {@code null}.
A {@code null} type returns {@code null}.
No match in the chain returns {@code null}.</p>
@param <T> the type of Throwable you are searching.
@param throwable the throwable to inspect, may be null.
@param clazz the class to search for, subclasses do not match, null returns null.
@return the first matching throwable from the throwable chain, null if no match or null input.
@since 3.10
|
java
|
src/main/java/org/apache/commons/lang3/exception/ExceptionUtils.java
| 954
|
[
"throwable",
"clazz"
] |
T
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
padEnd
|
function padEnd(string, length, chars) {
string = toString(string);
length = toInteger(length);
var strLength = length ? stringSize(string) : 0;
return (length && strLength < length)
? (string + createPadding(length - strLength, chars))
: string;
}
|
Pads `string` on the right side if it's shorter than `length`. Padding
characters are truncated if they exceed `length`.
@static
@memberOf _
@since 4.0.0
@category String
@param {string} [string=''] The string to pad.
@param {number} [length=0] The padding length.
@param {string} [chars=' '] The string used as padding.
@returns {string} Returns the padded string.
@example
_.padEnd('abc', 6);
// => 'abc '
_.padEnd('abc', 6, '_-');
// => 'abc_-_'
_.padEnd('abc', 3);
// => 'abc'
|
javascript
|
lodash.js
| 14,517
|
[
"string",
"length",
"chars"
] | false
| 4
| 7.52
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
getAdvisor
|
@Override
public @Nullable Advisor getAdvisor(Method candidateAdviceMethod, MetadataAwareAspectInstanceFactory aspectInstanceFactory,
int declarationOrderInAspect, String aspectName) {
validate(aspectInstanceFactory.getAspectMetadata().getAspectClass());
AspectJExpressionPointcut expressionPointcut = getPointcut(
candidateAdviceMethod, aspectInstanceFactory.getAspectMetadata().getAspectClass());
if (expressionPointcut == null) {
return null;
}
try {
return new InstantiationModelAwarePointcutAdvisorImpl(expressionPointcut, candidateAdviceMethod,
this, aspectInstanceFactory, declarationOrderInAspect, aspectName);
}
catch (IllegalArgumentException | IllegalStateException ex) {
if (logger.isDebugEnabled()) {
logger.debug("Ignoring incompatible advice method: " + candidateAdviceMethod, ex);
}
return null;
}
}
|
Build a Spring AOP {@link Advisor} for the given AspectJ advice method, if it declares a pointcut.
<p>Incompatible advice methods are skipped with a debug log entry.
@param candidateAdviceMethod the candidate advice method
@param aspectInstanceFactory the aspect instance factory
@param declarationOrderInAspect the declaration order within the aspect
@param aspectName the name of the aspect
@return the Advisor instance, or {@code null} if the method declares no pointcut or is incompatible
|
java
|
spring-aop/src/main/java/org/springframework/aop/aspectj/annotation/ReflectiveAspectJAdvisorFactory.java
| 200
|
[
"candidateAdviceMethod",
"aspectInstanceFactory",
"declarationOrderInAspect",
"aspectName"
] |
Advisor
| true
| 4
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
diag_indices_from
|
def diag_indices_from(arr):
"""
Return the indices to access the main diagonal of an n-dimensional array.
See `diag_indices` for full details.
Parameters
----------
arr : array, at least 2-D
See Also
--------
diag_indices
Examples
--------
>>> import numpy as np
Create a 4 by 4 array.
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Get the indices of the diagonal elements.
>>> di = np.diag_indices_from(a)
>>> di
(array([0, 1, 2, 3]), array([0, 1, 2, 3]))
>>> a[di]
array([ 0, 5, 10, 15])
This is simply syntactic sugar for diag_indices.
>>> np.diag_indices(a.shape[0])
(array([0, 1, 2, 3]), array([0, 1, 2, 3]))
"""
if not arr.ndim >= 2:
raise ValueError("input array must be at least 2-d")
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
if not np.all(diff(arr.shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
return diag_indices(arr.shape[0], arr.ndim)
|
Return the indices to access the main diagonal of an n-dimensional array.
See `diag_indices` for full details.
Parameters
----------
arr : array, at least 2-D
See Also
--------
diag_indices
Examples
--------
>>> import numpy as np
Create a 4 by 4 array.
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Get the indices of the diagonal elements.
>>> di = np.diag_indices_from(a)
>>> di
(array([0, 1, 2, 3]), array([0, 1, 2, 3]))
>>> a[di]
array([ 0, 5, 10, 15])
This is simply syntactic sugar for diag_indices.
>>> np.diag_indices(a.shape[0])
(array([0, 1, 2, 3]), array([0, 1, 2, 3]))
|
python
|
numpy/lib/_index_tricks_impl.py
| 998
|
[
"arr"
] | false
| 3
| 7.84
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
stop
|
public void stop() {
if (runningState != State.RUNNING && runningState != State.SUSPENDED) {
throw new IllegalStateException("Stopwatch is not running.");
}
if (runningState == State.RUNNING) {
stopTimeNanos = System.nanoTime();
stopInstant = Instant.now();
}
runningState = State.STOPPED;
}
|
Stops this StopWatch.
<p>
This method ends the current timing session, allowing the time to be retrieved.
</p>
@throws IllegalStateException if this StopWatch is not running.
|
java
|
src/main/java/org/apache/commons/lang3/time/StopWatch.java
| 759
|
[] |
void
| true
| 4
| 6.88
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
url
|
public final URL url() {
URL url = loader.getResource(resourceName);
if (url == null) {
throw new NoSuchElementException(resourceName);
}
return url;
}
|
Returns the url identifying the resource.
<p>See {@link ClassLoader#getResource}
@throws NoSuchElementException if the resource cannot be loaded through the class loader,
despite physically existing in the class path.
|
java
|
android/guava/src/com/google/common/reflect/ClassPath.java
| 231
|
[] |
URL
| true
| 2
| 6.08
|
google/guava
| 51,352
|
javadoc
| false
|
mark_unbacked
|
def mark_unbacked(
t: Any,
index: Union[int, list[Any], tuple[Any]],
hint_override: Optional[int] = None,
strict: bool = False,
specialize_on: Optional[list[Any]] = None,
) -> None:
"""
Mark a tensor as having an unbacked dimension. This changes the semantics of operations:
- The size of the specified dimension will always be reported as not equal to zero or one.
- Assertions on this index will be turned into runtime asserts.
- Attempting to get the real value of this dimension will raise an exception.
- In effect, this dimension is treated as data-dependent (its value is unknown).
Args:
t (Any): The tensor to mark as having an unbacked dimension.
index (int or list/tuple of int): The dimension(s) to mark as unbacked. Can be a single integer or a list/tuple of integers.
hint_override (Optional[int], default=None): An optional integer to override the size hint for this dimension.
This is only used by the inductor backend for size hint queries, such as during autotuning.
strict (bool, default=False): If True, an error will be raised if the unbacked dimension is specialized.
By default (strict=False), specialization is allowed and will proceed without error.
specialize_on (Optional[list[Any]], default=None): A list of specialization criteria (e.g., lambdas) for this dimension.
If provided, Dynamo will generate specialized compiled regions for each criterion in addition to a generic trace.
"""
if torch.distributed.is_available() and isinstance(
t, torch.distributed.tensor.DTensor
):
# apply on inner tensor sizes/strides
mark_unbacked(t._local_tensor, index)
else:
# You could have copied the mark_dynamic behavior but I'm not convinced
# it's what you want
assert not is_traceable_wrapper_subclass(t), "not implemented yet"
if isinstance(index, int):
if strict:
if not hasattr(t, "_dynamo_strict_unbacked_indices"):
# pyrefly: ignore [missing-attribute]
t._dynamo_strict_unbacked_indices = set()
# pyrefly: ignore [missing-attribute]
t._dynamo_strict_unbacked_indices.add(index)
return
if not hasattr(t, "_specialized_on"):
# pyrefly: ignore [missing-attribute]
t._specialize_on = {}
if not hasattr(t, "_dynamo_unbacked_indices"):
# pyrefly: ignore [missing-attribute]
t._dynamo_unbacked_indices = set()
if not hasattr(t, "_dynamo_hint_overrides"):
# pyrefly: ignore [missing-attribute]
t._dynamo_hint_overrides = {}
if hint_override:
# pyrefly: ignore [missing-attribute]
t._dynamo_hint_overrides[index] = hint_override
# FX tracers don't respect @forbid_in_graph and choke on the following error since it passes in proxies:
# TypeError: 'Attribute' object does not support item assignment
# pyrefly: ignore [missing-attribute]
if isinstance(t._specialize_on, dict):
# pyrefly: ignore [missing-attribute]
t._specialize_on[index] = specialize_on if specialize_on is not None else []
# pyrefly: ignore [missing-attribute]
t._dynamo_unbacked_indices.add(index)
return
assert isinstance(index, (list, tuple))
for i in index:
mark_unbacked(t, i)
|
Mark a tensor as having an unbacked dimension. This changes the semantics of operations:
- The size of the specified dimension will always be reported as not equal to zero or one.
- Assertions on this index will be turned into runtime asserts.
- Attempting to get the real value of this dimension will raise an exception.
- In effect, this dimension is treated as data-dependent (its value is unknown).
Args:
t (Any): The tensor to mark as having an unbacked dimension.
index (int or list/tuple of int): The dimension(s) to mark as unbacked. Can be a single integer or a list/tuple of integers.
hint_override (Optional[int], default=None): An optional integer to override the size hint for this dimension.
This is only used by the inductor backend for size hint queries, such as during autotuning.
strict (bool, default=False): If True, an error will be raised if the unbacked dimension is specialized.
By default (strict=False), specialization is allowed and will proceed without error.
specialize_on (Optional[list[Any]], default=None): A list of specialization criteria (e.g., lambdas) for this dimension.
If provided, Dynamo will generate specialized compiled regions for each criterion in addition to a generic trace.
|
python
|
torch/_dynamo/decorators.py
| 554
|
[
"t",
"index",
"hint_override",
"strict",
"specialize_on"
] |
None
| true
| 14
| 6.8
|
pytorch/pytorch
| 96,034
|
google
| false
|
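A minimal sketch of how the decorator is typically applied before compiling; it assumes mark_unbacked is re-exported as torch._dynamo.mark_unbacked, and the function and shapes are illustrative:

import torch
import torch._dynamo as dynamo

def f(x):
    return x.sum(dim=0)

x = torch.randn(4, 8)
dynamo.mark_unbacked(x, 0)      # dim 0 is now treated as data-dependent (never 0 or 1)
compiled = torch.compile(f)
print(compiled(x).shape)        # torch.Size([8])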
pluckHooks
|
function pluckHooks({
globalPreload,
initialize,
resolve,
load,
}) {
const acceptedHooks = { __proto__: null };
if (resolve) {
acceptedHooks.resolve = resolve;
}
if (load) {
acceptedHooks.load = load;
}
if (initialize) {
acceptedHooks.initialize = initialize;
} else if (globalPreload && !globalPreloadWarningWasEmitted) {
process.emitWarning(
'`globalPreload` has been removed; use `initialize` instead.',
'UnsupportedWarning',
);
globalPreloadWarningWasEmitted = true;
}
return acceptedHooks;
}
|
A utility function to pluck the hooks from a user-defined loader.
@param {import('./loader.js').ModuleExports} exports
@returns {ExportedHooks}
|
javascript
|
lib/internal/modules/esm/hooks.js
| 678
|
[] | false
| 7
| 6.64
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
createContributors
|
private ConfigDataEnvironmentContributors createContributors(Binder binder) {
this.logger.trace("Building config data environment contributors");
MutablePropertySources propertySources = this.environment.getPropertySources();
List<ConfigDataEnvironmentContributor> contributors = new ArrayList<>(propertySources.size() + 10);
PropertySource<?> defaultPropertySource = null;
for (PropertySource<?> propertySource : propertySources) {
if (DefaultPropertiesPropertySource.hasMatchingName(propertySource)) {
defaultPropertySource = propertySource;
}
else {
this.logger.trace(LogMessage.format("Creating wrapped config data contributor for '%s'",
propertySource.getName()));
contributors.add(ConfigDataEnvironmentContributor.ofExisting(propertySource,
this.environment.getConversionService()));
}
}
contributors.addAll(getInitialImportContributors(binder));
if (defaultPropertySource != null) {
this.logger.trace("Creating wrapped config data contributor for default property source");
contributors.add(ConfigDataEnvironmentContributor.ofExisting(defaultPropertySource,
this.environment.getConversionService()));
}
return createContributors(contributors);
}
|
Create the initial set of {@link ConfigDataEnvironmentContributor} instances by
wrapping each existing {@link PropertySource} from the {@link Environment} and
adding the initial import contributors resolved from the given {@link Binder}.
The default properties property source, if present, is wrapped last.
@param binder the binder used to resolve the initial import contributors
@return the root {@link ConfigDataEnvironmentContributors} instance
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataEnvironment.java
| 165
|
[
"binder"
] |
ConfigDataEnvironmentContributors
| true
| 3
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
convertToString
|
public static String convertToString(Object parsedValue, Type type) {
if (parsedValue == null) {
return null;
}
if (type == null) {
return parsedValue.toString();
}
switch (type) {
case BOOLEAN:
case SHORT:
case INT:
case LONG:
case DOUBLE:
case STRING:
case PASSWORD:
return parsedValue.toString();
case LIST:
List<?> valueList = (List<?>) parsedValue;
return valueList.stream().map(Object::toString).collect(Collectors.joining(","));
case CLASS:
Class<?> clazz = (Class<?>) parsedValue;
return clazz.getName();
default:
throw new IllegalStateException("Unknown type.");
}
}
|
Convert a parsed value back to its string representation according to its expected type.
@param parsedValue The parsed value
@param type The expected type, or null to fall back to {@code toString()}
@return The string representation of the value, or null if the value is null
|
java
|
clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
| 793
|
[
"parsedValue",
"type"
] |
String
| true
| 3
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
requiresKnownMemberId
|
public static boolean requiresKnownMemberId(int apiVersion) {
return apiVersion >= 4;
}
|
Since JoinGroupRequest version 4, a client that sends a join group request with
{@link #UNKNOWN_MEMBER_ID} needs to rejoin with a new member id generated
by the server. Once the second join group request is complete, the client is
added as a new member of the group.
Prior to version 4, a client is immediately added as a new member if it sends a
join group request with UNKNOWN_MEMBER_ID.
@param apiVersion The JoinGroupRequest api version.
@return whether a known member id is required or not.
|
java
|
clients/src/main/java/org/apache/kafka/common/requests/JoinGroupRequest.java
| 99
|
[
"apiVersion"
] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
right
|
public static <L, R> Pair<L, R> right(final R right) {
return of(null, right);
}
|
Creates an immutable pair whose left element is null, inferring the generic types.
@param <L> the left element type.
@param <R> the right element type.
@param right the right element, may be null.
@return an immutable pair formed from the given right element, not null.
@since 3.11
|
java
|
src/main/java/org/apache/commons/lang3/tuple/ImmutablePair.java
| 146
|
[
"right"
] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
lint_config
|
def lint_config(args) -> None:
"""
Lint the airflow.cfg file for removed or renamed configurations.
This function scans the Airflow configuration file for parameters that are removed or renamed in
Airflow 3.0. It provides suggestions for alternative parameters or settings where applicable.
CLI Arguments:
--section: str (optional)
The specific section of the configuration to lint.
Example: --section core
--option: str (optional)
The specific option within a section to lint.
Example: --option check_slas
--ignore-section: str (optional)
A section to ignore during linting.
Example: --ignore-section webserver
--ignore-option: str (optional)
An option to ignore during linting.
Example: --ignore-option smtp_user
--verbose: flag (optional)
Enables detailed output, including the list of ignored sections and options.
Example: --verbose
Examples:
1. Lint all sections and options:
airflow config lint
2. Lint a specific section:
airflow config lint --section core,webserver
3. Lint specific sections and options:
airflow config lint --section smtp --option smtp_user
4. Ignore a section:
airflow config lint --ignore-section webserver,api
5. Ignore options:
airflow config lint --ignore-option smtp_user,session_lifetime_days
6. Enable verbose output:
airflow config lint --verbose
:param args: The CLI arguments for linting configurations.
"""
console = AirflowConsole()
lint_issues = []
section_to_check_if_provided = args.section or []
option_to_check_if_provided = args.option or []
ignore_sections = args.ignore_section or []
ignore_options = args.ignore_option or []
for configuration in CONFIGS_CHANGES:
if section_to_check_if_provided and configuration.config.section not in section_to_check_if_provided:
continue
if option_to_check_if_provided and configuration.config.option not in option_to_check_if_provided:
continue
if configuration.config.section in ignore_sections or configuration.config.option in ignore_options:
continue
if conf.has_option(
configuration.config.section, configuration.config.option, lookup_from_deprecated=False
):
if configuration.message is not None:
lint_issues.append(configuration.message)
if lint_issues:
console.print("[red]Found issues in your airflow.cfg:[/red]")
for issue in lint_issues:
console.print(f" - [yellow]{issue}[/yellow]")
if args.verbose:
console.print("\n[blue]Detailed Information:[/blue]")
console.print(f"Ignored sections: [green]{', '.join(ignore_sections)}[/green]")
console.print(f"Ignored options: [green]{', '.join(ignore_options)}[/green]")
console.print("\n[red]Please update your configuration file accordingly.[/red]")
else:
console.print("[green]No issues found in your airflow.cfg. It is ready for Airflow 3![/green]")
|
Lint the airflow.cfg file for removed or renamed configurations.
This function scans the Airflow configuration file for parameters that are removed or renamed in
Airflow 3.0. It provides suggestions for alternative parameters or settings where applicable.
CLI Arguments:
--section: str (optional)
The specific section of the configuration to lint.
Example: --section core
--option: str (optional)
The specific option within a section to lint.
Example: --option check_slas
--ignore-section: str (optional)
A section to ignore during linting.
Example: --ignore-section webserver
--ignore-option: str (optional)
An option to ignore during linting.
Example: --ignore-option smtp_user
--verbose: flag (optional)
Enables detailed output, including the list of ignored sections and options.
Example: --verbose
Examples:
1. Lint all sections and options:
airflow config lint
2. Lint a specific section:
airflow config lint --section core,webserver
3. Lint specific sections and options:
airflow config lint --section smtp --option smtp_user
4. Ignore a section:
airflow config lint --ignore-section webserver,api
5. Ignore options:
airflow config lint --ignore-option smtp_user,session_lifetime_days
6. Enable verbose output:
airflow config lint --verbose
:param args: The CLI arguments for linting configurations.
|
python
|
airflow-core/src/airflow/cli/commands/config_command.py
| 825
|
[
"args"
] |
None
| true
| 18
| 6.56
|
apache/airflow
| 43,597
|
sphinx
| false
|
generate_run_id
|
def generate_run_id(
*, run_type: DagRunType, logical_date: datetime | None = None, run_after: datetime
) -> str:
"""
Generate Run ID based on Run Type, run_after and logical Date.
:param run_type: type of DagRun
:param logical_date: the logical date
:param run_after: the date before which the dag run won't start.
"""
# _Ensure_ run_type is a DagRunType, not just a string from user code
if logical_date:
return DagRunType(run_type).generate_run_id(suffix=run_after.isoformat())
return DagRunType(run_type).generate_run_id(suffix=f"{run_after.isoformat()}_{get_random_string()}")
|
Generate Run ID based on Run Type, run_after and logical Date.
:param run_type: type of DagRun
:param logical_date: the logical date
:param run_after: the date before which the dag run won't start.
|
python
|
airflow-core/src/airflow/models/dagrun.py
| 774
|
[
"run_type",
"logical_date",
"run_after"
] |
str
| true
| 2
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
assertContainsAlias
|
default void assertContainsAlias(@Nullable KeyStore keyStore) {
String alias = getAlias();
if (StringUtils.hasLength(alias) && keyStore != null) {
try {
Assert.state(keyStore.containsAlias(alias),
() -> String.format("Keystore does not contain alias '%s'", alias));
}
catch (KeyStoreException ex) {
throw new IllegalStateException(
String.format("Could not determine if keystore contains alias '%s'", alias), ex);
}
}
}
|
Assert that the alias is contained in the given keystore.
@param keyStore the keystore to check
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/SslBundleKey.java
| 58
|
[
"keyStore"
] |
void
| true
| 4
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
rawValuesAggregates
|
private Aggregates rawValuesAggregates() {
if (valueCount == 0) {
return new Aggregates(0, Double.NaN, Double.NaN);
}
double sum = 0;
double min = Double.MAX_VALUE;
double max = -Double.MAX_VALUE;
for (int i = 0; i < valueCount; i++) {
sum += rawValueBuffer[i];
min = Math.min(min, rawValueBuffer[i]);
max = Math.max(max, rawValueBuffer[i]);
}
return new Aggregates(sum, min, max);
}
|
Computes the sum, minimum and maximum of all raw values currently buffered.
@return the aggregates of the buffered raw values (NaN-based aggregates if no values have been recorded)
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramGenerator.java
| 168
|
[] |
Aggregates
| true
| 3
| 7.44
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
_excel2num
|
def _excel2num(x: str) -> int:
"""
Convert Excel column name like 'AB' to 0-based column index.
Parameters
----------
x : str
The Excel column name to convert to a 0-based column index.
Returns
-------
num : int
The column index corresponding to the name.
Raises
------
ValueError
Part of the Excel column name was invalid.
"""
index = 0
for c in x.upper().strip():
cp = ord(c)
if cp < ord("A") or cp > ord("Z"):
raise ValueError(f"Invalid column name: {x}")
index = index * 26 + cp - ord("A") + 1
return index - 1
|
Convert Excel column name like 'AB' to 0-based column index.
Parameters
----------
x : str
The Excel column name to convert to a 0-based column index.
Returns
-------
num : int
The column index corresponding to the name.
Raises
------
ValueError
Part of the Excel column name was invalid.
|
python
|
pandas/io/excel/_util.py
| 98
|
[
"x"
] |
int
| true
| 4
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
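The conversion is plain base-26 arithmetic with 'A' as digit 1; a quick worked check (importing the private helper from the path shown above is an assumption about the installed pandas version):

from pandas.io.excel._util import _excel2num

print(_excel2num("A"))    # 0
print(_excel2num("Z"))    # 25
print(_excel2num("AA"))   # 26  -> 1*26 + 1, minus 1
print(_excel2num("AB"))   # 27  -> 1*26 + 2, minus 1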
mean
|
def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Returns the average of the array elements along given axis.
Masked entries are ignored, and result elements which are not
finite will be masked.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.ndarray.mean : corresponding function for ndarrays
numpy.mean : Equivalent function
numpy.ma.average : Weighted average.
Examples
--------
>>> import numpy as np
>>> a = np.ma.array([1,2,3], mask=[False, False, True])
>>> a
masked_array(data=[1, 2, --],
mask=[False, False, True],
fill_value=999999)
>>> a.mean()
1.5
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
if self._mask is nomask:
result = super().mean(axis=axis, dtype=dtype, **kwargs)[()]
else:
is_float16_result = False
if dtype is None:
if issubclass(self.dtype.type, (ntypes.integer, ntypes.bool)):
dtype = mu.dtype('f8')
elif issubclass(self.dtype.type, ntypes.float16):
dtype = mu.dtype('f4')
is_float16_result = True
dsum = self.sum(axis=axis, dtype=dtype, **kwargs)
cnt = self.count(axis=axis, **kwargs)
if cnt.shape == () and (cnt == 0):
result = masked
elif is_float16_result:
result = self.dtype.type(dsum * 1. / cnt)
else:
result = dsum * 1. / cnt
if out is not None:
out.flat = result
if isinstance(out, MaskedArray):
outmask = getmask(out)
if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = getmask(result)
return out
return result
|
Returns the average of the array elements along given axis.
Masked entries are ignored, and result elements which are not
finite will be masked.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.ndarray.mean : corresponding function for ndarrays
numpy.mean : Equivalent function
numpy.ma.average : Weighted average.
Examples
--------
>>> import numpy as np
>>> a = np.ma.array([1,2,3], mask=[False, False, True])
>>> a
masked_array(data=[1, 2, --],
mask=[False, False, True],
fill_value=999999)
>>> a.mean()
1.5
|
python
|
numpy/ma/core.py
| 5,374
|
[
"self",
"axis",
"dtype",
"out",
"keepdims"
] | false
| 14
| 6
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
beanOfTypeIncludingAncestors
|
public static <T> T beanOfTypeIncludingAncestors(
ListableBeanFactory lbf, Class<T> type, boolean includeNonSingletons, boolean allowEagerInit)
throws BeansException {
Map<String, T> beansOfType = beansOfTypeIncludingAncestors(lbf, type, includeNonSingletons, allowEagerInit);
return uniqueBean(type, beansOfType);
}
|
Return a single bean of the given type or subtypes, also picking up beans
defined in ancestor bean factories if the current bean factory is a
HierarchicalBeanFactory. Useful convenience method when we expect a
single bean and don't care about the bean name.
<p>Does consider objects created by FactoryBeans if the "allowEagerInit" flag is set,
which means that FactoryBeans will get initialized. If the object created by the
FactoryBean doesn't match, the raw FactoryBean itself will be matched against the
type. If "allowEagerInit" is not set, only raw FactoryBeans will be checked
(which doesn't require initialization of each FactoryBean).
<p><b>Note: Beans of the same name will take precedence at the 'lowest' factory level,
i.e. such beans will be returned from the lowest factory that they are being found in,
hiding corresponding beans in ancestor factories.</b> This feature allows for
'replacing' beans by explicitly choosing the same bean name in a child factory;
the bean in the ancestor factory won't be visible then, not even for by-type lookups.
@param lbf the bean factory
@param type the type of bean to match
@param includeNonSingletons whether to include prototype or scoped beans too
or just singletons (also applies to FactoryBeans)
@param allowEagerInit whether to initialize <i>lazy-init singletons</i> and
<i>objects created by FactoryBeans</i> (or by factory methods with a
"factory-bean" reference) for the type check. Note that FactoryBeans need to be
eagerly initialized to determine their type: So be aware that passing in "true"
for this flag will initialize FactoryBeans and "factory-bean" references.
@return the matching bean instance
@throws NoSuchBeanDefinitionException if no bean of the given type was found
@throws NoUniqueBeanDefinitionException if more than one bean of the given type was found
@throws BeansException if the bean could not be created
@see #beansOfTypeIncludingAncestors(ListableBeanFactory, Class, boolean, boolean)
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/BeanFactoryUtils.java
| 447
|
[
"lbf",
"type",
"includeNonSingletons",
"allowEagerInit"
] |
T
| true
| 1
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
deleteAll
|
public StrBuilder deleteAll(final String str) {
final int len = StringUtils.length(str);
if (len > 0) {
int index = indexOf(str, 0);
while (index >= 0) {
deleteImpl(index, index + len, len);
index = indexOf(str, index);
}
}
return this;
}
|
Deletes the string wherever it occurs in the builder.
@param str the string to delete, null causes no action
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,701
|
[
"str"
] |
StrBuilder
| true
| 3
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
_maybe_mask_results
|
def _maybe_mask_results(
self, result: np.ndarray, fill_value=iNaT, convert=None
) -> np.ndarray:
"""
Parameters
----------
result : np.ndarray
fill_value : object, default iNaT
convert : str, dtype or None
Returns
-------
result : ndarray with values replaced by the fill_value
mask the result if needed, convert to the provided dtype if it is not
None
This is an internal routine.
"""
if self._hasna:
if convert:
result = result.astype(convert)
if fill_value is None:
fill_value = np.nan
np.putmask(result, self._isnan, fill_value)
return result
|
Parameters
----------
result : np.ndarray
fill_value : object, default iNaT
convert : str, dtype or None
Returns
-------
result : ndarray with values replaced by the fill_value
mask the result if needed, convert to the provided dtype if it is not
None
This is an internal routine.
|
python
|
pandas/core/arrays/datetimelike.py
| 839
|
[
"self",
"result",
"fill_value",
"convert"
] |
np.ndarray
| true
| 4
| 6.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
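The masking step boils down to a dtype conversion followed by np.putmask over the NaT positions; a standalone sketch with hypothetical inputs, not pandas internals:

import numpy as np

result = np.array([10, 20, 30], dtype=np.int64)
isnan_mask = np.array([False, True, False])    # positions that held NaT

# Equivalent of _maybe_mask_results(result, fill_value=np.nan, convert="float64"):
out = result.astype("float64")                 # convert first, since int64 cannot hold NaN
np.putmask(out, isnan_mask, np.nan)
print(out)                                     # [10. nan 30.]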
visitSourceFile
|
function visitSourceFile(node: SourceFile): SourceFile {
const ancestorFacts = enterSubtree(HierarchyFacts.SourceFileExcludes, HierarchyFacts.SourceFileIncludes);
const prologue: Statement[] = [];
const statements: Statement[] = [];
startLexicalEnvironment();
const statementOffset = factory.copyPrologue(node.statements, prologue, /*ensureUseStrict*/ false, visitor);
addRange(statements, visitNodes(node.statements, visitor, isStatement, statementOffset));
if (taggedTemplateStringDeclarations) {
statements.push(
factory.createVariableStatement(/*modifiers*/ undefined, factory.createVariableDeclarationList(taggedTemplateStringDeclarations)),
);
}
factory.mergeLexicalEnvironment(prologue, endLexicalEnvironment());
insertCaptureThisForNodeIfNeeded(prologue, node);
exitSubtree(ancestorFacts, HierarchyFacts.None, HierarchyFacts.None);
return factory.updateSourceFile(
node,
setTextRange(factory.createNodeArray(concatenate(prologue, statements)), node.statements),
);
}
|
Visits a `SourceFile`, copying its prologue directives, transforming its statements,
hoisting any tagged template string declarations, and merging the captured lexical
environment back into the updated file.
@param node The `SourceFile` to visit.
|
typescript
|
src/compiler/transformers/es2015.ts
| 781
|
[
"node"
] | true
| 2
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
get
|
def get(self, key: str):
"""
Retrieve pandas object stored in file.
Parameters
----------
key : str
Object to retrieve from file. Raises KeyError if not found.
Returns
-------
object
Same type as object stored in file.
See Also
--------
HDFStore.get_node : Returns the node with the key.
HDFStore.get_storer : Returns the storer object for a key.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
>>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
>>> store.put("data", df) # doctest: +SKIP
>>> store.get("data") # doctest: +SKIP
>>> store.close() # doctest: +SKIP
"""
with patch_pickle():
# GH#31167 Without this patch, pickle doesn't know how to unpickle
# old DateOffset objects now that they are cdef classes.
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
return self._read_group(group)
|
Retrieve pandas object stored in file.
Parameters
----------
key : str
Object to retrieve from file. Raises KeyError if not found.
Returns
-------
object
Same type as object stored in file.
See Also
--------
HDFStore.get_node : Returns the node with the key.
HDFStore.get_storer : Returns the storer object for a key.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
>>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
>>> store.put("data", df) # doctest: +SKIP
>>> store.get("data") # doctest: +SKIP
>>> store.close() # doctest: +SKIP
|
python
|
pandas/io/pytables.py
| 804
|
[
"self",
"key"
] | true
| 2
| 8.64
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
_compute_gram
|
def _compute_gram(self, X, sqrt_sw):
"""Computes the Gram matrix XX^T with possible centering.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The preprocessed design matrix.
sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
gram : ndarray of shape (n_samples, n_samples)
The Gram matrix.
X_mean : ndarray of shape (n_features,)
The weighted mean of ``X`` for each feature.
Notes
-----
When X is dense the centering has been done in preprocessing
so the mean is 0 and we just compute XX^T.
When X is sparse it has not been centered in preprocessing, but it has
been scaled by sqrt(sample weights).
When self.fit_intercept is False no centering is done.
The centered X is never actually computed because centering would break
the sparsity of X.
"""
xp, _ = get_namespace(X)
center = self.fit_intercept and sparse.issparse(X)
if not center:
# in this case centering has been done in preprocessing
# or we are not fitting an intercept.
X_mean = xp.zeros(X.shape[1], dtype=X.dtype)
return safe_sparse_dot(X, X.T, dense_output=True), X_mean
# X is sparse
n_samples = X.shape[0]
sample_weight_matrix = sparse.dia_matrix(
(sqrt_sw, 0), shape=(n_samples, n_samples)
)
X_weighted = sample_weight_matrix.dot(X)
X_mean, _ = mean_variance_axis(X_weighted, axis=0)
X_mean *= n_samples / sqrt_sw.dot(sqrt_sw)
X_mX = sqrt_sw[:, None] * safe_sparse_dot(X_mean, X.T, dense_output=True)
X_mX_m = np.outer(sqrt_sw, sqrt_sw) * np.dot(X_mean, X_mean)
return (
safe_sparse_dot(X, X.T, dense_output=True) + X_mX_m - X_mX - X_mX.T,
X_mean,
)
|
Computes the Gram matrix XX^T with possible centering.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The preprocessed design matrix.
sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
gram : ndarray of shape (n_samples, n_samples)
The Gram matrix.
X_mean : ndarray of shape (n_features,)
The weighted mean of ``X`` for each feature.
Notes
-----
When X is dense the centering has been done in preprocessing
so the mean is 0 and we just compute XX^T.
When X is sparse it has not been centered in preprocessing, but it has
been scaled by sqrt(sample weights).
When self.fit_intercept is False no centering is done.
The centered X is never actually computed because centering would break
the sparsity of X.
|
python
|
sklearn/linear_model/_ridge.py
| 1,831
|
[
"self",
"X",
"sqrt_sw"
] | false
| 3
| 6.24
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
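The sparse branch expands the centered Gram matrix algebraically instead of materializing the centered X; a small dense check of that identity with unit sample weights (illustrative data, not sklearn internals):

import numpy as np

rng = np.random.RandomState(0)
X = rng.rand(5, 3)
m = X.mean(axis=0)                        # weighted mean with unit weights

direct = (X - m) @ (X - m).T              # Gram of the explicitly centered X

# Expansion used above: X X^T + (m.m) 1 1^T - 1 (m^T X^T) - (X m) 1^T
X_mX = np.ones(5)[:, None] * (m @ X.T)    # entry (i, j) = m . x_j
X_mX_m = np.outer(np.ones(5), np.ones(5)) * np.dot(m, m)
expanded = X @ X.T + X_mX_m - X_mX - X_mX.T

print(np.allclose(direct, expanded))      # True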
communityId
|
public static String communityId(
String sourceIpAddrString,
String destIpAddrString,
Object ianaNumber,
Object transport,
Object sourcePort,
Object destinationPort,
Object icmpType,
Object icmpCode,
int seed
) {
return CommunityIdProcessor.apply(
sourceIpAddrString,
destIpAddrString,
ianaNumber,
transport,
sourcePort,
destinationPort,
icmpType,
icmpCode,
seed
);
}
|
Uses {@link CommunityIdProcessor} to compute community ID for network flow data.
@param sourceIpAddrString source IP address
@param destIpAddrString destination IP address
@param ianaNumber IANA number
@param transport transport protocol
@param sourcePort source port
@param destinationPort destination port
@param icmpType ICMP type
@param icmpCode ICMP code
@param seed hash seed (must be between 0 and 65535)
@return Community ID
|
java
|
modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/Processors.java
| 130
|
[
"sourceIpAddrString",
"destIpAddrString",
"ianaNumber",
"transport",
"sourcePort",
"destinationPort",
"icmpType",
"icmpCode",
"seed"
] |
String
| true
| 1
| 6.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
getLogLevelConfigurer
|
private BiConsumer<String, @Nullable LogLevel> getLogLevelConfigurer(LoggingSystem system) {
return (name, level) -> {
try {
name = name.equalsIgnoreCase(LoggingSystem.ROOT_LOGGER_NAME) ? null : name;
system.setLogLevel(name, level);
}
catch (RuntimeException ex) {
this.logger.error(LogMessage.format("Cannot set level '%s' for '%s'", level, name));
}
};
}
|
Return a configurer that applies a single log level to the given {@link LoggingSystem},
mapping the root logger name to {@code null} and logging an error if the level cannot
be set.
@param system the logging system
@return the log level configurer
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/logging/LoggingApplicationListener.java
| 413
|
[
"system"
] | true
| 3
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
chebder
|
def chebder(c, m=1, scl=1, axis=0):
"""
Differentiate a Chebyshev series.
Returns the Chebyshev series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2``
while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) +
2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Chebyshev series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
Returns
-------
der : ndarray
Chebyshev series of the derivative.
See Also
--------
chebint
Notes
-----
In general, the result of differentiating a C-series needs to be
"reprojected" onto the C-series basis set. Thus, typically, the
result of this function is "unintuitive," albeit correct; see Examples
section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c = (1,2,3,4)
>>> C.chebder(c)
array([14., 12., 24.])
>>> C.chebder(c,3)
array([96.])
>>> C.chebder(c,scl=-1)
array([-14., -12., -24.])
>>> C.chebder(c,2,-1)
array([12., 96.])
"""
c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt = pu._as_int(m, "the order of derivation")
iaxis = pu._as_int(axis, "the axis")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
return c
c = np.moveaxis(c, iaxis, 0)
n = len(c)
if cnt >= n:
c = c[:1] * 0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 2, -1):
der[j - 1] = (2 * j) * c[j]
c[j - 2] += (j * c[j]) / (j - 2)
if n > 1:
der[1] = 4 * c[2]
der[0] = c[1]
c = der
c = np.moveaxis(c, 0, iaxis)
return c
|
Differentiate a Chebyshev series.
Returns the Chebyshev series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2``
while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) +
2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Chebyshev series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
Returns
-------
der : ndarray
Chebyshev series of the derivative.
See Also
--------
chebint
Notes
-----
In general, the result of differentiating a C-series needs to be
"reprojected" onto the C-series basis set. Thus, typically, the
result of this function is "unintuitive," albeit correct; see Examples
section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c = (1,2,3,4)
>>> C.chebder(c)
array([14., 12., 24.])
>>> C.chebder(c,3)
array([96.])
>>> C.chebder(c,scl=-1)
array([-14., -12., -24.])
>>> C.chebder(c,2,-1)
array([12., 96.])
|
python
|
numpy/polynomial/chebyshev.py
| 872
|
[
"c",
"m",
"scl",
"axis"
] | false
| 9
| 7.44
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
postProcessReconnectBackoffConfigs
|
public static Map<String, Object> postProcessReconnectBackoffConfigs(AbstractConfig config,
Map<String, Object> parsedValues) {
HashMap<String, Object> rval = new HashMap<>();
Map<String, Object> originalConfig = config.originals();
if ((!originalConfig.containsKey(RECONNECT_BACKOFF_MAX_MS_CONFIG)) &&
originalConfig.containsKey(RECONNECT_BACKOFF_MS_CONFIG)) {
log.warn("Disabling exponential reconnect backoff because {} is set, but {} is not.",
RECONNECT_BACKOFF_MS_CONFIG, RECONNECT_BACKOFF_MAX_MS_CONFIG);
rval.put(RECONNECT_BACKOFF_MAX_MS_CONFIG, parsedValues.get(RECONNECT_BACKOFF_MS_CONFIG));
}
return rval;
}
|
Postprocess the configuration so that exponential backoff is disabled when reconnect backoff
is explicitly configured but the maximum reconnect backoff is not explicitly configured.
@param config The config object.
@param parsedValues The parsedValues as provided to postProcessParsedConfig.
@return The new values which have been set as described in postProcessParsedConfig.
|
java
|
clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java
| 259
|
[
"config",
"parsedValues"
] | true
| 3
| 7.44
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
flag
|
static Option flag(String name, String description) {
return new Option(name, null, description, false);
}
|
Factory method to create a flag/switch option.
@param name the name of the option
@param description a description of the option
@return a new {@link Option} instance
|
java
|
loader/spring-boot-jarmode-tools/src/main/java/org/springframework/boot/jarmode/tools/Command.java
| 358
|
[
"name",
"description"
] |
Option
| true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
stringToArray
|
function stringToArray(string) {
return hasUnicode(string)
? unicodeToArray(string)
: asciiToArray(string);
}
|
Converts `string` to an array.
@private
@param {string} string The string to convert.
@returns {Array} Returns the converted array.
|
javascript
|
lodash.js
| 1,350
|
[
"string"
] | false
| 2
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
getLast
|
@ParametricNullness
public static <T extends @Nullable Object> T getLast(Iterator<T> iterator) {
while (true) {
T current = iterator.next();
if (!iterator.hasNext()) {
return current;
}
}
}
|
Advances {@code iterator} to the end, returning the last element.
@return the last element of {@code iterator}
@throws NoSuchElementException if the iterator is empty
|
java
|
android/guava/src/com/google/common/collect/Iterators.java
| 903
|
[
"iterator"
] |
T
| true
| 3
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
truePredicate
|
@SuppressWarnings("unchecked")
// method name cannot be "true".
public static <T> Predicate<T> truePredicate() {
return (Predicate<T>) TRUE;
}
|
Gets the Predicate singleton that always returns true.
@param <T> the type of the input to the predicate.
@return the Predicate singleton.
|
java
|
src/main/java/org/apache/commons/lang3/function/Predicates.java
| 50
|
[] | true
| 1
| 7.2
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getPropertyResolver
|
private PropertyResolver getPropertyResolver() {
if (this.environment instanceof ConfigurableEnvironment configurableEnvironment) {
PropertySourcesPropertyResolver resolver = new PropertySourcesPropertyResolver(
configurableEnvironment.getPropertySources());
resolver.setConversionService(configurableEnvironment.getConversionService());
resolver.setIgnoreUnresolvableNestedPlaceholders(true);
return resolver;
}
return this.environment;
}
|
Returns the {@link PropertyResolver} to use, resolving against the environment's
property sources (with unresolvable nested placeholders ignored) when the
environment is a {@link ConfigurableEnvironment}.
@return the {@link PropertyResolver} to use
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/LoggingSystemProperties.java
| 116
|
[] |
PropertyResolver
| true
| 2
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
generateValueCode
|
private CodeBlock generateValueCode() {
if (this.warnings.size() == 1) {
return CodeBlock.of("$S", this.warnings.iterator().next());
}
CodeBlock values = CodeBlock.join(this.warnings.stream()
.map(warning -> CodeBlock.of("$S", warning)).toList(), ", ");
return CodeBlock.of("{ $L }", values);
}
|
Generate the code block holding the registered warnings: a single string when only
one warning is registered, an array of strings otherwise.
@return the generated {@link CodeBlock}
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/CodeWarnings.java
| 163
|
[] |
CodeBlock
| true
| 2
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
registrySuffix
|
public @Nullable InternetDomainName registrySuffix() {
return hasRegistrySuffix() ? ancestor(registrySuffixIndex()) : null;
}
|
Returns the {@linkplain #isRegistrySuffix() registry suffix} portion of the domain name, or
{@code null} if no registry suffix is present.
@since 23.3
|
java
|
android/guava/src/com/google/common/net/InternetDomainName.java
| 516
|
[] |
InternetDomainName
| true
| 2
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
_get_intervals
|
def _get_intervals(
self, event: PGEvent
) -> tuple[Optional[tuple[int, int]], list[tuple[int, int]]]:
"""Get (execution_interval, hiding_intervals) for a collective event.
Returns:
(execution_interval, hiding_intervals) where:
- execution_interval is (start_pos, wait_pos) or None
- hiding_intervals is a list of (start_pos, compute_pos) tuples, one for each hiding node
Works for both start and wait events by looking up the collective info.
"""
# For start events, directly use the node
if event.is_start:
coll = event.node
# For wait events, look up the start node from the event's args
elif event.is_wait:
wait_input = event.node.args[0]
if not isinstance(wait_input, fx.Node):
return None, []
coll = wait_input
else:
return None, []
if coll not in self.collective_info:
return None, []
info = self.collective_info[coll]
start_event = self.node_to_event[coll]
wait_event = self.node_to_event[info.wait_node]
execution_interval = (start_event.position, wait_event.position)
hiding_intervals = []
if info.hiding_nodes:
for hiding_node in info.hiding_nodes:
hiding_intervals.append(
(
start_event.position,
self.node_to_event[hiding_node].position,
)
)
return execution_interval, hiding_intervals
|
Get (execution_interval, hiding_intervals) for a collective event.
Returns:
(execution_interval, hiding_intervals) where:
- execution_interval is (start_pos, wait_pos) or None
- hiding_intervals is a list of (start_pos, compute_pos) tuples, one for each hiding node
Works for both start and wait events by looking up the collective info.
|
python
|
torch/_inductor/fx_passes/overlap_preserving_bucketer.py
| 475
|
[
"self",
"event"
] |
tuple[Optional[tuple[int, int]], list[tuple[int, int]]]
| true
| 8
| 6.24
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
pollOnClose
|
@Override
public PollResult pollOnClose(long currentTimeMs) {
if (membershipManager().isLeavingGroup()) {
NetworkClientDelegate.UnsentRequest request = makeHeartbeatRequest(currentTimeMs, true);
return new NetworkClientDelegate.PollResult(heartbeatRequestState.heartbeatIntervalMs(), Collections.singletonList(request));
}
return EMPTY;
}
|
Generate a heartbeat request to leave the group if the state is still LEAVING when this is
called to close the consumer.
<p/>
Note that when closing the consumer, even though an event to Unsubscribe is generated
(triggers callbacks and sends leave group), it could be the case that the Unsubscribe event
processing does not complete in time and moves on to close the managers (ex. calls to
close with zero timeout). So we could end up on this pollOnClose with the member in
{@link MemberState#PREPARE_LEAVING} (ex. app thread did not have the time to process the
event to execute callbacks), or {@link MemberState#LEAVING} (ex. the leave request could
not be sent due to coordinator not available at that time). In all cases, the pollOnClose
will be triggered right before sending the final requests, so we ensure that we generate
the request to leave if needed.
@param currentTimeMs The current system time in milliseconds at which the method was called
@return PollResult containing the request to send
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractHeartbeatRequestManager.java
| 230
|
[
"currentTimeMs"
] |
PollResult
| true
| 2
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
findAutowiringMetadata
|
private InjectionMetadata findAutowiringMetadata(String beanName, Class<?> clazz, @Nullable PropertyValues pvs) {
// Fall back to class name as cache key, for backwards compatibility with custom callers.
String cacheKey = (StringUtils.hasLength(beanName) ? beanName : clazz.getName());
// Quick check on the concurrent map first, with minimal locking.
InjectionMetadata metadata = this.injectionMetadataCache.get(cacheKey);
if (InjectionMetadata.needsRefresh(metadata, clazz)) {
synchronized (this.injectionMetadataCache) {
metadata = this.injectionMetadataCache.get(cacheKey);
if (InjectionMetadata.needsRefresh(metadata, clazz)) {
if (metadata != null) {
metadata.clear(pvs);
}
metadata = buildAutowiringMetadata(clazz);
this.injectionMetadataCache.put(cacheKey, metadata);
}
}
}
return metadata;
}
|
<em>Native</em> processing method for direct calls with an arbitrary target
instance, resolving all of its fields and methods which are annotated with
one of the configured 'autowired' annotation types.
@param bean the target instance to process
@throws BeanCreationException if autowiring failed
@see #setAutowiredAnnotationTypes(Set)
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/annotation/AutowiredAnnotationBeanPostProcessor.java
| 526
|
[
"beanName",
"clazz",
"pvs"
] |
InjectionMetadata
| true
| 5
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
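The `findAutowiringMetadata` entry above is a double-checked locking cache: a lock-free read from the shared map first, then a re-check under the lock before building and storing. A language-agnostic sketch of that pattern in Python, with `build_metadata` as a hypothetical stand-in for `buildAutowiringMetadata`:

import threading

# Hypothetical stand-in for the expensive, idempotent metadata build step.
def build_metadata(key: str) -> dict:
    return {"key": key, "injection_points": []}

_cache: dict = {}
_lock = threading.Lock()

def find_metadata(key: str) -> dict:
    # Quick check on the shared map first, with no locking.
    metadata = _cache.get(key)
    if metadata is None:
        with _lock:
            # Re-check inside the lock: another thread may have built and
            # stored the metadata while we were waiting.
            metadata = _cache.get(key)
            if metadata is None:
                metadata = build_metadata(key)
                _cache[key] = metadata
    return metadata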
transform
|
def transform(self, X, copy=None):
"""Binarize each element of X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
copy : bool
Copy the input X or not.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
copy = copy if copy is not None else self.copy
# TODO: This should be refactored because binarize also calls
# check_array
X = validate_data(
self,
X,
accept_sparse=["csr", "csc"],
force_writeable=True,
copy=copy,
reset=False,
)
return binarize(X, threshold=self.threshold, copy=False)
|
Binarize each element of X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
copy : bool
Copy the input X or not.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
|
python
|
sklearn/preprocessing/_data.py
| 2,381
|
[
"self",
"X",
"copy"
] | false
| 2
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
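A small usage sketch for the `Binarizer.transform` entry above; the threshold and input values are illustrative, assuming a current scikit-learn where `Binarizer` lives in `sklearn.preprocessing`:

import numpy as np
from sklearn.preprocessing import Binarizer

X = np.array([[1.0, -0.5, 2.0],
              [0.0,  0.3, -1.0]])

# Values strictly greater than the threshold map to 1, everything else to 0.
binarizer = Binarizer(threshold=0.2)
print(binarizer.fit_transform(X))
# [[1. 0. 1.]
#  [0. 1. 0.]]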
decodeMsDosFormatDateTime
|
private long decodeMsDosFormatDateTime(short date, short time) {
int year = getChronoValue(((date >> 9) & 0x7f) + 1980, ChronoField.YEAR);
int month = getChronoValue((date >> 5) & 0x0f, ChronoField.MONTH_OF_YEAR);
int day = getChronoValue(date & 0x1f, ChronoField.DAY_OF_MONTH);
int hour = getChronoValue((time >> 11) & 0x1f, ChronoField.HOUR_OF_DAY);
int minute = getChronoValue((time >> 5) & 0x3f, ChronoField.MINUTE_OF_HOUR);
int second = getChronoValue((time << 1) & 0x3e, ChronoField.SECOND_OF_MINUTE);
return ZonedDateTime.of(year, month, day, hour, minute, second, 0, ZoneId.systemDefault())
.toInstant()
.truncatedTo(ChronoUnit.SECONDS)
.toEpochMilli();
}
|
Decode MS-DOS Date Time details. See <a href=
"https://docs.microsoft.com/en-gb/windows/desktop/api/winbase/nf-winbase-dosdatetimetofiletime">
Microsoft's documentation</a> for more details of the format.
@param date the date
@param time the time
@return the date and time as milliseconds since the epoch
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipCentralDirectoryFileHeaderRecord.java
| 114
|
[
"date",
"time"
] | true
| 1
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
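The bit layout decoded in the `decodeMsDosFormatDateTime` entry above is language-agnostic, so a short Python sketch can show the same unpacking; the packed sample values are made up for illustration:

from datetime import datetime

def decode_msdos_datetime(date: int, time: int) -> datetime:
    # Date bits: 15-9 year offset from 1980, 8-5 month, 4-0 day.
    # Time bits: 15-11 hours, 10-5 minutes, 4-0 seconds divided by two.
    year = ((date >> 9) & 0x7F) + 1980
    month = (date >> 5) & 0x0F
    day = date & 0x1F
    hour = (time >> 11) & 0x1F
    minute = (time >> 5) & 0x3F
    second = (time & 0x1F) * 2
    return datetime(year, month, day, hour, minute, second)

# 2021-06-15 12:30:10 packed the way a ZIP central directory stores it.
packed_date = ((2021 - 1980) << 9) | (6 << 5) | 15
packed_time = (12 << 11) | (30 << 5) | (10 // 2)
print(decode_msdos_datetime(packed_date, packed_time))  # 2021-06-15 12:30:10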
_block_check_depths_match
|
def _block_check_depths_match(arrays, parent_index=[]):
"""
Recursive function checking that the depths of nested lists in `arrays`
all match. Mismatch raises a ValueError as described in the block
docstring below.
The entire index (rather than just the depth) needs to be calculated
for each innermost list, in case an error needs to be raised, so that
the index of the offending list can be printed as part of the error.
Parameters
----------
arrays : nested list of arrays
The arrays to check
parent_index : list of int
The full index of `arrays` within the nested lists passed to
`_block_check_depths_match` at the top of the recursion.
Returns
-------
first_index : list of int
The full index of an element from the bottom of the nesting in
`arrays`. If any element at the bottom is an empty list, this will
refer to it, and the last index along the empty axis will be None.
max_arr_ndim : int
The maximum of the ndims of the arrays nested in `arrays`.
final_size: int
The number of elements in the final array. This is used the motivate
the choice of algorithm used using benchmarking wisdom.
"""
if isinstance(arrays, tuple):
# not strictly necessary, but saves us from:
# - more than one way to do things - no point treating tuples like
# lists
# - horribly confusing behaviour that results when tuples are
# treated like ndarray
raise TypeError(
f'{_block_format_index(parent_index)} is a tuple. '
'Only lists can be used to arrange blocks, and np.block does '
'not allow implicit conversion from tuple to ndarray.'
)
elif isinstance(arrays, list) and len(arrays) > 0:
idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
for i, arr in enumerate(arrays))
first_index, max_arr_ndim, final_size = next(idxs_ndims)
for index, ndim, size in idxs_ndims:
final_size += size
if ndim > max_arr_ndim:
max_arr_ndim = ndim
if len(index) != len(first_index):
raise ValueError(
"List depths are mismatched. First element was at "
f"depth {len(first_index)}, but there is an element at "
f"depth {len(index)} ({_block_format_index(index)})"
)
# propagate our flag that indicates an empty list at the bottom
if index[-1] is None:
first_index = index
return first_index, max_arr_ndim, final_size
elif isinstance(arrays, list) and len(arrays) == 0:
# We've 'bottomed out' on an empty list
return parent_index + [None], 0, 0
else:
# We've 'bottomed out' - arrays is either a scalar or an array
size = _size(arrays)
return parent_index, _ndim(arrays), size
|
Recursive function checking that the depths of nested lists in `arrays`
all match. Mismatch raises a ValueError as described in the block
docstring below.
The entire index (rather than just the depth) needs to be calculated
for each innermost list, in case an error needs to be raised, so that
the index of the offending list can be printed as part of the error.
Parameters
----------
arrays : nested list of arrays
The arrays to check
parent_index : list of int
The full index of `arrays` within the nested lists passed to
`_block_check_depths_match` at the top of the recursion.
Returns
-------
first_index : list of int
The full index of an element from the bottom of the nesting in
`arrays`. If any element at the bottom is an empty list, this will
refer to it, and the last index along the empty axis will be None.
max_arr_ndim : int
The maximum of the ndims of the arrays nested in `arrays`.
final_size: int
The number of elements in the final array. This is used the motivate
the choice of algorithm used using benchmarking wisdom.
|
python
|
numpy/_core/shape_base.py
| 557
|
[
"arrays",
"parent_index"
] | false
| 11
| 6.16
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
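A brief illustration of the behaviour the `_block_check_depths_match` entry above enforces: `np.block` accepts consistently nested lists and raises the mismatched-depth `ValueError` otherwise. The arrays are arbitrary examples:

import numpy as np

A = np.ones((2, 2))
B = np.zeros((2, 2))

# Consistent nesting depth: a 2x2 block layout assembled into a 4x4 array.
print(np.block([[A, B],
                [B, A]]))

# Mismatched depths trigger the ValueError this helper raises, with the
# index of the offending sub-list included in the message.
try:
    np.block([A, [B]])
except ValueError as exc:
    print(exc)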
appendUncheckedWithOffset
|
public void appendUncheckedWithOffset(long offset, LegacyRecord record) {
ensureOpenForRecordAppend();
try {
int size = record.sizeInBytes();
AbstractLegacyRecordBatch.writeHeader(appendStream, toInnerOffset(offset), size);
ByteBuffer buffer = record.buffer().duplicate();
appendStream.write(buffer.array(), buffer.arrayOffset(), buffer.limit());
recordWritten(offset, record.timestamp(), size + Records.LOG_OVERHEAD);
} catch (IOException e) {
throw new KafkaException("I/O exception when writing to the append stream, closing", e);
}
}
|
Add a legacy record without doing offset/magic validation (this should only be used in testing).
@param offset The offset of the record
@param record The record to add
|
java
|
clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java
| 678
|
[
"offset",
"record"
] |
void
| true
| 2
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
bindJSDocImportTag
|
function bindJSDocImportTag(node: JSDocImportTag) {
// don't bind the importClause yet; that's delayed until bindJSDocImports
bind(node.tagName);
bind(node.moduleSpecifier);
bind(node.attributes);
if (typeof node.comment !== "string") {
bindEach(node.comment);
}
}
|
Declares a Symbol for the node and adds it to symbols. Reports errors for conflicting identifier names.
@param symbolTable - The symbol table which node will be added to.
@param parent - node's parent declaration.
@param node - The declaration to be added to the symbol table
@param includes - The SymbolFlags that node has in addition to its declaration type (eg: export, ambient, etc.)
@param excludes - The flags which node cannot be declared alongside in a symbol table. Used to report forbidden declarations.
|
typescript
|
src/compiler/binder.ts
| 2,133
|
[
"node"
] | false
| 2
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
masked_greater
|
def masked_greater(x, value, copy=True):
"""
Mask an array where greater than a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x > value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_greater(a, 2)
masked_array(data=[0, 1, 2, --],
mask=[False, False, False, True],
fill_value=999999)
"""
return masked_where(greater(x, value), x, copy=copy)
|
Mask an array where greater than a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x > value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_greater(a, 2)
masked_array(data=[0, 1, 2, --],
mask=[False, False, False, True],
fill_value=999999)
|
python
|
numpy/ma/core.py
| 1,997
|
[
"x",
"value",
"copy"
] | false
| 1
| 6.48
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
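The `masked_greater` entry above is documented as a shortcut for `masked_where` with `condition = (x > value)`; a quick check of that equivalence (the input array is arbitrary):

import numpy as np
import numpy.ma as ma

a = np.arange(4)
via_shortcut = ma.masked_greater(a, 2)
via_masked_where = ma.masked_where(a > 2, a)

# Both forms hide exactly the elements greater than 2.
assert (via_shortcut.mask == via_masked_where.mask).all()
print(via_shortcut)  # [0 1 2 --]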
isReusableClassMember
|
function isReusableClassMember(node: Node) {
if (node) {
switch (node.kind) {
case SyntaxKind.Constructor:
case SyntaxKind.IndexSignature:
case SyntaxKind.GetAccessor:
case SyntaxKind.SetAccessor:
case SyntaxKind.PropertyDeclaration:
case SyntaxKind.SemicolonClassElement:
return true;
case SyntaxKind.MethodDeclaration:
// Method declarations are not necessarily reusable. An object-literal
// may have a method calls "constructor(...)" and we must reparse that
// into an actual .ConstructorDeclaration.
const methodDeclaration = node as MethodDeclaration;
const nameIsConstructor = methodDeclaration.name.kind === SyntaxKind.Identifier &&
methodDeclaration.name.escapedText === "constructor";
return !nameIsConstructor;
}
}
return false;
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 3,281
|
[
"node"
] | false
| 3
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
print
|
@Override
public String print(Duration object, Locale locale) {
if (this.defaultUnit == null) {
//delegate the ultimate of the default unit to the style
return DurationFormatterUtils.print(object, this.style);
}
return DurationFormatterUtils.print(object, this.style, this.defaultUnit);
}
|
Create a {@code DurationFormatter} in a specific {@link DurationFormat.Style} with an
optional {@code DurationFormat.Unit}.
<p>If a {@code defaultUnit} is specified, it may be used in parsing cases when no
unit is present in the string (provided the style allows for such a case). It will
also be used as the representation's resolution when printing in the
{@link DurationFormat.Style#SIMPLE} style. Otherwise, the style defines its default
unit.
@param style the {@code DurationStyle} to use
@param defaultUnit the {@code DurationFormat.Unit} to fall back to when parsing and printing
|
java
|
spring-context/src/main/java/org/springframework/format/datetime/standard/DurationFormatter.java
| 87
|
[
"object",
"locale"
] |
String
| true
| 2
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
is_float_dtype
|
def is_float_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of a float dtype.
The function checks for floating-point data types, which represent real numbers
that may have fractional components.
Parameters
----------
arr_or_dtype : array-like or dtype
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a float dtype.
See Also
--------
api.types.is_numeric_dtype : Check whether the provided array or dtype is of
a numeric dtype.
api.types.is_integer_dtype : Check whether the provided array or dtype is of
an integer dtype.
api.types.is_object_dtype : Check whether an array-like or dtype is of the
object dtype.
Examples
--------
>>> from pandas.api.types import is_float_dtype
>>> is_float_dtype(str)
False
>>> is_float_dtype(int)
False
>>> is_float_dtype(float)
True
>>> is_float_dtype(np.array(["a", "b"]))
False
>>> is_float_dtype(pd.Series([1, 2]))
False
>>> is_float_dtype(pd.Index([1, 2.0]))
True
"""
return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype(
arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "f"
)
|
Check whether the provided array or dtype is of a float dtype.
The function checks for floating-point data types, which represent real numbers
that may have fractional components.
Parameters
----------
arr_or_dtype : array-like or dtype
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a float dtype.
See Also
--------
api.types.is_numeric_dtype : Check whether the provided array or dtype is of
a numeric dtype.
api.types.is_integer_dtype : Check whether the provided array or dtype is of
an integer dtype.
api.types.is_object_dtype : Check whether an array-like or dtype is of the
object dtype.
Examples
--------
>>> from pandas.api.types import is_float_dtype
>>> is_float_dtype(str)
False
>>> is_float_dtype(int)
False
>>> is_float_dtype(float)
True
>>> is_float_dtype(np.array(["a", "b"]))
False
>>> is_float_dtype(pd.Series([1, 2]))
False
>>> is_float_dtype(pd.Index([1, 2.0]))
True
|
python
|
pandas/core/dtypes/common.py
| 1,345
|
[
"arr_or_dtype"
] |
bool
| true
| 3
| 8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
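One case the `is_float_dtype` examples above do not show is the extension-dtype branch (`typ.kind in "f"`); a short sketch, assuming a pandas version with nullable `Float64`/`Int64` arrays:

import numpy as np
import pandas as pd
from pandas.api.types import is_float_dtype

# NumPy float types and dtypes hit the classes(np.floating) branch.
print(is_float_dtype(np.float32))           # True
print(is_float_dtype(np.dtype("float64")))  # True

# Nullable extension dtypes are caught by the kind == "f" branch.
print(is_float_dtype(pd.array([1.5, None], dtype="Float64")))  # True
print(is_float_dtype(pd.array([1, 2], dtype="Int64")))         # False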