Datasets:
function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
parseTypeParameter
|
function parseTypeParameter(): TypeParameterDeclaration {
const pos = getNodePos();
const modifiers = parseModifiers(/*allowDecorators*/ false, /*permitConstAsModifier*/ true);
const name = parseIdentifier();
let constraint: TypeNode | undefined;
let expression: Expression | undefined;
if (parseOptional(SyntaxKind.ExtendsKeyword)) {
// It's not uncommon for people to write improper constraints to a generic. If the
// user writes a constraint that is an expression and not an actual type, then parse
// it out as an expression (so we can recover well), but report that a type is needed
// instead.
if (isStartOfType() || !isStartOfExpression()) {
constraint = parseType();
}
else {
// It was not a type, and it looked like an expression. Parse out an expression
// here so we recover well. Note: it is important that we call parseUnaryExpression
// and not parseExpression here. If the user has:
//
// <T extends "">
//
// We do *not* want to consume the `>` as we're consuming the expression for "".
expression = parseUnaryExpressionOrHigher();
}
}
const defaultType = parseOptional(SyntaxKind.EqualsToken) ? parseType() : undefined;
const node = factory.createTypeParameterDeclaration(modifiers, name, constraint, defaultType);
node.expression = expression;
return finishNode(node, pos);
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 3,955
|
[] | true
| 6
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
convert
|
def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str):
"""
Convert the data from this selection to the appropriate pandas type.
Parameters
----------
values : np.ndarray
nan_rep :
encoding : str
errors : str
Returns
-------
index : listlike to become an Index
data : ndarraylike to become a column
"""
assert isinstance(values, np.ndarray), type(values)
# values is a recarray
if values.dtype.fields is not None:
values = values[self.cname]
assert self.typ is not None
if self.dtype is None:
# Note: in tests we never have timedelta64 or datetime64,
# so the _get_data_and_dtype_name may be unnecessary
converted, dtype_name = _get_data_and_dtype_name(values)
kind = _dtype_to_kind(dtype_name)
else:
converted = values
dtype_name = self.dtype
kind = self.kind
assert isinstance(converted, np.ndarray) # for mypy
# use the meta if needed
meta = self.meta
metadata = self.metadata
ordered = self.ordered
tz = self.tz
assert dtype_name is not None
# convert to the correct dtype
dtype = dtype_name
# reverse converts
if dtype.startswith("datetime64"):
# recreate with tz if indicated
converted = _set_tz(converted, tz, dtype)
elif dtype.startswith("timedelta64"):
if dtype == "timedelta64":
# from before we started storing timedelta64 unit
converted = np.asarray(converted, dtype="m8[ns]")
else:
converted = np.asarray(converted, dtype=dtype)
elif dtype == "date":
try:
converted = np.asarray(
[date.fromordinal(v) for v in converted], dtype=object
)
except ValueError:
converted = np.asarray(
[date.fromtimestamp(v) for v in converted], dtype=object
)
elif meta == "category":
# we have a categorical
categories = metadata
codes = converted.ravel()
# if we have stored a NaN in the categories
# then strip it; in theory we could have BOTH
# -1s in the codes and nulls :<
if categories is None:
# Handle case of NaN-only categorical columns in which case
# the categories are an empty array; when this is stored,
# pytables cannot write a zero-len array, so on readback
# the categories would be None and `read_hdf()` would fail.
categories = Index([], dtype=np.float64)
else:
mask = isna(categories)
if mask.any():
categories = categories[~mask]
codes[codes != -1] -= mask.astype(int).cumsum()._values
converted = Categorical.from_codes(
codes, categories=categories, ordered=ordered, validate=False
)
else:
try:
converted = converted.astype(dtype, copy=False)
except TypeError:
converted = converted.astype("O", copy=False)
# convert nans / decode
if kind == "string":
converted = _unconvert_string_array(
converted, nan_rep=nan_rep, encoding=encoding, errors=errors
)
return self.values, converted
|
Convert the data from this selection to the appropriate pandas type.
Parameters
----------
values : np.ndarray
nan_rep :
encoding : str
errors : str
Returns
-------
index : listlike to become an Index
data : ndarraylike to become a column
|
python
|
pandas/io/pytables.py
| 2,655
|
[
"self",
"values",
"nan_rep",
"encoding",
"errors"
] | true
| 15
| 6.96
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
equals
|
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
Location other = (Location) obj;
boolean result = true;
result = result && this.line == other.line;
result = result && this.column == other.column;
return result;
}
|
Return the column of the text resource where the property originated.
@return the column number (zero indexed)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/origin/TextResourceOrigin.java
| 166
|
[
"obj"
] | true
| 6
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
parseAddressRange
|
bool parseAddressRange(const char *Str, uint64_t &StartAddress,
uint64_t &EndAddress) {
if (!Str)
return false;
// Parsed string format: <hex1>-<hex2>
StartAddress = hexToLong(Str, '-');
while (*Str && *Str != '-')
++Str;
if (!*Str)
return false;
++Str; // swallow '-'
EndAddress = hexToLong(Str);
return true;
}
|
Get string with address and parse it to hex pair <StartAddress, EndAddress>
|
cpp
|
bolt/runtime/instr.cpp
| 660
|
[] | true
| 5
| 6.4
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
_iter
|
def _iter(self, fitted, column_as_labels, skip_drop, skip_empty_columns):
"""
Generate (name, trans, columns, weight) tuples.
Parameters
----------
fitted : bool
If True, use the fitted transformers (``self.transformers_``) to
iterate through transformers, else use the transformers passed by
the user (``self.transformers``).
column_as_labels : bool
If True, columns are returned as string labels. If False, columns
are returned as they were given by the user. This can only be True
if the ``ColumnTransformer`` is already fitted.
skip_drop : bool
If True, 'drop' transformers are filtered out.
skip_empty_columns : bool
If True, transformers with empty selected columns are filtered out.
Yields
------
A generator of tuples containing:
- name : the name of the transformer
- transformer : the transformer object
- columns : the columns for that transformer
- weight : the weight of the transformer
"""
if fitted:
transformers = self.transformers_
else:
# interleave the validated column specifiers
transformers = [
(name, trans, column)
for (name, trans, _), column in zip(self.transformers, self._columns)
]
# add transformer tuple for remainder
if self._remainder[2]:
transformers = chain(transformers, [self._remainder])
get_weight = (self.transformer_weights or {}).get
for name, trans, columns in transformers:
if skip_drop and trans == "drop":
continue
if skip_empty_columns and _is_empty_column_selection(columns):
continue
if column_as_labels:
# Convert all columns to using their string labels
columns_is_scalar = np.isscalar(columns)
indices = self._transformer_to_input_indices[name]
columns = self.feature_names_in_[indices]
if columns_is_scalar:
# selection is done with one dimension
columns = columns[0]
yield (name, trans, columns, get_weight(name))
|
Generate (name, trans, columns, weight) tuples.
Parameters
----------
fitted : bool
If True, use the fitted transformers (``self.transformers_``) to
iterate through transformers, else use the transformers passed by
the user (``self.transformers``).
column_as_labels : bool
If True, columns are returned as string labels. If False, columns
are returned as they were given by the user. This can only be True
if the ``ColumnTransformer`` is already fitted.
skip_drop : bool
If True, 'drop' transformers are filtered out.
skip_empty_columns : bool
If True, transformers with empty selected columns are filtered out.
Yields
------
A generator of tuples containing:
- name : the name of the transformer
- transformer : the transformer object
- columns : the columns for that transformer
- weight : the weight of the transformer
|
python
|
sklearn/compose/_column_transformer.py
| 437
|
[
"self",
"fitted",
"column_as_labels",
"skip_drop",
"skip_empty_columns"
] | false
| 12
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
containsLocalBean
|
boolean containsLocalBean(String name);
|
Return whether the local bean factory contains a bean of the given name,
ignoring beans defined in ancestor contexts.
<p>This is an alternative to {@code containsBean}, ignoring a bean
of the given name from an ancestor bean factory.
@param name the name of the bean to query
@return whether a bean with the given name is defined in the local factory
@see BeanFactory#containsBean
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/HierarchicalBeanFactory.java
| 50
|
[
"name"
] | true
| 1
| 6.32
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
advance
|
@Override
public void advance() {
boolean hasNextA = itA.hasNext();
boolean hasNextB = itB.hasNext();
endReached = hasNextA == false && hasNextB == false;
if (endReached) {
return;
}
long idxA = 0;
long idxB = 0;
if (hasNextA) {
idxA = itA.peekIndex();
}
if (hasNextB) {
idxB = itB.peekIndex();
}
boolean advanceA = hasNextA && (hasNextB == false || idxA <= idxB);
boolean advanceB = hasNextB && (hasNextA == false || idxB <= idxA);
long countA = 0;
long countB = 0;
if (advanceA) {
currentIndex = idxA;
countA = itA.peekCount();
itA.advance();
}
if (advanceB) {
currentIndex = idxB;
countB = itB.peekCount();
itB.advance();
}
currentCount = countMergeOperator.applyAsLong(countA, countB);
}
|
Creates a new merging iterator, using the provided operator to merge the counts.
Note that the resulting count can be negative if the operator produces negative results.
@param itA the first iterator to merge
@param itB the second iterator to merge
@param countMergeOperator the operator to use to merge counts of buckets with the same index
@param targetScale the histogram scale to which both iterators should be aligned
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/MergingBucketIterator.java
| 68
|
[] |
void
| true
| 11
| 6.4
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
_check_for_default_values
|
def _check_for_default_values(fname, arg_val_dict, compat_args) -> None:
"""
Check that the keys in `arg_val_dict` are mapped to their
default values as specified in `compat_args`.
Note that this function is to be called only when it has been
checked that arg_val_dict.keys() is a subset of compat_args
"""
for key in arg_val_dict:
# try checking equality directly with '=' operator,
# as comparison may have been overridden for the left
# hand object
try:
v1 = arg_val_dict[key]
v2 = compat_args[key]
# check for None-ness otherwise we could end up
# comparing a numpy array vs None
if (v1 is not None and v2 is None) or (v1 is None and v2 is not None):
match = False
else:
match = v1 == v2
if not is_bool(match):
raise ValueError("'match' is not a boolean")
# could not compare them directly, so try comparison
# using the 'is' operator
except ValueError:
match = arg_val_dict[key] is compat_args[key]
if not match:
raise ValueError(
f"the '{key}' parameter is not supported in "
f"the pandas implementation of {fname}()"
)
|
Check that the keys in `arg_val_dict` are mapped to their
default values as specified in `compat_args`.
Note that this function is to be called only when it has been
checked that arg_val_dict.keys() is a subset of compat_args
|
python
|
pandas/util/_validators.py
| 51
|
[
"fname",
"arg_val_dict",
"compat_args"
] |
None
| true
| 9
| 6
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
elapsedNanos
|
private long elapsedNanos() {
return isRunning ? ticker.read() - startTick + elapsedNanos : elapsedNanos;
}
|
Sets the elapsed time for this stopwatch to zero, and places it in a stopped state.
@return this {@code Stopwatch} instance
|
java
|
android/guava/src/com/google/common/base/Stopwatch.java
| 199
|
[] | true
| 2
| 8
|
google/guava
| 51,352
|
javadoc
| false
|
|
construct_from_string
|
def construct_from_string(cls, string: str) -> SparseDtype:
"""
Construct a SparseDtype from a string form.
Parameters
----------
string : str
Can take the following forms.
string dtype
================ ============================
'int' SparseDtype[np.int64, 0]
'Sparse' SparseDtype[np.float64, nan]
'Sparse[int]' SparseDtype[np.int64, 0]
'Sparse[int, 0]' SparseDtype[np.int64, 0]
================ ============================
It is not possible to specify non-default fill values
with a string. An argument like ``'Sparse[int, 1]'``
will raise a ``TypeError`` because the default fill value
for integers is 0.
Returns
-------
SparseDtype
"""
if not isinstance(string, str):
raise TypeError(
f"'construct_from_string' expects a string, got {type(string)}"
)
msg = f"Cannot construct a 'SparseDtype' from '{string}'"
if string.startswith("Sparse"):
try:
sub_type, has_fill_value = cls._parse_subtype(string)
except ValueError as err:
raise TypeError(msg) from err
else:
result = SparseDtype(sub_type)
msg = (
f"Cannot construct a 'SparseDtype' from '{string}'.\n\nIt "
"looks like the fill_value in the string is not "
"the default for the dtype. Non-default fill_values "
"are not supported. Use the 'SparseDtype()' "
"constructor instead."
)
if has_fill_value and str(result) != string:
raise TypeError(msg)
return result
else:
raise TypeError(msg)
|
Construct a SparseDtype from a string form.
Parameters
----------
string : str
Can take the following forms.
string dtype
================ ============================
'int' SparseDtype[np.int64, 0]
'Sparse' SparseDtype[np.float64, nan]
'Sparse[int]' SparseDtype[np.int64, 0]
'Sparse[int, 0]' SparseDtype[np.int64, 0]
================ ============================
It is not possible to specify non-default fill values
with a string. An argument like ``'Sparse[int, 1]'``
will raise a ``TypeError`` because the default fill value
for integers is 0.
Returns
-------
SparseDtype
|
python
|
pandas/core/dtypes/dtypes.py
| 1,927
|
[
"cls",
"string"
] |
SparseDtype
| true
| 7
| 6.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
getOptionsDiagnostics
|
function getOptionsDiagnostics(): SortedReadonlyArray<Diagnostic> {
return sortAndDeduplicateDiagnostics(concatenate(
programDiagnostics.getCombinedDiagnostics(program).getGlobalDiagnostics(),
getOptionsDiagnosticsOfConfigFile(),
));
}
|
@returns The line index marked as preceding the diagnostic, or -1 if none was.
|
typescript
|
src/compiler/program.ts
| 3,262
|
[] | true
| 1
| 7.04
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
addPendingString
|
@SuppressWarnings("CatchingUnchecked") // sneaky checked exception
private void addPendingString(StringBuilder builder) {
// Capture current builder length so it can be truncated if this future ends up completing while
// the toString is being calculated
int truncateLength = builder.length();
builder.append("PENDING");
@RetainedLocalRef Object localValue = value();
if (localValue instanceof DelegatingToFuture) {
builder.append(", setFuture=[");
appendUserObject(builder, ((DelegatingToFuture) localValue).future);
builder.append("]");
} else {
String pendingDescription;
try {
pendingDescription = Strings.emptyToNull(pendingToString());
} catch (Throwable e) {
/*
* We want to catch (Exception | StackOverflowError), but we can't under environments where
* StackOverflowError doesn't exist.
*/
rethrowIfErrorOtherThanStackOverflow(e);
// The Throwable is either a RuntimeException, an Error, or sneaky checked exception.
//
// Don't call getMessage or toString() on the exception, in case the exception thrown by the
// subclass is implemented with bugs similar to the subclass.
pendingDescription = "Exception thrown from implementation: " + e.getClass();
}
if (pendingDescription != null) {
builder.append(", info=[").append(pendingDescription).append("]");
}
}
// The future may complete while calculating the toString, so we check once more to see if the
// future is done
if (isDone()) {
// Truncate anything that was appended before realizing this future is done
builder.delete(truncateLength, builder.length());
addDoneString(builder);
}
}
|
Provide a human-readable explanation of why this future has not yet completed.
@return null if an explanation cannot be provided (e.g. because the future is done).
@since 23.0
|
java
|
android/guava/src/com/google/common/util/concurrent/AbstractFuture.java
| 893
|
[
"builder"
] |
void
| true
| 5
| 7.2
|
google/guava
| 51,352
|
javadoc
| false
|
randomLong
|
public long randomLong() {
return randomLong(Long.MAX_VALUE);
}
|
Generates a random long between 0 (inclusive) and Long.MAX_VALUE (exclusive).
@return the random long.
@see #randomLong(long, long)
@since 3.16.0
|
java
|
src/main/java/org/apache/commons/lang3/RandomUtils.java
| 415
|
[] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
computeCacheOperations
|
private @Nullable Collection<CacheOperation> computeCacheOperations(Method method, @Nullable Class<?> targetClass) {
// Don't allow non-public methods, as configured.
if (allowPublicMethodsOnly() && !Modifier.isPublic(method.getModifiers())) {
return null;
}
// Skip setBeanFactory method on BeanFactoryAware.
if (method.getDeclaringClass() == BeanFactoryAware.class) {
return null;
}
// The method may be on an interface, but we need metadata from the target class.
// If the target class is null, the method will be unchanged.
Method specificMethod = AopUtils.getMostSpecificMethod(method, targetClass);
// First try is the method in the target class.
Collection<CacheOperation> opDef = findCacheOperations(specificMethod);
if (opDef != null) {
return opDef;
}
// Second try is the caching operation on the target class.
opDef = findCacheOperations(specificMethod.getDeclaringClass());
if (opDef != null && ClassUtils.isUserLevelMethod(method)) {
return opDef;
}
if (specificMethod != method) {
// Fallback is to look at the original method.
opDef = findCacheOperations(method);
if (opDef != null) {
return opDef;
}
// Last fallback is the class of the original method.
opDef = findCacheOperations(method.getDeclaringClass());
if (opDef != null && ClassUtils.isUserLevelMethod(method)) {
return opDef;
}
}
return null;
}
|
Determine a cache key for the given method and target class.
<p>Must not produce same key for overloaded methods.
Must produce same key for different instances of the same method.
@param method the method (never {@code null})
@param targetClass the target class (may be {@code null})
@return the cache key (never {@code null})
|
java
|
spring-context/src/main/java/org/springframework/cache/interceptor/AbstractFallbackCacheOperationSource.java
| 135
|
[
"method",
"targetClass"
] | true
| 11
| 8.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
toTimestamp
|
ZonedDateTime toTimestamp(String value) {
// First, try parsing as milliseconds
if (isDigits(value)) {
try {
long milliseconds = Long.parseLong(value);
return Instant.ofEpochMilli(milliseconds).atZone(timezone);
} catch (NumberFormatException ignored) {
// Not a millisecond timestamp, continue to format parsing
}
}
// Try parsing with different layouts
try {
TemporalAccessor accessor = TIME_FORMAT.parse(value);
// if there is no year nor year-of-era, we fall back to the current one and
// fill the rest of the date up with the parsed date
if (accessor.isSupported(ChronoField.YEAR) == false
&& accessor.isSupported(ChronoField.YEAR_OF_ERA) == false
&& accessor.isSupported(WeekFields.ISO.weekBasedYear()) == false
&& accessor.isSupported(WeekFields.of(Locale.ROOT).weekBasedYear()) == false
&& accessor.isSupported(ChronoField.INSTANT_SECONDS) == false) {
int year = LocalDate.now(ZoneOffset.UTC).getYear();
ZonedDateTime newTime = Instant.EPOCH.atZone(ZoneOffset.UTC).withYear(year);
for (ChronoField field : CHRONO_FIELDS) {
if (accessor.isSupported(field)) {
newTime = newTime.with(field, accessor.get(field));
}
}
accessor = newTime.withZoneSameLocal(timezone);
}
return DateFormatters.from(accessor, Locale.ROOT, timezone).withZoneSameInstant(timezone);
} catch (DateTimeParseException ignored) {
throw new IllegalArgumentException("Value is not a valid timestamp: " + value);
}
}
|
A utility method for determining whether a string contains only digits, possibly with a leading '+' or '-'.
That is, does this string have any hope of being parse-able as a Long?
|
java
|
modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/CefParser.java
| 523
|
[
"value"
] |
ZonedDateTime
| true
| 10
| 6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
loadKey
|
private String loadKey(StringBuilder buffer, CharacterReader reader) throws IOException {
buffer.setLength(0);
boolean previousWhitespace = false;
while (!reader.isEndOfLine()) {
if (reader.isPropertyDelimiter()) {
reader.read();
return buffer.toString();
}
if (!reader.isWhiteSpace() && previousWhitespace) {
return buffer.toString();
}
previousWhitespace = reader.isWhiteSpace();
buffer.append(reader.getCharacter());
reader.read();
}
return buffer.toString();
}
|
Load {@code .properties} data and return a map of {@code String} ->
{@link OriginTrackedValue}.
@param expandLists if list {@code name[]=a,b,c} shortcuts should be expanded
@return the loaded properties
@throws IOException on read error
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/env/OriginTrackedPropertiesLoader.java
| 132
|
[
"buffer",
"reader"
] |
String
| true
| 5
| 7.44
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
of
|
static EnvironmentPostProcessorsFactory of(@Nullable ClassLoader classLoader, String... classNames) {
return new ReflectionEnvironmentPostProcessorsFactory(classLoader, classNames);
}
|
Return a {@link EnvironmentPostProcessorsFactory} that reflectively creates post
processors from the given class names.
@param classLoader the source class loader
@param classNames the post processor class names
@return an {@link EnvironmentPostProcessorsFactory} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/support/EnvironmentPostProcessorsFactory.java
| 85
|
[
"classLoader"
] |
EnvironmentPostProcessorsFactory
| true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
to_pydatetime
|
def to_pydatetime(self) -> Series:
"""
Return the data as a Series of :class:`datetime.datetime` objects.
Timezone information is retained if present.
.. warning::
Python's datetime uses microsecond resolution, which is lower than
pandas (nanosecond). The values are truncated.
Returns
-------
numpy.ndarray
Object dtype array containing native Python datetime objects.
See Also
--------
datetime.datetime : Standard library value for a datetime.
Examples
--------
>>> s = pd.Series(pd.date_range("20180310", periods=2))
>>> s
0 2018-03-10
1 2018-03-11
dtype: datetime64[us]
>>> s.dt.to_pydatetime()
0 2018-03-10 00:00:00
1 2018-03-11 00:00:00
dtype: object
pandas' nanosecond precision is truncated to microseconds.
>>> s = pd.Series(pd.date_range("20180310", periods=2, freq="ns"))
>>> s
0 2018-03-10 00:00:00.000000000
1 2018-03-10 00:00:00.000000001
dtype: datetime64[ns]
>>> s.dt.to_pydatetime()
0 2018-03-10 00:00:00
1 2018-03-10 00:00:00
dtype: object
"""
# GH#20306
from pandas import Series
return Series(self._get_values().to_pydatetime(), dtype=object)
|
Return the data as a Series of :class:`datetime.datetime` objects.
Timezone information is retained if present.
.. warning::
Python's datetime uses microsecond resolution, which is lower than
pandas (nanosecond). The values are truncated.
Returns
-------
numpy.ndarray
Object dtype array containing native Python datetime objects.
See Also
--------
datetime.datetime : Standard library value for a datetime.
Examples
--------
>>> s = pd.Series(pd.date_range("20180310", periods=2))
>>> s
0 2018-03-10
1 2018-03-11
dtype: datetime64[us]
>>> s.dt.to_pydatetime()
0 2018-03-10 00:00:00
1 2018-03-11 00:00:00
dtype: object
pandas' nanosecond precision is truncated to microseconds.
>>> s = pd.Series(pd.date_range("20180310", periods=2, freq="ns"))
>>> s
0 2018-03-10 00:00:00.000000000
1 2018-03-10 00:00:00.000000001
dtype: datetime64[ns]
>>> s.dt.to_pydatetime()
0 2018-03-10 00:00:00
1 2018-03-10 00:00:00
dtype: object
|
python
|
pandas/core/indexes/accessors.py
| 324
|
[
"self"
] |
Series
| true
| 1
| 6.96
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
zipObjectDeep
|
function zipObjectDeep(props, values) {
return baseZipObject(props || [], values || [], baseSet);
}
|
This method is like `_.zipObject` except that it supports property paths.
@static
@memberOf _
@since 4.1.0
@category Array
@param {Array} [props=[]] The property identifiers.
@param {Array} [values=[]] The property values.
@returns {Object} Returns the new object.
@example
_.zipObjectDeep(['a.b[0].c', 'a.b[1].d'], [1, 2]);
// => { 'a': { 'b': [{ 'c': 1 }, { 'd': 2 }] } }
|
javascript
|
lodash.js
| 8,777
|
[
"props",
"values"
] | false
| 3
| 7.44
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
is_task_schedulable
|
def is_task_schedulable(task: Operator) -> bool:
"""
Determine if the task should be scheduled instead of being short-circuited to ``success``.
A task requires scheduling if it is not a trivial EmptyOperator, i.e. one of the
following conditions holds:
* it does **not** inherit from ``EmptyOperator``
* it defines an ``on_execute_callback``
* it defines an ``on_success_callback``
* it declares any ``outlets``
* it declares any ``inlets``
If none of these are true, the task is considered empty and is immediately marked
successful without being scheduled.
Note: keeping this check as a separate public method is important so it can also be used
by listeners (when a task is not scheduled, listeners are never called). For example,
the OpenLineage listener checks all tasks at DAG start, and using this method lets
it consistently determine whether the listener will run for each task.
"""
return bool(
not task.inherits_from_empty_operator
or task.has_on_execute_callback
or task.has_on_success_callback
or task.outlets
or task.inlets
)
|
Determine if the task should be scheduled instead of being short-circuited to ``success``.
A task requires scheduling if it is not a trivial EmptyOperator, i.e. one of the
following conditions holds:
* it does **not** inherit from ``EmptyOperator``
* it defines an ``on_execute_callback``
* it defines an ``on_success_callback``
* it declares any ``outlets``
* it declares any ``inlets``
If none of these are true, the task is considered empty and is immediately marked
successful without being scheduled.
Note: keeping this check as a separate public method is important so it can also be used
by listeners (when a task is not scheduled, listeners are never called). For example,
the OpenLineage listener checks all tasks at DAG start, and using this method lets
it consistently determine whether the listener will run for each task.
|
python
|
airflow-core/src/airflow/models/taskinstance.py
| 2,153
|
[
"task"
] |
bool
| true
| 5
| 6.56
|
apache/airflow
| 43,597
|
unknown
| false
|
elementCompareTo
|
public int elementCompareTo(final T element) {
// Comparable API says throw NPE on null
Objects.requireNonNull(element, "element");
if (isAfter(element)) {
return -1;
}
if (isBefore(element)) {
return 1;
}
return 0;
}
|
Checks where the specified element occurs relative to this range.
<p>The API is reminiscent of the Comparable interface returning {@code -1} if
the element is before the range, {@code 0} if contained within the range and
{@code 1} if the element is after the range.</p>
@param element the element to check for, not null.
@return -1, 0 or +1 depending on the element's location relative to the range.
@throws NullPointerException if {@code element} is {@code null}.
|
java
|
src/main/java/org/apache/commons/lang3/Range.java
| 282
|
[
"element"
] | true
| 3
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
containsDescendantOf
|
default ConfigurationPropertyState containsDescendantOf(ConfigurationPropertyName name) {
return ConfigurationPropertyState.UNKNOWN;
}
|
Returns if the source contains any descendants of the specified name. May return
{@link ConfigurationPropertyState#PRESENT} or
{@link ConfigurationPropertyState#ABSENT} if an answer can be determined or
{@link ConfigurationPropertyState#UNKNOWN} if it's not possible to determine a
definitive answer.
@param name the name to check
@return if the source contains any descendants
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertySource.java
| 57
|
[
"name"
] |
ConfigurationPropertyState
| true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
doScan
|
protected Set<BeanDefinitionHolder> doScan(String... basePackages) {
Assert.notEmpty(basePackages, "At least one base package must be specified");
Set<BeanDefinitionHolder> beanDefinitions = new LinkedHashSet<>();
for (String basePackage : basePackages) {
Set<BeanDefinition> candidates = findCandidateComponents(basePackage);
for (BeanDefinition candidate : candidates) {
ScopeMetadata scopeMetadata = this.scopeMetadataResolver.resolveScopeMetadata(candidate);
candidate.setScope(scopeMetadata.getScopeName());
String beanName = this.beanNameGenerator.generateBeanName(candidate, this.registry);
if (candidate instanceof AbstractBeanDefinition abstractBeanDefinition) {
postProcessBeanDefinition(abstractBeanDefinition, beanName);
}
if (candidate instanceof AnnotatedBeanDefinition annotatedBeanDefinition) {
AnnotationConfigUtils.processCommonDefinitionAnnotations(annotatedBeanDefinition);
}
if (checkCandidate(beanName, candidate)) {
BeanDefinitionHolder definitionHolder = new BeanDefinitionHolder(candidate, beanName);
definitionHolder =
AnnotationConfigUtils.applyScopedProxyMode(scopeMetadata, definitionHolder, this.registry);
beanDefinitions.add(definitionHolder);
registerBeanDefinition(definitionHolder, this.registry);
}
}
}
return beanDefinitions;
}
|
Perform a scan within the specified base packages,
returning the registered bean definitions.
<p>This method does <i>not</i> register an annotation config processor
but rather leaves this up to the caller.
@param basePackages the packages to check for annotated classes
@return set of beans registered if any for tooling registration purposes (never {@code null})
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/ClassPathBeanDefinitionScanner.java
| 272
|
[] | true
| 4
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
endObject
|
public XContentBuilder endObject() throws IOException {
generator.writeEndObject();
return this;
}
|
@return the value of the "human readable" flag. When the value is equal to true,
some types of values are written in a format easier to read for a human.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
| 346
|
[] |
XContentBuilder
| true
| 1
| 6.96
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
refreshIfEmpty
|
void refreshIfEmpty() {
if (ancestor != null) {
ancestor.refreshIfEmpty();
if (ancestor.getDelegate() != ancestorDelegate) {
throw new ConcurrentModificationException();
}
} else if (delegate.isEmpty()) {
Collection<V> newDelegate = map.get(key);
if (newDelegate != null) {
delegate = newDelegate;
}
}
}
|
If the delegate collection is empty, but the multimap has values for the key, replace the
delegate with the new collection for the key.
<p>For a subcollection, refresh its ancestor and validate that the ancestor delegate hasn't
changed.
|
java
|
android/guava/src/com/google/common/collect/AbstractMapBasedMultimap.java
| 351
|
[] |
void
| true
| 5
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
isParentOf
|
public boolean isParentOf(ConfigurationPropertyName name) {
Assert.notNull(name, "'name' must not be null");
if (getNumberOfElements() != name.getNumberOfElements() - 1) {
return false;
}
return isAncestorOf(name);
}
|
Returns {@code true} if this element is an immediate parent of the specified name.
@param name the name to check
@return {@code true} if this name is an ancestor
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
| 284
|
[
"name"
] | true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
arraycopy
|
public static <T> T arraycopy(final T source, final int sourcePos, final int destPos, final int length, final Supplier<T> allocator) {
return arraycopy(source, sourcePos, allocator.get(), destPos, length);
}
|
A fluent version of {@link System#arraycopy(Object, int, Object, int, int)} that returns the destination array.
@param <T> the type.
@param source the source array.
@param sourcePos starting position in the source array.
@param destPos starting position in the destination data.
@param length the number of array elements to be copied.
@param allocator allocates the array to populate and return.
@return dest
@throws IndexOutOfBoundsException if copying would cause access of data outside array bounds.
@throws ArrayStoreException if an element in the {@code src} array could not be stored into the {@code dest} array because of a type
mismatch.
@throws NullPointerException if either {@code src} or {@code dest} is {@code null}.
@since 3.15.0
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 1,419
|
[
"source",
"sourcePos",
"destPos",
"length",
"allocator"
] |
T
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
toString
|
@Override
public String toString() {
return "AutoOffsetResetStrategy{" +
"type=" + type +
(duration.map(value -> ", duration=" + value).orElse("")) +
'}';
}
|
Return the timestamp to be used for the ListOffsetsRequest.
@return the timestamp for the OffsetResetStrategy,
if the strategy is EARLIEST or LATEST or duration is provided
else return Optional.empty()
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AutoOffsetResetStrategy.java
| 150
|
[] |
String
| true
| 1
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
length
|
public int length() {
return this.values.size();
}
|
Returns the number of values in this array.
@return the length of this array
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONArray.java
| 124
|
[] | true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
count
|
public static int count(final String str, final String... set) {
if (isEmpty(str, set)) {
return 0;
}
final CharSet chars = CharSet.getInstance(set);
int count = 0;
for (final char c : str.toCharArray()) {
if (chars.contains(c)) {
count++;
}
}
return count;
}
|
Takes an argument in set-syntax, see evaluateSet,
and returns the number of characters present in the specified string.
<pre>
CharSetUtils.count(null, *) = 0
CharSetUtils.count("", *) = 0
CharSetUtils.count(*, null) = 0
CharSetUtils.count(*, "") = 0
CharSetUtils.count("hello", "k-p") = 3
CharSetUtils.count("hello", "a-e") = 1
</pre>
@see CharSet#getInstance(String...) for set-syntax.
@param str String to count characters in, may be null
@param set String[] set of characters to count, may be null
@return the character count, zero if null string input
|
java
|
src/main/java/org/apache/commons/lang3/CharSetUtils.java
| 84
|
[
"str"
] | true
| 3
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
addOrMergeIndexedArgumentValue
|
private void addOrMergeIndexedArgumentValue(Integer key, ValueHolder newValue) {
ValueHolder currentValue = this.indexedArgumentValues.get(key);
if (currentValue != null && newValue.getValue() instanceof Mergeable mergeable) {
if (mergeable.isMergeEnabled()) {
newValue.setValue(mergeable.merge(currentValue.getValue()));
}
}
this.indexedArgumentValues.put(key, newValue);
}
|
Add an argument value for the given index in the constructor argument list,
merging the new value (typically a collection) with the current value
if demanded: see {@link org.springframework.beans.Mergeable}.
@param key the index in the constructor argument list
@param newValue the argument value in the form of a ValueHolder
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/ConstructorArgumentValues.java
| 123
|
[
"key",
"newValue"
] |
void
| true
| 4
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
writeOperation
|
function writeOperation(operationIndex: number): void {
tryEnterLabel(operationIndex);
tryEnterOrLeaveBlock(operationIndex);
// early termination, nothing else to process in this label
if (lastOperationWasAbrupt) {
return;
}
lastOperationWasAbrupt = false;
lastOperationWasCompletion = false;
const opcode = operations![operationIndex];
if (opcode === OpCode.Nop) {
return;
}
else if (opcode === OpCode.Endfinally) {
return writeEndfinally();
}
const args = operationArguments![operationIndex]!;
if (opcode === OpCode.Statement) {
return writeStatement(args[0] as Statement);
}
const location = operationLocations![operationIndex];
switch (opcode) {
case OpCode.Assign:
return writeAssign(args[0] as Expression, args[1] as Expression, location);
case OpCode.Break:
return writeBreak(args[0] as Label, location);
case OpCode.BreakWhenTrue:
return writeBreakWhenTrue(args[0] as Label, args[1] as Expression, location);
case OpCode.BreakWhenFalse:
return writeBreakWhenFalse(args[0] as Label, args[1] as Expression, location);
case OpCode.Yield:
return writeYield(args[0] as Expression, location);
case OpCode.YieldStar:
return writeYieldStar(args[0] as Expression, location);
case OpCode.Return:
return writeReturn(args[0] as Expression, location);
case OpCode.Throw:
return writeThrow(args[0] as Expression, location);
}
}
|
Writes an operation as a statement to the current label's statement list.
@param operation The OpCode of the operation
|
typescript
|
src/compiler/transformers/generators.ts
| 3,027
|
[
"operationIndex"
] | true
| 6
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
describe_replications
|
def describe_replications(self, filters: list[dict[str, Any]] | None = None, **kwargs) -> list[dict]:
"""
Return list of serverless replications.
.. seealso::
- :external+boto3:py:meth:`DatabaseMigrationService.Client.describe_replications`
:param filters: List of filter objects
:return: List of replications
"""
filters = filters if filters is not None else []
try:
resp = self.conn.describe_replications(Filters=filters, **kwargs)
return resp.get("Replications", [])
except Exception as ex:
self.log.error("Error while describing replications: %s", str(ex))
raise ex
|
Return list of serverless replications.
.. seealso::
- :external+boto3:py:meth:`DatabaseMigrationService.Client.describe_replications`
:param filters: List of filter objects
:return: List of replications
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/dms.py
| 300
|
[
"self",
"filters"
] |
list[dict]
| true
| 2
| 7.28
|
apache/airflow
| 43,597
|
sphinx
| false
|
listShareGroupOffsets
|
ListShareGroupOffsetsResult listShareGroupOffsets(Map<String, ListShareGroupOffsetsSpec> groupSpecs, ListShareGroupOffsetsOptions options);
|
List the share group offsets available in the cluster for the specified share groups.
@param groupSpecs Map of share group ids to a spec that specifies the topic partitions of the group to list offsets for.
@param options The options to use when listing the share group offsets.
@return The ListShareGroupOffsetsResult
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 1,978
|
[
"groupSpecs",
"options"
] |
ListShareGroupOffsetsResult
| true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
instantiateType
|
public @Nullable T instantiateType(Class<?> type) {
Assert.notNull(type, "'type' must not be null");
return instantiate(TypeSupplier.forType(type));
}
|
Instantiate the given class, injecting constructor arguments as necessary.
@param type the type to instantiate
@return an instantiated instance
@since 3.4.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/util/Instantiator.java
| 159
|
[
"type"
] |
T
| true
| 1
| 6.48
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
createInitialManifest
|
private Manifest createInitialManifest(JarFile source) throws IOException {
if (source.getManifest() != null) {
return new Manifest(source.getManifest());
}
Manifest manifest = new Manifest();
manifest.getMainAttributes().putValue("Manifest-Version", "1.0");
return manifest;
}
|
Writes a signature file if necessary for the given {@code writtenLibraries}.
@param writtenLibraries the libraries
@param writer the writer to use to write the signature file if necessary
@throws IOException if a failure occurs when writing the signature file
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/Packager.java
| 310
|
[
"source"
] |
Manifest
| true
| 2
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
toString
|
@Override
public String toString() {
return (this.matcher != null) ? this.matcher.group(2) : "";
}
|
Return the extension from the hint or return the parameter if the hint is not
{@link #isPresent() present}.
@param extension the fallback extension
@return the extension either from the hint or fallback
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/FileExtensionHint.java
| 59
|
[] |
String
| true
| 2
| 7.68
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getDeadlineMsForTimeout
|
long getDeadlineMsForTimeout(final long timeoutMs) {
long expiration = time.milliseconds() + timeoutMs;
if (expiration < 0) {
return Long.MAX_VALUE;
}
return expiration;
}
|
Reconcile the assignment that has been received from the server. If for some topics, the
topic ID cannot be matched to a topic name, a metadata update will be triggered and only
the subset of topics that are resolvable will be reconciled. Reconciliation will trigger the
callbacks and update the subscription state.
There are three conditions under which no reconciliation will be triggered:
- We have already reconciled the assignment (the target assignment is the same as the current assignment).
- Another reconciliation is already in progress.
- There are topics that haven't been added to the current assignment yet, but all their topic IDs
are missing from the target assignment.
@param canCommit Controls whether reconciliation can proceed when auto-commit is enabled.
Set to true only when the current offset positions are safe to commit.
If false and auto-commit enabled, the reconciliation will be skipped.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
| 922
|
[
"timeoutMs"
] | true
| 2
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
axes
|
def axes(self) -> list[Index]:
"""
Return a list representing the axes of the DataFrame.
It has the row axis labels and column axis labels as the only members.
They are returned in that order.
See Also
--------
DataFrame.index: The index (row labels) of the DataFrame.
DataFrame.columns: The column labels of the DataFrame.
Examples
--------
>>> df = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
>>> df.axes
[RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='str')]
"""
return [self.index, self.columns]
|
Return a list representing the axes of the DataFrame.
It has the row axis labels and column axis labels as the only members.
They are returned in that order.
See Also
--------
DataFrame.index: The index (row labels) of the DataFrame.
DataFrame.columns: The column labels of the DataFrame.
Examples
--------
>>> df = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
>>> df.axes
[RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='str')]
|
python
|
pandas/core/frame.py
| 1,024
|
[
"self"
] |
list[Index]
| true
| 1
| 6.08
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
intersects
|
public boolean intersects(final FluentBitSet set) {
return bitSet.intersects(set.bitSet);
}
|
Returns true if the specified {@link BitSet} has any bits set to {@code true} that are also set to {@code true} in
this {@link BitSet}.
@param set {@link BitSet} to intersect with.
@return boolean indicating whether this {@link BitSet} intersects the specified {@link BitSet}.
|
java
|
src/main/java/org/apache/commons/lang3/util/FluentBitSet.java
| 284
|
[
"set"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
build
|
public SimpleAsyncTaskExecutor build() {
return configure(new SimpleAsyncTaskExecutor());
}
|
Build a new {@link SimpleAsyncTaskExecutor} instance and configure it using this
builder.
@return a configured {@link SimpleAsyncTaskExecutor} instance.
@see #build(Class)
@see #configure(SimpleAsyncTaskExecutor)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/task/SimpleAsyncTaskExecutorBuilder.java
| 241
|
[] |
SimpleAsyncTaskExecutor
| true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
parseInitializer
|
function parseInitializer(): Expression | undefined {
return parseOptional(SyntaxKind.EqualsToken) ? parseAssignmentExpressionOrHigher(/*allowReturnTypeInArrowFunction*/ true) : undefined;
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 5,065
|
[] | true
| 2
| 6.64
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
softValues
|
@GwtIncompatible // java.lang.ref.SoftReference
@CanIgnoreReturnValue
public CacheBuilder<K, V> softValues() {
return setValueStrength(Strength.SOFT);
}
|
Specifies that each value (not key) stored in the cache should be wrapped in a {@link
SoftReference} (by default, strong references are used). Softly-referenced objects will be
garbage-collected in a <i>globally</i> least-recently-used manner, in response to memory
demand.
<p><b>Warning:</b> in most circumstances it is better to set a per-cache {@linkplain
#maximumSize(long) maximum size} instead of using soft references. You should only use this
method if you are well familiar with the practical consequences of soft references.
<p><b>Note:</b> when this method is used, the resulting cache will use identity ({@code ==})
comparison to determine equality of values.
<p>Entries with values that have been garbage collected may be counted in {@link Cache#size},
but will never be visible to read or write operations; such entries are cleaned up as part of
the routine maintenance described in the class javadoc.
@return this {@code CacheBuilder} instance (for chaining)
@throws IllegalStateException if the value strength was already set
|
java
|
android/guava/src/com/google/common/cache/CacheBuilder.java
| 686
|
[] | true
| 1
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
completeFutureAndFireCallbacks
|
private void completeFutureAndFireCallbacks(
long baseOffset,
long logAppendTime,
Function<Integer, RuntimeException> recordExceptions
) {
// Set the future before invoking the callbacks as we rely on its state for the `onCompletion` call
produceFuture.set(baseOffset, logAppendTime, recordExceptions);
// execute callbacks
for (int i = 0; i < thunks.size(); i++) {
try {
Thunk thunk = thunks.get(i);
if (thunk.callback != null) {
if (recordExceptions == null) {
RecordMetadata metadata = thunk.future.value();
thunk.callback.onCompletion(metadata, null);
} else {
RuntimeException exception = recordExceptions.apply(i);
thunk.callback.onCompletion(null, exception);
}
}
} catch (Exception e) {
log.error("Error executing user-provided callback on message for topic-partition '{}'", topicPartition, e);
}
}
produceFuture.done();
}
|
Finalize the state of a batch. Final state, once set, is immutable. This function may be called
once or twice on a batch. It may be called twice if
1. An inflight batch expires before a response from the broker is received. The batch's final
state is set to FAILED. But it could succeed on the broker and second time around batch.done() may
try to set SUCCEEDED final state.
2. If a transaction abortion happens or if the producer is closed forcefully, the final state is
ABORTED but again it could succeed if broker responds with a success.
Attempted transitions from [FAILED | ABORTED] --> SUCCEEDED are logged.
Attempted transitions from one failure state to the same or a different failed state are ignored.
Attempted transitions from SUCCEEDED to the same or a failed state throw an exception.
@param baseOffset The base offset of the messages assigned by the server
@param logAppendTime The log append time or -1 if CreateTime is being used
@param topLevelException The exception that occurred (or null if the request was successful)
@param recordExceptions Record exception function mapping batchIndex to the respective record exception
@return true if the batch was completed successfully and false if the batch was previously aborted
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerBatch.java
| 294
|
[
"baseOffset",
"logAppendTime",
"recordExceptions"
] |
void
| true
| 5
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
createBeanDefinition
|
protected AbstractBeanDefinition createBeanDefinition(@Nullable String className, @Nullable String parentName)
throws ClassNotFoundException {
return BeanDefinitionReaderUtils.createBeanDefinition(
parentName, className, this.readerContext.getBeanClassLoader());
}
|
Create a bean definition for the given class name and parent name.
@param className the name of the bean class
@param parentName the name of the bean's parent bean
@return the newly created bean definition
@throws ClassNotFoundException if bean class resolution was attempted but failed
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/xml/BeanDefinitionParserDelegate.java
| 635
|
[
"className",
"parentName"
] |
AbstractBeanDefinition
| true
| 1
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
loadBeanDefinitions
|
public int loadBeanDefinitions(InputSource inputSource) throws BeanDefinitionStoreException {
return loadBeanDefinitions(inputSource, "resource loaded through SAX InputSource");
}
|
Load bean definitions from the specified XML file.
@param inputSource the SAX InputSource to read from
@return the number of bean definitions found
@throws BeanDefinitionStoreException in case of loading or parsing errors
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/xml/XmlBeanDefinitionReader.java
| 365
|
[
"inputSource"
] | true
| 1
| 6.32
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
doubleValue
|
@Override
public double doubleValue() {
if (value >= 0) {
return (double) value;
}
// The top bit is set, which means that the double value is going to come from the top 53 bits.
// So we can ignore the bottom 11, except for rounding. We can unsigned-shift right 1, aka
// unsigned-divide by 2, and convert that. Then we'll get exactly half of the desired double
// value. But in the specific case where the bottom two bits of the original number are 01, we
// want to replace that with 1 in the shifted value for correct rounding.
return (double) ((value >>> 1) | (value & 1)) * 2.0;
}
|
Returns the value of this {@code UnsignedLong} as a {@code double}, analogous to a widening
primitive conversion from {@code long} to {@code double}, and correctly rounded.
|
java
|
android/guava/src/com/google/common/primitives/UnsignedLong.java
| 209
|
[] | true
| 2
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
_serialize_params_dict
|
def _serialize_params_dict(cls, params: ParamsDict | dict) -> list[tuple[str, dict]]:
"""Serialize Params dict for a DAG or task as a list of tuples to ensure ordering."""
serialized_params = []
for k, raw_v in params.items():
# Use native param object, not resolved value if possible
v = params.get_param(k) if isinstance(params, ParamsDict) else raw_v
try:
class_identity = f"{v.__module__}.{v.__class__.__name__}"
except AttributeError:
class_identity = ""
if class_identity == "airflow.sdk.definitions.param.Param":
serialized_params.append((k, cls._serialize_param(v)))
else:
# Auto-box other values into Params object like it is done by DAG parsing as well
serialized_params.append((k, cls._serialize_param(Param(v))))
return serialized_params
|
Serialize Params dict for a DAG or task as a list of tuples to ensure ordering.
|
python
|
airflow-core/src/airflow/serialization/serialized_objects.py
| 908
|
[
"cls",
"params"
] |
list[tuple[str, dict]]
| true
| 5
| 6
|
apache/airflow
| 43,597
|
unknown
| false
|
unused
|
def unused(fn: Callable[_P, _R]) -> Callable[_P, _R]:
"""
This decorator indicates to the compiler that a function or method should
be ignored and replaced with the raising of an exception. This allows you
to leave code in your model that is not yet TorchScript compatible and still
export your model.
Example (using ``@torch.jit.unused`` on a method)::
import torch
import torch.nn as nn
class MyModule(nn.Module):
def __init__(self, use_memory_efficient):
super().__init__()
self.use_memory_efficient = use_memory_efficient
@torch.jit.unused
def memory_efficient(self, x):
import pdb
pdb.set_trace()
return x + 10
def forward(self, x):
# Use not-yet-scriptable memory efficient mode
if self.use_memory_efficient:
return self.memory_efficient(x)
else:
return x + 10
m = torch.jit.script(MyModule(use_memory_efficient=False))
m.save("m.pt")
m = torch.jit.script(MyModule(use_memory_efficient=True))
# exception raised
m(torch.rand(100))
"""
if isinstance(fn, property):
prop = fn
setattr( # noqa: B010
prop.fget, "_torchscript_modifier", FunctionModifiers.UNUSED
)
if prop.fset:
setattr( # noqa: B010
prop.fset, "_torchscript_modifier", FunctionModifiers.UNUSED
)
return prop # pyrefly: ignore [bad-return]
fn._torchscript_modifier = FunctionModifiers.UNUSED # type: ignore[attr-defined]
return fn
|
This decorator indicates to the compiler that a function or method should
be ignored and replaced with the raising of an exception. This allows you
to leave code in your model that is not yet TorchScript compatible and still
export your model.
Example (using ``@torch.jit.unused`` on a method)::
import torch
import torch.nn as nn
class MyModule(nn.Module):
def __init__(self, use_memory_efficient):
super().__init__()
self.use_memory_efficient = use_memory_efficient
@torch.jit.unused
def memory_efficient(self, x):
import pdb
pdb.set_trace()
return x + 10
def forward(self, x):
# Use not-yet-scriptable memory efficient mode
if self.use_memory_efficient:
return self.memory_efficient(x)
else:
return x + 10
m = torch.jit.script(MyModule(use_memory_efficient=False))
m.save("m.pt")
m = torch.jit.script(MyModule(use_memory_efficient=True))
# exception raised
m(torch.rand(100))
|
python
|
torch/_jit_internal.py
| 721
|
[
"fn"
] |
Callable[_P, _R]
| true
| 3
| 6.4
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
minimalEmpty
|
public static ZeroBucket minimalEmpty() {
return MINIMAL_EMPTY;
}
|
@return A singleton instance of an empty zero bucket with the smallest possible threshold.
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ZeroBucket.java
| 102
|
[] |
ZeroBucket
| true
| 1
| 6.8
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
postProcessObjectFromFactoryBean
|
protected Object postProcessObjectFromFactoryBean(Object object, String beanName) throws BeansException {
return object;
}
|
Post-process the given object that has been obtained from the FactoryBean.
The resulting object will get exposed for bean references.
<p>The default implementation simply returns the given object as-is.
Subclasses may override this, for example, to apply post-processors.
@param object the object obtained from the FactoryBean.
@param beanName the name of the bean
@return the object to expose
@throws org.springframework.beans.BeansException if any post-processing failed
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/FactoryBeanRegistrySupport.java
| 259
|
[
"object",
"beanName"
] |
Object
| true
| 1
| 6.32
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
shallBeRetried
|
boolean shallBeRetried() {
return timeSupplier.get() - deadUntilNanos > 0;
}
|
Indicates whether it's time to retry to failed host or not.
@return true if the host should be retried, false otherwise
|
java
|
client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java
| 75
|
[] | true
| 1
| 6.96
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
first
|
@SuppressWarnings("nullness") // Unsafe, but we can't do much about it now.
public final Optional<@NonNull E> first() {
Iterator<E> iterator = getDelegate().iterator();
return iterator.hasNext() ? Optional.of(iterator.next()) : Optional.absent();
}
|
Returns an {@link Optional} containing the first element in this fluent iterable. If the
iterable is empty, {@code Optional.absent()} is returned.
<p><b>{@code Stream} equivalent:</b> if the goal is to obtain any element, {@link
Stream#findAny}; if it must specifically be the <i>first</i> element, {@code Stream#findFirst}.
@throws NullPointerException if the first element is null; if this is a possibility, use {@code
iterator().next()} or {@link Iterables#getFirst} instead.
|
java
|
android/guava/src/com/google/common/collect/FluentIterable.java
| 519
|
[] | true
| 2
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
onTasksAssignedCallbackCompleted
|
public void onTasksAssignedCallbackCompleted(final StreamsOnTasksAssignedCallbackCompletedEvent event) {
Optional<KafkaException> error = event.error();
CompletableFuture<Void> future = event.future();
if (error.isPresent()) {
Exception e = error.get();
log.warn("The onTasksAssigned callback completed with an error ({}); " +
"signaling to continue to the next phase of rebalance", e.getMessage());
future.completeExceptionally(e);
} else {
log.debug("The onTasksAssigned callback completed successfully; signaling to continue to the next phase of rebalance");
future.complete(null);
}
}
|
Completes the future that marks the completed execution of the onTasksAssigned callback.
@param event The event containing the future sent from the application thread to the network thread to
confirm the execution of the callback.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsMembershipManager.java
| 1,320
|
[
"event"
] |
void
| true
| 2
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
get_unanimous_names
|
def get_unanimous_names(*indexes: Index) -> tuple[Hashable, ...]:
"""
Return common name if all indices agree, otherwise None (level-by-level).
Parameters
----------
indexes : list of Index objects
Returns
-------
list
A list representing the unanimous 'names' found.
"""
name_tups = (tuple(i.names) for i in indexes)
name_sets = ({*ns} for ns in zip_longest(*name_tups))
names = tuple(ns.pop() if len(ns) == 1 else None for ns in name_sets)
return names
|
Return common name if all indices agree, otherwise None (level-by-level).
Parameters
----------
indexes : list of Index objects
Returns
-------
list
A list representing the unanimous 'names' found.
|
python
|
pandas/core/indexes/base.py
| 7,837
|
[] |
tuple[Hashable, ...]
| true
| 2
| 6.08
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
__init__
|
def __init__(self, *args, **kwargs):
"""
Initialize on an exterior ring and a sequence of holes (both
instances may be either LinearRing instances, or a tuple/list
that may be constructed into a LinearRing).
Examples of initialization, where shell, hole1, and hole2 are
valid LinearRing geometries:
>>> from django.contrib.gis.geos import LinearRing, Polygon
>>> shell = hole1 = hole2 = LinearRing()
>>> poly = Polygon(shell, hole1, hole2)
>>> poly = Polygon(shell, (hole1, hole2))
>>> # Example where a tuple parameters are used:
>>> poly = Polygon(((0, 0), (0, 10), (10, 10), (10, 0), (0, 0)),
... ((4, 4), (4, 6), (6, 6), (6, 4), (4, 4)))
"""
if not args:
super().__init__(self._create_polygon(0, None), **kwargs)
return
# Getting the ext_ring and init_holes parameters from the argument list
ext_ring, *init_holes = args
n_holes = len(init_holes)
# If initialized as Polygon(shell, (LinearRing, LinearRing))
# [for backward-compatibility]
if n_holes == 1 and isinstance(init_holes[0], (tuple, list)):
if not init_holes[0]:
init_holes = ()
n_holes = 0
elif isinstance(init_holes[0][0], LinearRing):
init_holes = init_holes[0]
n_holes = len(init_holes)
polygon = self._create_polygon(n_holes + 1, [ext_ring, *init_holes])
super().__init__(polygon, **kwargs)
|
Initialize on an exterior ring and a sequence of holes (both
instances may be either LinearRing instances, or a tuple/list
that may be constructed into a LinearRing).
Examples of initialization, where shell, hole1, and hole2 are
valid LinearRing geometries:
>>> from django.contrib.gis.geos import LinearRing, Polygon
>>> shell = hole1 = hole2 = LinearRing()
>>> poly = Polygon(shell, hole1, hole2)
>>> poly = Polygon(shell, (hole1, hole2))
>>> # Example where a tuple parameters are used:
>>> poly = Polygon(((0, 0), (0, 10), (10, 10), (10, 0), (0, 0)),
... ((4, 4), (4, 6), (6, 6), (6, 4), (4, 4)))
|
python
|
django/contrib/gis/geos/polygon.py
| 10
|
[
"self"
] | false
| 6
| 6.48
|
django/django
| 86,204
|
unknown
| false
|
|
buildAllFieldTypes
|
function buildAllFieldTypes(
inputTypes: readonly DMMF.InputTypeRef[],
context: GenerateContext,
source?: string,
): ts.TypeBuilder {
const inputObjectTypes = inputTypes.filter((t) => t.location === 'inputObjectTypes' && !t.isList)
const otherTypes = inputTypes.filter((t) => t.location !== 'inputObjectTypes' || t.isList)
const tsInputObjectTypes = inputObjectTypes.map((type) => buildSingleFieldType(type, context.genericArgsInfo, source))
const tsOtherTypes = otherTypes.map((type) => buildSingleFieldType(type, context.genericArgsInfo, source))
if (tsOtherTypes.length === 0) {
return xorTypes(tsInputObjectTypes)
}
if (tsInputObjectTypes.length === 0) {
return ts.unionType(tsOtherTypes)
}
return ts.unionType(xorTypes(tsInputObjectTypes)).addVariants(tsOtherTypes)
}
|
Examples:
T[], T => T | T[]
T, U => XOR<T,U>
T[], T, U => XOR<T, U> | T[]
T[], U => T[] | U
T, U, null => XOR<T,U> | null
T, U, V, W, null => XOR<T, XOR<U, XOR<V, W>>> | null
1. Separate XOR and non XOR items (objects and non-objects)
2. Generate them out and `|` them
|
typescript
|
packages/client-generator-js/src/TSClient/Input.ts
| 96
|
[
"inputTypes",
"context",
"source?"
] | true
| 5
| 7.12
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
identity
|
static <E extends Throwable> FailableLongUnaryOperator<E> identity() {
return t -> t;
}
|
Returns a unary operator that always returns its input argument.
@param <E> The kind of thrown exception or error.
@return a unary operator that always returns its input argument
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableLongUnaryOperator.java
| 41
|
[] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
search
|
static <T> ConfigurationPropertyState search(T[] source, int startInclusive, int endExclusive,
Predicate<T> predicate) {
Assert.notNull(source, "'source' must not be null");
Assert.notNull(predicate, "'predicate' must not be null");
for (int i = startInclusive; i < endExclusive; i++) {
if (predicate.test(source[i])) {
return PRESENT;
}
}
return ABSENT;
}
|
Search the given iterable using a predicate to determine if content is
{@link #PRESENT} or {@link #ABSENT}.
@param <T> the data type
@param source the source iterable to search
@param startInclusive the first index to cover
@param endExclusive index immediately past the last index to cover
@param predicate the predicate used to test for presence
@return {@link #PRESENT} if the iterable contains a matching item, otherwise
{@link #ABSENT}.
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyState.java
| 80
|
[
"source",
"startInclusive",
"endExclusive",
"predicate"
] |
ConfigurationPropertyState
| true
| 3
| 7.6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getPrimitivePromotionCost
|
private static float getPrimitivePromotionCost(final Class<?> srcClass, final Class<?> destClass) {
if (srcClass == null) {
return 1.5f;
}
float cost = 0.0f;
Class<?> cls = srcClass;
if (!cls.isPrimitive()) {
// slight unwrapping penalty
cost += 0.1f;
cls = ClassUtils.wrapperToPrimitive(cls);
}
// Increase the cost as the loop widens the type.
for (int i = 0; cls != destClass && i < WIDENING_PRIMITIVE_TYPES.length; i++) {
if (cls == WIDENING_PRIMITIVE_TYPES[i]) {
cost += 0.1f;
if (i < WIDENING_PRIMITIVE_TYPES.length - 1) {
cls = WIDENING_PRIMITIVE_TYPES[i + 1];
}
}
}
return cost;
}
|
Gets the number of steps required to promote a primitive to another type.
@param srcClass the (primitive) source class.
@param destClass the (primitive) destination class.
@return The cost of promoting the primitive.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/MemberUtils.java
| 169
|
[
"srcClass",
"destClass"
] | true
| 7
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
isParamMismatch
|
private boolean isParamMismatch(Method uniqueCandidate, Method candidate) {
int uniqueCandidateParameterCount = uniqueCandidate.getParameterCount();
int candidateParameterCount = candidate.getParameterCount();
return (uniqueCandidateParameterCount != candidateParameterCount ||
!Arrays.equals(uniqueCandidate.getParameterTypes(), candidate.getParameterTypes()));
}
|
Resolve the factory method in the specified bean definition, if possible.
{@link RootBeanDefinition#getResolvedFactoryMethod()} can be checked for the result.
@param mbd the bean definition to check
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/ConstructorResolver.java
| 360
|
[
"uniqueCandidate",
"candidate"
] | true
| 2
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
make_hastie_10_2
|
def make_hastie_10_2(n_samples=12000, *, random_state=None):
"""Generate data for binary classification used in Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=12000
The number of samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, 10)
The input samples.
y : ndarray of shape (n_samples,)
The output values.
See Also
--------
make_gaussian_quantiles : A generalization of this dataset approach.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
Examples
--------
>>> from sklearn.datasets import make_hastie_10_2
>>> X, y = make_hastie_10_2(n_samples=24000, random_state=42)
>>> X.shape
(24000, 10)
>>> y.shape
(24000,)
>>> list(y[:5])
[np.float64(-1.0), np.float64(1.0), np.float64(-1.0), np.float64(1.0),
np.float64(-1.0)]
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X**2.0).sum(axis=1) > 9.34).astype(np.float64, copy=False)
y[y == 0.0] = -1.0
return X, y
|
Generate data for binary classification used in Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=12000
The number of samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, 10)
The input samples.
y : ndarray of shape (n_samples,)
The output values.
See Also
--------
make_gaussian_quantiles : A generalization of this dataset approach.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
Examples
--------
>>> from sklearn.datasets import make_hastie_10_2
>>> X, y = make_hastie_10_2(n_samples=24000, random_state=42)
>>> X.shape
(24000, 10)
>>> y.shape
(24000,)
>>> list(y[:5])
[np.float64(-1.0), np.float64(1.0), np.float64(-1.0), np.float64(1.0),
np.float64(-1.0)]
|
python
|
sklearn/datasets/_samples_generator.py
| 573
|
[
"n_samples",
"random_state"
] | false
| 1
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
chunkObjectLiteralElements
|
function chunkObjectLiteralElements(elements: readonly ObjectLiteralElementLike[]): Expression[] {
let chunkObject: ObjectLiteralElementLike[] | undefined;
const objects: Expression[] = [];
for (const e of elements) {
if (e.kind === SyntaxKind.SpreadAssignment) {
if (chunkObject) {
objects.push(factory.createObjectLiteralExpression(chunkObject));
chunkObject = undefined;
}
const target = e.expression;
objects.push(visitNode(target, visitor, isExpression));
}
else {
chunkObject = append(
chunkObject,
e.kind === SyntaxKind.PropertyAssignment
? factory.createPropertyAssignment(e.name, visitNode(e.initializer, visitor, isExpression))
: visitNode(e, visitor, isObjectLiteralElementLike),
);
}
}
if (chunkObject) {
objects.push(factory.createObjectLiteralExpression(chunkObject));
}
return objects;
}
|
@param expressionResultIsUnused Indicates the result of an expression is unused by the parent node (i.e., the left side of a comma or the
expression of an `ExpressionStatement`).
|
typescript
|
src/compiler/transformers/es2018.ts
| 482
|
[
"elements"
] | true
| 6
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
min
|
public static double min(final double... array) {
Objects.requireNonNull(array, "array");
Validate.isTrue(array.length != 0, "Array cannot be empty.");
// Finds and returns min
double min = array[0];
for (int i = 1; i < array.length; i++) {
min = min(array[i], min);
}
return min;
}
|
Returns the minimum value in an array.
@param array an array, must not be null or empty.
@return the minimum value in the array.
@throws NullPointerException if {@code array} is {@code null}.
@throws IllegalArgumentException if {@code array} is empty.
@since 3.4 Changed signature from min(double[]) to min(double...).
|
java
|
src/main/java/org/apache/commons/lang3/math/IEEE754rUtils.java
| 151
|
[] | true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
resolve
|
def resolve(self, key: str, is_local: bool):
"""
Resolve a variable name in a possibly local context.
Parameters
----------
key : str
A variable name
is_local : bool
Flag indicating whether the variable is local or not (prefixed with
the '@' symbol)
Returns
-------
value : object
The value of a particular variable
"""
try:
# only look for locals in outer scope
if is_local:
return self.scope[key]
# not a local variable so check in resolvers if we have them
if self.has_resolvers:
return self.resolvers[key]
# if we're here that means that we have no locals and we also have
# no resolvers
assert not is_local and not self.has_resolvers
return self.scope[key]
except KeyError:
try:
# last ditch effort we look in temporaries
# these are created when parsing indexing expressions
# e.g., df[df > 0]
return self.temps[key]
except KeyError as err:
raise UndefinedVariableError(key, is_local) from err
|
Resolve a variable name in a possibly local context.
Parameters
----------
key : str
A variable name
is_local : bool
Flag indicating whether the variable is local or not (prefixed with
the '@' symbol)
Returns
-------
value : object
The value of a particular variable
|
python
|
pandas/core/computation/scope.py
| 208
|
[
"self",
"key",
"is_local"
] | true
| 4
| 6.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
getPackageName
|
public static String getPackageName(final Object object, final String valueIfNull) {
if (object == null) {
return valueIfNull;
}
return getPackageName(object.getClass());
}
|
Gets the package name of an {@link Object}.
@param object the class to get the package name for, may be null.
@param valueIfNull the value to return if null.
@return the package name of the object, or the null value.
|
java
|
src/main/java/org/apache/commons/lang3/ClassUtils.java
| 789
|
[
"object",
"valueIfNull"
] |
String
| true
| 2
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
prepareTransaction
|
@Override
public PreparedTxnState prepareTransaction() throws ProducerFencedException {
throwIfNoTransactionManager();
throwIfProducerClosed();
throwIfInPreparedState();
if (!transactionManager.is2PCEnabled()) {
throw new InvalidTxnStateException("Cannot prepare a transaction when 2PC is not enabled");
}
long now = time.nanoseconds();
flush();
transactionManager.prepareTransaction();
producerMetrics.recordPrepareTxn(time.nanoseconds() - now);
ProducerIdAndEpoch producerIdAndEpoch = transactionManager.preparedTransactionState();
return new PreparedTxnState(producerIdAndEpoch.producerId, producerIdAndEpoch.epoch);
}
|
Prepares the current transaction for a two-phase commit. This method will flush all pending messages
and transition the producer into a mode where only {@link #commitTransaction()}, {@link #abortTransaction()},
or completeTransaction(PreparedTxnState) may be called.
<p>
This method is used as part of a two-phase commit protocol:
<ol>
<li>Prepare the transaction by calling this method. This returns a {@link PreparedTxnState} if successful.</li>
<li>Make any external system changes that need to be atomic with this transaction.</li>
<li>Complete the transaction by calling {@link #commitTransaction()}, {@link #abortTransaction()} or
completeTransaction(PreparedTxnState).</li>
</ol>
@return the prepared transaction state to use when completing the transaction
@throws IllegalStateException if no transactional.id has been configured or no transaction has been started yet.
@throws InvalidTxnStateException if the producer is not in a state where preparing
a transaction is possible or 2PC is not enabled.
@throws ProducerFencedException fatal error indicating another producer with the same transactional.id is active
@throws UnsupportedVersionException fatal error indicating the broker
does not support transactions (i.e. if its version is lower than 0.11.0.0)
@throws AuthorizationException fatal error indicating that the configured
transactional.id is not authorized. See the exception for more details
@throws KafkaException if the producer has encountered a previous fatal error or for any other unexpected error
@throws TimeoutException if the time taken for preparing the transaction has surpassed <code>max.block.ms</code>
@throws InterruptException if the thread is interrupted while blocked
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
| 808
|
[] |
PreparedTxnState
| true
| 2
| 7.44
|
apache/kafka
| 31,560
|
javadoc
| false
|
parse_pattern
|
def parse_pattern(
pattern: str, axes_lengths: Mapping[str, int]
) -> tuple[ParsedExpression, ParsedExpression]:
"""Parse an `einops`-style pattern into a left-hand side and right-hand side `ParsedExpression` object.
Args:
pattern (str): the `einops`-style rearrangement pattern
axes_lengths (Mapping[str, int]): any additional length specifications for dimensions
Returns:
tuple[ParsedExpression, ParsedExpression]: a tuple containing the left-hand side and right-hand side expressions
"""
# adapted from einops.einops._prepare_transformation_recipe
# https://github.com/arogozhnikov/einops/blob/230ac1526c1f42c9e1f7373912c7f8047496df11/einops/einops.py
try:
left_str, right_str = pattern.split("->")
except ValueError:
raise ValueError("Pattern must contain a single '->' separator") from None
if _ellipsis in axes_lengths:
raise ValueError(f"'{_ellipsis}' is not an allowed axis identifier")
left = ParsedExpression(left_str)
right = ParsedExpression(right_str)
if not left.has_ellipsis and right.has_ellipsis:
raise ValueError(
f"Ellipsis found in right side, but not left side of a pattern {pattern}"
)
if left.has_ellipsis and left.has_ellipsis_parenthesized:
raise ValueError(
f"Ellipsis is parenthesis in the left side is not allowed: {pattern}"
)
return left, right
|
Parse an `einops`-style pattern into a left-hand side and right-hand side `ParsedExpression` object.
Args:
pattern (str): the `einops`-style rearrangement pattern
axes_lengths (Mapping[str, int]): any additional length specifications for dimensions
Returns:
tuple[ParsedExpression, ParsedExpression]: a tuple containing the left-hand side and right-hand side expressions
|
python
|
functorch/einops/_parsing.py
| 212
|
[
"pattern",
"axes_lengths"
] |
tuple[ParsedExpression, ParsedExpression]
| true
| 6
| 7.44
|
pytorch/pytorch
| 96,034
|
google
| false
|
loadBeanDefinitions
|
public int loadBeanDefinitions(EncodedResource encodedResource) throws BeanDefinitionStoreException {
Assert.notNull(encodedResource, "EncodedResource must not be null");
if (logger.isTraceEnabled()) {
logger.trace("Loading XML bean definitions from " + encodedResource);
}
Set<EncodedResource> currentResources = this.resourcesCurrentlyBeingLoaded.get();
if (!currentResources.add(encodedResource)) {
throw new BeanDefinitionStoreException(
"Detected cyclic loading of " + encodedResource + " - check your import definitions!");
}
try (InputStream inputStream = encodedResource.getResource().getInputStream()) {
InputSource inputSource = new InputSource(inputStream);
if (encodedResource.getEncoding() != null) {
inputSource.setEncoding(encodedResource.getEncoding());
}
return doLoadBeanDefinitions(inputSource, encodedResource.getResource());
}
catch (IOException ex) {
throw new BeanDefinitionStoreException(
"IOException parsing XML document from " + encodedResource.getResource(), ex);
}
finally {
currentResources.remove(encodedResource);
if (currentResources.isEmpty()) {
this.resourcesCurrentlyBeingLoaded.remove();
}
}
}
|
Load bean definitions from the specified XML file.
@param encodedResource the resource descriptor for the XML file,
allowing to specify an encoding to use for parsing the file
@return the number of bean definitions found
@throws BeanDefinitionStoreException in case of loading or parsing errors
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/xml/XmlBeanDefinitionReader.java
| 327
|
[
"encodedResource"
] | true
| 6
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
isRequired
|
public boolean isRequired() {
if (!this.required) {
return false;
}
if (this.field != null) {
return !(this.field.getType() == Optional.class || Nullness.forField(this.field) == Nullness.NULLABLE);
}
else {
return !obtainMethodParameter().isOptional();
}
}
|
Return whether this dependency is required.
<p>Optional semantics are derived from Java 8's {@link java.util.Optional},
any variant of a parameter-level {@code Nullable} annotation (such as from
JSR-305 or the FindBugs set of annotations), or a language-level nullable
type declaration in Kotlin.
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/DependencyDescriptor.java
| 157
|
[] | true
| 4
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
dataclasses_to_dicts
|
def dataclasses_to_dicts(data):
"""
Converts a list of dataclass instances to a list of dictionaries.
Parameters
----------
data : List[Type[dataclass]]
Returns
--------
list_dict : List[dict]
Examples
--------
>>> from dataclasses import dataclass
>>> @dataclass
... class Point:
... x: int
... y: int
>>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)])
[{'x': 1, 'y': 2}, {'x': 2, 'y': 3}]
"""
from dataclasses import asdict
return list(map(asdict, data))
|
Converts a list of dataclass instances to a list of dictionaries.
Parameters
----------
data : List[Type[dataclass]]
Returns
--------
list_dict : List[dict]
Examples
--------
>>> from dataclasses import dataclass
>>> @dataclass
... class Point:
... x: int
... y: int
>>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)])
[{'x': 1, 'y': 2}, {'x': 2, 'y': 3}]
|
python
|
pandas/core/internals/construction.py
| 703
|
[
"data"
] | false
| 1
| 6.16
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
load
|
protected void load(ApplicationContext context, Object[] sources) {
if (logger.isDebugEnabled()) {
logger.debug("Loading source " + StringUtils.arrayToCommaDelimitedString(sources));
}
BeanDefinitionLoader loader = createBeanDefinitionLoader(getBeanDefinitionRegistry(context), sources);
if (this.beanNameGenerator != null) {
loader.setBeanNameGenerator(this.beanNameGenerator);
}
if (this.resourceLoader != null) {
loader.setResourceLoader(this.resourceLoader);
}
if (this.environment != null) {
loader.setEnvironment(this.environment);
}
loader.load();
}
|
Load beans into the application context.
@param context the context to load beans into
@param sources the sources to load
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
| 683
|
[
"context",
"sources"
] |
void
| true
| 5
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_levels_to_axis
|
def _levels_to_axis(
ss,
levels: tuple[int] | list[int],
valid_ilocs: npt.NDArray[np.intp],
sort_labels: bool = False,
) -> tuple[npt.NDArray[np.intp], list[IndexLabel]]:
"""
For a MultiIndexed sparse Series `ss`, return `ax_coords` and `ax_labels`,
where `ax_coords` are the coordinates along one of the two axes of the
destination sparse matrix, and `ax_labels` are the labels from `ss`' Index
which correspond to these coordinates.
Parameters
----------
ss : Series
levels : tuple/list
valid_ilocs : numpy.ndarray
Array of integer positions of valid values for the sparse matrix in ss.
sort_labels : bool, default False
Sort the axis labels before forming the sparse matrix. When `levels`
refers to a single level, set to True for a faster execution.
Returns
-------
ax_coords : numpy.ndarray (axis coordinates)
ax_labels : list (axis labels)
"""
# Since the labels are sorted in `Index.levels`, when we wish to sort and
# there is only one level of the MultiIndex for this axis, the desired
# output can be obtained in the following simpler, more efficient way.
if sort_labels and len(levels) == 1:
ax_coords = ss.index.codes[levels[0]][valid_ilocs]
ax_labels = ss.index.levels[levels[0]]
else:
levels_values = lib.fast_zip(
[ss.index.get_level_values(lvl).to_numpy() for lvl in levels]
)
codes, ax_labels = factorize(levels_values, sort=sort_labels)
ax_coords = codes[valid_ilocs]
ax_labels = ax_labels.tolist()
return ax_coords, ax_labels # pyright: ignore[reportReturnType]
|
For a MultiIndexed sparse Series `ss`, return `ax_coords` and `ax_labels`,
where `ax_coords` are the coordinates along one of the two axes of the
destination sparse matrix, and `ax_labels` are the labels from `ss`' Index
which correspond to these coordinates.
Parameters
----------
ss : Series
levels : tuple/list
valid_ilocs : numpy.ndarray
Array of integer positions of valid values for the sparse matrix in ss.
sort_labels : bool, default False
Sort the axis labels before forming the sparse matrix. When `levels`
refers to a single level, set to True for a faster execution.
Returns
-------
ax_coords : numpy.ndarray (axis coordinates)
ax_labels : list (axis labels)
|
python
|
pandas/core/arrays/sparse/scipy_sparse.py
| 40
|
[
"ss",
"levels",
"valid_ilocs",
"sort_labels"
] |
tuple[npt.NDArray[np.intp], list[IndexLabel]]
| true
| 4
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
getAsShort
|
public static <E extends Throwable> short getAsShort(final FailableShortSupplier<E> supplier) {
try {
return supplier.getAsShort();
} catch (final Throwable t) {
throw rethrow(t);
}
}
|
Invokes a short supplier, and returns the result.
@param supplier The short supplier to invoke.
@param <E> The type of checked exception, which the supplier can throw.
@return The short, which has been created by the supplier
|
java
|
src/main/java/org/apache/commons/lang3/function/Failable.java
| 482
|
[
"supplier"
] | true
| 2
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
bindProperty
|
private <T> @Nullable Object bindProperty(Bindable<T> target, Context context, ConfigurationProperty property) {
context.setConfigurationProperty(property);
Object result = property.getValue();
result = this.placeholdersResolver.resolvePlaceholders(result);
result = context.getConverter().convert(result, target);
return result;
}
|
Bind the specified target {@link Bindable} using this binder's
{@link ConfigurationPropertySource property sources} or create a new instance using
the type of the {@link Bindable} if the result of the binding is {@code null}.
@param name the configuration property name to bind
@param target the target bindable
@param handler the bind handler (may be {@code null})
@param <T> the bound or created type
@return the bound or created object
@since 2.2.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Binder.java
| 487
|
[
"target",
"context",
"property"
] |
Object
| true
| 1
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
isOTelDocument
|
static boolean isOTelDocument(Map<String, Object> source) {
Object resource = source.get(RESOURCE_KEY);
if (resource instanceof Map<?, ?> resourceMap) {
Object resourceAttributes = resourceMap.get(ATTRIBUTES_KEY);
if (resourceAttributes != null && (resourceAttributes instanceof Map) == false) {
return false;
}
} else {
return false;
}
Object scope = source.get(SCOPE_KEY);
if (scope != null && scope instanceof Map == false) {
return false;
}
Object attributes = source.get(ATTRIBUTES_KEY);
if (attributes != null && attributes instanceof Map == false) {
return false;
}
Object body = source.get(BODY_KEY);
if (body != null) {
if (body instanceof Map<?, ?> bodyMap) {
Object bodyText = bodyMap.get(TEXT_KEY);
if (bodyText != null && (bodyText instanceof String) == false) {
return false;
}
Object bodyStructured = bodyMap.get(STRUCTURED_KEY);
return (bodyStructured instanceof String) == false;
} else {
return false;
}
}
return true;
}
|
Checks if the given document is OpenTelemetry-compliant.
<p>A document is considered OpenTelemetry-compliant if it meets the following criteria:
<ul>
<li>The "resource" field is present and is a map
<li>The resource field either doesn't contain an "attributes" field, or the "attributes" field is a map.</li>
<li>The "scope" field is either absent or a map.</li>
<li>The "attributes" field is either absent or a map.</li>
<li>The "body" field is either absent or a map.</li>
<li>If exists, the "body" either doesn't contain a "text" field, or the "text" field is a string.</li>
<li>If exists, the "body" either doesn't contain a "structured" field, or the "structured" field is not a string.</li>
</ul>
@param source the document to check
@return {@code true} if the document is OpenTelemetry-compliant, {@code false} otherwise
|
java
|
modules/ingest-otel/src/main/java/org/elasticsearch/ingest/otel/NormalizeForStreamProcessor.java
| 209
|
[
"source"
] | true
| 12
| 8.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
getTargetType
|
public @Nullable Class<?> getTargetType() {
if (this.resolvedTargetType != null) {
return this.resolvedTargetType;
}
ResolvableType targetType = this.targetType;
return (targetType != null ? targetType.resolve() : null);
}
|
Return the target type of this bean definition, if known
(either specified in advance or resolved on first instantiation).
@since 3.2.2
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/RootBeanDefinition.java
| 332
|
[] | true
| 3
| 7.04
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
build
|
public ImmutableLongArray build() {
return count == 0 ? EMPTY : new ImmutableLongArray(array, 0, count);
}
|
Returns a new immutable array. The builder can continue to be used after this call, to append
more values and build again.
<p><b>Performance note:</b> the returned array is backed by the same array as the builder, so
no data is copied as part of this step, but this may occupy more memory than strictly
necessary. To copy the data to a right-sized backing array, use {@code .build().trimmed()}.
|
java
|
android/guava/src/com/google/common/primitives/ImmutableLongArray.java
| 334
|
[] |
ImmutableLongArray
| true
| 2
| 6.96
|
google/guava
| 51,352
|
javadoc
| false
|
fast_xs
|
def fast_xs(self, loc: int) -> SingleBlockManager:
"""
Return the array corresponding to `frame.iloc[loc]`.
Parameters
----------
loc : int
Returns
-------
np.ndarray or ExtensionArray
"""
if len(self.blocks) == 1:
# TODO: this could be wrong if blk.mgr_locs is not slice(None)-like;
# is this ruled out in the general case?
result: np.ndarray | ExtensionArray = self.blocks[0].iget(
(slice(None), loc)
)
# in the case of a single block, the new block is a view
bp = BlockPlacement(slice(0, len(result)))
block = new_block(
result,
placement=bp,
ndim=1,
refs=self.blocks[0].refs,
)
return SingleBlockManager(block, self.axes[0])
dtype = interleaved_dtype([blk.dtype for blk in self.blocks])
n = len(self)
if isinstance(dtype, ExtensionDtype):
# TODO: use object dtype as workaround for non-performant
# EA.__setitem__ methods. (primarily ArrowExtensionArray.__setitem__
# when iteratively setting individual values)
# https://github.com/pandas-dev/pandas/pull/54508#issuecomment-1675827918
result = np.empty(n, dtype=object)
else:
result = np.empty(n, dtype=dtype)
result = ensure_wrapped_if_datetimelike(result)
for blk in self.blocks:
# Such assignment may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
for i, rl in enumerate(blk.mgr_locs):
item = blk.iget((i, loc))
if (
result.dtype.kind in "iub"
and lib.is_float(item)
and isna(item)
and isinstance(blk.dtype, CategoricalDtype)
):
# GH#58954 caused bc interleaved_dtype is wrong for Categorical
# TODO(GH#38240) this will be unnecessary
# Note that doing this in a try/except would work for the
# integer case, but not for bool, which will cast the NaN
# entry to True.
if result.dtype.kind == "b":
new_dtype = object
else:
new_dtype = np.float64
result = result.astype(new_dtype)
result[rl] = item
if isinstance(dtype, ExtensionDtype):
cls = dtype.construct_array_type()
result = cls._from_sequence(result, dtype=dtype)
bp = BlockPlacement(slice(0, len(result)))
block = new_block(result, placement=bp, ndim=1)
return SingleBlockManager(block, self.axes[0])
|
Return the array corresponding to `frame.iloc[loc]`.
Parameters
----------
loc : int
Returns
-------
np.ndarray or ExtensionArray
|
python
|
pandas/core/internals/managers.py
| 1,108
|
[
"self",
"loc"
] |
SingleBlockManager
| true
| 13
| 6.32
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
closeAndWait
|
private void closeAndWait(ConfigurableApplicationContext context) {
if (!context.isActive()) {
return;
}
context.close();
try {
int waited = 0;
while (context.isActive()) {
if (waited > TIMEOUT) {
throw new TimeoutException();
}
Thread.sleep(SLEEP);
waited += SLEEP;
}
}
catch (InterruptedException ex) {
Thread.currentThread().interrupt();
logger.warn("Interrupted waiting for application context " + context + " to become inactive");
}
catch (TimeoutException ex) {
logger.warn("Timed out waiting for application context " + context + " to become inactive", ex);
}
}
|
Call {@link ConfigurableApplicationContext#close()} and wait until the context
becomes inactive. We can't assume that just because the close method returns that
the context is actually inactive. It could be that another thread is still in the
process of disposing beans.
@param context the context to clean
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplicationShutdownHook.java
| 143
|
[
"context"
] |
void
| true
| 6
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
checkPositionIndex
|
@CanIgnoreReturnValue
public static int checkPositionIndex(int index, int size) {
return checkPositionIndex(index, size, "index");
}
|
Ensures that {@code index} specifies a valid <i>position</i> in an array, list or string of
size {@code size}. A position index may range from zero to {@code size}, inclusive.
<p><b>Java 9 users:</b> consider using {@link java.util.Objects#checkIndex(index, size)}
instead. However, note that {@code checkIndex()} throws {@code IndexOutOfBoundsException} when
{@code size} is negative, while this method throws {@code IllegalArgumentException}.
@param index a user-supplied index identifying a position in an array, list or string
@param size the size of that array, list or string
@return the value of {@code index}
@throws IndexOutOfBoundsException if {@code index} is negative or is greater than {@code size}
@throws IllegalArgumentException if {@code size} is negative
|
java
|
android/guava/src/com/google/common/base/Preconditions.java
| 1,398
|
[
"index",
"size"
] | true
| 1
| 6.32
|
google/guava
| 51,352
|
javadoc
| false
|
|
format
|
static String format(final Token[] tokens, final long years, final long months, final long days, final long hours, final long minutes,
final long seconds,
final long milliseconds, final boolean padWithZeros) {
final StringBuilder buffer = new StringBuilder();
boolean lastOutputSeconds = false;
boolean lastOutputZero = false;
int optionalStart = -1;
boolean firstOptionalNonLiteral = false;
int optionalIndex = -1;
boolean inOptional = false;
for (final Token token : tokens) {
final Object value = token.getValue();
final boolean isLiteral = value instanceof StringBuilder;
final int count = token.getCount();
if (optionalIndex != token.optionalIndex) {
optionalIndex = token.optionalIndex;
if (optionalIndex > -1) {
//entering new optional block
optionalStart = buffer.length();
lastOutputZero = false;
inOptional = true;
firstOptionalNonLiteral = false;
} else {
//leaving optional block
inOptional = false;
}
}
if (isLiteral) {
if (!inOptional || !lastOutputZero) {
buffer.append(value.toString());
}
} else if (value.equals(y)) {
lastOutputSeconds = false;
lastOutputZero = years == 0;
if (!inOptional || !lastOutputZero) {
buffer.append(paddedValue(years, padWithZeros, count));
}
} else if (value.equals(M)) {
lastOutputSeconds = false;
lastOutputZero = months == 0;
if (!inOptional || !lastOutputZero) {
buffer.append(paddedValue(months, padWithZeros, count));
}
} else if (value.equals(d)) {
lastOutputSeconds = false;
lastOutputZero = days == 0;
if (!inOptional || !lastOutputZero) {
buffer.append(paddedValue(days, padWithZeros, count));
}
} else if (value.equals(H)) {
lastOutputSeconds = false;
lastOutputZero = hours == 0;
if (!inOptional || !lastOutputZero) {
buffer.append(paddedValue(hours, padWithZeros, count));
}
} else if (value.equals(m)) {
lastOutputSeconds = false;
lastOutputZero = minutes == 0;
if (!inOptional || !lastOutputZero) {
buffer.append(paddedValue(minutes, padWithZeros, count));
}
} else if (value.equals(s)) {
lastOutputSeconds = true;
lastOutputZero = seconds == 0;
if (!inOptional || !lastOutputZero) {
buffer.append(paddedValue(seconds, padWithZeros, count));
}
} else if (value.equals(S)) {
lastOutputZero = milliseconds == 0;
if (!inOptional || !lastOutputZero) {
if (lastOutputSeconds) {
// ensure at least 3 digits are displayed even if padding is not selected
final int width = padWithZeros ? Math.max(3, count) : 3;
buffer.append(paddedValue(milliseconds, true, width));
} else {
buffer.append(paddedValue(milliseconds, padWithZeros, count));
}
}
lastOutputSeconds = false;
}
//as soon as we hit first nonliteral in optional, check for literal prefix
if (inOptional && !isLiteral && !firstOptionalNonLiteral) {
firstOptionalNonLiteral = true;
if (lastOutputZero) {
buffer.delete(optionalStart, buffer.length());
}
}
}
return buffer.toString();
}
|
The internal method to do the formatting.
@param tokens the tokens
@param years the number of years
@param months the number of months
@param days the number of days
@param hours the number of hours
@param minutes the number of minutes
@param seconds the number of seconds
@param milliseconds the number of millis
@param padWithZeros whether to pad
@return the formatted string
|
java
|
src/main/java/org/apache/commons/lang3/time/DurationFormatUtils.java
| 239
|
[
"tokens",
"years",
"months",
"days",
"hours",
"minutes",
"seconds",
"milliseconds",
"padWithZeros"
] |
String
| true
| 33
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
toString
|
@Override
public String toString() {
return MoreObjects.toStringHelper(ServiceManager.class)
.add("services", Collections2.filter(services, not(instanceOf(NoOpService.class))))
.toString();
}
|
Returns the service load times. This value will only return startup times for services that
have finished starting.
@return Map of services and their corresponding startup time, the map entries will be ordered
by startup time.
@since 33.4.0 (but since 31.0 in the JRE flavor)
|
java
|
android/guava/src/com/google/common/util/concurrent/ServiceManager.java
| 441
|
[] |
String
| true
| 1
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
compute
|
public double compute(Collection<? extends Number> dataset) {
return computeInPlace(Doubles.toArray(dataset));
}
|
Computes the quantile value of the given dataset.
@param dataset the dataset to do the calculation on, which must be non-empty, which will be
cast to doubles (with any associated lost of precision), and which will not be mutated by
this call (it is copied instead)
@return the quantile value
|
java
|
android/guava/src/com/google/common/math/Quantiles.java
| 242
|
[
"dataset"
] | true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
|
sem
|
def sem(
self, ddof: int = 1, numeric_only: bool = False, skipna: bool = True
) -> NDFrameT:
"""
Compute standard error of the mean of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
.. versionchanged:: 2.0.0
numeric_only now defaults to ``False``.
skipna : bool, default True
Exclude NA/null values. If an entire group is NA, the result will be NA.
.. versionadded:: 3.0.0
Returns
-------
Series or DataFrame
Standard error of the mean of values within each group.
See Also
--------
DataFrame.sem : Return unbiased standard error of the mean over requested axis.
Series.sem : Return unbiased standard error of the mean over requested axis.
Examples
--------
For SeriesGroupBy:
>>> lst = ["a", "a", "b", "b"]
>>> ser = pd.Series([5, 10, 8, 14], index=lst)
>>> ser
a 5
a 10
b 8
b 14
dtype: int64
>>> ser.groupby(level=0).sem()
a 2.5
b 3.0
dtype: float64
For DataFrameGroupBy:
>>> data = [[1, 12, 11], [1, 15, 2], [2, 5, 8], [2, 6, 12]]
>>> df = pd.DataFrame(
... data,
... columns=["a", "b", "c"],
... index=["tuna", "salmon", "catfish", "goldfish"],
... )
>>> df
a b c
tuna 1 12 11
salmon 1 15 2
catfish 2 5 8
goldfish 2 6 12
>>> df.groupby("a").sem()
b c
a
1 1.5 4.5
2 0.5 2.0
For Resampler:
>>> ser = pd.Series(
... [1, 3, 2, 4, 3, 8],
... index=pd.DatetimeIndex(
... [
... "2023-01-01",
... "2023-01-10",
... "2023-01-15",
... "2023-02-01",
... "2023-02-10",
... "2023-02-15",
... ]
... ),
... )
>>> ser.resample("MS").sem()
2023-01-01 0.577350
2023-02-01 1.527525
Freq: MS, dtype: float64
"""
if numeric_only and self.obj.ndim == 1 and not is_numeric_dtype(self.obj.dtype):
raise TypeError(
f"{type(self).__name__}.sem called with "
f"numeric_only={numeric_only} and dtype {self.obj.dtype}"
)
return self._cython_agg_general(
"sem",
alt=lambda x: Series(x, copy=False).sem(ddof=ddof, skipna=skipna),
numeric_only=numeric_only,
ddof=ddof,
skipna=skipna,
)
|
Compute standard error of the mean of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
.. versionchanged:: 2.0.0
numeric_only now defaults to ``False``.
skipna : bool, default True
Exclude NA/null values. If an entire group is NA, the result will be NA.
.. versionadded:: 3.0.0
Returns
-------
Series or DataFrame
Standard error of the mean of values within each group.
See Also
--------
DataFrame.sem : Return unbiased standard error of the mean over requested axis.
Series.sem : Return unbiased standard error of the mean over requested axis.
Examples
--------
For SeriesGroupBy:
>>> lst = ["a", "a", "b", "b"]
>>> ser = pd.Series([5, 10, 8, 14], index=lst)
>>> ser
a 5
a 10
b 8
b 14
dtype: int64
>>> ser.groupby(level=0).sem()
a 2.5
b 3.0
dtype: float64
For DataFrameGroupBy:
>>> data = [[1, 12, 11], [1, 15, 2], [2, 5, 8], [2, 6, 12]]
>>> df = pd.DataFrame(
... data,
... columns=["a", "b", "c"],
... index=["tuna", "salmon", "catfish", "goldfish"],
... )
>>> df
a b c
tuna 1 12 11
salmon 1 15 2
catfish 2 5 8
goldfish 2 6 12
>>> df.groupby("a").sem()
b c
a
1 1.5 4.5
2 0.5 2.0
For Resampler:
>>> ser = pd.Series(
... [1, 3, 2, 4, 3, 8],
... index=pd.DatetimeIndex(
... [
... "2023-01-01",
... "2023-01-10",
... "2023-01-15",
... "2023-02-01",
... "2023-02-10",
... "2023-02-15",
... ]
... ),
... )
>>> ser.resample("MS").sem()
2023-01-01 0.577350
2023-02-01 1.527525
Freq: MS, dtype: float64
|
python
|
pandas/core/groupby/groupby.py
| 2,753
|
[
"self",
"ddof",
"numeric_only",
"skipna"
] |
NDFrameT
| true
| 4
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
lookupImplementationMethod
|
InstrumentationInfo lookupImplementationMethod(
Class<?> targetSuperclass,
String methodName,
Class<?> implementationClass,
Class<?> checkerClass,
String checkMethodName,
Class<?>... parameterTypes
) throws NoSuchMethodException, ClassNotFoundException;
|
This method uses the method names of the provided class to identify the JDK method to instrument; it examines all methods prefixed
by {@code check$}, and parses the rest of the name to extract the JDK method:
<ul>
<li>
Instance methods have the fully qualified class name (with . replaced by _), followed by $, followed by the method name. Example:
{@link org.elasticsearch.entitlement.bridge.EntitlementChecker#check$java_lang_Runtime$halt}
</li>
<li>
Static methods have the fully qualified class name (with . replaced by _), followed by $$, followed by the method name. Example:
{@link org.elasticsearch.entitlement.bridge.EntitlementChecker#check$java_lang_System$$exit}
</li>
<li>
Constructors have the fully qualified class name (with . replaced by _), followed by $ and nothing else. Example:
{@link org.elasticsearch.entitlement.bridge.EntitlementChecker#check$java_lang_ClassLoader$}
</li>
</ul>
<p>
<strong>NOTE:</strong> look up of methods using this convention is the primary way we use to identify which methods to instrument,
but other methods can be added to the map of methods to instrument. See
{@link org.elasticsearch.entitlement.initialization.EntitlementInitialization#initialize} for details.
</p>
@param clazz the class to inspect to find methods to instrument
@throws ClassNotFoundException if the class is not defined or cannot be inspected
|
java
|
libs/entitlement/src/main/java/org/elasticsearch/entitlement/instrumentation/InstrumentationService.java
| 53
|
[
"targetSuperclass",
"methodName",
"implementationClass",
"checkerClass",
"checkMethodName"
] |
InstrumentationInfo
| true
| 1
| 6.64
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
unescapeJson
|
public static final String unescapeJson(final String input) {
return UNESCAPE_JSON.translate(input);
}
|
Unescapes any Json literals found in the {@link String}.
<p>For example, it will turn a sequence of {@code '\'} and {@code 'n'}
into a newline character, unless the {@code '\'} is preceded by another
{@code '\'}.</p>
@see #unescapeJava(String)
@param input the {@link String} to unescape, may be null
@return A new unescaped {@link String}, {@code null} if null string input
@since 3.2
|
java
|
src/main/java/org/apache/commons/lang3/StringEscapeUtils.java
| 758
|
[
"input"
] |
String
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
_create_ranges_from_split_points
|
def _create_ranges_from_split_points(
split_points: list[int],
) -> list[tuple[int, int] | tuple[int, float]]:
"""Convert split points into ranges for autotuning dispatch.
Example:
split_points=[512, 2048]
returns:
[(1, 512), (513, 2048), (2049, float('inf'))]
"""
ranges: list[tuple[int, int] | tuple[int, float]] = []
start = 1
for split_point in split_points:
ranges.append((start, split_point))
start = split_point + 1
ranges.append((start, float("inf")))
return ranges
|
Convert split points into ranges for autotuning dispatch.
Example:
split_points=[512, 2048]
returns:
[(1, 512), (513, 2048), (2049, float('inf'))]
|
python
|
torch/_inductor/kernel/custom_op.py
| 335
|
[
"split_points"
] |
list[tuple[int, int] | tuple[int, float]]
| true
| 2
| 9.04
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
codegen_body
|
def codegen_body(self) -> None:
"""
Concat output code from index_code, loads, compute, stores,
suffix into self.body.
For pointwise kernels, this is called just once at the end.
For reduction kernels, this generates a loop over the reduction
axis.
"""
if self.multistage_reduction_entry:
with self.body.indent():
self.body.splice(self.loads)
self.body.splice(self.compute)
self.body.writeline("}" * len(self.multistage_reduction_entry))
# Invalidate variables instantiated inside loop
# But results of reduction alive. Reduction cache values can be
# either CSEVariable or tuple of CSEVariables, in which case all
# variables in the tuple must be preserved
self.cse.invalidate(
OrderedSet(
v
for item in self.cse.reduction_cache.values()
for v in (item if isinstance(item, tuple) else (item,))
)
)
# And loop codegen
while self.multistage_reduction_entry:
self.multistage_reduction_entry.pop().cache_clear()
else:
self.body.splice(self.loads)
self.body.splice(self.compute)
self.body.splice(self.stores)
self.loads.clear()
self.compute.clear()
self.stores.clear()
|
Concat output code from index_code, loads, compute, stores,
suffix into self.body.
For pointwise kernels, this is called just once at the end.
For reduction kernels, this generates a loop over the reduction
axis.
|
python
|
torch/_inductor/codegen/mps.py
| 815
|
[
"self"
] |
None
| true
| 5
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
value_counts
|
def value_counts(self, dropna: bool = True) -> Series:
"""
Returns a Series containing counts of unique values.
Parameters
----------
dropna : bool, default True
Don't include counts of NaN, even if NaN is in sp_values.
Returns
-------
counts : Series
"""
from pandas import (
Index,
Series,
)
keys, counts, _ = algos.value_counts_arraylike(self.sp_values, dropna=dropna)
fcounts = self.sp_index.ngaps
if fcounts > 0 and (not self._null_fill_value or not dropna):
mask = isna(keys) if self._null_fill_value else keys == self.fill_value
if mask.any():
counts[mask] += fcounts
else:
# error: Argument 1 to "insert" has incompatible type "Union[
# ExtensionArray,ndarray[Any, Any]]"; expected "Union[
# _SupportsArray[dtype[Any]], Sequence[_SupportsArray[dtype
# [Any]]], Sequence[Sequence[_SupportsArray[dtype[Any]]]],
# Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]], Sequence
# [Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]]]]"
keys = np.insert(keys, 0, self.fill_value) # type: ignore[arg-type]
counts = np.insert(counts, 0, fcounts)
if not isinstance(keys, ABCIndex):
index = Index(keys)
else:
index = keys
return Series(counts, index=index, copy=False)
|
Returns a Series containing counts of unique values.
Parameters
----------
dropna : bool, default True
Don't include counts of NaN, even if NaN is in sp_values.
Returns
-------
counts : Series
|
python
|
pandas/core/arrays/sparse/array.py
| 929
|
[
"self",
"dropna"
] |
Series
| true
| 9
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
getCommonPrefix
|
public static String getCommonPrefix(final String... strs) {
if (ArrayUtils.isEmpty(strs)) {
return EMPTY;
}
final int smallestIndexOfDiff = indexOfDifference(strs);
if (smallestIndexOfDiff == INDEX_NOT_FOUND) {
// all strings were identical
if (strs[0] == null) {
return EMPTY;
}
return strs[0];
}
if (smallestIndexOfDiff == 0) {
// there were no common initial characters
return EMPTY;
}
// we found a common initial character sequence
return strs[0].substring(0, smallestIndexOfDiff);
}
|
Compares all Strings in an array and returns the initial sequence of characters that is common to all of them.
<p>
For example, {@code getCommonPrefix("i am a machine", "i am a robot") -> "i am a "}
</p>
<pre>
StringUtils.getCommonPrefix(null) = ""
StringUtils.getCommonPrefix(new String[] {}) = ""
StringUtils.getCommonPrefix(new String[] {"abc"}) = "abc"
StringUtils.getCommonPrefix(new String[] {null, null}) = ""
StringUtils.getCommonPrefix(new String[] {"", ""}) = ""
StringUtils.getCommonPrefix(new String[] {"", null}) = ""
StringUtils.getCommonPrefix(new String[] {"abc", null, null}) = ""
StringUtils.getCommonPrefix(new String[] {null, null, "abc"}) = ""
StringUtils.getCommonPrefix(new String[] {"", "abc"}) = ""
StringUtils.getCommonPrefix(new String[] {"abc", ""}) = ""
StringUtils.getCommonPrefix(new String[] {"abc", "abc"}) = "abc"
StringUtils.getCommonPrefix(new String[] {"abc", "a"}) = "a"
StringUtils.getCommonPrefix(new String[] {"ab", "abxyz"}) = "ab"
StringUtils.getCommonPrefix(new String[] {"abcde", "abxyz"}) = "ab"
StringUtils.getCommonPrefix(new String[] {"abcde", "xyz"}) = ""
StringUtils.getCommonPrefix(new String[] {"xyz", "abcde"}) = ""
StringUtils.getCommonPrefix(new String[] {"i am a machine", "i am a robot"}) = "i am a "
</pre>
@param strs array of String objects, entries may be null.
@return the initial sequence of characters that are common to all Strings in the array; empty String if the array is null, the elements are all null or
if there is no common prefix.
@since 2.4
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 2,001
|
[] |
String
| true
| 5
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
substringBetween
|
public static String substringBetween(final String str, final String tag) {
return substringBetween(str, tag, tag);
}
|
Gets the String that is nested in between two instances of the same String.
<p>
A {@code null} input String returns {@code null}. A {@code null} tag returns {@code null}.
</p>
<pre>
StringUtils.substringBetween(null, *) = null
StringUtils.substringBetween("", "") = ""
StringUtils.substringBetween("", "tag") = null
StringUtils.substringBetween("tagabctag", null) = null
StringUtils.substringBetween("tagabctag", "") = ""
StringUtils.substringBetween("tagabctag", "tag") = "abc"
</pre>
@param str the String containing the substring, may be null.
@param tag the String before and after the substring, may be null.
@return the substring, {@code null} if no match.
@since 2.0
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 8,476
|
[
"str",
"tag"
] |
String
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
toString
|
public static String toString(final ClassLoader classLoader) {
if (classLoader instanceof URLClassLoader) {
return toString((URLClassLoader) classLoader);
}
return Objects.toString(classLoader);
}
|
Converts the given class loader to a String calling {@link #toString(URLClassLoader)}.
@param classLoader to URLClassLoader to convert.
@return the formatted string.
|
java
|
src/main/java/org/apache/commons/lang3/ClassLoaderUtils.java
| 64
|
[
"classLoader"
] |
String
| true
| 2
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
generateValueCode
|
protected @Nullable CodeBlock generateValueCode(GenerationContext generationContext, String name, Object value) {
RegisteredBean innerRegisteredBean = getInnerRegisteredBean(value);
if (innerRegisteredBean != null) {
BeanDefinitionMethodGenerator methodGenerator = this.beanDefinitionMethodGeneratorFactory
.getBeanDefinitionMethodGenerator(innerRegisteredBean, name);
Assert.state(methodGenerator != null, "Unexpected filtering of inner-bean");
MethodReference generatedMethod = methodGenerator
.generateBeanDefinitionMethod(generationContext, this.beanRegistrationsCode);
return generatedMethod.toInvokeCodeBlock(ArgumentCodeGenerator.none());
}
return null;
}
|
Extract the target class of a public {@link FactoryBean} based on its
constructor. If the implementation does not resolve the target class
because it itself uses a generic, attempt to extract it from the bean type.
@param factoryBeanType the factory bean type
@param beanType the bean type
@return the target class to use
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/DefaultBeanRegistrationCodeFragments.java
| 180
|
[
"generationContext",
"name",
"value"
] |
CodeBlock
| true
| 2
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getMergedLocalBeanDefinition
|
protected RootBeanDefinition getMergedLocalBeanDefinition(String beanName) throws BeansException {
// Quick check on the concurrent map first, with minimal locking.
RootBeanDefinition mbd = this.mergedBeanDefinitions.get(beanName);
if (mbd != null && !mbd.stale) {
return mbd;
}
return getMergedBeanDefinition(beanName, getBeanDefinition(beanName));
}
|
Return a merged RootBeanDefinition, traversing the parent bean definition
if the specified bean corresponds to a child bean definition.
@param beanName the name of the bean to retrieve the merged definition for
@return a (potentially merged) RootBeanDefinition for the given bean
@throws NoSuchBeanDefinitionException if there is no bean with the given name
@throws BeanDefinitionStoreException in case of an invalid bean definition
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
| 1,363
|
[
"beanName"
] |
RootBeanDefinition
| true
| 3
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
maxBucketIndex
|
@Override
public OptionalLong maxBucketIndex() {
if (numBuckets == 0) {
return OptionalLong.empty();
} else {
return OptionalLong.of(bucketIndices[startSlot() + numBuckets - 1]);
}
}
|
@return the position of the first bucket of this set of buckets within {@link #bucketCounts} and {@link #bucketIndices}.
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/FixedCapacityExponentialHistogram.java
| 294
|
[] |
OptionalLong
| true
| 2
| 7.92
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
postprocess
|
function postprocess(analyzer, node) {
/**
* Ends the code path for the current node.
* @returns {void}
*/
function endCodePath() {
let codePath = analyzer.codePath;
// Mark the current path as the final node.
CodePath.getState(codePath).makeFinal();
// Emits onCodePathSegmentEnd event of the current segments.
leaveFromCurrentSegment(analyzer, node);
// Emits onCodePathEnd event of this code path.
analyzer.emitter.emit('onCodePathEnd', codePath, node);
codePath = analyzer.codePath = analyzer.codePath.upper;
}
switch (node.type) {
case 'Program':
case 'FunctionDeclaration':
case 'ComponentDeclaration':
case 'HookDeclaration':
case 'FunctionExpression':
case 'ArrowFunctionExpression':
case 'StaticBlock': {
endCodePath();
break;
}
// The `arguments.length >= 1` case is in `preprocess` function.
case 'CallExpression':
if (node.optional === true && node.arguments.length === 0) {
CodePath.getState(analyzer.codePath).makeOptionalRight();
}
break;
default:
break;
}
/*
* Special case: The right side of class field initializer is considered
* to be its own function, so we need to end a code path in this
* case.
*
* We need to check after the other checks in order to close the
* code paths in the correct order for code like this:
*
*
* class Foo {
* a = () => {}
* }
*
* In this case, The ArrowFunctionExpression code path is closed first
* and then we need to close the code path for the PropertyDefinition
* value.
*/
if (isPropertyDefinitionValue(node)) {
endCodePath();
}
}
|
Updates the code path to finalize the current code path.
@param {CodePathAnalyzer} analyzer The instance.
@param {ASTNode} node The current AST node.
@returns {void}
|
javascript
|
packages/eslint-plugin-react-hooks/src/code-path-analysis/code-path-analyzer.js
| 650
|
[
"analyzer",
"node"
] | false
| 4
| 6.16
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
__call__
|
def __call__(self, num: float) -> str:
"""
Formats a number in engineering notation, appending a letter
representing the power of 1000 of the original number. Some examples:
>>> format_eng = EngFormatter(accuracy=0, use_eng_prefix=True)
>>> format_eng(0)
' 0'
>>> format_eng = EngFormatter(accuracy=1, use_eng_prefix=True)
>>> format_eng(1_000_000)
' 1.0M'
>>> format_eng = EngFormatter(accuracy=2, use_eng_prefix=False)
>>> format_eng("-1e-6")
'-1.00E-06'
@param num: the value to represent
@type num: either a numeric value or a string that can be converted to
a numeric value (as per decimal.Decimal constructor)
@return: engineering formatted string
"""
dnum = Decimal(str(num))
if Decimal.is_nan(dnum):
return "NaN"
if Decimal.is_infinite(dnum):
return "inf"
sign = 1
if dnum < 0: # pragma: no cover
sign = -1
dnum = -dnum
if dnum != 0:
pow10 = Decimal(int(math.floor(dnum.log10() / 3) * 3))
else:
pow10 = Decimal(0)
pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))
int_pow10 = int(pow10)
if self.use_eng_prefix:
prefix = self.ENG_PREFIXES[int_pow10]
elif int_pow10 < 0:
prefix = f"E-{-int_pow10:02d}"
else:
prefix = f"E+{int_pow10:02d}"
mant = sign * dnum / (10**pow10)
if self.accuracy is None: # pragma: no cover
format_str = "{mant: g}{prefix}"
else:
format_str = f"{{mant: .{self.accuracy:d}f}}{{prefix}}"
formatted = format_str.format(mant=mant, prefix=prefix)
return formatted
|
Formats a number in engineering notation, appending a letter
representing the power of 1000 of the original number. Some examples:
>>> format_eng = EngFormatter(accuracy=0, use_eng_prefix=True)
>>> format_eng(0)
' 0'
>>> format_eng = EngFormatter(accuracy=1, use_eng_prefix=True)
>>> format_eng(1_000_000)
' 1.0M'
>>> format_eng = EngFormatter(accuracy=2, use_eng_prefix=False)
>>> format_eng("-1e-6")
'-1.00E-06'
@param num: the value to represent
@type num: either a numeric value or a string that can be converted to
a numeric value (as per decimal.Decimal constructor)
@return: engineering formatted string
|
python
|
pandas/io/formats/format.py
| 1,893
|
[
"self",
"num"
] |
str
| true
| 11
| 8.72
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
_validate_subplots_kwarg
|
def _validate_subplots_kwarg(
subplots: bool | Sequence[Sequence[str]], data: Series | DataFrame, kind: str
) -> bool | list[tuple[int, ...]]:
"""
Validate the subplots parameter
- check type and content
- check for duplicate columns
- check for invalid column names
- convert column names into indices
- add missing columns in a group of their own
See comments in code below for more details.
Parameters
----------
subplots : subplots parameters as passed to PlotAccessor
Returns
-------
validated subplots : a bool or a list of tuples of column indices. Columns
in the same tuple will be grouped together in the resulting plot.
"""
if isinstance(subplots, bool):
return subplots
elif not isinstance(subplots, Iterable):
raise ValueError("subplots should be a bool or an iterable")
supported_kinds = (
"line",
"bar",
"barh",
"hist",
"kde",
"density",
"area",
"pie",
)
if kind not in supported_kinds:
raise ValueError(
"When subplots is an iterable, kind must be "
f"one of {', '.join(supported_kinds)}. Got {kind}."
)
if isinstance(data, ABCSeries):
raise NotImplementedError(
"An iterable subplots for a Series is not supported."
)
columns = data.columns
if isinstance(columns, ABCMultiIndex):
raise NotImplementedError(
"An iterable subplots for a DataFrame with a MultiIndex column "
"is not supported."
)
if columns.nunique() != len(columns):
raise NotImplementedError(
"An iterable subplots for a DataFrame with non-unique column "
"labels is not supported."
)
# subplots is a list of tuples where each tuple is a group of
# columns to be grouped together (one ax per group).
# we consolidate the subplots list such that:
# - the tuples contain indices instead of column names
# - the columns that aren't yet in the list are added in a group
# of their own.
# For example with columns from a to g, and
# subplots = [(a, c), (b, f, e)],
# we end up with [(ai, ci), (bi, fi, ei), (di,), (gi,)]
# This way, we can handle self.subplots in a homogeneous manner
# later.
# TODO: also accept indices instead of just names?
out = []
seen_columns: set[Hashable] = set()
for group in subplots:
if not is_list_like(group):
raise ValueError(
"When subplots is an iterable, each entry "
"should be a list/tuple of column names."
)
idx_locs = columns.get_indexer_for(group)
if (idx_locs == -1).any():
bad_labels = np.extract(idx_locs == -1, group)
raise ValueError(
f"Column label(s) {list(bad_labels)} not found in the DataFrame."
)
unique_columns = set(group)
duplicates = seen_columns.intersection(unique_columns)
if duplicates:
raise ValueError(
"Each column should be in only one subplot. "
f"Columns {duplicates} were found in multiple subplots."
)
seen_columns = seen_columns.union(unique_columns)
out.append(tuple(idx_locs))
unseen_columns = columns.difference(seen_columns)
for column in unseen_columns:
idx_loc = columns.get_loc(column)
out.append((idx_loc,))
return out
|
Validate the subplots parameter
- check type and content
- check for duplicate columns
- check for invalid column names
- convert column names into indices
- add missing columns in a group of their own
See comments in code below for more details.
Parameters
----------
subplots : subplots parameters as passed to PlotAccessor
Returns
-------
validated subplots : a bool or a list of tuples of column indices. Columns
in the same tuple will be grouped together in the resulting plot.
|
python
|
pandas/plotting/_matplotlib/core.py
| 340
|
[
"subplots",
"data",
"kind"
] |
bool | list[tuple[int, ...]]
| true
| 12
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
run
|
final void run(PrintStream out, Deque<String> args) {
List<String> parameters = new ArrayList<>();
Map<Option, @Nullable String> options = new HashMap<>();
while (!args.isEmpty()) {
String arg = args.removeFirst();
Option option = this.options.find(arg);
if (option != null) {
options.put(option, option.claimArg(args));
}
else {
parameters.add(arg);
}
}
run(out, options, parameters);
}
|
Run the command by processing the remaining arguments.
@param out stream for command output
@param args a mutable deque of the remaining arguments
|
java
|
loader/spring-boot-jarmode-tools/src/main/java/org/springframework/boot/jarmode/tools/Command.java
| 101
|
[
"out",
"args"
] |
void
| true
| 3
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
Code2Doc: Function-Documentation Pairs Dataset
A curated dataset of 13,358 high-quality function-documentation pairs extracted from popular open-source repositories on GitHub. Designed for training models to generate documentation from code.
Dataset Description
This dataset contains functions paired with their docstrings/documentation comments from 5 programming languages, extracted from well-maintained, highly-starred GitHub repositories.
Languages Distribution
| Language | Train | Val | Test | Total |
|---|---|---|---|---|
| Java | 6,560 (61.4%) | 820 | 820 | 8,200 |
| Python | 2,885 (27.0%) | 360 | 362 | 3,607 |
| TypeScript | 681 (6.4%) | 85 | 86 | 852 |
| JavaScript | 428 (4.0%) | 53 | 55 | 536 |
| C++ | 130 (1.2%) | 16 | 17 | 163 |
| Total | 10,684 | 1,334 | 1,340 | 13,358 |
Source Repositories
The data was extracted from high-quality open-source projects including:
Python: Django, PyTorch, Pandas, NumPy, scikit-learn, FastAPI, Flask, Celery, Airflow, Requests
Java: Guava, Elasticsearch, Spring Framework, Spring Boot, Apache Kafka, Commons-Lang
TypeScript: TypeScript, VS Code, Angular, Prisma, Grafana, Storybook, NestJS
JavaScript: React, Node.js, Lodash, Axios, Express
C++: OpenCV, Protobuf, Folly, gRPC, LLVM, TensorFlow
Dataset Structure
Data Fields
| Field | Type | Description |
|---|---|---|
function_name |
string | Name of the function/method |
function_code |
string | Complete source code of the function |
documentation |
string | Extracted docstring/documentation |
language |
string | Programming language |
file_path |
string | Original file path in repository |
line_number |
int | Line number where function starts |
parameters |
list[string] | List of parameter names |
return_type |
string | Return type annotation (if available) |
has_type_hints |
bool | Whether function has type annotations |
complexity |
int | Cyclomatic complexity score |
quality_score |
float | Documentation quality score (0-10) |
repo_name |
string | Source repository (owner/repo) |
repo_stars |
int | Repository star count at extraction time |
docstring_style |
string | Documentation style (google, numpy, sphinx, jsdoc, javadoc, doxygen) |
is_async |
bool | Whether function is async |
Data Splits
- Train: 10,684 samples (80%)
- Validation: 1,334 samples (10%)
- Test: 1,340 samples (10%)
Splits are stratified by language to maintain consistent distribution across sets.
Data Processing Pipeline
The dataset was created through a multi-stage pipeline:
- Extraction: Used tree-sitter parsers to accurately extract functions with documentation
- Basic Filtering: Removed test functions, trivial functions, and applied length constraints
- Quality Scoring: Scored documentation completeness (parameters, returns, examples)
- Deduplication: Removed exact and near-duplicates using MinHash LSH
- AI Detection: Filtered potentially AI-generated documentation
Quality Criteria
- Minimum documentation length: 20 characters
- Maximum documentation length: 10,000 characters
- Minimum code length: 50 characters
- Excluded test functions and trivial getters/setters
- Required meaningful documentation structure
Usage
from datasets import load_dataset
dataset = load_dataset("kaanrkaraman/code2doc")
# Access splits
train_data = dataset["train"]
val_data = dataset["val"]
test_data = dataset["test"]
# Example: Get a Python function
python_samples = train_data.filter(lambda x: x["language"] == "python")
sample = python_samples[0]
print(f"Function: {sample['function_name']}")
print(f"Code:\n{sample['function_code']}")
print(f"Documentation:\n{sample['documentation']}")
For Fine-tuning
def format_for_training(example):
return {
"input": f"Generate documentation for the following {example['language']} function:\n\n{example['function_code']}",
"output": example["documentation"]
}
formatted_dataset = dataset.map(format_for_training)
Intended Use
- Training code documentation generation models
- Fine-tuning LLMs for code-to-text tasks
- Evaluating documentation quality metrics
- Research on code understanding and generation
Limitations
- Heavily weighted towards Java due to verbose documentation practices
- C++ representation is small due to different documentation conventions
- Documentation quality varies by repository coding standards
- Extracted from a specific snapshot in time (December 2025)
Citation
@misc{recep_kaan_karaman_2025,
author = {Recep Kaan Karaman and Meftun Akarsu},
title = {code2doc (Revision cadd4e4)},
year = 2025,
url = {https://huggingface.co/datasets/kaanrkaraman/code2doc},
doi = {10.57967/hf/7310},
publisher = {Hugging Face}
}
License
This dataset is released under the CC BY 4.0 License. The source code comes from repositories with permissive licenses (MIT, Apache 2.0, BSD).
- Downloads last month
- 19